Merge V8 5.5.372.32

Test: Manual, built and ran D8

Change-Id: I831a5491f74342c2675bb6fe1e24a2258e493758
diff --git a/src/address-map.cc b/src/address-map.cc
index 61292bf..3122b33 100644
--- a/src/address-map.cc
+++ b/src/address-map.cc
@@ -13,7 +13,7 @@
 RootIndexMap::RootIndexMap(Isolate* isolate) {
   map_ = isolate->root_index_map();
   if (map_ != NULL) return;
-  map_ = new base::HashMap(base::HashMap::PointersMatch);
+  map_ = new base::HashMap();
   for (uint32_t i = 0; i < Heap::kStrongRootListLength; i++) {
     Heap::RootListIndex root_index = static_cast<Heap::RootListIndex>(i);
     Object* root = isolate->heap()->root(root_index);
diff --git a/src/address-map.h b/src/address-map.h
index 0ce93d2..95e9cb0 100644
--- a/src/address-map.h
+++ b/src/address-map.h
@@ -189,9 +189,7 @@
 class SerializerReferenceMap : public AddressMapBase {
  public:
   SerializerReferenceMap()
-      : no_allocation_(),
-        map_(base::HashMap::PointersMatch),
-        attached_reference_index_(0) {}
+      : no_allocation_(), map_(), attached_reference_index_(0) {}
 
   SerializerReference Lookup(HeapObject* obj) {
     base::HashMap::Entry* entry = LookupEntry(&map_, obj, false);
diff --git a/src/allocation.h b/src/allocation.h
index 8581cc9..a92b71f 100644
--- a/src/allocation.h
+++ b/src/allocation.h
@@ -13,10 +13,10 @@
 // Called when allocation routines fail to allocate.
 // This function should not return, but should terminate the current
 // processing.
-void FatalProcessOutOfMemory(const char* message);
+V8_EXPORT_PRIVATE void FatalProcessOutOfMemory(const char* message);
 
 // Superclass for classes managed with new & delete.
-class Malloced {
+class V8_EXPORT_PRIVATE Malloced {
  public:
   void* operator new(size_t size) { return New(size); }
   void  operator delete(void* p) { Delete(p); }
@@ -72,7 +72,7 @@
 // The normal strdup functions use malloc.  These versions of StrDup
 // and StrNDup uses new and calls the FatalProcessOutOfMemory handler
 // if allocation fails.
-char* StrDup(const char* str);
+V8_EXPORT_PRIVATE char* StrDup(const char* str);
 char* StrNDup(const char* str, int n);
 
 
diff --git a/src/api-arguments-inl.h b/src/api-arguments-inl.h
index eefdf35..bf72fc4 100644
--- a/src/api-arguments-inl.h
+++ b/src/api-arguments-inl.h
@@ -20,8 +20,6 @@
                                                          Handle<Name> name) { \
     Isolate* isolate = this->isolate();                                       \
     RuntimeCallTimerScope timer(isolate, &RuntimeCallStats::Function);        \
-    TRACE_EVENT_RUNTIME_CALL_STATS_TRACING_SCOPED(                            \
-        isolate, &tracing::TraceEventStatsTable::Function);                   \
     VMState<EXTERNAL> state(isolate);                                         \
     ExternalCallbackScope call_scope(isolate, FUNCTION_ADDR(f));              \
     PropertyCallbackInfo<ApiReturn> info(begin());                            \
@@ -46,8 +44,6 @@
                                                          uint32_t index) { \
     Isolate* isolate = this->isolate();                                    \
     RuntimeCallTimerScope timer(isolate, &RuntimeCallStats::Function);     \
-    TRACE_EVENT_RUNTIME_CALL_STATS_TRACING_SCOPED(                         \
-        isolate, &tracing::TraceEventStatsTable::Function);                \
     VMState<EXTERNAL> state(isolate);                                      \
     ExternalCallbackScope call_scope(isolate, FUNCTION_ADDR(f));           \
     PropertyCallbackInfo<ApiReturn> info(begin());                         \
@@ -68,9 +64,6 @@
   Isolate* isolate = this->isolate();
   RuntimeCallTimerScope timer(
       isolate, &RuntimeCallStats::GenericNamedPropertySetterCallback);
-  TRACE_EVENT_RUNTIME_CALL_STATS_TRACING_SCOPED(
-      isolate,
-      &tracing::TraceEventStatsTable::GenericNamedPropertySetterCallback);
   VMState<EXTERNAL> state(isolate);
   ExternalCallbackScope call_scope(isolate, FUNCTION_ADDR(f));
   PropertyCallbackInfo<v8::Value> info(begin());
@@ -80,14 +73,27 @@
   return GetReturnValue<Object>(isolate);
 }
 
+Handle<Object> PropertyCallbackArguments::Call(
+    GenericNamedPropertyDefinerCallback f, Handle<Name> name,
+    const v8::PropertyDescriptor& desc) {
+  Isolate* isolate = this->isolate();
+  RuntimeCallTimerScope timer(
+      isolate, &RuntimeCallStats::GenericNamedPropertyDefinerCallback);
+  VMState<EXTERNAL> state(isolate);
+  ExternalCallbackScope call_scope(isolate, FUNCTION_ADDR(f));
+  PropertyCallbackInfo<v8::Value> info(begin());
+  LOG(isolate,
+      ApiNamedPropertyAccess("interceptor-named-define", holder(), *name));
+  f(v8::Utils::ToLocal(name), desc, info);
+  return GetReturnValue<Object>(isolate);
+}
+
 Handle<Object> PropertyCallbackArguments::Call(IndexedPropertySetterCallback f,
                                                uint32_t index,
                                                Handle<Object> value) {
   Isolate* isolate = this->isolate();
   RuntimeCallTimerScope timer(isolate,
                               &RuntimeCallStats::IndexedPropertySetterCallback);
-  TRACE_EVENT_RUNTIME_CALL_STATS_TRACING_SCOPED(
-      isolate, &tracing::TraceEventStatsTable::IndexedPropertySetterCallback);
   VMState<EXTERNAL> state(isolate);
   ExternalCallbackScope call_scope(isolate, FUNCTION_ADDR(f));
   PropertyCallbackInfo<v8::Value> info(begin());
@@ -97,13 +103,26 @@
   return GetReturnValue<Object>(isolate);
 }
 
+Handle<Object> PropertyCallbackArguments::Call(
+    IndexedPropertyDefinerCallback f, uint32_t index,
+    const v8::PropertyDescriptor& desc) {
+  Isolate* isolate = this->isolate();
+  RuntimeCallTimerScope timer(
+      isolate, &RuntimeCallStats::IndexedPropertyDefinerCallback);
+  VMState<EXTERNAL> state(isolate);
+  ExternalCallbackScope call_scope(isolate, FUNCTION_ADDR(f));
+  PropertyCallbackInfo<v8::Value> info(begin());
+  LOG(isolate,
+      ApiIndexedPropertyAccess("interceptor-indexed-define", holder(), index));
+  f(index, desc, info);
+  return GetReturnValue<Object>(isolate);
+}
+
 void PropertyCallbackArguments::Call(AccessorNameSetterCallback f,
                                      Handle<Name> name, Handle<Object> value) {
   Isolate* isolate = this->isolate();
   RuntimeCallTimerScope timer(isolate,
                               &RuntimeCallStats::AccessorNameSetterCallback);
-  TRACE_EVENT_RUNTIME_CALL_STATS_TRACING_SCOPED(
-      isolate, &tracing::TraceEventStatsTable::AccessorNameSetterCallback);
   VMState<EXTERNAL> state(isolate);
   ExternalCallbackScope call_scope(isolate, FUNCTION_ADDR(f));
   PropertyCallbackInfo<void> info(begin());
diff --git a/src/api-arguments.cc b/src/api-arguments.cc
index 6e347c7..f8d6c8f 100644
--- a/src/api-arguments.cc
+++ b/src/api-arguments.cc
@@ -13,8 +13,6 @@
 Handle<Object> FunctionCallbackArguments::Call(FunctionCallback f) {
   Isolate* isolate = this->isolate();
   RuntimeCallTimerScope timer(isolate, &RuntimeCallStats::FunctionCallback);
-  TRACE_EVENT_RUNTIME_CALL_STATS_TRACING_SCOPED(
-      isolate, &internal::tracing::TraceEventStatsTable::FunctionCallback);
   VMState<EXTERNAL> state(isolate);
   ExternalCallbackScope call_scope(isolate, FUNCTION_ADDR(f));
   FunctionCallbackInfo<v8::Value> info(begin(), argv_, argc_);
@@ -26,8 +24,6 @@
     IndexedPropertyEnumeratorCallback f) {
   Isolate* isolate = this->isolate();
   RuntimeCallTimerScope timer(isolate, &RuntimeCallStats::PropertyCallback);
-  TRACE_EVENT_RUNTIME_CALL_STATS_TRACING_SCOPED(
-      isolate, &internal::tracing::TraceEventStatsTable::PropertyCallback);
   VMState<EXTERNAL> state(isolate);
   ExternalCallbackScope call_scope(isolate, FUNCTION_ADDR(f));
   PropertyCallbackInfo<v8::Array> info(begin());
diff --git a/src/api-arguments.h b/src/api-arguments.h
index 0dfe618..9e01f3a 100644
--- a/src/api-arguments.h
+++ b/src/api-arguments.h
@@ -119,9 +119,16 @@
   inline Handle<Object> Call(GenericNamedPropertySetterCallback f,
                              Handle<Name> name, Handle<Object> value);
 
+  inline Handle<Object> Call(GenericNamedPropertyDefinerCallback f,
+                             Handle<Name> name,
+                             const v8::PropertyDescriptor& desc);
+
   inline Handle<Object> Call(IndexedPropertySetterCallback f, uint32_t index,
                              Handle<Object> value);
 
+  inline Handle<Object> Call(IndexedPropertyDefinerCallback f, uint32_t index,
+                             const v8::PropertyDescriptor& desc);
+
   inline void Call(AccessorNameSetterCallback f, Handle<Name> name,
                    Handle<Object> value);
 
diff --git a/src/api-natives.cc b/src/api-natives.cc
index 0f3c3b6..ea2cce5 100644
--- a/src/api-natives.cc
+++ b/src/api-natives.cc
@@ -17,42 +17,39 @@
 
 class InvokeScope {
  public:
-  explicit InvokeScope(Isolate* isolate) : save_context_(isolate) {}
+  explicit InvokeScope(Isolate* isolate)
+      : isolate_(isolate), save_context_(isolate) {}
   ~InvokeScope() {
-    Isolate* isolate = save_context_.isolate();
-    bool has_exception = isolate->has_pending_exception();
+    bool has_exception = isolate_->has_pending_exception();
     if (has_exception) {
-      isolate->ReportPendingMessages();
+      isolate_->ReportPendingMessages();
     } else {
-      isolate->clear_pending_message();
+      isolate_->clear_pending_message();
     }
   }
 
  private:
+  Isolate* isolate_;
   SaveContext save_context_;
 };
 
-enum class CacheCheck { kCheck, kSkip };
+MaybeHandle<JSObject> InstantiateObject(Isolate* isolate,
+                                        Handle<ObjectTemplateInfo> data,
+                                        Handle<JSReceiver> new_target,
+                                        bool is_hidden_prototype);
 
-MaybeHandle<JSObject> InstantiateObject(
-    Isolate* isolate, Handle<ObjectTemplateInfo> data,
-    Handle<JSReceiver> new_target, CacheCheck cache_check = CacheCheck::kCheck,
-    bool is_hidden_prototype = false);
-
-MaybeHandle<JSFunction> InstantiateFunction(
-    Isolate* isolate, Handle<FunctionTemplateInfo> data,
-    CacheCheck cache_check = CacheCheck::kCheck,
-    Handle<Name> name = Handle<Name>());
+MaybeHandle<JSFunction> InstantiateFunction(Isolate* isolate,
+                                            Handle<FunctionTemplateInfo> data,
+                                            Handle<Name> name = Handle<Name>());
 
 MaybeHandle<Object> Instantiate(Isolate* isolate, Handle<Object> data,
                                 Handle<Name> name = Handle<Name>()) {
   if (data->IsFunctionTemplateInfo()) {
     return InstantiateFunction(isolate,
-                               Handle<FunctionTemplateInfo>::cast(data),
-                               CacheCheck::kCheck, name);
+                               Handle<FunctionTemplateInfo>::cast(data), name);
   } else if (data->IsObjectTemplateInfo()) {
     return InstantiateObject(isolate, Handle<ObjectTemplateInfo>::cast(data),
-                             Handle<JSReceiver>());
+                             Handle<JSReceiver>(), false);
   } else {
     return data;
   }
@@ -199,15 +196,14 @@
     Handle<FixedArray> array =
         isolate->factory()->NewFixedArray(max_number_of_properties);
 
-    info = *data;
-    while (info != nullptr) {
+    for (Handle<TemplateInfoT> temp(*data); *temp != nullptr;
+         temp = handle(temp->GetParent(isolate), isolate)) {
       // Accumulate accessors.
-      Object* maybe_properties = info->property_accessors();
+      Object* maybe_properties = temp->property_accessors();
       if (!maybe_properties->IsUndefined(isolate)) {
         valid_descriptors = AccessorInfo::AppendUnique(
             handle(maybe_properties, isolate), array, valid_descriptors);
       }
-      info = info->GetParent(isolate);
     }
 
     // Install accumulated accessors.
@@ -339,17 +335,9 @@
   return fun->context()->native_context() == isolate->raw_native_context();
 }
 
-MaybeHandle<JSObject> InstantiateObjectWithInvokeScope(
-    Isolate* isolate, Handle<ObjectTemplateInfo> info,
-    Handle<JSReceiver> new_target) {
-  InvokeScope invoke_scope(isolate);
-  return InstantiateObject(isolate, info, new_target, CacheCheck::kSkip);
-}
-
 MaybeHandle<JSObject> InstantiateObject(Isolate* isolate,
                                         Handle<ObjectTemplateInfo> info,
                                         Handle<JSReceiver> new_target,
-                                        CacheCheck cache_check,
                                         bool is_hidden_prototype) {
   Handle<JSFunction> constructor;
   int serial_number = Smi::cast(info->serial_number())->value();
@@ -363,7 +351,7 @@
   }
   // Fast path.
   Handle<JSObject> result;
-  if (serial_number && cache_check == CacheCheck::kCheck) {
+  if (serial_number) {
     if (ProbeInstantiationsCache(isolate, serial_number).ToHandle(&result)) {
       return isolate->factory()->CopyJSObject(result);
     }
@@ -397,6 +385,7 @@
   if (info->immutable_proto()) {
     JSObject::SetImmutableProto(object);
   }
+  // TODO(dcarney): is this necessary?
   JSObject::MigrateSlowToFast(result, 0, "ApiNatives::InstantiateObject");
 
   if (serial_number) {
@@ -406,18 +395,12 @@
   return result;
 }
 
-MaybeHandle<JSFunction> InstantiateFunctionWithInvokeScope(
-    Isolate* isolate, Handle<FunctionTemplateInfo> info) {
-  InvokeScope invoke_scope(isolate);
-  return InstantiateFunction(isolate, info, CacheCheck::kSkip);
-}
 
 MaybeHandle<JSFunction> InstantiateFunction(Isolate* isolate,
                                             Handle<FunctionTemplateInfo> data,
-                                            CacheCheck cache_check,
                                             Handle<Name> name) {
   int serial_number = Smi::cast(data->serial_number())->value();
-  if (serial_number && cache_check == CacheCheck::kCheck) {
+  if (serial_number) {
     Handle<JSObject> result;
     if (ProbeInstantiationsCache(isolate, serial_number).ToHandle(&result)) {
       return Handle<JSFunction>::cast(result);
@@ -434,8 +417,7 @@
           InstantiateObject(
               isolate,
               handle(ObjectTemplateInfo::cast(prototype_templ), isolate),
-              Handle<JSReceiver>(), CacheCheck::kCheck,
-              data->hidden_prototype()),
+              Handle<JSReceiver>(), data->hidden_prototype()),
           JSFunction);
     }
     Object* parent = data->parent_template();
@@ -505,31 +487,17 @@
 }  // namespace
 
 MaybeHandle<JSFunction> ApiNatives::InstantiateFunction(
-    Handle<FunctionTemplateInfo> info) {
-  Isolate* isolate = info->GetIsolate();
-  int serial_number = Smi::cast(info->serial_number())->value();
-  if (serial_number) {
-    Handle<JSObject> result;
-    if (ProbeInstantiationsCache(isolate, serial_number).ToHandle(&result)) {
-      return Handle<JSFunction>::cast(result);
-    }
-  }
-  return InstantiateFunctionWithInvokeScope(isolate, info);
+    Handle<FunctionTemplateInfo> data) {
+  Isolate* isolate = data->GetIsolate();
+  InvokeScope invoke_scope(isolate);
+  return ::v8::internal::InstantiateFunction(isolate, data);
 }
 
 MaybeHandle<JSObject> ApiNatives::InstantiateObject(
-    Handle<ObjectTemplateInfo> info, Handle<JSReceiver> new_target) {
-  Isolate* isolate = info->GetIsolate();
-  int serial_number = Smi::cast(info->serial_number())->value();
-  if (serial_number && !new_target.is_null() &&
-      IsSimpleInstantiation(isolate, *info, *new_target)) {
-    // Fast path.
-    Handle<JSObject> result;
-    if (ProbeInstantiationsCache(isolate, serial_number).ToHandle(&result)) {
-      return isolate->factory()->CopyJSObject(result);
-    }
-  }
-  return InstantiateObjectWithInvokeScope(isolate, info, new_target);
+    Handle<ObjectTemplateInfo> data, Handle<JSReceiver> new_target) {
+  Isolate* isolate = data->GetIsolate();
+  InvokeScope invoke_scope(isolate);
+  return ::v8::internal::InstantiateObject(isolate, data, new_target, false);
 }
 
 MaybeHandle<JSObject> ApiNatives::InstantiateRemoteObject(
diff --git a/src/api.cc b/src/api.cc
index 6858a32..44933b9 100644
--- a/src/api.cc
+++ b/src/api.cc
@@ -24,6 +24,7 @@
 #include "src/base/functional.h"
 #include "src/base/platform/platform.h"
 #include "src/base/platform/time.h"
+#include "src/base/safe_conversions.h"
 #include "src/base/utils/random-number-generator.h"
 #include "src/bootstrapper.h"
 #include "src/char-predicates-inl.h"
@@ -68,6 +69,7 @@
 #include "src/unicode-inl.h"
 #include "src/v8.h"
 #include "src/v8threads.h"
+#include "src/value-serializer.h"
 #include "src/version.h"
 #include "src/vm-state-inl.h"
 #include "src/wasm/wasm-module.h"
@@ -77,9 +79,6 @@
 #define LOG_API(isolate, class_name, function_name)                       \
   i::RuntimeCallTimerScope _runtime_timer(                                \
       isolate, &i::RuntimeCallStats::API_##class_name##_##function_name); \
-  TRACE_EVENT_RUNTIME_CALL_STATS_TRACING_SCOPED(                          \
-      isolate, &internal::tracing::TraceEventStatsTable::                 \
-                   API_##class_name##_##function_name);                   \
   LOG(isolate, ApiEntryCall("v8::" #class_name "::" #function_name))
 
 #define ENTER_V8(isolate) i::VMState<v8::OTHER> __state__((isolate))
@@ -105,6 +104,16 @@
   PREPARE_FOR_EXECUTION_GENERIC(isolate, context, class_name, function_name,   \
                                 bailout_value, HandleScopeClass, do_callback);
 
+#define PREPARE_FOR_EXECUTION_WITH_CONTEXT_IN_RUNTIME_CALL_STATS_SCOPE(      \
+    category, name, context, class_name, function_name, bailout_value,       \
+    HandleScopeClass, do_callback)                                           \
+  auto isolate = context.IsEmpty()                                           \
+                     ? i::Isolate::Current()                                 \
+                     : reinterpret_cast<i::Isolate*>(context->GetIsolate()); \
+  TRACE_EVENT_CALL_STATS_SCOPED(isolate, category, name);                    \
+  PREPARE_FOR_EXECUTION_GENERIC(isolate, context, class_name, function_name, \
+                                bailout_value, HandleScopeClass, do_callback);
+
 #define PREPARE_FOR_EXECUTION_WITH_ISOLATE(isolate, class_name, function_name, \
                                            T)                                  \
   PREPARE_FOR_EXECUTION_GENERIC(isolate, Local<Context>(), class_name,         \
@@ -126,6 +135,10 @@
   PREPARE_FOR_EXECUTION_WITH_CONTEXT(context, class_name, function_name,       \
                                      Nothing<T>(), i::HandleScope, false)
 
+#define PREPARE_FOR_EXECUTION_BOOL(context, class_name, function_name)   \
+  PREPARE_FOR_EXECUTION_WITH_CONTEXT(context, class_name, function_name, \
+                                     false, i::HandleScope, false)
+
 #define EXCEPTION_BAILOUT_CHECK_SCOPED(isolate, value) \
   do {                                                 \
     if (has_pending_exception) {                       \
@@ -142,6 +155,8 @@
 #define RETURN_ON_FAILED_EXECUTION_PRIMITIVE(T) \
   EXCEPTION_BAILOUT_CHECK_SCOPED(isolate, Nothing<T>())
 
+#define RETURN_ON_FAILED_EXECUTION_BOOL() \
+  EXCEPTION_BAILOUT_CHECK_SCOPED(isolate, false)
 
 #define RETURN_TO_LOCAL_UNCHECKED(maybe_local, T) \
   return maybe_local.FromMaybe(Local<T>());
@@ -513,7 +528,8 @@
 
   // If we don't do this then we end up with a stray root pointing at the
   // context even after we have disposed of the context.
-  isolate->heap()->CollectAllAvailableGarbage("mksnapshot");
+  isolate->heap()->CollectAllAvailableGarbage(
+      i::GarbageCollectionReason::kSnapshotCreator);
   isolate->heap()->CompactWeakFixedArrays();
 
   i::DisallowHeapAllocation no_gc_from_here_on;
@@ -770,11 +786,6 @@
   return result.location();
 }
 
-void V8::RegisterExternallyReferencedObject(i::Object** object,
-                                            i::Isolate* isolate) {
-  isolate->heap()->RegisterExternallyReferencedObject(object);
-}
-
 void V8::MakeWeak(i::Object** location, void* parameter,
                   int internal_field_index1, int internal_field_index2,
                   WeakCallbackInfo<void>::Callback weak_callback) {
@@ -1503,12 +1514,17 @@
                       signature, i::FLAG_disable_old_api_accessors);
 }
 
-template <typename Getter, typename Setter, typename Query, typename Deleter,
-          typename Enumerator>
+template <typename Getter, typename Setter, typename Query, typename Descriptor,
+          typename Deleter, typename Enumerator, typename Definer>
 static i::Handle<i::InterceptorInfo> CreateInterceptorInfo(
     i::Isolate* isolate, Getter getter, Setter setter, Query query,
-    Deleter remover, Enumerator enumerator, Local<Value> data,
-    PropertyHandlerFlags flags) {
+    Descriptor descriptor, Deleter remover, Enumerator enumerator,
+    Definer definer, Local<Value> data, PropertyHandlerFlags flags) {
+  DCHECK(query == nullptr ||
+         descriptor == nullptr);  // Either intercept attributes or descriptor.
+  DCHECK(query == nullptr ||
+         definer ==
+             nullptr);  // Only use descriptor callback with definer callback.
   auto obj = i::Handle<i::InterceptorInfo>::cast(
       isolate->factory()->NewStruct(i::INTERCEPTOR_INFO_TYPE));
   obj->set_flags(0);
@@ -1516,8 +1532,10 @@
   if (getter != 0) SET_FIELD_WRAPPED(obj, set_getter, getter);
   if (setter != 0) SET_FIELD_WRAPPED(obj, set_setter, setter);
   if (query != 0) SET_FIELD_WRAPPED(obj, set_query, query);
+  if (descriptor != 0) SET_FIELD_WRAPPED(obj, set_descriptor, descriptor);
   if (remover != 0) SET_FIELD_WRAPPED(obj, set_deleter, remover);
   if (enumerator != 0) SET_FIELD_WRAPPED(obj, set_enumerator, enumerator);
+  if (definer != 0) SET_FIELD_WRAPPED(obj, set_definer, definer);
   obj->set_can_intercept_symbols(
       !(static_cast<int>(flags) &
         static_cast<int>(PropertyHandlerFlags::kOnlyInterceptStrings)));
@@ -1533,40 +1551,37 @@
   return obj;
 }
 
-template <typename Getter, typename Setter, typename Query, typename Deleter,
-          typename Enumerator>
-static void ObjectTemplateSetNamedPropertyHandler(ObjectTemplate* templ,
-                                                  Getter getter, Setter setter,
-                                                  Query query, Deleter remover,
-                                                  Enumerator enumerator,
-                                                  Local<Value> data,
-                                                  PropertyHandlerFlags flags) {
+template <typename Getter, typename Setter, typename Query, typename Descriptor,
+          typename Deleter, typename Enumerator, typename Definer>
+static void ObjectTemplateSetNamedPropertyHandler(
+    ObjectTemplate* templ, Getter getter, Setter setter, Query query,
+    Descriptor descriptor, Deleter remover, Enumerator enumerator,
+    Definer definer, Local<Value> data, PropertyHandlerFlags flags) {
   i::Isolate* isolate = Utils::OpenHandle(templ)->GetIsolate();
   ENTER_V8(isolate);
   i::HandleScope scope(isolate);
   auto cons = EnsureConstructor(isolate, templ);
   EnsureNotInstantiated(cons, "ObjectTemplateSetNamedPropertyHandler");
-  auto obj = CreateInterceptorInfo(isolate, getter, setter, query, remover,
-                                   enumerator, data, flags);
+  auto obj = CreateInterceptorInfo(isolate, getter, setter, query, descriptor,
+                                   remover, enumerator, definer, data, flags);
   cons->set_named_property_handler(*obj);
 }
 
-
 void ObjectTemplate::SetNamedPropertyHandler(
     NamedPropertyGetterCallback getter, NamedPropertySetterCallback setter,
     NamedPropertyQueryCallback query, NamedPropertyDeleterCallback remover,
     NamedPropertyEnumeratorCallback enumerator, Local<Value> data) {
   ObjectTemplateSetNamedPropertyHandler(
-      this, getter, setter, query, remover, enumerator, data,
+      this, getter, setter, query, nullptr, remover, enumerator, nullptr, data,
       PropertyHandlerFlags::kOnlyInterceptStrings);
 }
 
-
 void ObjectTemplate::SetHandler(
     const NamedPropertyHandlerConfiguration& config) {
   ObjectTemplateSetNamedPropertyHandler(
-      this, config.getter, config.setter, config.query, config.deleter,
-      config.enumerator, config.data, config.flags);
+      this, config.getter, config.setter, config.query, config.descriptor,
+      config.deleter, config.enumerator, config.definer, config.data,
+      config.flags);
 }
 
 
@@ -1626,13 +1641,14 @@
   SET_FIELD_WRAPPED(info, set_callback, callback);
   auto named_interceptor = CreateInterceptorInfo(
       isolate, named_handler.getter, named_handler.setter, named_handler.query,
-      named_handler.deleter, named_handler.enumerator, named_handler.data,
-      named_handler.flags);
+      named_handler.descriptor, named_handler.deleter, named_handler.enumerator,
+      named_handler.definer, named_handler.data, named_handler.flags);
   info->set_named_interceptor(*named_interceptor);
   auto indexed_interceptor = CreateInterceptorInfo(
       isolate, indexed_handler.getter, indexed_handler.setter,
-      indexed_handler.query, indexed_handler.deleter,
-      indexed_handler.enumerator, indexed_handler.data, indexed_handler.flags);
+      indexed_handler.query, indexed_handler.descriptor,
+      indexed_handler.deleter, indexed_handler.enumerator,
+      indexed_handler.definer, indexed_handler.data, indexed_handler.flags);
   info->set_indexed_interceptor(*indexed_interceptor);
 
   if (data.IsEmpty()) {
@@ -1651,9 +1667,10 @@
   i::HandleScope scope(isolate);
   auto cons = EnsureConstructor(isolate, this);
   EnsureNotInstantiated(cons, "v8::ObjectTemplate::SetHandler");
-  auto obj = CreateInterceptorInfo(
-      isolate, config.getter, config.setter, config.query, config.deleter,
-      config.enumerator, config.data, config.flags);
+  auto obj = CreateInterceptorInfo(isolate, config.getter, config.setter,
+                                   config.query, config.descriptor,
+                                   config.deleter, config.enumerator,
+                                   config.definer, config.data, config.flags);
   cons->set_indexed_property_handler(*obj);
 }
 
@@ -1834,17 +1851,19 @@
 
 
 MaybeLocal<Value> Script::Run(Local<Context> context) {
-  PREPARE_FOR_EXECUTION_WITH_CALLBACK(context, Script, Run, Value)
+  PREPARE_FOR_EXECUTION_WITH_CONTEXT_IN_RUNTIME_CALL_STATS_SCOPE(
+      "v8", "V8.Execute", context, Script, Run, MaybeLocal<Value>(),
+      InternalEscapableScope, true);
   i::HistogramTimerScope execute_timer(isolate->counters()->execute(), true);
   i::AggregatingHistogramTimerScope timer(isolate->counters()->compile_lazy());
   i::TimerEventScope<i::TimerEventExecute> timer_scope(isolate);
-  TRACE_EVENT_CALL_STATS_SCOPED(isolate, "v8", "V8.Execute");
   auto fun = i::Handle<i::JSFunction>::cast(Utils::OpenHandle(this));
+
   i::Handle<i::Object> receiver = isolate->global_proxy();
   Local<Value> result;
-  has_pending_exception =
-      !ToLocal<Value>(i::Execution::Call(isolate, fun, receiver, 0, NULL),
-                      &result);
+  has_pending_exception = !ToLocal<Value>(
+      i::Execution::Call(isolate, fun, receiver, 0, nullptr), &result);
+
   RETURN_ON_FAILED_EXECUTION(Value);
   RETURN_ESCAPED(result);
 }
@@ -1866,6 +1885,58 @@
       i::Handle<i::SharedFunctionInfo>(i::JSFunction::cast(*obj)->shared()));
 }
 
+int Module::GetModuleRequestsLength() const {
+  i::Handle<i::Module> self = Utils::OpenHandle(this);
+  return self->info()->module_requests()->length();
+}
+
+Local<String> Module::GetModuleRequest(int i) const {
+  CHECK_GE(i, 0);
+  i::Handle<i::Module> self = Utils::OpenHandle(this);
+  i::Isolate* isolate = self->GetIsolate();
+  i::Handle<i::FixedArray> module_requests(self->info()->module_requests(),
+                                           isolate);
+  CHECK_LT(i, module_requests->length());
+  return ToApiHandle<String>(i::handle(module_requests->get(i), isolate));
+}
+
+void Module::SetEmbedderData(Local<Value> data) {
+  Utils::OpenHandle(this)->set_embedder_data(*Utils::OpenHandle(*data));
+}
+
+Local<Value> Module::GetEmbedderData() const {
+  auto self = Utils::OpenHandle(this);
+  return ToApiHandle<Value>(
+      i::handle(self->embedder_data(), self->GetIsolate()));
+}
+
+bool Module::Instantiate(Local<Context> context,
+                         Module::ResolveCallback callback,
+                         Local<Value> callback_data) {
+  PREPARE_FOR_EXECUTION_BOOL(context, Module, Instantiate);
+  has_pending_exception = !i::Module::Instantiate(
+      Utils::OpenHandle(this), context, callback, callback_data);
+  RETURN_ON_FAILED_EXECUTION_BOOL();
+  return true;
+}
+
+MaybeLocal<Value> Module::Evaluate(Local<Context> context) {
+  PREPARE_FOR_EXECUTION_WITH_CONTEXT_IN_RUNTIME_CALL_STATS_SCOPE(
+      "v8", "V8.Execute", context, Module, Evaluate, MaybeLocal<Value>(),
+      InternalEscapableScope, true);
+  i::HistogramTimerScope execute_timer(isolate->counters()->execute(), true);
+  i::AggregatingHistogramTimerScope timer(isolate->counters()->compile_lazy());
+  i::TimerEventScope<i::TimerEventExecute> timer_scope(isolate);
+
+  i::Handle<i::Module> self = Utils::OpenHandle(this);
+  // It's an API error to call Evaluate before Instantiate.
+  CHECK(self->code()->IsJSFunction());
+
+  Local<Value> result;
+  has_pending_exception = !ToLocal(i::Module::Evaluate(self), &result);
+  RETURN_ON_FAILED_EXECUTION(Value);
+  RETURN_ESCAPED(result);
+}
 
 MaybeLocal<UnboundScript> ScriptCompiler::CompileUnboundInternal(
     Isolate* v8_isolate, Source* source, CompileOptions options,
@@ -1976,16 +2047,16 @@
   RETURN_TO_LOCAL_UNCHECKED(Compile(context, source, options), Script);
 }
 
+MaybeLocal<Module> ScriptCompiler::CompileModule(Isolate* isolate,
+                                                 Source* source) {
+  i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
 
-MaybeLocal<Script> ScriptCompiler::CompileModule(Local<Context> context,
-                                                 Source* source,
-                                                 CompileOptions options) {
-  auto isolate = context->GetIsolate();
-  auto maybe = CompileUnboundInternal(isolate, source, options, true);
-  Local<UnboundScript> generic;
-  if (!maybe.ToLocal(&generic)) return MaybeLocal<Script>();
-  v8::Context::Scope scope(context);
-  return generic->BindToCurrentContext();
+  auto maybe = CompileUnboundInternal(isolate, source, kNoCompileOptions, true);
+  Local<UnboundScript> unbound;
+  if (!maybe.ToLocal(&unbound)) return MaybeLocal<Module>();
+
+  i::Handle<i::SharedFunctionInfo> shared = Utils::OpenHandle(*unbound);
+  return ToApiHandle<Module>(i_isolate->factory()->NewModule(shared));
 }
 
 
@@ -2084,7 +2155,13 @@
         Utils::OpenHandle(*context_extensions[i]);
     if (!extension->IsJSObject()) return Local<Function>();
     i::Handle<i::JSFunction> closure(context->closure(), isolate);
-    context = factory->NewWithContext(closure, context, extension);
+    context = factory->NewWithContext(
+        closure, context,
+        i::ScopeInfo::CreateForWithScope(
+            isolate, context->IsNativeContext()
+                         ? i::Handle<i::ScopeInfo>::null()
+                         : i::Handle<i::ScopeInfo>(context->scope_info())),
+        extension);
   }
 
   i::Handle<i::Object> name_obj;
@@ -2138,6 +2215,9 @@
 
 ScriptCompiler::ScriptStreamingTask* ScriptCompiler::StartStreamingScript(
     Isolate* v8_isolate, StreamedSource* source, CompileOptions options) {
+  if (!i::FLAG_script_streaming) {
+    return nullptr;
+  }
   i::Isolate* isolate = reinterpret_cast<i::Isolate*>(v8_isolate);
   return new i::BackgroundParsingTask(source->impl(), options,
                                       i::FLAG_stack_size, isolate);
@@ -2171,17 +2251,19 @@
   }
 
   source->info->set_script(script);
-  source->info->set_context(isolate->native_context());
 
-  // Create a canonical handle scope before internalizing parsed values if
-  // compiling bytecode. This is required for off-thread bytecode generation.
-  std::unique_ptr<i::CanonicalHandleScope> canonical;
-  if (i::FLAG_ignition) canonical.reset(new i::CanonicalHandleScope(isolate));
+  {
+    // Create a canonical handle scope if compiling ignition bytecode. This is
+    // required by the constant array builder to de-duplicate objects without
+    // dereferencing handles.
+    std::unique_ptr<i::CanonicalHandleScope> canonical;
+    if (i::FLAG_ignition) canonical.reset(new i::CanonicalHandleScope(isolate));
 
-  // Do the parsing tasks which need to be done on the main thread. This will
-  // also handle parse errors.
-  source->parser->Internalize(isolate, script,
-                              source->info->literal() == nullptr);
+    // Do the parsing tasks which need to be done on the main thread. This will
+    // also handle parse errors.
+    source->parser->Internalize(isolate, script,
+                                source->info->literal() == nullptr);
+  }
   source->parser->HandleSourceURLComments(isolate, script);
 
   i::Handle<i::SharedFunctionInfo> result;
@@ -2192,9 +2274,10 @@
   }
   has_pending_exception = result.is_null();
   if (has_pending_exception) isolate->ReportPendingMessages();
-  RETURN_ON_FAILED_EXECUTION(Script);
 
-  source->info->clear_script();  // because script goes out of scope.
+  source->Release();
+
+  RETURN_ON_FAILED_EXECUTION(Script);
 
   Local<UnboundScript> generic = ToApiHandle<UnboundScript>(result);
   if (generic.IsEmpty()) return Local<Script>();
@@ -2263,8 +2346,8 @@
   ResetInternal();
   // Special handling for simulators which have a separate JS stack.
   js_stack_comparable_address_ =
-      reinterpret_cast<void*>(v8::internal::SimulatorStack::RegisterCTryCatch(
-          isolate_, v8::internal::GetCurrentStackPosition()));
+      reinterpret_cast<void*>(i::SimulatorStack::RegisterCTryCatch(
+          isolate_, i::GetCurrentStackPosition()));
   isolate_->RegisterTryCatchHandler(this);
 }
 
@@ -2280,8 +2363,8 @@
   ResetInternal();
   // Special handling for simulators which have a separate JS stack.
   js_stack_comparable_address_ =
-      reinterpret_cast<void*>(v8::internal::SimulatorStack::RegisterCTryCatch(
-          isolate_, v8::internal::GetCurrentStackPosition()));
+      reinterpret_cast<void*>(i::SimulatorStack::RegisterCTryCatch(
+          isolate_, i::GetCurrentStackPosition()));
   isolate_->RegisterTryCatchHandler(this);
 }
 
@@ -2300,7 +2383,7 @@
       isolate_->RestorePendingMessageFromTryCatch(this);
     }
     isolate_->UnregisterTryCatchHandler(this);
-    v8::internal::SimulatorStack::UnregisterCTryCatch(isolate_);
+    i::SimulatorStack::UnregisterCTryCatch(isolate_);
     reinterpret_cast<Isolate*>(isolate_)->ThrowException(exc);
     DCHECK(!isolate_->thread_local_top()->rethrowing_message_);
   } else {
@@ -2311,7 +2394,7 @@
       isolate_->CancelScheduledExceptionFromTryCatch(this);
     }
     isolate_->UnregisterTryCatchHandler(this);
-    v8::internal::SimulatorStack::UnregisterCTryCatch(isolate_);
+    i::SimulatorStack::UnregisterCTryCatch(isolate_);
   }
 }
 
@@ -2832,6 +2915,205 @@
   RETURN_ESCAPED(result);
 }
 
+// --- V a l u e   S e r i a l i z a t i o n ---
+
+Maybe<bool> ValueSerializer::Delegate::WriteHostObject(Isolate* v8_isolate,
+                                                       Local<Object> object) {
+  i::Isolate* isolate = reinterpret_cast<i::Isolate*>(v8_isolate);
+  isolate->ScheduleThrow(*isolate->factory()->NewError(
+      isolate->error_function(), i::MessageTemplate::kDataCloneError,
+      Utils::OpenHandle(*object)));
+  return Nothing<bool>();
+}
+
+struct ValueSerializer::PrivateData {
+  explicit PrivateData(i::Isolate* i, ValueSerializer::Delegate* delegate)
+      : isolate(i), serializer(i, delegate) {}
+  i::Isolate* isolate;
+  i::ValueSerializer serializer;
+};
+
+ValueSerializer::ValueSerializer(Isolate* isolate)
+    : ValueSerializer(isolate, nullptr) {}
+
+ValueSerializer::ValueSerializer(Isolate* isolate, Delegate* delegate)
+    : private_(
+          new PrivateData(reinterpret_cast<i::Isolate*>(isolate), delegate)) {}
+
+ValueSerializer::~ValueSerializer() { delete private_; }
+
+void ValueSerializer::WriteHeader() { private_->serializer.WriteHeader(); }
+
+Maybe<bool> ValueSerializer::WriteValue(Local<Context> context,
+                                        Local<Value> value) {
+  PREPARE_FOR_EXECUTION_PRIMITIVE(context, ValueSerializer, WriteValue, bool);
+  i::Handle<i::Object> object = Utils::OpenHandle(*value);
+  Maybe<bool> result = private_->serializer.WriteObject(object);
+  has_pending_exception = result.IsNothing();
+  RETURN_ON_FAILED_EXECUTION_PRIMITIVE(bool);
+  return result;
+}
+
+std::vector<uint8_t> ValueSerializer::ReleaseBuffer() {
+  return private_->serializer.ReleaseBuffer();
+}
+
+void ValueSerializer::TransferArrayBuffer(uint32_t transfer_id,
+                                          Local<ArrayBuffer> array_buffer) {
+  private_->serializer.TransferArrayBuffer(transfer_id,
+                                           Utils::OpenHandle(*array_buffer));
+}
+
+void ValueSerializer::TransferSharedArrayBuffer(
+    uint32_t transfer_id, Local<SharedArrayBuffer> shared_array_buffer) {
+  private_->serializer.TransferArrayBuffer(
+      transfer_id, Utils::OpenHandle(*shared_array_buffer));
+}
+
+void ValueSerializer::WriteUint32(uint32_t value) {
+  private_->serializer.WriteUint32(value);
+}
+
+void ValueSerializer::WriteUint64(uint64_t value) {
+  private_->serializer.WriteUint64(value);
+}
+
+void ValueSerializer::WriteDouble(double value) {
+  private_->serializer.WriteDouble(value);
+}
+
+void ValueSerializer::WriteRawBytes(const void* source, size_t length) {
+  private_->serializer.WriteRawBytes(source, length);
+}
+
+MaybeLocal<Object> ValueDeserializer::Delegate::ReadHostObject(
+    Isolate* v8_isolate) {
+  i::Isolate* isolate = reinterpret_cast<i::Isolate*>(v8_isolate);
+  isolate->ScheduleThrow(*isolate->factory()->NewError(
+      isolate->error_function(),
+      i::MessageTemplate::kDataCloneDeserializationError));
+  return MaybeLocal<Object>();
+}
+
+struct ValueDeserializer::PrivateData {
+  PrivateData(i::Isolate* i, i::Vector<const uint8_t> data, Delegate* delegate)
+      : isolate(i), deserializer(i, data, delegate) {}
+  i::Isolate* isolate;
+  i::ValueDeserializer deserializer;
+  bool has_aborted = false;
+  bool supports_legacy_wire_format = false;
+};
+
+ValueDeserializer::ValueDeserializer(Isolate* isolate, const uint8_t* data,
+                                     size_t size)
+    : ValueDeserializer(isolate, data, size, nullptr) {}
+
+ValueDeserializer::ValueDeserializer(Isolate* isolate, const uint8_t* data,
+                                     size_t size, Delegate* delegate) {
+  if (base::IsValueInRangeForNumericType<int>(size)) {
+    private_ = new PrivateData(
+        reinterpret_cast<i::Isolate*>(isolate),
+        i::Vector<const uint8_t>(data, static_cast<int>(size)), delegate);
+  } else {
+    private_ = new PrivateData(reinterpret_cast<i::Isolate*>(isolate),
+                               i::Vector<const uint8_t>(nullptr, 0), nullptr);
+    private_->has_aborted = true;
+  }
+}
+
+ValueDeserializer::~ValueDeserializer() { delete private_; }
+
+Maybe<bool> ValueDeserializer::ReadHeader(Local<Context> context) {
+  PREPARE_FOR_EXECUTION_PRIMITIVE(context, ValueDeserializer, ReadHeader, bool);
+
+  // We could have aborted during the constructor.
+  // If so, ReadHeader is where we report it.
+  if (private_->has_aborted) {
+    isolate->Throw(*isolate->factory()->NewError(
+        i::MessageTemplate::kDataCloneDeserializationError));
+    has_pending_exception = true;
+    RETURN_ON_FAILED_EXECUTION_PRIMITIVE(bool);
+  }
+
+  bool read_header = false;
+  has_pending_exception = !private_->deserializer.ReadHeader().To(&read_header);
+  RETURN_ON_FAILED_EXECUTION_PRIMITIVE(bool);
+  DCHECK(read_header);
+
+  // TODO(jbroman): Today, all wire formats are "legacy". When a more supported
+  // format is added, compare the version of the internal serializer to the
+  // minimum non-legacy version number.
+  if (!private_->supports_legacy_wire_format) {
+    isolate->Throw(*isolate->factory()->NewError(
+        i::MessageTemplate::kDataCloneDeserializationVersionError));
+    has_pending_exception = true;
+    RETURN_ON_FAILED_EXECUTION_PRIMITIVE(bool);
+  }
+
+  return Just(true);
+}
+
+Maybe<bool> ValueDeserializer::ReadHeader() {
+  Isolate* isolate = reinterpret_cast<Isolate*>(private_->isolate);
+  return ReadHeader(isolate->GetEnteredContext());
+}
+
+void ValueDeserializer::SetSupportsLegacyWireFormat(
+    bool supports_legacy_wire_format) {
+  private_->supports_legacy_wire_format = supports_legacy_wire_format;
+}
+
+uint32_t ValueDeserializer::GetWireFormatVersion() const {
+  CHECK(!private_->has_aborted);
+  return private_->deserializer.GetWireFormatVersion();
+}
+
+MaybeLocal<Value> ValueDeserializer::ReadValue(Local<Context> context) {
+  CHECK(!private_->has_aborted);
+  PREPARE_FOR_EXECUTION(context, ValueDeserializer, ReadValue, Value);
+  i::MaybeHandle<i::Object> result;
+  if (GetWireFormatVersion() > 0) {
+    result = private_->deserializer.ReadObject();
+  } else {
+    result =
+        private_->deserializer.ReadObjectUsingEntireBufferForLegacyFormat();
+  }
+  Local<Value> value;
+  has_pending_exception = !ToLocal(result, &value);
+  RETURN_ON_FAILED_EXECUTION(Value);
+  RETURN_ESCAPED(value);
+}
+
+void ValueDeserializer::TransferArrayBuffer(uint32_t transfer_id,
+                                            Local<ArrayBuffer> array_buffer) {
+  CHECK(!private_->has_aborted);
+  private_->deserializer.TransferArrayBuffer(transfer_id,
+                                             Utils::OpenHandle(*array_buffer));
+}
+
+void ValueDeserializer::TransferSharedArrayBuffer(
+    uint32_t transfer_id, Local<SharedArrayBuffer> shared_array_buffer) {
+  CHECK(!private_->has_aborted);
+  private_->deserializer.TransferArrayBuffer(
+      transfer_id, Utils::OpenHandle(*shared_array_buffer));
+}
+
+bool ValueDeserializer::ReadUint32(uint32_t* value) {
+  return private_->deserializer.ReadUint32(value);
+}
+
+bool ValueDeserializer::ReadUint64(uint64_t* value) {
+  return private_->deserializer.ReadUint64(value);
+}
+
+bool ValueDeserializer::ReadDouble(double* value) {
+  return private_->deserializer.ReadDouble(value);
+}
+
+bool ValueDeserializer::ReadRawBytes(size_t length, const void** data) {
+  return private_->deserializer.ReadRawBytes(length, data);
+}
+
 // --- D a t a ---
 
 bool Value::FullIsUndefined() const {
@@ -3019,12 +3301,18 @@
   return obj->IsJSRegExp();
 }
 
+bool Value::IsAsyncFunction() const {
+  i::Handle<i::Object> obj = Utils::OpenHandle(this);
+  if (!obj->IsJSFunction()) return false;
+  i::Handle<i::JSFunction> func = i::Handle<i::JSFunction>::cast(obj);
+  return i::IsAsyncFunction(func->shared()->kind());
+}
 
 bool Value::IsGeneratorFunction() const {
   i::Handle<i::Object> obj = Utils::OpenHandle(this);
   if (!obj->IsJSFunction()) return false;
   i::Handle<i::JSFunction> func = i::Handle<i::JSFunction>::cast(obj);
-  return func->shared()->is_generator();
+  return i::IsGeneratorFunction(func->shared()->kind());
 }
 
 
@@ -3662,6 +3950,98 @@
   return result;
 }
 
+struct v8::PropertyDescriptor::PrivateData {
+  PrivateData() : desc() {}
+  i::PropertyDescriptor desc;
+};
+
+v8::PropertyDescriptor::PropertyDescriptor() : private_(new PrivateData()) {}
+
+// DataDescriptor
+v8::PropertyDescriptor::PropertyDescriptor(v8::Local<v8::Value> value)
+    : private_(new PrivateData()) {
+  private_->desc.set_value(Utils::OpenHandle(*value, true));
+}
+
+// DataDescriptor with writable field
+v8::PropertyDescriptor::PropertyDescriptor(v8::Local<v8::Value> value,
+                                           bool writable)
+    : private_(new PrivateData()) {
+  private_->desc.set_value(Utils::OpenHandle(*value, true));
+  private_->desc.set_writable(writable);
+}
+
+// AccessorDescriptor
+v8::PropertyDescriptor::PropertyDescriptor(v8::Local<v8::Value> get,
+                                           v8::Local<v8::Value> set)
+    : private_(new PrivateData()) {
+  DCHECK(get.IsEmpty() || get->IsUndefined() || get->IsFunction());
+  DCHECK(set.IsEmpty() || set->IsUndefined() || set->IsFunction());
+  private_->desc.set_get(Utils::OpenHandle(*get, true));
+  private_->desc.set_set(Utils::OpenHandle(*set, true));
+}
+
+v8::PropertyDescriptor::~PropertyDescriptor() { delete private_; }
+
+v8::Local<Value> v8::PropertyDescriptor::value() const {
+  DCHECK(private_->desc.has_value());
+  return Utils::ToLocal(private_->desc.value());
+}
+
+v8::Local<Value> v8::PropertyDescriptor::get() const {
+  DCHECK(private_->desc.has_get());
+  return Utils::ToLocal(private_->desc.get());
+}
+
+v8::Local<Value> v8::PropertyDescriptor::set() const {
+  DCHECK(private_->desc.has_set());
+  return Utils::ToLocal(private_->desc.set());
+}
+
+bool v8::PropertyDescriptor::has_value() const {
+  return private_->desc.has_value();
+}
+bool v8::PropertyDescriptor::has_get() const {
+  return private_->desc.has_get();
+}
+bool v8::PropertyDescriptor::has_set() const {
+  return private_->desc.has_set();
+}
+
+bool v8::PropertyDescriptor::writable() const {
+  DCHECK(private_->desc.has_writable());
+  return private_->desc.writable();
+}
+
+bool v8::PropertyDescriptor::has_writable() const {
+  return private_->desc.has_writable();
+}
+
+void v8::PropertyDescriptor::set_enumerable(bool enumerable) {
+  private_->desc.set_enumerable(enumerable);
+}
+
+bool v8::PropertyDescriptor::enumerable() const {
+  DCHECK(private_->desc.has_enumerable());
+  return private_->desc.enumerable();
+}
+
+bool v8::PropertyDescriptor::has_enumerable() const {
+  return private_->desc.has_enumerable();
+}
+
+void v8::PropertyDescriptor::set_configurable(bool configurable) {
+  private_->desc.set_configurable(configurable);
+}
+
+bool v8::PropertyDescriptor::configurable() const {
+  DCHECK(private_->desc.has_configurable());
+  return private_->desc.configurable();
+}
+
+bool v8::PropertyDescriptor::has_configurable() const {
+  return private_->desc.has_configurable();
+}
 
 Maybe<bool> v8::Object::DefineOwnProperty(v8::Local<v8::Context> context,
                                           v8::Local<Name> key,
@@ -3672,13 +4052,6 @@
   i::Handle<i::Name> key_obj = Utils::OpenHandle(*key);
   i::Handle<i::Object> value_obj = Utils::OpenHandle(*value);
 
-  if (self->IsAccessCheckNeeded() &&
-      !isolate->MayAccess(handle(isolate->context()),
-                          i::Handle<i::JSObject>::cast(self))) {
-    isolate->ReportFailedAccessCheck(i::Handle<i::JSObject>::cast(self));
-    return Nothing<bool>();
-  }
-
   i::PropertyDescriptor desc;
   desc.set_writable(!(attributes & v8::ReadOnly));
   desc.set_enumerable(!(attributes & v8::DontEnum));
@@ -3691,6 +4064,19 @@
   return success;
 }
 
+Maybe<bool> v8::Object::DefineProperty(v8::Local<v8::Context> context,
+                                       v8::Local<Name> key,
+                                       PropertyDescriptor& descriptor) {
+  PREPARE_FOR_EXECUTION_PRIMITIVE(context, Object, DefineProperty, bool);
+  i::Handle<i::JSReceiver> self = Utils::OpenHandle(this);
+  i::Handle<i::Name> key_obj = Utils::OpenHandle(*key);
+
+  Maybe<bool> success = i::JSReceiver::DefineOwnProperty(
+      isolate, self, key_obj, &descriptor.get_private()->desc,
+      i::Object::DONT_THROW);
+  RETURN_ON_FAILED_EXECUTION_PRIMITIVE(bool);
+  return success;
+}
 
 MUST_USE_RESULT
 static i::MaybeHandle<i::Object> DefineObjectProperty(
@@ -4408,9 +4794,10 @@
 MaybeLocal<Value> Object::CallAsFunction(Local<Context> context,
                                          Local<Value> recv, int argc,
                                          Local<Value> argv[]) {
-  PREPARE_FOR_EXECUTION_WITH_CALLBACK(context, Object, CallAsFunction, Value);
+  PREPARE_FOR_EXECUTION_WITH_CONTEXT_IN_RUNTIME_CALL_STATS_SCOPE(
+      "v8", "V8.Execute", context, Object, CallAsFunction, MaybeLocal<Value>(),
+      InternalEscapableScope, true);
   i::TimerEventScope<i::TimerEventExecute> timer_scope(isolate);
-  TRACE_EVENT_CALL_STATS_SCOPED(isolate, "v8", "V8.Execute");
   auto self = Utils::OpenHandle(this);
   auto recv_obj = Utils::OpenHandle(*recv);
   STATIC_ASSERT(sizeof(v8::Local<v8::Value>) == sizeof(i::Object**));
@@ -4434,10 +4821,10 @@
 
 MaybeLocal<Value> Object::CallAsConstructor(Local<Context> context, int argc,
                                             Local<Value> argv[]) {
-  PREPARE_FOR_EXECUTION_WITH_CALLBACK(context, Object, CallAsConstructor,
-                                      Value);
+  PREPARE_FOR_EXECUTION_WITH_CONTEXT_IN_RUNTIME_CALL_STATS_SCOPE(
+      "v8", "V8.Execute", context, Object, CallAsConstructor,
+      MaybeLocal<Value>(), InternalEscapableScope, true);
   i::TimerEventScope<i::TimerEventExecute> timer_scope(isolate);
-  TRACE_EVENT_CALL_STATS_SCOPED(isolate, "v8", "V8.Execute");
   auto self = Utils::OpenHandle(this);
   STATIC_ASSERT(sizeof(v8::Local<v8::Value>) == sizeof(i::Object**));
   i::Handle<i::Object>* args = reinterpret_cast<i::Handle<i::Object>*>(argv);
@@ -4485,9 +4872,10 @@
 
 MaybeLocal<Object> Function::NewInstance(Local<Context> context, int argc,
                                          v8::Local<v8::Value> argv[]) const {
-  PREPARE_FOR_EXECUTION_WITH_CALLBACK(context, Function, NewInstance, Object);
+  PREPARE_FOR_EXECUTION_WITH_CONTEXT_IN_RUNTIME_CALL_STATS_SCOPE(
+      "v8", "V8.Execute", context, Function, NewInstance, MaybeLocal<Object>(),
+      InternalEscapableScope, true);
   i::TimerEventScope<i::TimerEventExecute> timer_scope(isolate);
-  TRACE_EVENT_CALL_STATS_SCOPED(isolate, "v8", "V8.Execute");
   auto self = Utils::OpenHandle(this);
   STATIC_ASSERT(sizeof(v8::Local<v8::Value>) == sizeof(i::Object**));
   i::Handle<i::Object>* args = reinterpret_cast<i::Handle<i::Object>*>(argv);
@@ -4509,9 +4897,10 @@
 MaybeLocal<v8::Value> Function::Call(Local<Context> context,
                                      v8::Local<v8::Value> recv, int argc,
                                      v8::Local<v8::Value> argv[]) {
-  PREPARE_FOR_EXECUTION_WITH_CALLBACK(context, Function, Call, Value);
+  PREPARE_FOR_EXECUTION_WITH_CONTEXT_IN_RUNTIME_CALL_STATS_SCOPE(
+      "v8", "V8.Execute", context, Function, Call, MaybeLocal<Value>(),
+      InternalEscapableScope, true);
   i::TimerEventScope<i::TimerEventExecute> timer_scope(isolate);
-  TRACE_EVENT_CALL_STATS_SCOPED(isolate, "v8", "V8.Execute");
   auto self = Utils::OpenHandle(this);
   i::Handle<i::Object> recv_obj = Utils::OpenHandle(*recv);
   STATIC_ASSERT(sizeof(v8::Local<v8::Value>) == sizeof(i::Object**));
@@ -5708,8 +6097,8 @@
                           v8::MaybeLocal<Value> global_object,
                           size_t context_snapshot_index) {
   i::Isolate* isolate = reinterpret_cast<i::Isolate*>(external_isolate);
-  LOG_API(isolate, Context, New);
   TRACE_EVENT_CALL_STATS_SCOPED(isolate, "v8", "V8.NewContext");
+  LOG_API(isolate, Context, New);
   i::HandleScope scope(isolate);
   ExtensionConfiguration no_extensions;
   if (extensions == NULL) extensions = &no_extensions;
@@ -6820,8 +7209,9 @@
   if (!maybe_compiled_part.ToHandle(&compiled_part)) {
     return MaybeLocal<WasmCompiledModule>();
   }
-  return Local<WasmCompiledModule>::Cast(Utils::ToLocal(
-      i::wasm::CreateCompiledModuleObject(i_isolate, compiled_part)));
+  return Local<WasmCompiledModule>::Cast(
+      Utils::ToLocal(i::wasm::CreateCompiledModuleObject(
+          i_isolate, compiled_part, i::wasm::ModuleOrigin::kWasmOrigin)));
 }
 
 // static
@@ -7234,8 +7624,7 @@
 void Isolate::ReportExternalAllocationLimitReached() {
   i::Heap* heap = reinterpret_cast<i::Isolate*>(this)->heap();
   if (heap->gc_state() != i::Heap::NOT_IN_GC) return;
-  heap->ReportExternalMemoryPressure(
-      "external memory allocation limit reached.");
+  heap->ReportExternalMemoryPressure();
 }
 
 
@@ -7303,27 +7692,24 @@
 void Isolate::SetObjectGroupId(internal::Object** object, UniqueId id) {
   i::Isolate* internal_isolate = reinterpret_cast<i::Isolate*>(this);
   internal_isolate->global_handles()->SetObjectGroupId(
-      v8::internal::Handle<v8::internal::Object>(object).location(),
-      id);
+      i::Handle<i::Object>(object).location(), id);
 }
 
 
 void Isolate::SetReferenceFromGroup(UniqueId id, internal::Object** object) {
   i::Isolate* internal_isolate = reinterpret_cast<i::Isolate*>(this);
   internal_isolate->global_handles()->SetReferenceFromGroup(
-      id,
-      v8::internal::Handle<v8::internal::Object>(object).location());
+      id, i::Handle<i::Object>(object).location());
 }
 
 
 void Isolate::SetReference(internal::Object** parent,
                            internal::Object** child) {
   i::Isolate* internal_isolate = reinterpret_cast<i::Isolate*>(this);
-  i::Object** parent_location =
-      v8::internal::Handle<v8::internal::Object>(parent).location();
+  i::Object** parent_location = i::Handle<i::Object>(parent).location();
   internal_isolate->global_handles()->SetReference(
       reinterpret_cast<i::HeapObject**>(parent_location),
-      v8::internal::Handle<v8::internal::Object>(child).location());
+      i::Handle<i::Object>(child).location());
 }
 
 
@@ -7398,13 +7784,13 @@
   CHECK(i::FLAG_expose_gc);
   if (type == kMinorGarbageCollection) {
     reinterpret_cast<i::Isolate*>(this)->heap()->CollectGarbage(
-        i::NEW_SPACE, "Isolate::RequestGarbageCollection",
+        i::NEW_SPACE, i::GarbageCollectionReason::kTesting,
         kGCCallbackFlagForced);
   } else {
     DCHECK_EQ(kFullGarbageCollection, type);
     reinterpret_cast<i::Isolate*>(this)->heap()->CollectAllGarbage(
         i::Heap::kAbortIncrementalMarkingMask,
-        "Isolate::RequestGarbageCollection", kGCCallbackFlagForced);
+        i::GarbageCollectionReason::kTesting, kGCCallbackFlagForced);
   }
 }
 
@@ -7833,7 +8219,8 @@
     i::HistogramTimerScope idle_notification_scope(
         isolate->counters()->gc_low_memory_notification());
     TRACE_EVENT0("v8", "V8.GCLowMemoryNotification");
-    isolate->heap()->CollectAllAvailableGarbage("low memory notification");
+    isolate->heap()->CollectAllAvailableGarbage(
+        i::GarbageCollectionReason::kLowMemoryNotification);
   }
 }
 
@@ -7857,8 +8244,7 @@
 
 void Isolate::MemoryPressureNotification(MemoryPressureLevel level) {
   i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
-  return isolate->heap()->MemoryPressureNotification(level,
-                                                     Locker::IsLocked(this));
+  isolate->heap()->MemoryPressureNotification(level, Locker::IsLocked(this));
 }
 
 void Isolate::SetRAILMode(RAILMode rail_mode) {
@@ -8325,6 +8711,10 @@
   }
 }
 
+const char* CpuProfileNode::GetFunctionNameStr() const {
+  const i::ProfileNode* node = reinterpret_cast<const i::ProfileNode*>(this);
+  return node->entry()->name();
+}
 
 int CpuProfileNode::GetScriptId() const {
   const i::ProfileNode* node = reinterpret_cast<const i::ProfileNode*>(this);
@@ -8332,7 +8722,6 @@
   return entry->script_id();
 }
 
-
 Local<String> CpuProfileNode::GetScriptResourceName() const {
   const i::ProfileNode* node = reinterpret_cast<const i::ProfileNode*>(this);
   i::Isolate* isolate = node->isolate();
@@ -8340,6 +8729,10 @@
       node->entry()->resource_name()));
 }
 
+const char* CpuProfileNode::GetScriptResourceNameStr() const {
+  const i::ProfileNode* node = reinterpret_cast<const i::ProfileNode*>(this);
+  return node->entry()->resource_name();
+}
 
 int CpuProfileNode::GetLineNumber() const {
   return reinterpret_cast<const i::ProfileNode*>(this)->entry()->line_number();
@@ -8966,9 +9359,6 @@
   Isolate* isolate = reinterpret_cast<Isolate*>(info.GetIsolate());
   RuntimeCallTimerScope timer(isolate,
                               &RuntimeCallStats::AccessorGetterCallback);
-  TRACE_EVENT_RUNTIME_CALL_STATS_TRACING_SCOPED(
-      isolate,
-      &internal::tracing::TraceEventStatsTable::AccessorGetterCallback);
   Address getter_address = reinterpret_cast<Address>(reinterpret_cast<intptr_t>(
       getter));
   VMState<EXTERNAL> state(isolate);
@@ -8982,9 +9372,6 @@
   Isolate* isolate = reinterpret_cast<Isolate*>(info.GetIsolate());
   RuntimeCallTimerScope timer(isolate,
                               &RuntimeCallStats::InvokeFunctionCallback);
-  TRACE_EVENT_RUNTIME_CALL_STATS_TRACING_SCOPED(
-      isolate,
-      &internal::tracing::TraceEventStatsTable::InvokeFunctionCallback);
   Address callback_address =
       reinterpret_cast<Address>(reinterpret_cast<intptr_t>(callback));
   VMState<EXTERNAL> state(isolate);
diff --git a/src/api.h b/src/api.h
index ede7ba9..22c10dd 100644
--- a/src/api.h
+++ b/src/api.h
@@ -69,7 +69,6 @@
   static RegisteredExtension* first_extension_;
 };
 
-
 #define OPEN_HANDLE_LIST(V)                  \
   V(Template, TemplateInfo)                  \
   V(FunctionTemplate, FunctionTemplateInfo)  \
@@ -101,6 +100,7 @@
   V(Symbol, Symbol)                          \
   V(Script, JSFunction)                      \
   V(UnboundScript, SharedFunctionInfo)       \
+  V(Module, Module)                          \
   V(Function, JSReceiver)                    \
   V(Message, JSMessageObject)                \
   V(Context, Context)                        \
@@ -124,6 +124,8 @@
       v8::internal::Handle<v8::internal::Context> obj);
   static inline Local<Value> ToLocal(
       v8::internal::Handle<v8::internal::Object> obj);
+  static inline Local<Module> ToLocal(
+      v8::internal::Handle<v8::internal::Module> obj);
   static inline Local<Name> ToLocal(
       v8::internal::Handle<v8::internal::Name> obj);
   static inline Local<String> ToLocal(
@@ -136,6 +138,8 @@
       v8::internal::Handle<v8::internal::JSReceiver> obj);
   static inline Local<Object> ToLocal(
       v8::internal::Handle<v8::internal::JSObject> obj);
+  static inline Local<Function> ToLocal(
+      v8::internal::Handle<v8::internal::JSFunction> obj);
   static inline Local<Array> ToLocal(
       v8::internal::Handle<v8::internal::JSArray> obj);
   static inline Local<Map> ToLocal(
@@ -284,12 +288,14 @@
 
 MAKE_TO_LOCAL(ToLocal, Context, Context)
 MAKE_TO_LOCAL(ToLocal, Object, Value)
+MAKE_TO_LOCAL(ToLocal, Module, Module)
 MAKE_TO_LOCAL(ToLocal, Name, Name)
 MAKE_TO_LOCAL(ToLocal, String, String)
 MAKE_TO_LOCAL(ToLocal, Symbol, Symbol)
 MAKE_TO_LOCAL(ToLocal, JSRegExp, RegExp)
 MAKE_TO_LOCAL(ToLocal, JSReceiver, Object)
 MAKE_TO_LOCAL(ToLocal, JSObject, Object)
+MAKE_TO_LOCAL(ToLocal, JSFunction, Function)
 MAKE_TO_LOCAL(ToLocal, JSArray, Array)
 MAKE_TO_LOCAL(ToLocal, JSMap, Map)
 MAKE_TO_LOCAL(ToLocal, JSSet, Set)
diff --git a/src/arguments.h b/src/arguments.h
index 9c629ce..92c7075 100644
--- a/src/arguments.h
+++ b/src/arguments.h
@@ -81,21 +81,20 @@
 
 // TODO(cbruni): add global flag to check whether any tracing events have been
 // enabled.
-// TODO(cbruni): Convert the IsContext CHECK back to a DCHECK.
 #define RUNTIME_FUNCTION_RETURNS_TYPE(Type, Name)                             \
   static INLINE(Type __RT_impl_##Name(Arguments args, Isolate* isolate));     \
                                                                               \
   V8_NOINLINE static Type Stats_##Name(int args_length, Object** args_object, \
                                        Isolate* isolate) {                    \
     RuntimeCallTimerScope timer(isolate, &RuntimeCallStats::Name);            \
+    TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.runtime"),                     \
+                 "V8.Runtime_" #Name);                                        \
     Arguments args(args_length, args_object);                                 \
-    TRACE_EVENT_RUNTIME_CALL_STATS_TRACING_SCOPED(                            \
-        isolate, &tracing::TraceEventStatsTable::Name);                       \
     return __RT_impl_##Name(args, isolate);                                   \
   }                                                                           \
                                                                               \
   Type Name(int args_length, Object** args_object, Isolate* isolate) {        \
-    CHECK(isolate->context() == nullptr || isolate->context()->IsContext());  \
+    DCHECK(isolate->context() == nullptr || isolate->context()->IsContext()); \
     CLOBBER_DOUBLE_REGISTERS();                                               \
     if (V8_UNLIKELY(TRACE_EVENT_RUNTIME_CALL_STATS_TRACING_ENABLED() ||       \
                     FLAG_runtime_call_stats)) {                               \
diff --git a/src/arm/OWNERS b/src/arm/OWNERS
deleted file mode 100644
index 906a5ce..0000000
--- a/src/arm/OWNERS
+++ /dev/null
@@ -1 +0,0 @@
-rmcilroy@chromium.org
diff --git a/src/arm/assembler-arm-inl.h b/src/arm/assembler-arm-inl.h
index b1f33e0..bc501b1 100644
--- a/src/arm/assembler-arm-inl.h
+++ b/src/arm/assembler-arm-inl.h
@@ -46,7 +46,7 @@
 namespace v8 {
 namespace internal {
 
-bool CpuFeatures::SupportsCrankshaft() { return IsSupported(VFP3); }
+bool CpuFeatures::SupportsCrankshaft() { return true; }
 
 bool CpuFeatures::SupportsSimd128() { return false; }
 
diff --git a/src/arm/assembler-arm.cc b/src/arm/assembler-arm.cc
index 78ffe25..ee02027 100644
--- a/src/arm/assembler-arm.cc
+++ b/src/arm/assembler-arm.cc
@@ -46,97 +46,203 @@
 namespace v8 {
 namespace internal {
 
-// Get the CPU features enabled by the build. For cross compilation the
-// preprocessor symbols CAN_USE_ARMV7_INSTRUCTIONS and CAN_USE_VFP3_INSTRUCTIONS
-// can be defined to enable ARMv7 and VFPv3 instructions when building the
-// snapshot.
-static unsigned CpuFeaturesImpliedByCompiler() {
-  unsigned answer = 0;
-#ifdef CAN_USE_ARMV8_INSTRUCTIONS
-  if (FLAG_enable_armv8) {
-    answer |= 1u << ARMv8;
-    // ARMv8 always features VFP and NEON.
-    answer |= 1u << ARMv7 | 1u << VFP3 | 1u << NEON | 1u << VFP32DREGS;
-    answer |= 1u << SUDIV;
-  }
-#endif  // CAN_USE_ARMV8_INSTRUCTIONS
-#ifdef CAN_USE_ARMV7_INSTRUCTIONS
-  if (FLAG_enable_armv7) answer |= 1u << ARMv7;
-#endif  // CAN_USE_ARMV7_INSTRUCTIONS
-#ifdef CAN_USE_VFP3_INSTRUCTIONS
-  if (FLAG_enable_vfp3) answer |= 1u << VFP3 | 1u << ARMv7;
-#endif  // CAN_USE_VFP3_INSTRUCTIONS
-#ifdef CAN_USE_VFP32DREGS
-  if (FLAG_enable_32dregs) answer |= 1u << VFP32DREGS;
-#endif  // CAN_USE_VFP32DREGS
-#ifdef CAN_USE_NEON
-  if (FLAG_enable_neon) answer |= 1u << NEON;
-#endif  // CAN_USE_VFP32DREGS
+static const unsigned kArmv6 = 0u;
+static const unsigned kArmv7 = kArmv6 | (1u << ARMv7);
+static const unsigned kArmv7WithSudiv = kArmv7 | (1u << ARMv7_SUDIV);
+static const unsigned kArmv8 = kArmv7WithSudiv | (1u << ARMv8);
 
-  return answer;
+static unsigned CpuFeaturesFromCommandLine() {
+  unsigned result;
+  if (strcmp(FLAG_arm_arch, "armv8") == 0) {
+    result = kArmv8;
+  } else if (strcmp(FLAG_arm_arch, "armv7+sudiv") == 0) {
+    result = kArmv7WithSudiv;
+  } else if (strcmp(FLAG_arm_arch, "armv7") == 0) {
+    result = kArmv7;
+  } else if (strcmp(FLAG_arm_arch, "armv6") == 0) {
+    result = kArmv6;
+  } else {
+    fprintf(stderr, "Error: unrecognised value for --arm-arch ('%s').\n",
+            FLAG_arm_arch);
+    fprintf(stderr,
+            "Supported values are:  armv8\n"
+            "                       armv7+sudiv\n"
+            "                       armv7\n"
+            "                       armv6\n");
+    CHECK(false);
+  }
+
+  // If any of the old (deprecated) flags are specified, print a warning, but
+  // otherwise try to respect them for now.
+  // TODO(jbramley): When all the old bots have been updated, remove this.
+  if (FLAG_enable_armv7.has_value || FLAG_enable_vfp3.has_value ||
+      FLAG_enable_32dregs.has_value || FLAG_enable_neon.has_value ||
+      FLAG_enable_sudiv.has_value || FLAG_enable_armv8.has_value) {
+    // As an approximation of the old behaviour, set the default values from the
+    // arm_arch setting, then apply the flags over the top.
+    bool enable_armv7 = (result & (1u << ARMv7)) != 0;
+    bool enable_vfp3 = (result & (1u << ARMv7)) != 0;
+    bool enable_32dregs = (result & (1u << ARMv7)) != 0;
+    bool enable_neon = (result & (1u << ARMv7)) != 0;
+    bool enable_sudiv = (result & (1u << ARMv7_SUDIV)) != 0;
+    bool enable_armv8 = (result & (1u << ARMv8)) != 0;
+    if (FLAG_enable_armv7.has_value) {
+      fprintf(stderr,
+              "Warning: --enable_armv7 is deprecated. "
+              "Use --arm_arch instead.\n");
+      enable_armv7 = FLAG_enable_armv7.value;
+    }
+    if (FLAG_enable_vfp3.has_value) {
+      fprintf(stderr,
+              "Warning: --enable_vfp3 is deprecated. "
+              "Use --arm_arch instead.\n");
+      enable_vfp3 = FLAG_enable_vfp3.value;
+    }
+    if (FLAG_enable_32dregs.has_value) {
+      fprintf(stderr,
+              "Warning: --enable_32dregs is deprecated. "
+              "Use --arm_arch instead.\n");
+      enable_32dregs = FLAG_enable_32dregs.value;
+    }
+    if (FLAG_enable_neon.has_value) {
+      fprintf(stderr,
+              "Warning: --enable_neon is deprecated. "
+              "Use --arm_arch instead.\n");
+      enable_neon = FLAG_enable_neon.value;
+    }
+    if (FLAG_enable_sudiv.has_value) {
+      fprintf(stderr,
+              "Warning: --enable_sudiv is deprecated. "
+              "Use --arm_arch instead.\n");
+      enable_sudiv = FLAG_enable_sudiv.value;
+    }
+    if (FLAG_enable_armv8.has_value) {
+      fprintf(stderr,
+              "Warning: --enable_armv8 is deprecated. "
+              "Use --arm_arch instead.\n");
+      enable_armv8 = FLAG_enable_armv8.value;
+    }
+    // Emulate the old implications.
+    if (enable_armv8) {
+      enable_vfp3 = true;
+      enable_neon = true;
+      enable_32dregs = true;
+      enable_sudiv = true;
+    }
+    // Select the best available configuration.
+    if (enable_armv7 && enable_vfp3 && enable_32dregs && enable_neon) {
+      if (enable_sudiv) {
+        if (enable_armv8) {
+          result = kArmv8;
+        } else {
+          result = kArmv7WithSudiv;
+        }
+      } else {
+        result = kArmv7;
+      }
+    } else {
+      result = kArmv6;
+    }
+  }
+  return result;
+}
+
+// Get the CPU features enabled by the build.
+// For cross compilation the preprocessor symbols such as
+// CAN_USE_ARMV7_INSTRUCTIONS and CAN_USE_VFP3_INSTRUCTIONS can be used to
+// enable ARMv7 and VFPv3 instructions when building the snapshot. However,
+// these flags should be consistent with a supported ARM configuration:
+//  "armv6":       ARMv6 + VFPv2
+//  "armv7":       ARMv7 + VFPv3-D32 + NEON
+//  "armv7+sudiv": ARMv7 + VFPv4-D32 + NEON + SUDIV
+//  "armv8":       ARMv8 (+ all of the above)
+static constexpr unsigned CpuFeaturesFromCompiler() {
+// TODO(jbramley): Once the build flags are simplified, these tests should
+// also be simplified.
+
+// Check *architectural* implications.
+#if defined(CAN_USE_ARMV8_INSTRUCTIONS) && !defined(CAN_USE_ARMV7_INSTRUCTIONS)
+#error "CAN_USE_ARMV8_INSTRUCTIONS should imply CAN_USE_ARMV7_INSTRUCTIONS"
+#endif
+#if defined(CAN_USE_ARMV8_INSTRUCTIONS) && !defined(CAN_USE_SUDIV)
+#error "CAN_USE_ARMV8_INSTRUCTIONS should imply CAN_USE_SUDIV"
+#endif
+#if defined(CAN_USE_ARMV7_INSTRUCTIONS) != defined(CAN_USE_VFP3_INSTRUCTIONS)
+// V8 requires VFP, and all ARMv7 devices with VFP have VFPv3. Similarly,
+// VFPv3 isn't available before ARMv7.
+#error "CAN_USE_ARMV7_INSTRUCTIONS should match CAN_USE_VFP3_INSTRUCTIONS"
+#endif
+#if defined(CAN_USE_NEON) && !defined(CAN_USE_ARMV7_INSTRUCTIONS)
+#error "CAN_USE_NEON should imply CAN_USE_ARMV7_INSTRUCTIONS"
+#endif
+
+// Find compiler-implied features.
+#if defined(CAN_USE_ARMV8_INSTRUCTIONS) &&                           \
+    defined(CAN_USE_ARMV7_INSTRUCTIONS) && defined(CAN_USE_SUDIV) && \
+    defined(CAN_USE_NEON) && defined(CAN_USE_VFP3_INSTRUCTIONS)
+  return kArmv8;
+#elif defined(CAN_USE_ARMV7_INSTRUCTIONS) && defined(CAN_USE_SUDIV) && \
+    defined(CAN_USE_NEON) && defined(CAN_USE_VFP3_INSTRUCTIONS)
+  return kArmv7WithSudiv;
+#elif defined(CAN_USE_ARMV7_INSTRUCTIONS) && defined(CAN_USE_NEON) && \
+    defined(CAN_USE_VFP3_INSTRUCTIONS)
+  return kArmv7;
+#else
+  return kArmv6;
+#endif
 }
 
 
 void CpuFeatures::ProbeImpl(bool cross_compile) {
-  supported_ |= CpuFeaturesImpliedByCompiler();
   dcache_line_size_ = 64;
 
+  unsigned command_line = CpuFeaturesFromCommandLine();
   // Only use statically determined features for cross compile (snapshot).
-  if (cross_compile) return;
+  if (cross_compile) {
+    supported_ |= command_line & CpuFeaturesFromCompiler();
+    return;
+  }
 
 #ifndef __arm__
   // For the simulator build, use whatever the flags specify.
-  if (FLAG_enable_armv8) {
-    supported_ |= 1u << ARMv8;
-    // ARMv8 always features VFP and NEON.
-    supported_ |= 1u << ARMv7 | 1u << VFP3 | 1u << NEON | 1u << VFP32DREGS;
-    supported_ |= 1u << SUDIV;
-    if (FLAG_enable_movw_movt) supported_ |= 1u << MOVW_MOVT_IMMEDIATE_LOADS;
-  }
-  if (FLAG_enable_armv7) {
-    supported_ |= 1u << ARMv7;
-    if (FLAG_enable_vfp3) supported_ |= 1u << VFP3;
-    if (FLAG_enable_neon) supported_ |= 1u << NEON | 1u << VFP32DREGS;
-    if (FLAG_enable_sudiv) supported_ |= 1u << SUDIV;
-    if (FLAG_enable_movw_movt) supported_ |= 1u << MOVW_MOVT_IMMEDIATE_LOADS;
-    if (FLAG_enable_32dregs) supported_ |= 1u << VFP32DREGS;
-  }
+  supported_ |= command_line;
 
 #else  // __arm__
   // Probe for additional features at runtime.
   base::CPU cpu;
-  if (FLAG_enable_vfp3 && cpu.has_vfp3()) {
-    // This implementation also sets the VFP flags if runtime
-    // detection of VFP returns true. VFPv3 implies ARMv7, see ARM DDI
-    // 0406B, page A1-6.
-    supported_ |= 1u << VFP3 | 1u << ARMv7;
-  }
-
-  if (FLAG_enable_neon && cpu.has_neon()) supported_ |= 1u << NEON;
-  if (FLAG_enable_sudiv && cpu.has_idiva()) supported_ |= 1u << SUDIV;
-
-  if (cpu.architecture() >= 7) {
-    if (FLAG_enable_armv7) supported_ |= 1u << ARMv7;
-    if (FLAG_enable_armv8 && cpu.architecture() >= 8) {
-      supported_ |= 1u << ARMv8;
-    }
-    // Use movw/movt for QUALCOMM ARMv7 cores.
-    if (FLAG_enable_movw_movt && cpu.implementer() == base::CPU::QUALCOMM) {
-      supported_ |= 1u << MOVW_MOVT_IMMEDIATE_LOADS;
+  // Runtime detection is slightly fuzzy, and some inferences are necessary.
+  unsigned runtime = kArmv6;
+  // NEON and VFPv3 imply at least ARMv7-A.
+  if (cpu.has_neon() && cpu.has_vfp3_d32()) {
+    DCHECK(cpu.has_vfp3());
+    runtime |= kArmv7;
+    if (cpu.has_idiva()) {
+      runtime |= kArmv7WithSudiv;
+      if (cpu.architecture() >= 8) {
+        runtime |= kArmv8;
+      }
     }
   }
 
+  // Use the best of the features found by CPU detection and those inferred from
+  // the build system. In both cases, restrict available features using the
+  // command-line. Note that the command-line flags are very permissive (kArmv8)
+  // by default.
+  supported_ |= command_line & CpuFeaturesFromCompiler();
+  supported_ |= command_line & runtime;
+
+  // Additional tuning options.
+
   // ARM Cortex-A9 and Cortex-A5 have 32 byte cachelines.
   if (cpu.implementer() == base::CPU::ARM &&
       (cpu.part() == base::CPU::ARM_CORTEX_A5 ||
        cpu.part() == base::CPU::ARM_CORTEX_A9)) {
     dcache_line_size_ = 32;
   }
-
-  if (FLAG_enable_32dregs && cpu.has_vfp3_d32()) supported_ |= 1u << VFP32DREGS;
 #endif
 
-  DCHECK(!IsSupported(VFP3) || IsSupported(ARMv7));
+  DCHECK_IMPLIES(IsSupported(ARMv7_SUDIV), IsSupported(ARMv7));
+  DCHECK_IMPLIES(IsSupported(ARMv8), IsSupported(ARMv7_SUDIV));
 }
 
 
@@ -195,13 +301,10 @@
 
 
 void CpuFeatures::PrintFeatures() {
-  printf(
-      "ARMv8=%d ARMv7=%d VFP3=%d VFP32DREGS=%d NEON=%d SUDIV=%d "
-      "MOVW_MOVT_IMMEDIATE_LOADS=%d",
-      CpuFeatures::IsSupported(ARMv8), CpuFeatures::IsSupported(ARMv7),
-      CpuFeatures::IsSupported(VFP3), CpuFeatures::IsSupported(VFP32DREGS),
-      CpuFeatures::IsSupported(NEON), CpuFeatures::IsSupported(SUDIV),
-      CpuFeatures::IsSupported(MOVW_MOVT_IMMEDIATE_LOADS));
+  printf("ARMv8=%d ARMv7=%d VFPv3=%d VFP32DREGS=%d NEON=%d SUDIV=%d",
+         CpuFeatures::IsSupported(ARMv8), CpuFeatures::IsSupported(ARMv7),
+         CpuFeatures::IsSupported(VFPv3), CpuFeatures::IsSupported(VFP32DREGS),
+         CpuFeatures::IsSupported(NEON), CpuFeatures::IsSupported(SUDIV));
 #ifdef __arm__
   bool eabi_hardfloat = base::OS::ArmUsingHardFloat();
 #elif USE_EABI_HARDFLOAT
@@ -209,7 +312,7 @@
 #else
   bool eabi_hardfloat = false;
 #endif
-    printf(" USE_EABI_HARDFLOAT=%d\n", eabi_hardfloat);
+  printf(" USE_EABI_HARDFLOAT=%d\n", eabi_hardfloat);
 }
 
 
@@ -481,6 +584,12 @@
   first_const_pool_64_use_ = -1;
   last_bound_pos_ = 0;
   ClearRecordedAstId();
+  if (CpuFeatures::IsSupported(VFP32DREGS)) {
+    // Register objects tend to be abstracted and survive between scopes, so
+    // it's awkward to use CpuFeatures::VFP32DREGS with CpuFeatureScope. To make
+    // its use consistent with other features, we always enable it if we can.
+    EnableCpuFeature(VFP32DREGS);
+  }
 }
 
 
@@ -860,10 +969,12 @@
         if (target16_1 == 0) {
           CodePatcher patcher(isolate(), reinterpret_cast<byte*>(buffer_ + pos),
                               1, CodePatcher::DONT_FLUSH);
+          CpuFeatureScope scope(patcher.masm(), ARMv7);
           patcher.masm()->movw(dst, target16_0);
         } else {
           CodePatcher patcher(isolate(), reinterpret_cast<byte*>(buffer_ + pos),
                               2, CodePatcher::DONT_FLUSH);
+          CpuFeatureScope scope(patcher.masm(), ARMv7);
           patcher.masm()->movw(dst, target16_0);
           patcher.masm()->movt(dst, target16_1);
         }
@@ -1075,13 +1186,10 @@
 
 static bool use_mov_immediate_load(const Operand& x,
                                    const Assembler* assembler) {
-  if (FLAG_enable_embedded_constant_pool && assembler != NULL &&
+  DCHECK(assembler != nullptr);
+  if (FLAG_enable_embedded_constant_pool &&
       !assembler->is_constant_pool_available()) {
     return true;
-  } else if (CpuFeatures::IsSupported(MOVW_MOVT_IMMEDIATE_LOADS) &&
-             (assembler == NULL || !assembler->predictable_code_size())) {
-    // Prefer movw / movt to constant pool if it is more efficient on the CPU.
-    return true;
   } else if (x.must_output_reloc_info(assembler)) {
     // Prefer constant pool if data is likely to be patched.
     return false;
@@ -1094,6 +1202,7 @@
 
 int Operand::instructions_required(const Assembler* assembler,
                                    Instr instr) const {
+  DCHECK(assembler != nullptr);
   if (rm_.is_valid()) return 1;
   uint32_t dummy1, dummy2;
   if (must_output_reloc_info(assembler) ||
@@ -1105,8 +1214,7 @@
     if (use_mov_immediate_load(*this, assembler)) {
       // A movw / movt or mov / orr immediate load.
       instructions = CpuFeatures::IsSupported(ARMv7) ? 2 : 4;
-    } else if (assembler != NULL &&
-               assembler->ConstantPoolAccessIsInOverflow()) {
+    } else if (assembler->ConstantPoolAccessIsInOverflow()) {
       // An overflowed constant pool load.
       instructions = CpuFeatures::IsSupported(ARMv7) ? 3 : 5;
     } else {
@@ -1140,6 +1248,7 @@
   if (use_mov_immediate_load(x, this)) {
     Register target = rd.code() == pc.code() ? ip : rd;
     if (CpuFeatures::IsSupported(ARMv7)) {
+      CpuFeatureScope scope(this, ARMv7);
       if (!FLAG_enable_embedded_constant_pool &&
           x.must_output_reloc_info(this)) {
         // Make sure the movw/movt doesn't get separated.
@@ -1166,6 +1275,7 @@
       Register target = rd.code() == pc.code() ? ip : rd;
       // Emit instructions to load constant pool offset.
       if (CpuFeatures::IsSupported(ARMv7)) {
+        CpuFeatureScope scope(this, ARMv7);
         movw(target, 0, cond);
         movt(target, 0, cond);
       } else {
@@ -1376,8 +1486,7 @@
   emit(cond | B27 | B25 | B24 | (imm24 & kImm24Mask));
 }
 
-
-void Assembler::blx(int branch_offset) {  // v5 and above
+void Assembler::blx(int branch_offset) {
   DCHECK((branch_offset & 1) == 0);
   int h = ((branch_offset & 2) >> 1)*B24;
   int imm24 = branch_offset >> 2;
@@ -1385,14 +1494,12 @@
   emit(kSpecialCondition | B27 | B25 | h | (imm24 & kImm24Mask));
 }
 
-
-void Assembler::blx(Register target, Condition cond) {  // v5 and above
+void Assembler::blx(Register target, Condition cond) {
   DCHECK(!target.is(pc));
   emit(cond | B24 | B21 | 15*B16 | 15*B12 | 15*B8 | BLX | target.code());
 }
 
-
-void Assembler::bx(Register target, Condition cond) {  // v5 and above, plus v4t
+void Assembler::bx(Register target, Condition cond) {
   DCHECK(!target.is(pc));  // use of pc is actually allowed, but discouraged
   emit(cond | B24 | B21 | 15*B16 | 15*B12 | 15*B8 | BX | target.code());
 }
@@ -1548,13 +1655,13 @@
 
 
 void Assembler::movw(Register reg, uint32_t immediate, Condition cond) {
-  DCHECK(CpuFeatures::IsSupported(ARMv7));
+  DCHECK(IsEnabled(ARMv7));
   emit(cond | 0x30*B20 | reg.code()*B12 | EncodeMovwImmediate(immediate));
 }
 
 
 void Assembler::movt(Register reg, uint32_t immediate, Condition cond) {
-  DCHECK(CpuFeatures::IsSupported(ARMv7));
+  DCHECK(IsEnabled(ARMv7));
   emit(cond | 0x34*B20 | reg.code()*B12 | EncodeMovwImmediate(immediate));
 }
 
@@ -1684,7 +1791,6 @@
 
 // Miscellaneous arithmetic instructions.
 void Assembler::clz(Register dst, Register src, Condition cond) {
-  // v5 and above.
   DCHECK(!dst.is(pc) && !src.is(pc));
   emit(cond | B24 | B22 | B21 | 15*B16 | dst.code()*B12 |
        15*B8 | CLZ | src.code());
@@ -1724,8 +1830,7 @@
                      int lsb,
                      int width,
                      Condition cond) {
-  // v7 and above.
-  DCHECK(CpuFeatures::IsSupported(ARMv7));
+  DCHECK(IsEnabled(ARMv7));
   DCHECK(!dst.is(pc) && !src.is(pc));
   DCHECK((lsb >= 0) && (lsb <= 31));
   DCHECK((width >= 1) && (width <= (32 - lsb)));
@@ -1744,8 +1849,7 @@
                      int lsb,
                      int width,
                      Condition cond) {
-  // v7 and above.
-  DCHECK(CpuFeatures::IsSupported(ARMv7));
+  DCHECK(IsEnabled(ARMv7));
   DCHECK(!dst.is(pc) && !src.is(pc));
   DCHECK((lsb >= 0) && (lsb <= 31));
   DCHECK((width >= 1) && (width <= (32 - lsb)));
@@ -1759,8 +1863,7 @@
 // to zero, preserving the value of the other bits.
 //   bfc dst, #lsb, #width
 void Assembler::bfc(Register dst, int lsb, int width, Condition cond) {
-  // v7 and above.
-  DCHECK(CpuFeatures::IsSupported(ARMv7));
+  DCHECK(IsEnabled(ARMv7));
   DCHECK(!dst.is(pc));
   DCHECK((lsb >= 0) && (lsb <= 31));
   DCHECK((width >= 1) && (width <= (32 - lsb)));
@@ -1778,8 +1881,7 @@
                     int lsb,
                     int width,
                     Condition cond) {
-  // v7 and above.
-  DCHECK(CpuFeatures::IsSupported(ARMv7));
+  DCHECK(IsEnabled(ARMv7));
   DCHECK(!dst.is(pc) && !src.is(pc));
   DCHECK((lsb >= 0) && (lsb <= 31));
   DCHECK((width >= 1) && (width <= (32 - lsb)));
@@ -2176,8 +2278,7 @@
 #endif  // def __arm__
 }
 
-
-void Assembler::bkpt(uint32_t imm16) {  // v5 and above
+void Assembler::bkpt(uint32_t imm16) {
   DCHECK(is_uint16(imm16));
   emit(al | B24 | B21 | (imm16 >> 4)*B8 | BKPT | (imm16 & 0xf));
 }
@@ -2190,17 +2291,38 @@
 
 
 void Assembler::dmb(BarrierOption option) {
-  emit(kSpecialCondition | 0x57ff*B12 | 5*B4 | option);
+  if (CpuFeatures::IsSupported(ARMv7)) {
+    // Details available in ARM DDI 0406C.b, A8-378.
+    emit(kSpecialCondition | 0x57ff * B12 | 5 * B4 | option);
+  } else {
+    // Details available in ARM DDI 0406C.b, B3-1750.
+    // CP15DMB: CRn=c7, opc1=0, CRm=c10, opc2=5, Rt is ignored.
+    mcr(p15, 0, r0, cr7, cr10, 5);
+  }
 }
 
 
 void Assembler::dsb(BarrierOption option) {
-  emit(kSpecialCondition | 0x57ff*B12 | 4*B4 | option);
+  if (CpuFeatures::IsSupported(ARMv7)) {
+    // Details available in ARM DDI 0406C.b, A8-380.
+    emit(kSpecialCondition | 0x57ff * B12 | 4 * B4 | option);
+  } else {
+    // Details available in ARM DDI 0406C.b, B3-1750.
+    // CP15DSB: CRn=c7, opc1=0, CRm=c10, opc2=4, Rt is ignored.
+    mcr(p15, 0, r0, cr7, cr10, 4);
+  }
 }
 
 
 void Assembler::isb(BarrierOption option) {
-  emit(kSpecialCondition | 0x57ff*B12 | 6*B4 | option);
+  if (CpuFeatures::IsSupported(ARMv7)) {
+    // Details available in ARM DDI 0406C.b, A8-389.
+    emit(kSpecialCondition | 0x57ff * B12 | 6 * B4 | option);
+  } else {
+    // Details available in ARM DDI 0406C.b, B3-1750.
+    // CP15ISB: CRn=c7, opc1=0, CRm=c5, opc2=4, Rt is ignored.
+    mcr(p15, 0, r0, cr7, cr5, 4);
+  }
 }
 
 
@@ -2217,13 +2339,8 @@
        crd.code()*B12 | coproc*B8 | (opcode_2 & 7)*B5 | crm.code());
 }
 
-
-void Assembler::cdp2(Coprocessor coproc,
-                     int opcode_1,
-                     CRegister crd,
-                     CRegister crn,
-                     CRegister crm,
-                     int opcode_2) {  // v5 and above
+void Assembler::cdp2(Coprocessor coproc, int opcode_1, CRegister crd,
+                     CRegister crn, CRegister crm, int opcode_2) {
   cdp(coproc, opcode_1, crd, crn, crm, opcode_2, kSpecialCondition);
 }
 
@@ -2240,13 +2357,8 @@
        rd.code()*B12 | coproc*B8 | (opcode_2 & 7)*B5 | B4 | crm.code());
 }
 
-
-void Assembler::mcr2(Coprocessor coproc,
-                     int opcode_1,
-                     Register rd,
-                     CRegister crn,
-                     CRegister crm,
-                     int opcode_2) {  // v5 and above
+void Assembler::mcr2(Coprocessor coproc, int opcode_1, Register rd,
+                     CRegister crn, CRegister crm, int opcode_2) {
   mcr(coproc, opcode_1, rd, crn, crm, opcode_2, kSpecialCondition);
 }
 
@@ -2263,13 +2375,8 @@
        rd.code()*B12 | coproc*B8 | (opcode_2 & 7)*B5 | B4 | crm.code());
 }
 
-
-void Assembler::mrc2(Coprocessor coproc,
-                     int opcode_1,
-                     Register rd,
-                     CRegister crn,
-                     CRegister crm,
-                     int opcode_2) {  // v5 and above
+void Assembler::mrc2(Coprocessor coproc, int opcode_1, Register rd,
+                     CRegister crn, CRegister crm, int opcode_2) {
   mrc(coproc, opcode_1, rd, crn, crm, opcode_2, kSpecialCondition);
 }
 
@@ -2295,20 +2402,13 @@
        coproc*B8 | (option & 255));
 }
 
-
-void Assembler::ldc2(Coprocessor coproc,
-                     CRegister crd,
-                     const MemOperand& src,
-                     LFlag l) {  // v5 and above
+void Assembler::ldc2(Coprocessor coproc, CRegister crd, const MemOperand& src,
+                     LFlag l) {
   ldc(coproc, crd, src, l, kSpecialCondition);
 }
 
-
-void Assembler::ldc2(Coprocessor coproc,
-                     CRegister crd,
-                     Register rn,
-                     int option,
-                     LFlag l) {  // v5 and above
+void Assembler::ldc2(Coprocessor coproc, CRegister crd, Register rn, int option,
+                     LFlag l) {
   ldc(coproc, crd, rn, option, l, kSpecialCondition);
 }
 
@@ -2323,6 +2423,7 @@
   // Instruction details available in ARM DDI 0406C.b, A8-924.
   // cond(31-28) | 1101(27-24)| U(23) | D(22) | 01(21-20) | Rbase(19-16) |
   // Vd(15-12) | 1011(11-8) | offset
+  DCHECK(VfpRegisterIsAvailable(dst));
   int u = 1;
   if (offset < 0) {
     CHECK(offset != kMinInt);
@@ -2353,6 +2454,7 @@
 void Assembler::vldr(const DwVfpRegister dst,
                      const MemOperand& operand,
                      const Condition cond) {
+  DCHECK(VfpRegisterIsAvailable(dst));
   DCHECK(operand.am_ == Offset);
   if (operand.rm().is_valid()) {
     add(ip, operand.rn(),
@@ -2420,6 +2522,7 @@
   // Instruction details available in ARM DDI 0406C.b, A8-1082.
   // cond(31-28) | 1101(27-24)| U(23) | D(22) | 00(21-20) | Rbase(19-16) |
   // Vd(15-12) | 1011(11-8) | (offset/4)
+  DCHECK(VfpRegisterIsAvailable(src));
   int u = 1;
   if (offset < 0) {
     CHECK(offset != kMinInt);
@@ -2450,6 +2553,7 @@
 void Assembler::vstr(const DwVfpRegister src,
                      const MemOperand& operand,
                      const Condition cond) {
+  DCHECK(VfpRegisterIsAvailable(src));
   DCHECK(operand.am_ == Offset);
   if (operand.rm().is_valid()) {
     add(ip, operand.rn(),
@@ -2508,16 +2612,13 @@
   }
 }
 
-
-void  Assembler::vldm(BlockAddrMode am,
-                      Register base,
-                      DwVfpRegister first,
-                      DwVfpRegister last,
-                      Condition cond) {
+void Assembler::vldm(BlockAddrMode am, Register base, DwVfpRegister first,
+                     DwVfpRegister last, Condition cond) {
   // Instruction details available in ARM DDI 0406C.b, A8-922.
   // cond(31-28) | 110(27-25)| PUDW1(24-20) | Rbase(19-16) |
   // first(15-12) | 1011(11-8) | (count * 2)
   DCHECK_LE(first.code(), last.code());
+  DCHECK(VfpRegisterIsAvailable(last));
   DCHECK(am == ia || am == ia_w || am == db_w);
   DCHECK(!base.is(pc));
 
@@ -2529,16 +2630,13 @@
        0xB*B8 | count*2);
 }
 
-
-void  Assembler::vstm(BlockAddrMode am,
-                      Register base,
-                      DwVfpRegister first,
-                      DwVfpRegister last,
-                      Condition cond) {
+void Assembler::vstm(BlockAddrMode am, Register base, DwVfpRegister first,
+                     DwVfpRegister last, Condition cond) {
   // Instruction details available in ARM DDI 0406C.b, A8-1080.
   // cond(31-28) | 110(27-25)| PUDW0(24-20) | Rbase(19-16) |
   // first(15-12) | 1011(11-8) | (count * 2)
   DCHECK_LE(first.code(), last.code());
+  DCHECK(VfpRegisterIsAvailable(last));
   DCHECK(am == ia || am == ia_w || am == db_w);
   DCHECK(!base.is(pc));
 
@@ -2550,11 +2648,8 @@
        0xB*B8 | count*2);
 }
 
-void  Assembler::vldm(BlockAddrMode am,
-                      Register base,
-                      SwVfpRegister first,
-                      SwVfpRegister last,
-                      Condition cond) {
+void Assembler::vldm(BlockAddrMode am, Register base, SwVfpRegister first,
+                     SwVfpRegister last, Condition cond) {
   // Instruction details available in ARM DDI 0406A, A8-626.
   // cond(31-28) | 110(27-25)| PUDW1(24-20) | Rbase(19-16) |
   // first(15-12) | 1010(11-8) | (count/2)
@@ -2569,12 +2664,8 @@
        0xA*B8 | count);
 }
 
-
-void  Assembler::vstm(BlockAddrMode am,
-                      Register base,
-                      SwVfpRegister first,
-                      SwVfpRegister last,
-                      Condition cond) {
+void Assembler::vstm(BlockAddrMode am, Register base, SwVfpRegister first,
+                     SwVfpRegister last, Condition cond) {
   // Instruction details available in ARM DDI 0406A, A8-784.
   // cond(31-28) | 110(27-25)| PUDW0(24-20) | Rbase(19-16) |
   // first(15-12) | 1011(11-8) | (count/2)
@@ -2602,8 +2693,6 @@
 // Only works for little endian floating point formats.
 // We don't support VFP on the mixed endian floating point platform.
 static bool FitsVmovFPImmediate(double d, uint32_t* encoding) {
-  DCHECK(CpuFeatures::IsSupported(VFP3));
-
   // VMOV can accept an immediate of the form:
   //
   //  +/- m * 2^(-n) where 16 <= m <= 31 and 0 <= n <= 7
@@ -2652,7 +2741,8 @@
 
 void Assembler::vmov(const SwVfpRegister dst, float imm) {
   uint32_t enc;
-  if (CpuFeatures::IsSupported(VFP3) && FitsVmovFPImmediate(imm, &enc)) {
+  if (CpuFeatures::IsSupported(VFPv3) && FitsVmovFPImmediate(imm, &enc)) {
+    CpuFeatureScope scope(this, VFPv3);
     // The float can be encoded in the instruction.
     //
     // Sd = immediate
@@ -2672,6 +2762,8 @@
 void Assembler::vmov(const DwVfpRegister dst,
                      double imm,
                      const Register scratch) {
+  DCHECK(VfpRegisterIsAvailable(dst));
+  DCHECK(!scratch.is(ip));
   uint32_t enc;
   // If the embedded constant pool is disabled, we can use the normal, inline
   // constant pool. If the embedded constant pool is enabled (via
@@ -2679,7 +2771,8 @@
   // pointer (pp) is valid.
   bool can_use_pool =
       !FLAG_enable_embedded_constant_pool || is_constant_pool_available();
-  if (CpuFeatures::IsSupported(VFP3) && FitsVmovFPImmediate(imm, &enc)) {
+  if (CpuFeatures::IsSupported(VFPv3) && FitsVmovFPImmediate(imm, &enc)) {
+    CpuFeatureScope scope(this, VFPv3);
     // The double can be encoded in the instruction.
     //
     // Dd = immediate
@@ -2689,7 +2782,9 @@
     int vd, d;
     dst.split_code(&vd, &d);
     emit(al | 0x1D*B23 | d*B22 | 0x3*B20 | vd*B12 | 0x5*B9 | B8 | enc);
-  } else if (FLAG_enable_vldr_imm && can_use_pool) {
+  } else if (CpuFeatures::IsSupported(ARMv7) && FLAG_enable_vldr_imm &&
+             can_use_pool) {
+    CpuFeatureScope scope(this, ARMv7);
     // TODO(jfb) Temporarily turned off until we have constant blinding or
     //           some equivalent mitigation: an attacker can otherwise control
     //           generated data which also happens to be executable, a Very Bad
@@ -2732,6 +2827,7 @@
       vmov(dst, VmovIndexLo, ip);
       if (((lo & 0xffff) == (hi & 0xffff)) &&
           CpuFeatures::IsSupported(ARMv7)) {
+        CpuFeatureScope scope(this, ARMv7);
         movt(ip, hi >> 16);
       } else {
         mov(ip, Operand(hi));
@@ -2767,6 +2863,8 @@
   // Instruction details available in ARM DDI 0406C.b, A8-938.
   // cond(31-28) | 11101(27-23) | D(22) | 11(21-20) | 0000(19-16) | Vd(15-12) |
   // 101(11-9) | sz=1(8) | 0(7) | 1(6) | M(5) | 0(4) | Vm(3-0)
+  DCHECK(VfpRegisterIsAvailable(dst));
+  DCHECK(VfpRegisterIsAvailable(src));
   int vd, d;
   dst.split_code(&vd, &d);
   int vm, m;
@@ -2784,6 +2882,7 @@
   // Instruction details available in ARM DDI 0406C.b, A8-940.
   // cond(31-28) | 1110(27-24) | 0(23) | opc1=0index(22-21) | 0(20) |
   // Vd(19-16) | Rt(15-12) | 1011(11-8) | D(7) | opc2=00(6-5) | 1(4) | 0000(3-0)
+  DCHECK(VfpRegisterIsAvailable(dst));
   DCHECK(index.index == 0 || index.index == 1);
   int vd, d;
   dst.split_code(&vd, &d);
@@ -2800,6 +2899,7 @@
   // Instruction details available in ARM DDI 0406C.b, A8.8.342.
   // cond(31-28) | 1110(27-24) | U=0(23) | opc1=0index(22-21) | 1(20) |
   // Vn(19-16) | Rt(15-12) | 1011(11-8) | N(7) | opc2=00(6-5) | 1(4) | 0000(3-0)
+  DCHECK(VfpRegisterIsAvailable(src));
   DCHECK(index.index == 0 || index.index == 1);
   int vn, n;
   src.split_code(&vn, &n);
@@ -2816,6 +2916,7 @@
   // Instruction details available in ARM DDI 0406C.b, A8-948.
   // cond(31-28) | 1100(27-24)| 010(23-21) | op=0(20) | Rt2(19-16) |
   // Rt(15-12) | 1011(11-8) | 00(7-6) | M(5) | 1(4) | Vm
+  DCHECK(VfpRegisterIsAvailable(dst));
   DCHECK(!src1.is(pc) && !src2.is(pc));
   int vm, m;
   dst.split_code(&vm, &m);
@@ -2832,6 +2933,7 @@
   // Instruction details available in ARM DDI 0406C.b, A8-948.
   // cond(31-28) | 1100(27-24)| 010(23-21) | op=1(20) | Rt2(19-16) |
   // Rt(15-12) | 1011(11-8) | 00(7-6) | M(5) | 1(4) | Vm
+  DCHECK(VfpRegisterIsAvailable(src));
   DCHECK(!dst1.is(pc) && !dst2.is(pc));
   int vm, m;
   src.split_code(&vm, &m);
@@ -2985,6 +3087,7 @@
                              const SwVfpRegister src,
                              VFPConversionMode mode,
                              const Condition cond) {
+  DCHECK(VfpRegisterIsAvailable(dst));
   emit(EncodeVCVT(F64, dst.code(), S32, src.code(), mode, cond));
 }
 
@@ -3001,6 +3104,7 @@
                              const SwVfpRegister src,
                              VFPConversionMode mode,
                              const Condition cond) {
+  DCHECK(VfpRegisterIsAvailable(dst));
   emit(EncodeVCVT(F64, dst.code(), U32, src.code(), mode, cond));
 }
 
@@ -3027,6 +3131,7 @@
                              const DwVfpRegister src,
                              VFPConversionMode mode,
                              const Condition cond) {
+  DCHECK(VfpRegisterIsAvailable(src));
   emit(EncodeVCVT(S32, dst.code(), F64, src.code(), mode, cond));
 }
 
@@ -3035,6 +3140,7 @@
                              const DwVfpRegister src,
                              VFPConversionMode mode,
                              const Condition cond) {
+  DCHECK(VfpRegisterIsAvailable(src));
   emit(EncodeVCVT(U32, dst.code(), F64, src.code(), mode, cond));
 }
 
@@ -3043,6 +3149,7 @@
                              const SwVfpRegister src,
                              VFPConversionMode mode,
                              const Condition cond) {
+  DCHECK(VfpRegisterIsAvailable(dst));
   emit(EncodeVCVT(F64, dst.code(), F32, src.code(), mode, cond));
 }
 
@@ -3051,6 +3158,7 @@
                              const DwVfpRegister src,
                              VFPConversionMode mode,
                              const Condition cond) {
+  DCHECK(VfpRegisterIsAvailable(src));
   emit(EncodeVCVT(F32, dst.code(), F64, src.code(), mode, cond));
 }
 
@@ -3061,8 +3169,9 @@
   // Instruction details available in ARM DDI 0406C.b, A8-874.
   // cond(31-28) | 11101(27-23) | D(22) | 11(21-20) | 1010(19-16) | Vd(15-12) |
   // 101(11-9) | sf=1(8) | sx=1(7) | 1(6) | i(5) | 0(4) | imm4(3-0)
+  DCHECK(IsEnabled(VFPv3));
+  DCHECK(VfpRegisterIsAvailable(dst));
   DCHECK(fraction_bits > 0 && fraction_bits <= 32);
-  DCHECK(CpuFeatures::IsSupported(VFP3));
   int vd, d;
   dst.split_code(&vd, &d);
   int imm5 = 32 - fraction_bits;
@@ -3079,6 +3188,8 @@
   // Instruction details available in ARM DDI 0406C.b, A8-968.
   // cond(31-28) | 11101(27-23) | D(22) | 11(21-20) | 0001(19-16) | Vd(15-12) |
   // 101(11-9) | sz=1(8) | 0(7) | 1(6) | M(5) | 0(4) | Vm(3-0)
+  DCHECK(VfpRegisterIsAvailable(dst));
+  DCHECK(VfpRegisterIsAvailable(src));
   int vd, d;
   dst.split_code(&vd, &d);
   int vm, m;
@@ -3110,6 +3221,8 @@
   // Instruction details available in ARM DDI 0406C.b, A8-524.
   // cond(31-28) | 11101(27-23) | D(22) | 11(21-20) | 0000(19-16) | Vd(15-12) |
   // 101(11-9) | sz=1(8) | 1(7) | 1(6) | M(5) | 0(4) | Vm(3-0)
+  DCHECK(VfpRegisterIsAvailable(dst));
+  DCHECK(VfpRegisterIsAvailable(src));
   int vd, d;
   dst.split_code(&vd, &d);
   int vm, m;
@@ -3142,6 +3255,9 @@
   // Instruction details available in ARM DDI 0406C.b, A8-830.
   // cond(31-28) | 11100(27-23)| D(22) | 11(21-20) | Vn(19-16) |
   // Vd(15-12) | 101(11-9) | sz=1(8) | N(7) | 0(6) | M(5) | 0(4) | Vm(3-0)
+  DCHECK(VfpRegisterIsAvailable(dst));
+  DCHECK(VfpRegisterIsAvailable(src1));
+  DCHECK(VfpRegisterIsAvailable(src2));
   int vd, d;
   dst.split_code(&vd, &d);
   int vn, n;
@@ -3180,6 +3296,9 @@
   // Instruction details available in ARM DDI 0406C.b, A8-1086.
   // cond(31-28) | 11100(27-23)| D(22) | 11(21-20) | Vn(19-16) |
   // Vd(15-12) | 101(11-9) | sz=1(8) | N(7) | 1(6) | M(5) | 0(4) | Vm(3-0)
+  DCHECK(VfpRegisterIsAvailable(dst));
+  DCHECK(VfpRegisterIsAvailable(src1));
+  DCHECK(VfpRegisterIsAvailable(src2));
   int vd, d;
   dst.split_code(&vd, &d);
   int vn, n;
@@ -3218,6 +3337,9 @@
   // Instruction details available in ARM DDI 0406C.b, A8-960.
   // cond(31-28) | 11100(27-23)| D(22) | 10(21-20) | Vn(19-16) |
   // Vd(15-12) | 101(11-9) | sz=1(8) | N(7) | 0(6) | M(5) | 0(4) | Vm(3-0)
+  DCHECK(VfpRegisterIsAvailable(dst));
+  DCHECK(VfpRegisterIsAvailable(src1));
+  DCHECK(VfpRegisterIsAvailable(src2));
   int vd, d;
   dst.split_code(&vd, &d);
   int vn, n;
@@ -3254,6 +3376,9 @@
   // Instruction details available in ARM DDI 0406C.b, A8-932.
   // cond(31-28) | 11100(27-23) | D(22) | 00(21-20) | Vn(19-16) |
   // Vd(15-12) | 101(11-9) | sz=1(8) | N(7) | op=0(6) | M(5) | 0(4) | Vm(3-0)
+  DCHECK(VfpRegisterIsAvailable(dst));
+  DCHECK(VfpRegisterIsAvailable(src1));
+  DCHECK(VfpRegisterIsAvailable(src2));
   int vd, d;
   dst.split_code(&vd, &d);
   int vn, n;
@@ -3288,6 +3413,9 @@
   // Instruction details available in ARM DDI 0406C.b, A8-932.
   // cond(31-28) | 11100(27-23) | D(22) | 00(21-20) | Vn(19-16) |
   // Vd(15-12) | 101(11-9) | sz=1(8) | N(7) | op=1(6) | M(5) | 0(4) | Vm(3-0)
+  DCHECK(VfpRegisterIsAvailable(dst));
+  DCHECK(VfpRegisterIsAvailable(src1));
+  DCHECK(VfpRegisterIsAvailable(src2));
   int vd, d;
   dst.split_code(&vd, &d);
   int vn, n;
@@ -3324,6 +3452,9 @@
   // Instruction details available in ARM DDI 0406C.b, A8-882.
   // cond(31-28) | 11101(27-23)| D(22) | 00(21-20) | Vn(19-16) |
   // Vd(15-12) | 101(11-9) | sz=1(8) | N(7) | 0(6) | M(5) | 0(4) | Vm(3-0)
+  DCHECK(VfpRegisterIsAvailable(dst));
+  DCHECK(VfpRegisterIsAvailable(src1));
+  DCHECK(VfpRegisterIsAvailable(src2));
   int vd, d;
   dst.split_code(&vd, &d);
   int vn, n;
@@ -3360,6 +3491,8 @@
   // Instruction details available in ARM DDI 0406C.b, A8-864.
   // cond(31-28) | 11101(27-23)| D(22) | 11(21-20) | 0100(19-16) |
   // Vd(15-12) | 101(11-9) | sz=1(8) | E=0(7) | 1(6) | M(5) | 0(4) | Vm(3-0)
+  DCHECK(VfpRegisterIsAvailable(src1));
+  DCHECK(VfpRegisterIsAvailable(src2));
   int vd, d;
   src1.split_code(&vd, &d);
   int vm, m;
@@ -3391,6 +3524,7 @@
   // Instruction details available in ARM DDI 0406C.b, A8-864.
   // cond(31-28) | 11101(27-23)| D(22) | 11(21-20) | 0101(19-16) |
   // Vd(15-12) | 101(11-9) | sz=1(8) | E=0(7) | 1(6) | 0(5) | 0(4) | 0000(3-0)
+  DCHECK(VfpRegisterIsAvailable(src1));
   DCHECK(src2 == 0.0);
   int vd, d;
   src1.split_code(&vd, &d);
@@ -3411,12 +3545,76 @@
        0x5 * B9 | B6);
 }
 
+void Assembler::vmaxnm(const DwVfpRegister dst, const DwVfpRegister src1,
+                       const DwVfpRegister src2) {
+  // kSpecialCondition(31-28) | 11101(27-23) | D(22) | 00(21-20) | Vn(19-16) |
+  // Vd(15-12) | 101(11-9) | sz=1(8) | N(7) | 0(6) | M(5) | 0(4) | Vm(3-0)
+  DCHECK(IsEnabled(ARMv8));
+  int vd, d;
+  dst.split_code(&vd, &d);
+  int vn, n;
+  src1.split_code(&vn, &n);
+  int vm, m;
+  src2.split_code(&vm, &m);
+
+  emit(kSpecialCondition | 0x1D * B23 | d * B22 | vn * B16 | vd * B12 |
+       0x5 * B9 | B8 | n * B7 | m * B5 | vm);
+}
+
+void Assembler::vmaxnm(const SwVfpRegister dst, const SwVfpRegister src1,
+                       const SwVfpRegister src2) {
+  // kSpecialCondition(31-28) | 11101(27-23) | D(22) | 00(21-20) | Vn(19-16) |
+  // Vd(15-12) | 101(11-9) | sz=0(8) | N(7) | 0(6) | M(5) | 0(4) | Vm(3-0)
+  DCHECK(IsEnabled(ARMv8));
+  int vd, d;
+  dst.split_code(&vd, &d);
+  int vn, n;
+  src1.split_code(&vn, &n);
+  int vm, m;
+  src2.split_code(&vm, &m);
+
+  emit(kSpecialCondition | 0x1D * B23 | d * B22 | vn * B16 | vd * B12 |
+       0x5 * B9 | n * B7 | m * B5 | vm);
+}
+
+void Assembler::vminnm(const DwVfpRegister dst, const DwVfpRegister src1,
+                       const DwVfpRegister src2) {
+  // kSpecialCondition(31-28) | 11101(27-23) | D(22) | 00(21-20) | Vn(19-16) |
+  // Vd(15-12) | 101(11-9) | sz=1(8) | N(7) | 1(6) | M(5) | 0(4) | Vm(3-0)
+  DCHECK(IsEnabled(ARMv8));
+  int vd, d;
+  dst.split_code(&vd, &d);
+  int vn, n;
+  src1.split_code(&vn, &n);
+  int vm, m;
+  src2.split_code(&vm, &m);
+
+  emit(kSpecialCondition | 0x1D * B23 | d * B22 | vn * B16 | vd * B12 |
+       0x5 * B9 | B8 | n * B7 | B6 | m * B5 | vm);
+}
+
+void Assembler::vminnm(const SwVfpRegister dst, const SwVfpRegister src1,
+                       const SwVfpRegister src2) {
+  // kSpecialCondition(31-28) | 11101(27-23) | D(22) | 00(21-20) | Vn(19-16) |
+  // Vd(15-12) | 101(11-9) | sz=0(8) | N(7) | 1(6) | M(5) | 0(4) | Vm(3-0)
+  DCHECK(IsEnabled(ARMv8));
+  int vd, d;
+  dst.split_code(&vd, &d);
+  int vn, n;
+  src1.split_code(&vn, &n);
+  int vm, m;
+  src2.split_code(&vm, &m);
+
+  emit(kSpecialCondition | 0x1D * B23 | d * B22 | vn * B16 | vd * B12 |
+       0x5 * B9 | n * B7 | B6 | m * B5 | vm);
+}
+
 void Assembler::vsel(Condition cond, const DwVfpRegister dst,
                      const DwVfpRegister src1, const DwVfpRegister src2) {
   // cond=kSpecialCondition(31-28) | 11100(27-23) | D(22) |
   // vsel_cond=XX(21-20) | Vn(19-16) | Vd(15-12) | 101(11-9) | sz=1(8) | N(7) |
   // 0(6) | M(5) | 0(4) | Vm(3-0)
-  DCHECK(CpuFeatures::IsSupported(ARMv8));
+  DCHECK(IsEnabled(ARMv8));
   int vd, d;
   dst.split_code(&vd, &d);
   int vn, n;
@@ -3448,7 +3646,7 @@
   // cond=kSpecialCondition(31-28) | 11100(27-23) | D(22) |
   // vsel_cond=XX(21-20) | Vn(19-16) | Vd(15-12) | 101(11-9) | sz=0(8) | N(7) |
   // 0(6) | M(5) | 0(4) | Vm(3-0)
-  DCHECK(CpuFeatures::IsSupported(ARMv8));
+  DCHECK(IsEnabled(ARMv8));
   int vd, d;
   dst.split_code(&vd, &d);
   int vn, n;
@@ -3481,6 +3679,8 @@
   // Instruction details available in ARM DDI 0406C.b, A8-1058.
   // cond(31-28) | 11101(27-23)| D(22) | 11(21-20) | 0001(19-16) |
   // Vd(15-12) | 101(11-9) | sz=1(8) | 11(7-6) | M(5) | 0(4) | Vm(3-0)
+  DCHECK(VfpRegisterIsAvailable(dst));
+  DCHECK(VfpRegisterIsAvailable(src));
   int vd, d;
   dst.split_code(&vd, &d);
   int vm, m;
@@ -3524,7 +3724,7 @@
   // cond=kSpecialCondition(31-28) | 11101(27-23)| D(22) | 11(21-20) |
   // 10(19-18) | RM=00(17-16) |  Vd(15-12) | 101(11-9) | sz=0(8) | 01(7-6) |
   // M(5) | 0(4) | Vm(3-0)
-  DCHECK(CpuFeatures::IsSupported(ARMv8));
+  DCHECK(IsEnabled(ARMv8));
   int vd, d;
   dst.split_code(&vd, &d);
   int vm, m;
@@ -3538,7 +3738,7 @@
   // cond=kSpecialCondition(31-28) | 11101(27-23)| D(22) | 11(21-20) |
   // 10(19-18) | RM=00(17-16) |  Vd(15-12) | 101(11-9) | sz=1(8) | 01(7-6) |
   // M(5) | 0(4) | Vm(3-0)
-  DCHECK(CpuFeatures::IsSupported(ARMv8));
+  DCHECK(IsEnabled(ARMv8));
   int vd, d;
   dst.split_code(&vd, &d);
   int vm, m;
@@ -3552,7 +3752,7 @@
   // cond=kSpecialCondition(31-28) | 11101(27-23)| D(22) | 11(21-20) |
   // 10(19-18) | RM=01(17-16) |  Vd(15-12) | 101(11-9) | sz=0(8) | 01(7-6) |
   // M(5) | 0(4) | Vm(3-0)
-  DCHECK(CpuFeatures::IsSupported(ARMv8));
+  DCHECK(IsEnabled(ARMv8));
   int vd, d;
   dst.split_code(&vd, &d);
   int vm, m;
@@ -3566,7 +3766,7 @@
   // cond=kSpecialCondition(31-28) | 11101(27-23)| D(22) | 11(21-20) |
   // 10(19-18) | RM=01(17-16) |  Vd(15-12) | 101(11-9) | sz=1(8) | 01(7-6) |
   // M(5) | 0(4) | Vm(3-0)
-  DCHECK(CpuFeatures::IsSupported(ARMv8));
+  DCHECK(IsEnabled(ARMv8));
   int vd, d;
   dst.split_code(&vd, &d);
   int vm, m;
@@ -3580,7 +3780,7 @@
   // cond=kSpecialCondition(31-28) | 11101(27-23)| D(22) | 11(21-20) |
   // 10(19-18) | RM=10(17-16) |  Vd(15-12) | 101(11-9) | sz=0(8) | 01(7-6) |
   // M(5) | 0(4) | Vm(3-0)
-  DCHECK(CpuFeatures::IsSupported(ARMv8));
+  DCHECK(IsEnabled(ARMv8));
   int vd, d;
   dst.split_code(&vd, &d);
   int vm, m;
@@ -3594,7 +3794,7 @@
   // cond=kSpecialCondition(31-28) | 11101(27-23)| D(22) | 11(21-20) |
   // 10(19-18) | RM=10(17-16) |  Vd(15-12) | 101(11-9) | sz=1(8) | 01(7-6) |
   // M(5) | 0(4) | Vm(3-0)
-  DCHECK(CpuFeatures::IsSupported(ARMv8));
+  DCHECK(IsEnabled(ARMv8));
   int vd, d;
   dst.split_code(&vd, &d);
   int vm, m;
@@ -3608,7 +3808,7 @@
   // cond=kSpecialCondition(31-28) | 11101(27-23)| D(22) | 11(21-20) |
   // 10(19-18) | RM=11(17-16) |  Vd(15-12) | 101(11-9) | sz=0(8) | 01(7-6) |
   // M(5) | 0(4) | Vm(3-0)
-  DCHECK(CpuFeatures::IsSupported(ARMv8));
+  DCHECK(IsEnabled(ARMv8));
   int vd, d;
   dst.split_code(&vd, &d);
   int vm, m;
@@ -3622,7 +3822,7 @@
   // cond=kSpecialCondition(31-28) | 11101(27-23)| D(22) | 11(21-20) |
   // 10(19-18) | RM=11(17-16) |  Vd(15-12) | 101(11-9) | sz=1(8) | 01(7-6) |
   // M(5) | 0(4) | Vm(3-0)
-  DCHECK(CpuFeatures::IsSupported(ARMv8));
+  DCHECK(IsEnabled(ARMv8));
   int vd, d;
   dst.split_code(&vd, &d);
   int vm, m;
@@ -3636,7 +3836,7 @@
                        const Condition cond) {
   // cond(31-28) | 11101(27-23)| D(22) | 11(21-20) | 011(19-17) | 0(16) |
   // Vd(15-12) | 101(11-9) | sz=0(8) | op=1(7) | 1(6) | M(5) | 0(4) | Vm(3-0)
-  DCHECK(CpuFeatures::IsSupported(ARMv8));
+  DCHECK(IsEnabled(ARMv8));
   int vd, d;
   dst.split_code(&vd, &d);
   int vm, m;
@@ -3650,7 +3850,7 @@
                        const Condition cond) {
   // cond(31-28) | 11101(27-23)| D(22) | 11(21-20) | 011(19-17) | 0(16) |
   // Vd(15-12) | 101(11-9) | sz=1(8) | op=1(7) | 1(6) | M(5) | 0(4) | Vm(3-0)
-  DCHECK(CpuFeatures::IsSupported(ARMv8));
+  DCHECK(IsEnabled(ARMv8));
   int vd, d;
   dst.split_code(&vd, &d);
   int vm, m;
@@ -3668,7 +3868,7 @@
   // Instruction details available in ARM DDI 0406C.b, A8.8.320.
   // 1111(31-28) | 01000(27-23) | D(22) | 10(21-20) | Rn(19-16) |
   // Vd(15-12) | type(11-8) | size(7-6) | align(5-4) | Rm(3-0)
-  DCHECK(CpuFeatures::IsSupported(NEON));
+  DCHECK(IsEnabled(NEON));
   int vd, d;
   dst.base().split_code(&vd, &d);
   emit(0xFU*B28 | 4*B24 | d*B22 | 2*B20 | src.rn().code()*B16 | vd*B12 |
@@ -3682,7 +3882,7 @@
   // Instruction details available in ARM DDI 0406C.b, A8.8.404.
   // 1111(31-28) | 01000(27-23) | D(22) | 00(21-20) | Rn(19-16) |
   // Vd(15-12) | type(11-8) | size(7-6) | align(5-4) | Rm(3-0)
-  DCHECK(CpuFeatures::IsSupported(NEON));
+  DCHECK(IsEnabled(NEON));
   int vd, d;
   src.base().split_code(&vd, &d);
   emit(0xFU*B28 | 4*B24 | d*B22 | dst.rn().code()*B16 | vd*B12 | src.type()*B8 |
@@ -3694,7 +3894,7 @@
   // Instruction details available in ARM DDI 0406C.b, A8.8.346.
   // 1111(31-28) | 001(27-25) | U(24) | 1(23) | D(22) | imm3(21-19) |
   // 000(18-16) | Vd(15-12) | 101000(11-6) | M(5) | 1(4) | Vm(3-0)
-  DCHECK(CpuFeatures::IsSupported(NEON));
+  DCHECK(IsEnabled(NEON));
   int vd, d;
   dst.split_code(&vd, &d);
   int vm, m;
@@ -3703,6 +3903,29 @@
         (dt & NeonDataTypeSizeMask)*B19 | vd*B12 | 0xA*B8 | m*B5 | B4 | vm);
 }
 
+void Assembler::vswp(DwVfpRegister srcdst0, DwVfpRegister srcdst1) {
+  DCHECK(VfpRegisterIsAvailable(srcdst0));
+  DCHECK(VfpRegisterIsAvailable(srcdst1));
+  DCHECK(!srcdst0.is(kScratchDoubleReg));
+  DCHECK(!srcdst1.is(kScratchDoubleReg));
+
+  if (srcdst0.is(srcdst1)) return;  // Swapping aliased registers emits nothing.
+
+  if (CpuFeatures::IsSupported(NEON)) {
+    // Instruction details available in ARM DDI 0406C.b, A8.8.418.
+    // 1111(31-28) | 00111(27-23) | D(22) | 110010(21-16) |
+    // Vd(15-12) | 000000(11-6) | M(5) | 0(4) | Vm(3-0)
+    int vd, d;
+    srcdst0.split_code(&vd, &d);
+    int vm, m;
+    srcdst1.split_code(&vm, &m);
+    emit(0xFU * B28 | 7 * B23 | d * B22 | 0x32 * B16 | vd * B12 | m * B5 | vm);
+  } else {
+    vmov(kScratchDoubleReg, srcdst0);
+    vmov(srcdst0, srcdst1);
+    vmov(srcdst1, kScratchDoubleReg);
+  }
+}
 
 // Pseudo instructions.
 void Assembler::nop(int type) {
@@ -4208,6 +4431,7 @@
   Instr instr = instr_at(pc);
   if (access == ConstantPoolEntry::OVERFLOWED) {
     if (CpuFeatures::IsSupported(ARMv7)) {
+      CpuFeatureScope scope(this, ARMv7);
       // Instructions to patch must be 'movw rd, [#0]' and 'movt rd, [#0].
       Instr next_instr = instr_at(pc + kInstrSize);
       DCHECK((IsMovW(instr) && Instruction::ImmedMovwMovtValue(instr) == 0));
diff --git a/src/arm/assembler-arm.h b/src/arm/assembler-arm.h
index 0b9cd91..e5448f7 100644
--- a/src/arm/assembler-arm.h
+++ b/src/arm/assembler-arm.h
@@ -1022,7 +1022,8 @@
   void bkpt(uint32_t imm16);  // v5 and above
   void svc(uint32_t imm24, Condition cond = al);
 
-  // Synchronization instructions
+  // Synchronization instructions.
+  // On ARMv6, an equivalent CP15 operation will be used.
   void dmb(BarrierOption option);
   void dsb(BarrierOption option);
   void isb(BarrierOption option);
@@ -1258,6 +1259,19 @@
   void vcmp(const SwVfpRegister src1, const float src2,
             const Condition cond = al);
 
+  void vmaxnm(const DwVfpRegister dst,
+              const DwVfpRegister src1,
+              const DwVfpRegister src2);
+  void vmaxnm(const SwVfpRegister dst,
+              const SwVfpRegister src1,
+              const SwVfpRegister src2);
+  void vminnm(const DwVfpRegister dst,
+              const DwVfpRegister src1,
+              const DwVfpRegister src2);
+  void vminnm(const SwVfpRegister dst,
+              const SwVfpRegister src1,
+              const SwVfpRegister src2);
+
   // VSEL supports cond in {eq, ne, ge, lt, gt, le, vs, vc}.
   void vsel(const Condition cond,
             const DwVfpRegister dst,
@@ -1289,8 +1303,8 @@
               const Condition cond = al);
 
   // Support for NEON.
-  // All these APIs support D0 to D31 and Q0 to Q15.
 
+  // All these APIs support D0 to D31 and Q0 to Q15.
   void vld1(NeonSize size,
             const NeonListOperand& dst,
             const NeonMemOperand& src);
@@ -1299,6 +1313,9 @@
             const NeonMemOperand& dst);
   void vmovl(NeonDataType dt, QwNeonRegister dst, DwVfpRegister src);
 
+  // Currently, vswp supports only D0 to D31.
+  void vswp(DwVfpRegister srcdst0, DwVfpRegister srcdst1);
+
   // Pseudo instructions
 
   // Different nop operations are used by the code generator to detect certain
@@ -1586,6 +1603,12 @@
            (pc_offset() < no_const_pool_before_);
   }
 
+  bool VfpRegisterIsAvailable(DwVfpRegister reg) {
+    DCHECK(reg.is_valid());
+    return IsEnabled(VFP32DREGS) ||
+           (reg.reg_code < LowDwVfpRegister::kMaxNumLowRegisters);
+  }
+
  private:
   int next_buffer_check_;  // pc offset of next buffer check
 
diff --git a/src/arm/code-stubs-arm.cc b/src/arm/code-stubs-arm.cc
index 264f24f..de6803f 100644
--- a/src/arm/code-stubs-arm.cc
+++ b/src/arm/code-stubs-arm.cc
@@ -553,17 +553,14 @@
   // 3) Fall through to both_loaded_as_doubles.
   // 4) Jump to lhs_not_nan.
   // In cases 3 and 4 we have found out we were dealing with a number-number
-  // comparison.  If VFP3 is supported the double values of the numbers have
-  // been loaded into d7 and d6.  Otherwise, the double values have been loaded
-  // into r0, r1, r2, and r3.
+  // comparison. The double values of the numbers have been loaded into d7 (lhs)
+  // and d6 (rhs).
   EmitSmiNonsmiComparison(masm, lhs, rhs, &lhs_not_nan, &slow, strict());
 
   __ bind(&both_loaded_as_doubles);
-  // The arguments have been converted to doubles and stored in d6 and d7, if
-  // VFP3 is supported, or in r0, r1, r2, and r3.
+  // The arguments have been converted to doubles and stored in d6 and d7.
   __ bind(&lhs_not_nan);
   Label no_nan;
-  // ARMv7 VFP3 instructions to implement double precision comparison.
   __ VFPCompareAndSetFlags(d7, d6);
   Label nan;
   __ b(vs, &nan);
@@ -1646,7 +1643,6 @@
   // r2 : feedback vector
   // r3 : slot in feedback vector (Smi)
   Label initialize, done, miss, megamorphic, not_array_function;
-  Label done_initialize_count, done_increment_count;
 
   DCHECK_EQ(*TypeFeedbackVector::MegamorphicSentinel(masm->isolate()),
             masm->isolate()->heap()->megamorphic_symbol());
@@ -1666,7 +1662,7 @@
   Register weak_value = r9;
   __ ldr(weak_value, FieldMemOperand(r5, WeakCell::kValueOffset));
   __ cmp(r1, weak_value);
-  __ b(eq, &done_increment_count);
+  __ b(eq, &done);
   __ CompareRoot(r5, Heap::kmegamorphic_symbolRootIndex);
   __ b(eq, &done);
   __ ldr(feedback_map, FieldMemOperand(r5, HeapObject::kMapOffset));
@@ -1689,7 +1685,7 @@
   __ LoadNativeContextSlot(Context::ARRAY_FUNCTION_INDEX, r5);
   __ cmp(r1, r5);
   __ b(ne, &megamorphic);
-  __ jmp(&done_increment_count);
+  __ jmp(&done);
 
   __ bind(&miss);
 
@@ -1718,32 +1714,22 @@
   // slot.
   CreateAllocationSiteStub create_stub(masm->isolate());
   CallStubInRecordCallTarget(masm, &create_stub);
-  __ b(&done_initialize_count);
+  __ b(&done);
 
   __ bind(&not_array_function);
   CreateWeakCellStub weak_cell_stub(masm->isolate());
   CallStubInRecordCallTarget(masm, &weak_cell_stub);
 
-  __ bind(&done_initialize_count);
-  // Initialize the call counter.
-  __ Move(r5, Operand(Smi::FromInt(1)));
-  __ add(r4, r2, Operand::PointerOffsetFromSmiKey(r3));
-  __ str(r5, FieldMemOperand(r4, FixedArray::kHeaderSize + kPointerSize));
-  __ b(&done);
+  __ bind(&done);
 
-  __ bind(&done_increment_count);
-
-  // Increment the call count for monomorphic function calls.
+  // Increment the call count for all function calls.
   __ add(r5, r2, Operand::PointerOffsetFromSmiKey(r3));
   __ add(r5, r5, Operand(FixedArray::kHeaderSize + kPointerSize));
   __ ldr(r4, FieldMemOperand(r5, 0));
   __ add(r4, r4, Operand(Smi::FromInt(1)));
   __ str(r4, FieldMemOperand(r5, 0));
-
-  __ bind(&done);
 }
 
-
 void CallConstructStub::Generate(MacroAssembler* masm) {
   // r0 : number of arguments
   // r1 : the function to call
@@ -1785,6 +1771,17 @@
   __ Jump(isolate()->builtins()->Construct(), RelocInfo::CODE_TARGET);
 }
 
+// Note: feedback_vector and slot are clobbered after the call.
+static void IncrementCallCount(MacroAssembler* masm, Register feedback_vector,
+                               Register slot) {
+  __ add(feedback_vector, feedback_vector,
+         Operand::PointerOffsetFromSmiKey(slot));
+  __ add(feedback_vector, feedback_vector,
+         Operand(FixedArray::kHeaderSize + kPointerSize));
+  __ ldr(slot, FieldMemOperand(feedback_vector, 0));
+  __ add(slot, slot, Operand(Smi::FromInt(1)));
+  __ str(slot, FieldMemOperand(feedback_vector, 0));
+}
 
 void CallICStub::HandleArrayCase(MacroAssembler* masm, Label* miss) {
   // r1 - function
@@ -1798,11 +1795,7 @@
   __ mov(r0, Operand(arg_count()));
 
   // Increment the call count for monomorphic function calls.
-  __ add(r2, r2, Operand::PointerOffsetFromSmiKey(r3));
-  __ add(r2, r2, Operand(FixedArray::kHeaderSize + kPointerSize));
-  __ ldr(r3, FieldMemOperand(r2, 0));
-  __ add(r3, r3, Operand(Smi::FromInt(1)));
-  __ str(r3, FieldMemOperand(r2, 0));
+  IncrementCallCount(masm, r2, r3);
 
   __ mov(r2, r4);
   __ mov(r3, r1);
@@ -1815,7 +1808,7 @@
   // r1 - function
   // r3 - slot id (Smi)
   // r2 - vector
-  Label extra_checks_or_miss, call, call_function;
+  Label extra_checks_or_miss, call, call_function, call_count_incremented;
   int argc = arg_count();
   ParameterCount actual(argc);
 
@@ -1845,14 +1838,11 @@
   // convincing us that we have a monomorphic JSFunction.
   __ JumpIfSmi(r1, &extra_checks_or_miss);
 
-  // Increment the call count for monomorphic function calls.
-  __ add(r2, r2, Operand::PointerOffsetFromSmiKey(r3));
-  __ add(r2, r2, Operand(FixedArray::kHeaderSize + kPointerSize));
-  __ ldr(r3, FieldMemOperand(r2, 0));
-  __ add(r3, r3, Operand(Smi::FromInt(1)));
-  __ str(r3, FieldMemOperand(r2, 0));
-
   __ bind(&call_function);
+
+  // Increment the call count for monomorphic function calls.
+  IncrementCallCount(masm, r2, r3);
+
   __ mov(r0, Operand(argc));
   __ Jump(masm->isolate()->builtins()->CallFunction(convert_mode(),
                                                     tail_call_mode()),
@@ -1893,6 +1883,11 @@
   __ str(ip, FieldMemOperand(r4, FixedArray::kHeaderSize));
 
   __ bind(&call);
+
+  // Increment the call count for megamorphic function calls.
+  IncrementCallCount(masm, r2, r3);
+
+  __ bind(&call_count_incremented);
   __ mov(r0, Operand(argc));
   __ Jump(masm->isolate()->builtins()->Call(convert_mode(), tail_call_mode()),
           RelocInfo::CODE_TARGET);
@@ -1919,11 +1914,6 @@
   __ cmp(r4, ip);
   __ b(ne, &miss);
 
-  // Initialize the call counter.
-  __ Move(r5, Operand(Smi::FromInt(1)));
-  __ add(r4, r2, Operand::PointerOffsetFromSmiKey(r3));
-  __ str(r5, FieldMemOperand(r4, FixedArray::kHeaderSize + kPointerSize));
-
   // Store the function. Use a stub since we need a frame for allocation.
   // r2 - vector
   // r3 - slot
@@ -1931,9 +1921,13 @@
   {
     FrameScope scope(masm, StackFrame::INTERNAL);
     CreateWeakCellStub create_stub(masm->isolate());
+    __ Push(r2);
+    __ Push(r3);
     __ Push(cp, r1);
     __ CallStub(&create_stub);
     __ Pop(cp, r1);
+    __ Pop(r3);
+    __ Pop(r2);
   }
 
   __ jmp(&call_function);
@@ -1943,7 +1937,7 @@
   __ bind(&miss);
   GenerateMiss(masm);
 
-  __ jmp(&call);
+  __ jmp(&call_count_incremented);
 }
 
 
@@ -2131,291 +2125,6 @@
 }
 
 
-void SubStringStub::Generate(MacroAssembler* masm) {
-  Label runtime;
-
-  // Stack frame on entry.
-  //  lr: return address
-  //  sp[0]: to
-  //  sp[4]: from
-  //  sp[8]: string
-
-  // This stub is called from the native-call %_SubString(...), so
-  // nothing can be assumed about the arguments. It is tested that:
-  //  "string" is a sequential string,
-  //  both "from" and "to" are smis, and
-  //  0 <= from <= to <= string.length.
-  // If any of these assumptions fail, we call the runtime system.
-
-  const int kToOffset = 0 * kPointerSize;
-  const int kFromOffset = 1 * kPointerSize;
-  const int kStringOffset = 2 * kPointerSize;
-
-  __ Ldrd(r2, r3, MemOperand(sp, kToOffset));
-  STATIC_ASSERT(kFromOffset == kToOffset + 4);
-  STATIC_ASSERT(kSmiTag == 0);
-  STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1);
-
-  // Arithmetic shift right by one un-smi-tags. In this case we rotate right
-  // instead because we bail out on non-smi values: ROR and ASR are equivalent
-  // for smis but they set the flags in a way that's easier to optimize.
-  __ mov(r2, Operand(r2, ROR, 1), SetCC);
-  __ mov(r3, Operand(r3, ROR, 1), SetCC, cc);
-  // If either to or from had the smi tag bit set, then C is set now, and N
-  // has the same value: we rotated by 1, so the bottom bit is now the top bit.
-  // We want to bailout to runtime here if From is negative.  In that case, the
-  // next instruction is not executed and we fall through to bailing out to
-  // runtime.
-  // Executed if both r2 and r3 are untagged integers.
-  __ sub(r2, r2, Operand(r3), SetCC, cc);
-  // One of the above un-smis or the above SUB could have set N==1.
-  __ b(mi, &runtime);  // Either "from" or "to" is not an smi, or from > to.
-
-  // Make sure first argument is a string.
-  __ ldr(r0, MemOperand(sp, kStringOffset));
-  __ JumpIfSmi(r0, &runtime);
-  Condition is_string = masm->IsObjectStringType(r0, r1);
-  __ b(NegateCondition(is_string), &runtime);
-
-  Label single_char;
-  __ cmp(r2, Operand(1));
-  __ b(eq, &single_char);
-
-  // Short-cut for the case of trivial substring.
-  Label return_r0;
-  // r0: original string
-  // r2: result string length
-  __ ldr(r4, FieldMemOperand(r0, String::kLengthOffset));
-  __ cmp(r2, Operand(r4, ASR, 1));
-  // Return original string.
-  __ b(eq, &return_r0);
-  // Longer than original string's length or negative: unsafe arguments.
-  __ b(hi, &runtime);
-  // Shorter than original string's length: an actual substring.
-
-  // Deal with different string types: update the index if necessary
-  // and put the underlying string into r5.
-  // r0: original string
-  // r1: instance type
-  // r2: length
-  // r3: from index (untagged)
-  Label underlying_unpacked, sliced_string, seq_or_external_string;
-  // If the string is not indirect, it can only be sequential or external.
-  STATIC_ASSERT(kIsIndirectStringMask == (kSlicedStringTag & kConsStringTag));
-  STATIC_ASSERT(kIsIndirectStringMask != 0);
-  __ tst(r1, Operand(kIsIndirectStringMask));
-  __ b(eq, &seq_or_external_string);
-
-  __ tst(r1, Operand(kSlicedNotConsMask));
-  __ b(ne, &sliced_string);
-  // Cons string.  Check whether it is flat, then fetch first part.
-  __ ldr(r5, FieldMemOperand(r0, ConsString::kSecondOffset));
-  __ CompareRoot(r5, Heap::kempty_stringRootIndex);
-  __ b(ne, &runtime);
-  __ ldr(r5, FieldMemOperand(r0, ConsString::kFirstOffset));
-  // Update instance type.
-  __ ldr(r1, FieldMemOperand(r5, HeapObject::kMapOffset));
-  __ ldrb(r1, FieldMemOperand(r1, Map::kInstanceTypeOffset));
-  __ jmp(&underlying_unpacked);
-
-  __ bind(&sliced_string);
-  // Sliced string.  Fetch parent and correct start index by offset.
-  __ ldr(r5, FieldMemOperand(r0, SlicedString::kParentOffset));
-  __ ldr(r4, FieldMemOperand(r0, SlicedString::kOffsetOffset));
-  __ add(r3, r3, Operand(r4, ASR, 1));  // Add offset to index.
-  // Update instance type.
-  __ ldr(r1, FieldMemOperand(r5, HeapObject::kMapOffset));
-  __ ldrb(r1, FieldMemOperand(r1, Map::kInstanceTypeOffset));
-  __ jmp(&underlying_unpacked);
-
-  __ bind(&seq_or_external_string);
-  // Sequential or external string.  Just move string to the expected register.
-  __ mov(r5, r0);
-
-  __ bind(&underlying_unpacked);
-
-  if (FLAG_string_slices) {
-    Label copy_routine;
-    // r5: underlying subject string
-    // r1: instance type of underlying subject string
-    // r2: length
-    // r3: adjusted start index (untagged)
-    __ cmp(r2, Operand(SlicedString::kMinLength));
-    // Short slice.  Copy instead of slicing.
-    __ b(lt, &copy_routine);
-    // Allocate new sliced string.  At this point we do not reload the instance
-    // type including the string encoding because we simply rely on the info
-    // provided by the original string.  It does not matter if the original
-    // string's encoding is wrong because we always have to recheck encoding of
-    // the newly created string's parent anyways due to externalized strings.
-    Label two_byte_slice, set_slice_header;
-    STATIC_ASSERT((kStringEncodingMask & kOneByteStringTag) != 0);
-    STATIC_ASSERT((kStringEncodingMask & kTwoByteStringTag) == 0);
-    __ tst(r1, Operand(kStringEncodingMask));
-    __ b(eq, &two_byte_slice);
-    __ AllocateOneByteSlicedString(r0, r2, r6, r4, &runtime);
-    __ jmp(&set_slice_header);
-    __ bind(&two_byte_slice);
-    __ AllocateTwoByteSlicedString(r0, r2, r6, r4, &runtime);
-    __ bind(&set_slice_header);
-    __ mov(r3, Operand(r3, LSL, 1));
-    __ str(r5, FieldMemOperand(r0, SlicedString::kParentOffset));
-    __ str(r3, FieldMemOperand(r0, SlicedString::kOffsetOffset));
-    __ jmp(&return_r0);
-
-    __ bind(&copy_routine);
-  }
-
-  // r5: underlying subject string
-  // r1: instance type of underlying subject string
-  // r2: length
-  // r3: adjusted start index (untagged)
-  Label two_byte_sequential, sequential_string, allocate_result;
-  STATIC_ASSERT(kExternalStringTag != 0);
-  STATIC_ASSERT(kSeqStringTag == 0);
-  __ tst(r1, Operand(kExternalStringTag));
-  __ b(eq, &sequential_string);
-
-  // Handle external string.
-  // Rule out short external strings.
-  STATIC_ASSERT(kShortExternalStringTag != 0);
-  __ tst(r1, Operand(kShortExternalStringTag));
-  __ b(ne, &runtime);
-  __ ldr(r5, FieldMemOperand(r5, ExternalString::kResourceDataOffset));
-  // r5 already points to the first character of underlying string.
-  __ jmp(&allocate_result);
-
-  __ bind(&sequential_string);
-  // Locate first character of underlying subject string.
-  STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqOneByteString::kHeaderSize);
-  __ add(r5, r5, Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
-
-  __ bind(&allocate_result);
-  // Sequential acii string.  Allocate the result.
-  STATIC_ASSERT((kOneByteStringTag & kStringEncodingMask) != 0);
-  __ tst(r1, Operand(kStringEncodingMask));
-  __ b(eq, &two_byte_sequential);
-
-  // Allocate and copy the resulting one-byte string.
-  __ AllocateOneByteString(r0, r2, r4, r6, r1, &runtime);
-
-  // Locate first character of substring to copy.
-  __ add(r5, r5, r3);
-  // Locate first character of result.
-  __ add(r1, r0, Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
-
-  // r0: result string
-  // r1: first character of result string
-  // r2: result string length
-  // r5: first character of substring to copy
-  STATIC_ASSERT((SeqOneByteString::kHeaderSize & kObjectAlignmentMask) == 0);
-  StringHelper::GenerateCopyCharacters(
-      masm, r1, r5, r2, r3, String::ONE_BYTE_ENCODING);
-  __ jmp(&return_r0);
-
-  // Allocate and copy the resulting two-byte string.
-  __ bind(&two_byte_sequential);
-  __ AllocateTwoByteString(r0, r2, r4, r6, r1, &runtime);
-
-  // Locate first character of substring to copy.
-  STATIC_ASSERT(kSmiTagSize == 1 && kSmiTag == 0);
-  __ add(r5, r5, Operand(r3, LSL, 1));
-  // Locate first character of result.
-  __ add(r1, r0, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
-
-  // r0: result string.
-  // r1: first character of result.
-  // r2: result length.
-  // r5: first character of substring to copy.
-  STATIC_ASSERT((SeqTwoByteString::kHeaderSize & kObjectAlignmentMask) == 0);
-  StringHelper::GenerateCopyCharacters(
-      masm, r1, r5, r2, r3, String::TWO_BYTE_ENCODING);
-
-  __ bind(&return_r0);
-  Counters* counters = isolate()->counters();
-  __ IncrementCounter(counters->sub_string_native(), 1, r3, r4);
-  __ Drop(3);
-  __ Ret();
-
-  // Just jump to runtime to create the sub string.
-  __ bind(&runtime);
-  __ TailCallRuntime(Runtime::kSubString);
-
-  __ bind(&single_char);
-  // r0: original string
-  // r1: instance type
-  // r2: length
-  // r3: from index (untagged)
-  __ SmiTag(r3, r3);
-  StringCharAtGenerator generator(r0, r3, r2, r0, &runtime, &runtime, &runtime,
-                                  RECEIVER_IS_STRING);
-  generator.GenerateFast(masm);
-  __ Drop(3);
-  __ Ret();
-  generator.SkipSlow(masm, &runtime);
-}
-
-void ToStringStub::Generate(MacroAssembler* masm) {
-  // The ToString stub takes one argument in r0.
-  Label is_number;
-  __ JumpIfSmi(r0, &is_number);
-
-  __ CompareObjectType(r0, r1, r1, FIRST_NONSTRING_TYPE);
-  // r0: receiver
-  // r1: receiver instance type
-  __ Ret(lo);
-
-  Label not_heap_number;
-  __ cmp(r1, Operand(HEAP_NUMBER_TYPE));
-  __ b(ne, &not_heap_number);
-  __ bind(&is_number);
-  NumberToStringStub stub(isolate());
-  __ TailCallStub(&stub);
-  __ bind(&not_heap_number);
-
-  Label not_oddball;
-  __ cmp(r1, Operand(ODDBALL_TYPE));
-  __ b(ne, &not_oddball);
-  __ ldr(r0, FieldMemOperand(r0, Oddball::kToStringOffset));
-  __ Ret();
-  __ bind(&not_oddball);
-
-  __ push(r0);  // Push argument.
-  __ TailCallRuntime(Runtime::kToString);
-}
-
-
-void ToNameStub::Generate(MacroAssembler* masm) {
-  // The ToName stub takes one argument in r0.
-  Label is_number;
-  __ JumpIfSmi(r0, &is_number);
-
-  STATIC_ASSERT(FIRST_NAME_TYPE == FIRST_TYPE);
-  __ CompareObjectType(r0, r1, r1, LAST_NAME_TYPE);
-  // r0: receiver
-  // r1: receiver instance type
-  __ Ret(ls);
-
-  Label not_heap_number;
-  __ cmp(r1, Operand(HEAP_NUMBER_TYPE));
-  __ b(ne, &not_heap_number);
-  __ bind(&is_number);
-  NumberToStringStub stub(isolate());
-  __ TailCallStub(&stub);
-  __ bind(&not_heap_number);
-
-  Label not_oddball;
-  __ cmp(r1, Operand(ODDBALL_TYPE));
-  __ b(ne, &not_oddball);
-  __ ldr(r0, FieldMemOperand(r0, Oddball::kToStringOffset));
-  __ Ret();
-  __ bind(&not_oddball);
-
-  __ push(r0);  // Push argument.
-  __ TailCallRuntime(Runtime::kToName);
-}
-
-
 void StringHelper::GenerateFlatOneByteStringEquals(
     MacroAssembler* masm, Register left, Register right, Register scratch1,
     Register scratch2, Register scratch3) {
@@ -3275,16 +2984,6 @@
   Label need_incremental;
   Label need_incremental_pop_scratch;
 
-  __ and_(regs_.scratch0(), regs_.object(), Operand(~Page::kPageAlignmentMask));
-  __ ldr(regs_.scratch1(),
-         MemOperand(regs_.scratch0(),
-                    MemoryChunk::kWriteBarrierCounterOffset));
-  __ sub(regs_.scratch1(), regs_.scratch1(), Operand(1), SetCC);
-  __ str(regs_.scratch1(),
-         MemOperand(regs_.scratch0(),
-                    MemoryChunk::kWriteBarrierCounterOffset));
-  __ b(mi, &need_incremental);
-
   // Let's look at the color of the object:  If it is not black we don't have
   // to inform the incremental marker.
   __ JumpIfBlack(regs_.object(), regs_.scratch0(), regs_.scratch1(), &on_black);
@@ -3712,7 +3411,7 @@
   __ ldr(receiver_map, MemOperand(pointer_reg, kPointerSize * 2));
 
   // Load the map into the correct register.
-  DCHECK(feedback.is(VectorStoreTransitionDescriptor::MapRegister()));
+  DCHECK(feedback.is(StoreTransitionDescriptor::MapRegister()));
   __ mov(feedback, too_far);
 
   __ add(pc, receiver_map, Operand(Code::kHeaderSize - kHeapObjectTag));
@@ -4425,7 +4124,7 @@
     // Fall back to %AllocateInNewSpace (if not too big).
     Label too_big_for_new_space;
     __ bind(&allocate);
-    __ cmp(r6, Operand(Page::kMaxRegularHeapObjectSize));
+    __ cmp(r6, Operand(kMaxRegularHeapObjectSize));
     __ b(gt, &too_big_for_new_space);
     {
       FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
@@ -4763,7 +4462,7 @@
   // Fall back to %AllocateInNewSpace (if not too big).
   Label too_big_for_new_space;
   __ bind(&allocate);
-  __ cmp(r6, Operand(Page::kMaxRegularHeapObjectSize));
+  __ cmp(r6, Operand(kMaxRegularHeapObjectSize));
   __ b(gt, &too_big_for_new_space);
   {
     FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
diff --git a/src/arm/codegen-arm.cc b/src/arm/codegen-arm.cc
index 7580145..e63da5c 100644
--- a/src/arm/codegen-arm.cc
+++ b/src/arm/codegen-arm.cc
@@ -39,6 +39,7 @@
   Label less_4;
 
   if (CpuFeatures::IsSupported(NEON)) {
+    CpuFeatureScope scope(&masm, NEON);
     Label loop, less_256, less_128, less_64, less_32, _16_or_less, _8_or_less;
     Label size_less_than_8;
     __ pld(MemOperand(src, 0));
@@ -193,6 +194,7 @@
   Register src = r1;
   Register chars = r2;
   if (CpuFeatures::IsSupported(NEON)) {
+    CpuFeatureScope scope(&masm, NEON);
     Register temp = r3;
     Label loop;
 
diff --git a/src/arm/constants-arm.h b/src/arm/constants-arm.h
index a162051..2bade20 100644
--- a/src/arm/constants-arm.h
+++ b/src/arm/constants-arm.h
@@ -477,40 +477,42 @@
     *reinterpret_cast<Instr*>(this) = value;
   }
 
-  // Read one particular bit out of the instruction bits.
+  // Extract a single bit from the instruction bits and return it as bit 0 in
+  // the result.
   inline int Bit(int nr) const {
     return (InstructionBits() >> nr) & 1;
   }
 
-  // Read a bit field's value out of the instruction bits.
+  // Extract a bit field <hi:lo> from the instruction bits and return it in the
+  // least-significant bits of the result.
   inline int Bits(int hi, int lo) const {
     return (InstructionBits() >> lo) & ((2 << (hi - lo)) - 1);
   }
 
-  // Read a bit field out of the instruction bits.
+  // Read a bit field <hi:lo>, leaving its position unchanged in the result.
   inline int BitField(int hi, int lo) const {
     return InstructionBits() & (((2 << (hi - lo)) - 1) << lo);
   }
 
   // Static support.
 
-  // Read one particular bit out of the instruction bits.
+  // Extract a single bit from the instruction bits and return it as bit 0 in
+  // the result.
   static inline int Bit(Instr instr, int nr) {
     return (instr >> nr) & 1;
   }
 
-  // Read the value of a bit field out of the instruction bits.
+  // Extract a bit field <hi:lo> from the instruction bits and return it in the
+  // least-significant bits of the result.
   static inline int Bits(Instr instr, int hi, int lo) {
     return (instr >> lo) & ((2 << (hi - lo)) - 1);
   }
 
-
-  // Read a bit field out of the instruction bits.
+  // Read a bit field <hi:lo>, leaving its position unchanged in the result.
   static inline int BitField(Instr instr, int hi, int lo) {
     return instr & (((2 << (hi - lo)) - 1) << lo);
   }
 
-
   // Accessors for the different named fields used in the ARM encoding.
   // The naming of these accessor corresponds to figure A3-1.
   //
@@ -525,13 +527,11 @@
 
 
   // Generally applicable fields
-  inline Condition ConditionValue() const {
-    return static_cast<Condition>(Bits(31, 28));
-  }
+  inline int ConditionValue() const { return Bits(31, 28); }
   inline Condition ConditionField() const {
     return static_cast<Condition>(BitField(31, 28));
   }
-  DECLARE_STATIC_TYPED_ACCESSOR(Condition, ConditionValue);
+  DECLARE_STATIC_TYPED_ACCESSOR(int, ConditionValue);
   DECLARE_STATIC_TYPED_ACCESSOR(Condition, ConditionField);
 
   inline int TypeValue() const { return Bits(27, 25); }
diff --git a/src/arm/deoptimizer-arm.cc b/src/arm/deoptimizer-arm.cc
index c569e66..e49fed9 100644
--- a/src/arm/deoptimizer-arm.cc
+++ b/src/arm/deoptimizer-arm.cc
@@ -119,14 +119,20 @@
   DCHECK(kDoubleRegZero.code() == 14);
   DCHECK(kScratchDoubleReg.code() == 15);
 
-  // Check CPU flags for number of registers, setting the Z condition flag.
-  __ CheckFor32DRegs(ip);
+  {
+    // We use a run-time check for VFP32DREGS.
+    CpuFeatureScope scope(masm(), VFP32DREGS,
+                          CpuFeatureScope::kDontCheckSupported);
 
-  // Push registers d0-d15, and possibly d16-d31, on the stack.
-  // If d16-d31 are not pushed, decrease the stack pointer instead.
-  __ vstm(db_w, sp, d16, d31, ne);
-  __ sub(sp, sp, Operand(16 * kDoubleSize), LeaveCC, eq);
-  __ vstm(db_w, sp, d0, d15);
+    // Check CPU flags for number of registers, setting the Z condition flag.
+    __ CheckFor32DRegs(ip);
+
+    // Push registers d0-d15, and possibly d16-d31, on the stack.
+    // If d16-d31 are not pushed, decrease the stack pointer instead.
+    __ vstm(db_w, sp, d16, d31, ne);
+    __ sub(sp, sp, Operand(16 * kDoubleSize), LeaveCC, eq);
+    __ vstm(db_w, sp, d0, d15);
+  }
 
   // Push all 16 registers (needed to populate FrameDescription::registers_).
   // TODO(1588) Note that using pc with stm is deprecated, so we should perhaps
@@ -259,9 +265,6 @@
   __ cmp(r4, r1);
   __ b(lt, &outer_push_loop);
 
-  // Check CPU flags for number of registers, setting the Z condition flag.
-  __ CheckFor32DRegs(ip);
-
   __ ldr(r1, MemOperand(r0, Deoptimizer::input_offset()));
   for (int i = 0; i < config->num_allocatable_double_registers(); ++i) {
     int code = config->GetAllocatableDoubleCode(i);
diff --git a/src/arm/disasm-arm.cc b/src/arm/disasm-arm.cc
index 1e1c75d..e408e85 100644
--- a/src/arm/disasm-arm.cc
+++ b/src/arm/disasm-arm.cc
@@ -105,6 +105,8 @@
   void DecodeType6(Instruction* instr);
   // Type 7 includes special Debugger instructions.
   int DecodeType7(Instruction* instr);
+  // CP15 coprocessor instructions.
+  void DecodeTypeCP15(Instruction* instr);
   // For VFP support.
   void DecodeTypeVFP(Instruction* instr);
   void DecodeType6CoprocessorIns(Instruction* instr);
@@ -1279,18 +1281,16 @@
           break;
         }
       }
-      if (FLAG_enable_sudiv) {
-        if (instr->Bits(5, 4) == 0x1) {
-          if ((instr->Bit(22) == 0x0) && (instr->Bit(20) == 0x1)) {
-            if (instr->Bit(21) == 0x1) {
-              // UDIV (in V8 notation matching ARM ISA format) rn = rm/rs
-              Format(instr, "udiv'cond'b 'rn, 'rm, 'rs");
-            } else {
-              // SDIV (in V8 notation matching ARM ISA format) rn = rm/rs
-              Format(instr, "sdiv'cond'b 'rn, 'rm, 'rs");
-            }
-            break;
+      if (instr->Bits(5, 4) == 0x1) {
+        if ((instr->Bit(22) == 0x0) && (instr->Bit(20) == 0x1)) {
+          if (instr->Bit(21) == 0x1) {
+            // UDIV (in V8 notation matching ARM ISA format) rn = rm/rs
+            Format(instr, "udiv'cond'b 'rn, 'rm, 'rs");
+          } else {
+            // SDIV (in V8 notation matching ARM ISA format) rn = rm/rs
+            Format(instr, "sdiv'cond'b 'rn, 'rm, 'rs");
           }
+          break;
         }
       }
       Format(instr, "'memop'cond'b 'rd, ['rn, -'shift_rm]'w");
@@ -1374,7 +1374,18 @@
       Format(instr, "svc'cond 'svc");
     }
   } else {
-    DecodeTypeVFP(instr);
+    switch (instr->CoprocessorValue()) {
+      case 10:  // Fall through.
+      case 11:
+        DecodeTypeVFP(instr);
+        break;
+      case 15:
+        DecodeTypeCP15(instr);
+        break;
+      default:
+        Unknown(instr);
+        break;
+    }
   }
   return Instruction::kInstrSize;
 }
@@ -1556,6 +1567,34 @@
   }
 }
 
+void Decoder::DecodeTypeCP15(Instruction* instr) {
+  VERIFY((instr->TypeValue() == 7) && (instr->Bit(24) == 0x0));
+  VERIFY(instr->CoprocessorValue() == 15);
+
+  if (instr->Bit(4) == 1) {
+    int crn = instr->Bits(19, 16);
+    int crm = instr->Bits(3, 0);
+    int opc1 = instr->Bits(23, 21);
+    int opc2 = instr->Bits(7, 5);
+    if ((opc1 == 0) && (crn == 7)) {
+      // ARMv6 memory barrier operations.
+      // Details available in ARM DDI 0406C.b, B3-1750.
+      if ((crm == 10) && (opc2 == 5)) {
+        Format(instr, "mcr'cond (CP15DMB)");
+      } else if ((crm == 10) && (opc2 == 4)) {
+        Format(instr, "mcr'cond (CP15DSB)");
+      } else if ((crm == 5) && (opc2 == 4)) {
+        Format(instr, "mcr'cond (CP15ISB)");
+      } else {
+        Unknown(instr);
+      }
+    } else {
+      Unknown(instr);
+    }
+  } else {
+    Unknown(instr);
+  }
+}
 
 void Decoder::DecodeVMOVBetweenCoreAndSinglePrecisionRegisters(
     Instruction* instr) {
@@ -1786,6 +1825,13 @@
         int imm3 = instr->Bits(21, 19);
         out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
                                     "vmovl.u%d q%d, d%d", imm3*8, Vd, Vm);
+      } else if ((instr->Bits(21, 16) == 0x32) && (instr->Bits(11, 7) == 0) &&
+                 (instr->Bit(4) == 0)) {
+        int Vd = instr->VFPDRegValue(kDoublePrecision);
+        int Vm = instr->VFPMRegValue(kDoublePrecision);
+        char rtype = (instr->Bit(6) == 0) ? 'd' : 'q';
+        out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
+                                    "vswp %c%d, %c%d", rtype, Vd, rtype, Vm);
       } else {
         Unknown(instr);
       }
@@ -1898,6 +1944,22 @@
             UNREACHABLE();  // Case analysis is exhaustive.
             break;
         }
+      } else if ((instr->Opc1Value() == 0x4) && (instr->Bits(11, 9) == 0x5) &&
+                 (instr->Bit(4) == 0x0)) {
+        // VMAXNM, VMINNM (floating-point)
+        if (instr->SzValue() == 0x1) {
+          if (instr->Bit(6) == 0x1) {
+            Format(instr, "vminnm.f64 'Dd, 'Dn, 'Dm");
+          } else {
+            Format(instr, "vmaxnm.f64 'Dd, 'Dn, 'Dm");
+          }
+        } else {
+          if (instr->Bit(6) == 0x1) {
+            Format(instr, "vminnm.f32 'Sd, 'Sn, 'Sm");
+          } else {
+            Format(instr, "vmaxnm.f32 'Sd, 'Sn, 'Sm");
+          }
+        }
       } else {
         Unknown(instr);
       }
diff --git a/src/arm/interface-descriptors-arm.cc b/src/arm/interface-descriptors-arm.cc
index d26804a..a002b8d 100644
--- a/src/arm/interface-descriptors-arm.cc
+++ b/src/arm/interface-descriptors-arm.cc
@@ -42,13 +42,9 @@
 
 const Register StoreWithVectorDescriptor::VectorRegister() { return r3; }
 
-const Register VectorStoreTransitionDescriptor::SlotRegister() { return r4; }
-const Register VectorStoreTransitionDescriptor::VectorRegister() { return r3; }
-const Register VectorStoreTransitionDescriptor::MapRegister() { return r5; }
-
-
-const Register StoreTransitionDescriptor::MapRegister() { return r3; }
-
+const Register StoreTransitionDescriptor::SlotRegister() { return r4; }
+const Register StoreTransitionDescriptor::VectorRegister() { return r3; }
+const Register StoreTransitionDescriptor::MapRegister() { return r5; }
 
 const Register StoreGlobalViaContextDescriptor::SlotRegister() { return r2; }
 const Register StoreGlobalViaContextDescriptor::ValueRegister() { return r0; }
@@ -375,7 +371,7 @@
                                    &default_descriptor);
 }
 
-void ApiCallbackDescriptorBase::InitializePlatformSpecific(
+void ApiCallbackDescriptor::InitializePlatformSpecific(
     CallInterfaceDescriptorData* data) {
   static PlatformInterfaceDescriptor default_descriptor =
       PlatformInterfaceDescriptor(CAN_INLINE_TARGET_ADDRESS);
@@ -414,7 +410,19 @@
       r0,  // argument count (not including receiver)
       r3,  // new target
       r1,  // constructor to call
-      r2   // address of the first argument
+      r2,  // allocation site feedback if available, undefined otherwise
+      r4   // address of the first argument
+  };
+  data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
+void InterpreterPushArgsAndConstructArrayDescriptor::InitializePlatformSpecific(
+    CallInterfaceDescriptorData* data) {
+  Register registers[] = {
+      r0,  // argument count (not including receiver)
+      r1,  // target to call checked to be Array function
+      r2,  // allocation site feedback if available, undefined otherwise
+      r3   // address of the first argument
   };
   data->InitializePlatformSpecific(arraysize(registers), registers);
 }
diff --git a/src/arm/macro-assembler-arm.cc b/src/arm/macro-assembler-arm.cc
index a08673d..00f8ab5 100644
--- a/src/arm/macro-assembler-arm.cc
+++ b/src/arm/macro-assembler-arm.cc
@@ -250,15 +250,17 @@
   }
 }
 
-void MacroAssembler::Move(SwVfpRegister dst, SwVfpRegister src) {
+void MacroAssembler::Move(SwVfpRegister dst, SwVfpRegister src,
+                          Condition cond) {
   if (!dst.is(src)) {
-    vmov(dst, src);
+    vmov(dst, src, cond);
   }
 }
 
-void MacroAssembler::Move(DwVfpRegister dst, DwVfpRegister src) {
+void MacroAssembler::Move(DwVfpRegister dst, DwVfpRegister src,
+                          Condition cond) {
   if (!dst.is(src)) {
-    vmov(dst, src);
+    vmov(dst, src, cond);
   }
 }
 
@@ -285,6 +287,7 @@
              !src2.must_output_reloc_info(this) &&
              CpuFeatures::IsSupported(ARMv7) &&
              base::bits::IsPowerOfTwo32(src2.immediate() + 1)) {
+    CpuFeatureScope scope(this, ARMv7);
     ubfx(dst, src1, 0,
         WhichPowerOf2(static_cast<uint32_t>(src2.immediate()) + 1), cond);
   } else {
@@ -303,6 +306,7 @@
       mov(dst, Operand(dst, LSR, lsb), LeaveCC, cond);
     }
   } else {
+    CpuFeatureScope scope(this, ARMv7);
     ubfx(dst, src1, lsb, width, cond);
   }
 }
@@ -323,6 +327,7 @@
       mov(dst, Operand(dst, ASR, shift_down), LeaveCC, cond);
     }
   } else {
+    CpuFeatureScope scope(this, ARMv7);
     sbfx(dst, src1, lsb, width, cond);
   }
 }
@@ -346,6 +351,7 @@
     mov(scratch, Operand(scratch, LSL, lsb));
     orr(dst, dst, scratch);
   } else {
+    CpuFeatureScope scope(this, ARMv7);
     bfi(dst, src, lsb, width, cond);
   }
 }
@@ -358,6 +364,7 @@
     int mask = (1 << (width + lsb)) - 1 - ((1 << lsb) - 1);
     bic(dst, src, Operand(mask));
   } else {
+    CpuFeatureScope scope(this, ARMv7);
     Move(dst, src, cond);
     bfc(dst, lsb, width, cond);
   }
@@ -404,15 +411,6 @@
 void MacroAssembler::LoadRoot(Register destination,
                               Heap::RootListIndex index,
                               Condition cond) {
-  if (CpuFeatures::IsSupported(MOVW_MOVT_IMMEDIATE_LOADS) &&
-      isolate()->heap()->RootCanBeTreatedAsConstant(index) &&
-      !predictable_code_size()) {
-    // The CPU supports fast immediate values, and this root will never
-    // change. We will load it as a relocatable immediate value.
-    Handle<Object> root = isolate()->heap()->root_handle(index);
-    mov(destination, Operand(root), LeaveCC, cond);
-    return;
-  }
   ldr(destination, MemOperand(kRootRegister, index << kPointerSizeLog2), cond);
 }
 
@@ -430,9 +428,7 @@
                                 Condition cond,
                                 Label* branch) {
   DCHECK(cond == eq || cond == ne);
-  const int mask =
-      (1 << MemoryChunk::IN_FROM_SPACE) | (1 << MemoryChunk::IN_TO_SPACE);
-  CheckPageFlag(object, scratch, mask, cond, branch);
+  CheckPageFlag(object, scratch, MemoryChunk::kIsInNewSpaceMask, cond, branch);
 }
 
 
@@ -1054,6 +1050,7 @@
     vmov(dst, VmovIndexLo, src);
   }
 }
+
 void MacroAssembler::LslPair(Register dst_low, Register dst_high,
                              Register src_low, Register src_high,
                              Register scratch, Register shift) {
@@ -1971,7 +1968,7 @@
                               Register scratch2,
                               Label* gc_required,
                               AllocationFlags flags) {
-  DCHECK(object_size <= Page::kMaxRegularHeapObjectSize);
+  DCHECK(object_size <= kMaxRegularHeapObjectSize);
   DCHECK((flags & ALLOCATION_FOLDED) == 0);
   if (!FLAG_inline_new) {
     if (emit_debug_code()) {
@@ -2049,7 +2046,6 @@
   // point, so we cannot just use add().
   DCHECK(object_size > 0);
   Register source = result;
-  Condition cond = al;
   int shift = 0;
   while (object_size != 0) {
     if (((object_size >> shift) & 0x03) == 0) {
@@ -2060,9 +2056,8 @@
       shift += 8;
       Operand bits_operand(bits);
       DCHECK(bits_operand.instructions_required(this) == 1);
-      add(result_end, source, bits_operand, LeaveCC, cond);
+      add(result_end, source, bits_operand);
       source = result_end;
-      cond = cc;
     }
   }
 
@@ -2226,7 +2221,7 @@
 void MacroAssembler::FastAllocate(int object_size, Register result,
                                   Register scratch1, Register scratch2,
                                   AllocationFlags flags) {
-  DCHECK(object_size <= Page::kMaxRegularHeapObjectSize);
+  DCHECK(object_size <= kMaxRegularHeapObjectSize);
   DCHECK(!AreAliased(result, scratch1, scratch2, ip));
 
   // Make object size into bytes.
@@ -2261,7 +2256,6 @@
   // this point, so we cannot just use add().
   DCHECK(object_size > 0);
   Register source = result;
-  Condition cond = al;
   int shift = 0;
   while (object_size != 0) {
     if (((object_size >> shift) & 0x03) == 0) {
@@ -2272,9 +2266,8 @@
       shift += 8;
       Operand bits_operand(bits);
       DCHECK(bits_operand.instructions_required(this) == 1);
-      add(result_end, source, bits_operand, LeaveCC, cond);
+      add(result_end, source, bits_operand);
       source = result_end;
-      cond = cc;
     }
   }
 
@@ -2650,7 +2643,8 @@
 
 
 void MacroAssembler::SmiToDouble(LowDwVfpRegister value, Register smi) {
-  if (CpuFeatures::IsSupported(VFP3)) {
+  if (CpuFeatures::IsSupported(VFPv3)) {
+    CpuFeatureScope scope(this, VFPv3);
     vmov(value.low(), smi);
     vcvt_f64_s32(value, 1);
   } else {
@@ -2807,6 +2801,7 @@
                                          Register src,
                                          int num_least_bits) {
   if (CpuFeatures::IsSupported(ARMv7) && !predictable_code_size()) {
+    CpuFeatureScope scope(this, ARMv7);
     ubfx(dst, src, kSmiTagSize, num_least_bits);
   } else {
     SmiUntag(dst, src);
@@ -3416,6 +3411,7 @@
 
 
 void MacroAssembler::SaveFPRegs(Register location, Register scratch) {
+  CpuFeatureScope scope(this, VFP32DREGS, CpuFeatureScope::kDontCheckSupported);
   CheckFor32DRegs(scratch);
   vstm(db_w, location, d16, d31, ne);
   sub(location, location, Operand(16 * kDoubleSize), LeaveCC, eq);
@@ -3424,12 +3420,151 @@
 
 
 void MacroAssembler::RestoreFPRegs(Register location, Register scratch) {
+  CpuFeatureScope scope(this, VFP32DREGS, CpuFeatureScope::kDontCheckSupported);
   CheckFor32DRegs(scratch);
   vldm(ia_w, location, d0, d15);
   vldm(ia_w, location, d16, d31, ne);
   add(location, location, Operand(16 * kDoubleSize), LeaveCC, eq);
 }
 
+template <typename T>
+void MacroAssembler::FloatMaxHelper(T result, T left, T right,
+                                    Label* out_of_line) {
+  // This trivial case is caught sooner, so that the out-of-line code can be
+  // completely avoided.
+  DCHECK(!left.is(right));
+
+  if (CpuFeatures::IsSupported(ARMv8)) {
+    CpuFeatureScope scope(this, ARMv8);
+    VFPCompareAndSetFlags(left, right);
+    b(vs, out_of_line);
+    vmaxnm(result, left, right);
+  } else {
+    Label done;
+    VFPCompareAndSetFlags(left, right);
+    b(vs, out_of_line);
+    // Avoid a conditional instruction if the result register is unique.
+    bool aliased_result_reg = result.is(left) || result.is(right);
+    Move(result, right, aliased_result_reg ? mi : al);
+    Move(result, left, gt);
+    b(ne, &done);
+    // Left and right are equal, but check for +/-0.
+    VFPCompareAndSetFlags(left, 0.0);
+    b(eq, out_of_line);
+    // The arguments are equal and not zero, so it doesn't matter which input we
+    // pick. We have already moved one input into the result (if it didn't
+    // already alias) so there's nothing more to do.
+    bind(&done);
+  }
+}
+
+template <typename T>
+void MacroAssembler::FloatMaxOutOfLineHelper(T result, T left, T right) {
+  DCHECK(!left.is(right));
+
+  // ARMv8: At least one of left and right is a NaN.
+  // Anything else: At least one of left and right is a NaN, or both left and
+  // right are zeroes with unknown sign.
+
+  // If left and right are +/-0, select the one with the most positive sign.
+  // If left or right are NaN, vadd propagates the appropriate one.
+  vadd(result, left, right);
+}
+
+template <typename T>
+void MacroAssembler::FloatMinHelper(T result, T left, T right,
+                                    Label* out_of_line) {
+  // This trivial case is caught sooner, so that the out-of-line code can be
+  // completely avoided.
+  DCHECK(!left.is(right));
+
+  if (CpuFeatures::IsSupported(ARMv8)) {
+    CpuFeatureScope scope(this, ARMv8);
+    VFPCompareAndSetFlags(left, right);
+    b(vs, out_of_line);
+    vminnm(result, left, right);
+  } else {
+    Label done;
+    VFPCompareAndSetFlags(left, right);
+    b(vs, out_of_line);
+    // Avoid a conditional instruction if the result register is unique.
+    bool aliased_result_reg = result.is(left) || result.is(right);
+    Move(result, left, aliased_result_reg ? mi : al);
+    Move(result, right, gt);
+    b(ne, &done);
+    // Left and right are equal, but check for +/-0.
+    VFPCompareAndSetFlags(left, 0.0);
+    // If the arguments are equal and not zero, it doesn't matter which input we
+    // pick. We have already moved one input into the result (if it didn't
+    // already alias) so there's nothing more to do.
+    b(ne, &done);
+    // At this point, both left and right are either 0 or -0.
+    // We could use a single 'vorr' instruction here if we had NEON support.
+    // The algorithm used is -((-L) + (-R)), which is most efficiently expressed
+    // as -((-L) - R).
+    if (left.is(result)) {
+      DCHECK(!right.is(result));
+      vneg(result, left);
+      vsub(result, result, right);
+      vneg(result, result);
+    } else {
+      DCHECK(!left.is(result));
+      vneg(result, right);
+      vsub(result, result, left);
+      vneg(result, result);
+    }
+    bind(&done);
+  }
+}
+
+template <typename T>
+void MacroAssembler::FloatMinOutOfLineHelper(T result, T left, T right) {
+  DCHECK(!left.is(right));
+
+  // At least one of left and right is a NaN. Use vadd to propagate the NaN
+  // appropriately. +/-0 is handled inline.
+  vadd(result, left, right);
+}
+
+void MacroAssembler::FloatMax(SwVfpRegister result, SwVfpRegister left,
+                              SwVfpRegister right, Label* out_of_line) {
+  FloatMaxHelper(result, left, right, out_of_line);
+}
+
+void MacroAssembler::FloatMin(SwVfpRegister result, SwVfpRegister left,
+                              SwVfpRegister right, Label* out_of_line) {
+  FloatMinHelper(result, left, right, out_of_line);
+}
+
+void MacroAssembler::FloatMax(DwVfpRegister result, DwVfpRegister left,
+                              DwVfpRegister right, Label* out_of_line) {
+  FloatMaxHelper(result, left, right, out_of_line);
+}
+
+void MacroAssembler::FloatMin(DwVfpRegister result, DwVfpRegister left,
+                              DwVfpRegister right, Label* out_of_line) {
+  FloatMinHelper(result, left, right, out_of_line);
+}
+
+void MacroAssembler::FloatMaxOutOfLine(SwVfpRegister result, SwVfpRegister left,
+                                       SwVfpRegister right) {
+  FloatMaxOutOfLineHelper(result, left, right);
+}
+
+void MacroAssembler::FloatMinOutOfLine(SwVfpRegister result, SwVfpRegister left,
+                                       SwVfpRegister right) {
+  FloatMinOutOfLineHelper(result, left, right);
+}
+
+void MacroAssembler::FloatMaxOutOfLine(DwVfpRegister result, DwVfpRegister left,
+                                       DwVfpRegister right) {
+  FloatMaxOutOfLineHelper(result, left, right);
+}
+
+void MacroAssembler::FloatMinOutOfLine(DwVfpRegister result, DwVfpRegister left,
+                                       DwVfpRegister right) {
+  FloatMinOutOfLineHelper(result, left, right);
+}
 
 void MacroAssembler::JumpIfBothInstanceTypesAreNotSequentialOneByte(
     Register first, Register second, Register scratch1, Register scratch2,
diff --git a/src/arm/macro-assembler-arm.h b/src/arm/macro-assembler-arm.h
index 2f1b3c2..d524d84 100644
--- a/src/arm/macro-assembler-arm.h
+++ b/src/arm/macro-assembler-arm.h
@@ -123,6 +123,18 @@
   void CallDeoptimizer(Address target);
   static int CallDeoptimizerSize();
 
+  // Emit code that loads |parameter_index|'th parameter from the stack to
+  // the register according to the CallInterfaceDescriptor definition.
+  // |sp_to_ra_offset_in_words| specifies the number of words pushed
+  // below the caller's sp.
+  template <class Descriptor>
+  void LoadParameterFromStack(
+      Register reg, typename Descriptor::ParameterIndices parameter_index,
+      int sp_to_ra_offset_in_words = 0) {
+    DCHECK(Descriptor::kPassLastArgsOnStack);
+    UNIMPLEMENTED();
+  }
+
   // Emit code to discard a non-negative number of pointer-sized elements
   // from the stack, clobbering only the sp register.
   void Drop(int count, Condition cond = al);
@@ -170,8 +182,8 @@
       mov(dst, src, sbit, cond);
     }
   }
-  void Move(SwVfpRegister dst, SwVfpRegister src);
-  void Move(DwVfpRegister dst, DwVfpRegister src);
+  void Move(SwVfpRegister dst, SwVfpRegister src, Condition cond = al);
+  void Move(DwVfpRegister dst, DwVfpRegister src, Condition cond = al);
 
   void Load(Register dst, const MemOperand& src, Representation r);
   void Store(Register src, const MemOperand& dst, Representation r);
@@ -1082,6 +1094,32 @@
   // values to location, restoring [d0..(d15|d31)].
   void RestoreFPRegs(Register location, Register scratch);
 
+  // Perform a floating-point min or max operation with the
+  // (IEEE-754-compatible) semantics of ARM64's fmin/fmax. Some cases, typically
+  // NaNs or +/-0.0, are expected to be rare and are handled in out-of-line
+  // code. The specific behaviour depends on supported instructions.
+  //
+  // These functions assume (and assert) that !left.is(right). It is permitted
+  // for the result to alias either input register.
+  void FloatMax(SwVfpRegister result, SwVfpRegister left, SwVfpRegister right,
+                Label* out_of_line);
+  void FloatMin(SwVfpRegister result, SwVfpRegister left, SwVfpRegister right,
+                Label* out_of_line);
+  void FloatMax(DwVfpRegister result, DwVfpRegister left, DwVfpRegister right,
+                Label* out_of_line);
+  void FloatMin(DwVfpRegister result, DwVfpRegister left, DwVfpRegister right,
+                Label* out_of_line);
+
+  // Generate out-of-line cases for the macros above.
+  void FloatMaxOutOfLine(SwVfpRegister result, SwVfpRegister left,
+                         SwVfpRegister right);
+  void FloatMinOutOfLine(SwVfpRegister result, SwVfpRegister left,
+                         SwVfpRegister right);
+  void FloatMaxOutOfLine(DwVfpRegister result, DwVfpRegister left,
+                         DwVfpRegister right);
+  void FloatMinOutOfLine(DwVfpRegister result, DwVfpRegister left,
+                         DwVfpRegister right);
+
   // ---------------------------------------------------------------------------
   // Runtime calls
 
@@ -1513,6 +1551,16 @@
   MemOperand SafepointRegisterSlot(Register reg);
   MemOperand SafepointRegistersAndDoublesSlot(Register reg);
 
+  // Implementation helpers for FloatMin and FloatMax.
+  template <typename T>
+  void FloatMaxHelper(T result, T left, T right, Label* out_of_line);
+  template <typename T>
+  void FloatMinHelper(T result, T left, T right, Label* out_of_line);
+  template <typename T>
+  void FloatMaxOutOfLineHelper(T result, T left, T right);
+  template <typename T>
+  void FloatMinOutOfLineHelper(T result, T left, T right);
+
   bool generating_stub_;
   bool has_frame_;
   // This handle will be patched with the code object on installation.
diff --git a/src/arm/simulator-arm.cc b/src/arm/simulator-arm.cc
index cfcc5b1..331a7e9 100644
--- a/src/arm/simulator-arm.cc
+++ b/src/arm/simulator-arm.cc
@@ -575,8 +575,8 @@
   last_debugger_input_ = input;
 }
 
-void Simulator::FlushICache(base::HashMap* i_cache, void* start_addr,
-                            size_t size) {
+void Simulator::FlushICache(base::CustomMatcherHashMap* i_cache,
+                            void* start_addr, size_t size) {
   intptr_t start = reinterpret_cast<intptr_t>(start_addr);
   int intra_line = (start & CachePage::kLineMask);
   start -= intra_line;
@@ -596,7 +596,8 @@
   }
 }
 
-CachePage* Simulator::GetCachePage(base::HashMap* i_cache, void* page) {
+CachePage* Simulator::GetCachePage(base::CustomMatcherHashMap* i_cache,
+                                   void* page) {
   base::HashMap::Entry* entry = i_cache->LookupOrInsert(page, ICacheHash(page));
   if (entry->value == NULL) {
     CachePage* new_page = new CachePage();
@@ -607,7 +608,8 @@
 
 
 // Flush from start up to and not including start + size.
-void Simulator::FlushOnePage(base::HashMap* i_cache, intptr_t start, int size) {
+void Simulator::FlushOnePage(base::CustomMatcherHashMap* i_cache,
+                             intptr_t start, int size) {
   DCHECK(size <= CachePage::kPageSize);
   DCHECK(AllOnOnePage(start, size - 1));
   DCHECK((start & CachePage::kLineMask) == 0);
@@ -619,7 +621,8 @@
   memset(valid_bytemap, CachePage::LINE_INVALID, size >> CachePage::kLineShift);
 }
 
-void Simulator::CheckICache(base::HashMap* i_cache, Instruction* instr) {
+void Simulator::CheckICache(base::CustomMatcherHashMap* i_cache,
+                            Instruction* instr) {
   intptr_t address = reinterpret_cast<intptr_t>(instr);
   void* page = reinterpret_cast<void*>(address & (~CachePage::kPageMask));
   void* line = reinterpret_cast<void*>(address & (~CachePage::kLineMask));
@@ -652,7 +655,7 @@
 Simulator::Simulator(Isolate* isolate) : isolate_(isolate) {
   i_cache_ = isolate_->simulator_i_cache();
   if (i_cache_ == NULL) {
-    i_cache_ = new base::HashMap(&ICacheMatch);
+    i_cache_ = new base::CustomMatcherHashMap(&ICacheMatch);
     isolate_->set_simulator_i_cache(i_cache_);
   }
   Initialize(isolate);
@@ -783,7 +786,8 @@
 
 
 // static
-void Simulator::TearDown(base::HashMap* i_cache, Redirection* first) {
+void Simulator::TearDown(base::CustomMatcherHashMap* i_cache,
+                         Redirection* first) {
   Redirection::DeleteChain(first);
   if (i_cache != nullptr) {
     for (base::HashMap::Entry* entry = i_cache->Start(); entry != nullptr;
@@ -2886,26 +2890,24 @@
           return;
         }
       }
-      if (FLAG_enable_sudiv) {
-        if (instr->Bits(5, 4) == 0x1) {
-          if ((instr->Bit(22) == 0x0) && (instr->Bit(20) == 0x1)) {
-            // (s/u)div (in V8 notation matching ARM ISA format) rn = rm/rs
-            // Format(instr, "'(s/u)div'cond'b 'rn, 'rm, 'rs);
-            int rm = instr->RmValue();
-            int32_t rm_val = get_register(rm);
-            int rs = instr->RsValue();
-            int32_t rs_val = get_register(rs);
-            int32_t ret_val = 0;
-            // udiv
-            if (instr->Bit(21) == 0x1) {
-              ret_val = bit_cast<int32_t>(base::bits::UnsignedDiv32(
-                  bit_cast<uint32_t>(rm_val), bit_cast<uint32_t>(rs_val)));
-            } else {
-              ret_val = base::bits::SignedDiv32(rm_val, rs_val);
-            }
-            set_register(rn, ret_val);
-            return;
+      if (instr->Bits(5, 4) == 0x1) {
+        if ((instr->Bit(22) == 0x0) && (instr->Bit(20) == 0x1)) {
+          // (s/u)div (in V8 notation matching ARM ISA format) rn = rm/rs
+          // Format(instr, "'(s/u)div'cond'b 'rn, 'rm, 'rs);
+          int rm = instr->RmValue();
+          int32_t rm_val = get_register(rm);
+          int rs = instr->RsValue();
+          int32_t rs_val = get_register(rs);
+          int32_t ret_val = 0;
+          // udiv
+          if (instr->Bit(21) == 0x1) {
+            ret_val = bit_cast<int32_t>(base::bits::UnsignedDiv32(
+                bit_cast<uint32_t>(rm_val), bit_cast<uint32_t>(rs_val)));
+          } else {
+            ret_val = base::bits::SignedDiv32(rm_val, rs_val);
           }
+          set_register(rn, ret_val);
+          return;
         }
       }
       // Format(instr, "'memop'cond'b 'rd, ['rn, -'shift_rm]'w");
@@ -3026,7 +3028,17 @@
   if (instr->Bit(24) == 1) {
     SoftwareInterrupt(instr);
   } else {
-    DecodeTypeVFP(instr);
+    switch (instr->CoprocessorValue()) {
+      case 10:  // Fall through.
+      case 11:
+        DecodeTypeVFP(instr);
+        break;
+      case 15:
+        DecodeTypeCP15(instr);
+        break;
+      default:
+        UNIMPLEMENTED();
+    }
   }
 }
 
@@ -3335,6 +3347,31 @@
   }
 }
 
+void Simulator::DecodeTypeCP15(Instruction* instr) {
+  DCHECK((instr->TypeValue() == 7) && (instr->Bit(24) == 0x0));
+  DCHECK(instr->CoprocessorValue() == 15);
+
+  if (instr->Bit(4) == 1) {
+    // mcr
+    int crn = instr->Bits(19, 16);
+    int crm = instr->Bits(3, 0);
+    int opc1 = instr->Bits(23, 21);
+    int opc2 = instr->Bits(7, 5);
+    if ((opc1 == 0) && (crn == 7)) {
+      // ARMv6 memory barrier operations.
+      // Details available in ARM DDI 0406C.b, B3-1750.
+      if (((crm == 10) && (opc2 == 5)) ||  // CP15DMB
+          ((crm == 10) && (opc2 == 4)) ||  // CP15DSB
+          ((crm == 5) && (opc2 == 4))) {   // CP15ISB
+        // These are ignored by the simulator for now.
+      } else {
+        UNIMPLEMENTED();
+      }
+    }
+  } else {
+    UNIMPLEMENTED();
+  }
+}
 
 void Simulator::DecodeVMOVBetweenCoreAndSinglePrecisionRegisters(
     Instruction* instr) {
@@ -3750,6 +3787,21 @@
           e++;
         }
         set_q_register(Vd, reinterpret_cast<uint64_t*>(to));
+      } else if ((instr->Bits(21, 16) == 0x32) && (instr->Bits(11, 7) == 0) &&
+                 (instr->Bit(4) == 0)) {
+        int vd = instr->VFPDRegValue(kDoublePrecision);
+        int vm = instr->VFPMRegValue(kDoublePrecision);
+        if (instr->Bit(6) == 0) {
+          // vswp Dd, Dm.
+          uint64_t dval, mval;
+          get_d_register(vd, &dval);
+          get_d_register(vm, &mval);
+          set_d_register(vm, &dval);
+          set_d_register(vd, &mval);
+        } else {
+          // Q register vswp unimplemented.
+          UNIMPLEMENTED();
+        }
       } else {
         UNIMPLEMENTED();
       }
@@ -3848,6 +3900,7 @@
       } else if (instr->SpecialValue() == 0xA && instr->Bits(22, 20) == 7) {
         // dsb, dmb, isb: ignore instruction for now.
         // TODO(binji): implement
+        // Also refer to the ARMv6 CP15 equivalents in DecodeTypeCP15.
       } else {
         UNIMPLEMENTED();
       }
@@ -3908,6 +3961,69 @@
           sd_value = canonicalizeNaN(sd_value);
           set_s_register_from_float(d, sd_value);
         }
+      } else if ((instr->Opc1Value() == 0x4) && (instr->Bits(11, 9) == 0x5) &&
+                 (instr->Bit(4) == 0x0)) {
+        if (instr->SzValue() == 0x1) {
+          int m = instr->VFPMRegValue(kDoublePrecision);
+          int n = instr->VFPNRegValue(kDoublePrecision);
+          int d = instr->VFPDRegValue(kDoublePrecision);
+          double dn_value = get_double_from_d_register(n);
+          double dm_value = get_double_from_d_register(m);
+          double dd_value;
+          if (instr->Bit(6) == 0x1) {  // vminnm
+            if ((dn_value < dm_value) || std::isnan(dm_value)) {
+              dd_value = dn_value;
+            } else if ((dm_value < dn_value) || std::isnan(dn_value)) {
+              dd_value = dm_value;
+            } else {
+              DCHECK_EQ(dn_value, dm_value);
+              // Make sure that we pick the most negative sign for +/-0.
+              dd_value = std::signbit(dn_value) ? dn_value : dm_value;
+            }
+          } else {  // vmaxnm
+            if ((dn_value > dm_value) || std::isnan(dm_value)) {
+              dd_value = dn_value;
+            } else if ((dm_value > dn_value) || std::isnan(dn_value)) {
+              dd_value = dm_value;
+            } else {
+              DCHECK_EQ(dn_value, dm_value);
+              // Make sure that we pick the most positive sign for +/-0.
+              dd_value = std::signbit(dn_value) ? dm_value : dn_value;
+            }
+          }
+          dd_value = canonicalizeNaN(dd_value);
+          set_d_register_from_double(d, dd_value);
+        } else {
+          int m = instr->VFPMRegValue(kSinglePrecision);
+          int n = instr->VFPNRegValue(kSinglePrecision);
+          int d = instr->VFPDRegValue(kSinglePrecision);
+          float sn_value = get_float_from_s_register(n);
+          float sm_value = get_float_from_s_register(m);
+          float sd_value;
+          if (instr->Bit(6) == 0x1) {  // vminnm
+            if ((sn_value < sm_value) || std::isnan(sm_value)) {
+              sd_value = sn_value;
+            } else if ((sm_value < sn_value) || std::isnan(sn_value)) {
+              sd_value = sm_value;
+            } else {
+              DCHECK_EQ(sn_value, sm_value);
+              // Make sure that we pick the most negative sign for +/-0.
+              sd_value = std::signbit(sn_value) ? sn_value : sm_value;
+            }
+          } else {  // vmaxnm
+            if ((sn_value > sm_value) || std::isnan(sm_value)) {
+              sd_value = sn_value;
+            } else if ((sm_value > sn_value) || std::isnan(sn_value)) {
+              sd_value = sm_value;
+            } else {
+              DCHECK_EQ(sn_value, sm_value);
+              // Make sure that we pick the most positive sign for +/-0.
+              sd_value = std::signbit(sn_value) ? sm_value : sn_value;
+            }
+          }
+          sd_value = canonicalizeNaN(sd_value);
+          set_s_register_from_float(d, sd_value);
+        }
       } else {
         UNIMPLEMENTED();
       }
diff --git a/src/arm/simulator-arm.h b/src/arm/simulator-arm.h
index 71b8e40..7435b77 100644
--- a/src/arm/simulator-arm.h
+++ b/src/arm/simulator-arm.h
@@ -200,7 +200,7 @@
   // Call on program start.
   static void Initialize(Isolate* isolate);
 
-  static void TearDown(base::HashMap* i_cache, Redirection* first);
+  static void TearDown(base::CustomMatcherHashMap* i_cache, Redirection* first);
 
   // V8 generally calls into generated JS code with 5 parameters and into
   // generated RegExp code with 7 parameters. This is a convenience function,
@@ -222,7 +222,8 @@
   char* last_debugger_input() { return last_debugger_input_; }
 
   // ICache checking.
-  static void FlushICache(base::HashMap* i_cache, void* start, size_t size);
+  static void FlushICache(base::CustomMatcherHashMap* i_cache, void* start,
+                          size_t size);
 
   // Returns true if pc register contains one of the 'special_values' defined
   // below (bad_lr, end_sim_pc).
@@ -327,6 +328,9 @@
   void DecodeType6(Instruction* instr);
   void DecodeType7(Instruction* instr);
 
+  // CP15 coprocessor instructions.
+  void DecodeTypeCP15(Instruction* instr);
+
   // Support for VFP.
   void DecodeTypeVFP(Instruction* instr);
   void DecodeType6CoprocessorIns(Instruction* instr);
@@ -341,9 +345,12 @@
   void InstructionDecode(Instruction* instr);
 
   // ICache.
-  static void CheckICache(base::HashMap* i_cache, Instruction* instr);
-  static void FlushOnePage(base::HashMap* i_cache, intptr_t start, int size);
-  static CachePage* GetCachePage(base::HashMap* i_cache, void* page);
+  static void CheckICache(base::CustomMatcherHashMap* i_cache,
+                          Instruction* instr);
+  static void FlushOnePage(base::CustomMatcherHashMap* i_cache, intptr_t start,
+                           int size);
+  static CachePage* GetCachePage(base::CustomMatcherHashMap* i_cache,
+                                 void* page);
 
   // Runtime call support.
   static void* RedirectExternalReference(
@@ -403,7 +410,7 @@
   char* last_debugger_input_;
 
   // Icache simulation
-  base::HashMap* i_cache_;
+  base::CustomMatcherHashMap* i_cache_;
 
   // Registered breakpoints.
   Instruction* break_pc_;
diff --git a/src/arm64/OWNERS b/src/arm64/OWNERS
deleted file mode 100644
index 906a5ce..0000000
--- a/src/arm64/OWNERS
+++ /dev/null
@@ -1 +0,0 @@
-rmcilroy@chromium.org
diff --git a/src/arm64/code-stubs-arm64.cc b/src/arm64/code-stubs-arm64.cc
index 5f103bc..ca5ea80 100644
--- a/src/arm64/code-stubs-arm64.cc
+++ b/src/arm64/code-stubs-arm64.cc
@@ -1089,6 +1089,7 @@
   __ Ldr(cp, MemOperand(cp));
   __ Mov(jssp, Operand(pending_handler_sp_address));
   __ Ldr(jssp, MemOperand(jssp));
+  __ Mov(csp, jssp);
   __ Mov(fp, Operand(pending_handler_fp_address));
   __ Ldr(fp, MemOperand(fp));
 
@@ -1845,7 +1846,6 @@
   //  feedback_vector : the feedback vector
   //  index :           slot in feedback vector (smi)
   Label initialize, done, miss, megamorphic, not_array_function;
-  Label done_initialize_count, done_increment_count;
 
   DCHECK_EQ(*TypeFeedbackVector::MegamorphicSentinel(masm->isolate()),
             masm->isolate()->heap()->megamorphic_symbol());
@@ -1868,7 +1868,7 @@
   Label check_allocation_site;
   __ Ldr(feedback_value, FieldMemOperand(feedback, WeakCell::kValueOffset));
   __ Cmp(function, feedback_value);
-  __ B(eq, &done_increment_count);
+  __ B(eq, &done);
   __ CompareRoot(feedback, Heap::kmegamorphic_symbolRootIndex);
   __ B(eq, &done);
   __ Ldr(feedback_map, FieldMemOperand(feedback, HeapObject::kMapOffset));
@@ -1890,7 +1890,7 @@
   __ LoadNativeContextSlot(Context::ARRAY_FUNCTION_INDEX, scratch1);
   __ Cmp(function, scratch1);
   __ B(ne, &megamorphic);
-  __ B(&done_increment_count);
+  __ B(&done);
 
   __ Bind(&miss);
 
@@ -1921,33 +1921,22 @@
   CreateAllocationSiteStub create_stub(masm->isolate());
   CallStubInRecordCallTarget(masm, &create_stub, argc, function,
                              feedback_vector, index, new_target);
-  __ B(&done_initialize_count);
+  __ B(&done);
 
   __ Bind(&not_array_function);
   CreateWeakCellStub weak_cell_stub(masm->isolate());
   CallStubInRecordCallTarget(masm, &weak_cell_stub, argc, function,
                              feedback_vector, index, new_target);
 
-  __ bind(&done_initialize_count);
-  // Initialize the call counter.
-  __ Mov(scratch1, Operand(Smi::FromInt(1)));
-  __ Adds(scratch2, feedback_vector,
-          Operand::UntagSmiAndScale(index, kPointerSizeLog2));
-  __ Str(scratch1,
-         FieldMemOperand(scratch2, FixedArray::kHeaderSize + kPointerSize));
-  __ b(&done);
+  __ Bind(&done);
 
-  __ bind(&done_increment_count);
-
-  // Increment the call count for monomorphic function calls.
+  // Increment the call count for all function calls.
   __ Add(scratch1, feedback_vector,
          Operand::UntagSmiAndScale(index, kPointerSizeLog2));
   __ Add(scratch1, scratch1, Operand(FixedArray::kHeaderSize + kPointerSize));
   __ Ldr(scratch2, FieldMemOperand(scratch1, 0));
   __ Add(scratch2, scratch2, Operand(Smi::FromInt(1)));
   __ Str(scratch2, FieldMemOperand(scratch1, 0));
-
-  __ Bind(&done);
 }
 
 
@@ -1995,6 +1984,17 @@
   __ Jump(isolate()->builtins()->Construct(), RelocInfo::CODE_TARGET);
 }
 
+// Note: feedback_vector and slot are clobbered after the call.
+static void IncrementCallCount(MacroAssembler* masm, Register feedback_vector,
+                               Register slot) {
+  __ Add(feedback_vector, feedback_vector,
+         Operand::UntagSmiAndScale(slot, kPointerSizeLog2));
+  __ Add(feedback_vector, feedback_vector,
+         Operand(FixedArray::kHeaderSize + kPointerSize));
+  __ Ldr(slot, FieldMemOperand(feedback_vector, 0));
+  __ Add(slot, slot, Operand(Smi::FromInt(1)));
+  __ Str(slot, FieldMemOperand(feedback_vector, 0));
+}
 
 void CallICStub::HandleArrayCase(MacroAssembler* masm, Label* miss) {
   // x1 - function
@@ -2014,13 +2014,7 @@
   __ Mov(x0, Operand(arg_count()));
 
   // Increment the call count for monomorphic function calls.
-  __ Add(feedback_vector, feedback_vector,
-         Operand::UntagSmiAndScale(index, kPointerSizeLog2));
-  __ Add(feedback_vector, feedback_vector,
-         Operand(FixedArray::kHeaderSize + kPointerSize));
-  __ Ldr(index, FieldMemOperand(feedback_vector, 0));
-  __ Add(index, index, Operand(Smi::FromInt(1)));
-  __ Str(index, FieldMemOperand(feedback_vector, 0));
+  IncrementCallCount(masm, feedback_vector, index);
 
   // Set up arguments for the array constructor stub.
   Register allocation_site_arg = feedback_vector;
@@ -2038,7 +2032,7 @@
   // x1 - function
   // x3 - slot id (Smi)
   // x2 - vector
-  Label extra_checks_or_miss, call, call_function;
+  Label extra_checks_or_miss, call, call_function, call_count_incremented;
   int argc = arg_count();
   ParameterCount actual(argc);
 
@@ -2073,16 +2067,11 @@
   // convincing us that we have a monomorphic JSFunction.
   __ JumpIfSmi(function, &extra_checks_or_miss);
 
-  // Increment the call count for monomorphic function calls.
-  __ Add(feedback_vector, feedback_vector,
-         Operand::UntagSmiAndScale(index, kPointerSizeLog2));
-  __ Add(feedback_vector, feedback_vector,
-         Operand(FixedArray::kHeaderSize + kPointerSize));
-  __ Ldr(index, FieldMemOperand(feedback_vector, 0));
-  __ Add(index, index, Operand(Smi::FromInt(1)));
-  __ Str(index, FieldMemOperand(feedback_vector, 0));
-
   __ Bind(&call_function);
+
+  // Increment the call count for monomorphic function calls.
+  IncrementCallCount(masm, feedback_vector, index);
+
   __ Mov(x0, argc);
   __ Jump(masm->isolate()->builtins()->CallFunction(convert_mode(),
                                                     tail_call_mode()),
@@ -2106,6 +2095,7 @@
     __ jmp(&miss);
   }
 
+  // TODO(mvstanton): the code below is effectively disabled. Investigate.
   __ JumpIfRoot(x4, Heap::kuninitialized_symbolRootIndex, &miss);
 
   // We are going megamorphic. If the feedback is a JSFunction, it is fine
@@ -2118,6 +2108,11 @@
   __ Str(x5, FieldMemOperand(x4, FixedArray::kHeaderSize));
 
   __ Bind(&call);
+
+  // Increment the call count for megamorphic function calls.
+  IncrementCallCount(masm, feedback_vector, index);
+
+  __ Bind(&call_count_incremented);
   __ Mov(x0, argc);
   __ Jump(masm->isolate()->builtins()->Call(convert_mode(), tail_call_mode()),
           RelocInfo::CODE_TARGET);
@@ -2143,12 +2138,6 @@
   __ Cmp(x4, x5);
   __ B(ne, &miss);
 
-  // Initialize the call counter.
-  __ Mov(x5, Smi::FromInt(1));
-  __ Adds(x4, feedback_vector,
-          Operand::UntagSmiAndScale(index, kPointerSizeLog2));
-  __ Str(x5, FieldMemOperand(x4, FixedArray::kHeaderSize + kPointerSize));
-
   // Store the function. Use a stub since we need a frame for allocation.
   // x2 - vector
   // x3 - slot
@@ -2156,9 +2145,13 @@
   {
     FrameScope scope(masm, StackFrame::INTERNAL);
     CreateWeakCellStub create_stub(masm->isolate());
+    __ Push(feedback_vector, index);
+
     __ Push(cp, function);
     __ CallStub(&create_stub);
     __ Pop(cp, function);
+
+    __ Pop(feedback_vector, index);
   }
 
   __ B(&call_function);
@@ -2168,7 +2161,8 @@
   __ bind(&miss);
   GenerateMiss(masm);
 
-  __ B(&call);
+  // The runtime increments the call count in the vector for us.
+  __ B(&call_count_incremented);
 }
 
 
@@ -2681,321 +2675,6 @@
 }
 
 
-void SubStringStub::Generate(MacroAssembler* masm) {
-  ASM_LOCATION("SubStringStub::Generate");
-  Label runtime;
-
-  // Stack frame on entry.
-  //  lr: return address
-  //  jssp[0]:  substring "to" offset
-  //  jssp[8]:  substring "from" offset
-  //  jssp[16]: pointer to string object
-
-  // This stub is called from the native-call %_SubString(...), so
-  // nothing can be assumed about the arguments. It is tested that:
-  //  "string" is a sequential string,
-  //  both "from" and "to" are smis, and
-  //  0 <= from <= to <= string.length (in debug mode.)
-  // If any of these assumptions fail, we call the runtime system.
-
-  static const int kToOffset = 0 * kPointerSize;
-  static const int kFromOffset = 1 * kPointerSize;
-  static const int kStringOffset = 2 * kPointerSize;
-
-  Register to = x0;
-  Register from = x15;
-  Register input_string = x10;
-  Register input_length = x11;
-  Register input_type = x12;
-  Register result_string = x0;
-  Register result_length = x1;
-  Register temp = x3;
-
-  __ Peek(to, kToOffset);
-  __ Peek(from, kFromOffset);
-
-  // Check that both from and to are smis. If not, jump to runtime.
-  __ JumpIfEitherNotSmi(from, to, &runtime);
-  __ SmiUntag(from);
-  __ SmiUntag(to);
-
-  // Calculate difference between from and to. If to < from, branch to runtime.
-  __ Subs(result_length, to, from);
-  __ B(mi, &runtime);
-
-  // Check from is positive.
-  __ Tbnz(from, kWSignBit, &runtime);
-
-  // Make sure first argument is a string.
-  __ Peek(input_string, kStringOffset);
-  __ JumpIfSmi(input_string, &runtime);
-  __ IsObjectJSStringType(input_string, input_type, &runtime);
-
-  Label single_char;
-  __ Cmp(result_length, 1);
-  __ B(eq, &single_char);
-
-  // Short-cut for the case of trivial substring.
-  Label return_x0;
-  __ Ldrsw(input_length,
-           UntagSmiFieldMemOperand(input_string, String::kLengthOffset));
-
-  __ Cmp(result_length, input_length);
-  __ CmovX(x0, input_string, eq);
-  // Return original string.
-  __ B(eq, &return_x0);
-
-  // Longer than original string's length or negative: unsafe arguments.
-  __ B(hi, &runtime);
-
-  // Shorter than original string's length: an actual substring.
-
-  //   x0   to               substring end character offset
-  //   x1   result_length    length of substring result
-  //   x10  input_string     pointer to input string object
-  //   x10  unpacked_string  pointer to unpacked string object
-  //   x11  input_length     length of input string
-  //   x12  input_type       instance type of input string
-  //   x15  from             substring start character offset
-
-  // Deal with different string types: update the index if necessary and put
-  // the underlying string into register unpacked_string.
-  Label underlying_unpacked, sliced_string, seq_or_external_string;
-  Label update_instance_type;
-  // If the string is not indirect, it can only be sequential or external.
-  STATIC_ASSERT(kIsIndirectStringMask == (kSlicedStringTag & kConsStringTag));
-  STATIC_ASSERT(kIsIndirectStringMask != 0);
-
-  // Test for string types, and branch/fall through to appropriate unpacking
-  // code.
-  __ Tst(input_type, kIsIndirectStringMask);
-  __ B(eq, &seq_or_external_string);
-  __ Tst(input_type, kSlicedNotConsMask);
-  __ B(ne, &sliced_string);
-
-  Register unpacked_string = input_string;
-
-  // Cons string. Check whether it is flat, then fetch first part.
-  __ Ldr(temp, FieldMemOperand(input_string, ConsString::kSecondOffset));
-  __ JumpIfNotRoot(temp, Heap::kempty_stringRootIndex, &runtime);
-  __ Ldr(unpacked_string,
-         FieldMemOperand(input_string, ConsString::kFirstOffset));
-  __ B(&update_instance_type);
-
-  __ Bind(&sliced_string);
-  // Sliced string. Fetch parent and correct start index by offset.
-  __ Ldrsw(temp,
-           UntagSmiFieldMemOperand(input_string, SlicedString::kOffsetOffset));
-  __ Add(from, from, temp);
-  __ Ldr(unpacked_string,
-         FieldMemOperand(input_string, SlicedString::kParentOffset));
-
-  __ Bind(&update_instance_type);
-  __ Ldr(temp, FieldMemOperand(unpacked_string, HeapObject::kMapOffset));
-  __ Ldrb(input_type, FieldMemOperand(temp, Map::kInstanceTypeOffset));
-  // Now control must go to &underlying_unpacked. Since the no code is generated
-  // before then we fall through instead of generating a useless branch.
-
-  __ Bind(&seq_or_external_string);
-  // Sequential or external string. Registers unpacked_string and input_string
-  // alias, so there's nothing to do here.
-  // Note that if code is added here, the above code must be updated.
-
-  //   x0   result_string    pointer to result string object (uninit)
-  //   x1   result_length    length of substring result
-  //   x10  unpacked_string  pointer to unpacked string object
-  //   x11  input_length     length of input string
-  //   x12  input_type       instance type of input string
-  //   x15  from             substring start character offset
-  __ Bind(&underlying_unpacked);
-
-  if (FLAG_string_slices) {
-    Label copy_routine;
-    __ Cmp(result_length, SlicedString::kMinLength);
-    // Short slice. Copy instead of slicing.
-    __ B(lt, &copy_routine);
-    // Allocate new sliced string. At this point we do not reload the instance
-    // type including the string encoding because we simply rely on the info
-    // provided by the original string. It does not matter if the original
-    // string's encoding is wrong because we always have to recheck encoding of
-    // the newly created string's parent anyway due to externalized strings.
-    Label two_byte_slice, set_slice_header;
-    STATIC_ASSERT((kStringEncodingMask & kOneByteStringTag) != 0);
-    STATIC_ASSERT((kStringEncodingMask & kTwoByteStringTag) == 0);
-    __ Tbz(input_type, MaskToBit(kStringEncodingMask), &two_byte_slice);
-    __ AllocateOneByteSlicedString(result_string, result_length, x3, x4,
-                                   &runtime);
-    __ B(&set_slice_header);
-
-    __ Bind(&two_byte_slice);
-    __ AllocateTwoByteSlicedString(result_string, result_length, x3, x4,
-                                   &runtime);
-
-    __ Bind(&set_slice_header);
-    __ SmiTag(from);
-    __ Str(from, FieldMemOperand(result_string, SlicedString::kOffsetOffset));
-    __ Str(unpacked_string,
-           FieldMemOperand(result_string, SlicedString::kParentOffset));
-    __ B(&return_x0);
-
-    __ Bind(&copy_routine);
-  }
-
-  //   x0   result_string    pointer to result string object (uninit)
-  //   x1   result_length    length of substring result
-  //   x10  unpacked_string  pointer to unpacked string object
-  //   x11  input_length     length of input string
-  //   x12  input_type       instance type of input string
-  //   x13  unpacked_char0   pointer to first char of unpacked string (uninit)
-  //   x13  substring_char0  pointer to first char of substring (uninit)
-  //   x14  result_char0     pointer to first char of result (uninit)
-  //   x15  from             substring start character offset
-  Register unpacked_char0 = x13;
-  Register substring_char0 = x13;
-  Register result_char0 = x14;
-  Label two_byte_sequential, sequential_string, allocate_result;
-  STATIC_ASSERT(kExternalStringTag != 0);
-  STATIC_ASSERT(kSeqStringTag == 0);
-
-  __ Tst(input_type, kExternalStringTag);
-  __ B(eq, &sequential_string);
-
-  __ Tst(input_type, kShortExternalStringTag);
-  __ B(ne, &runtime);
-  __ Ldr(unpacked_char0,
-         FieldMemOperand(unpacked_string, ExternalString::kResourceDataOffset));
-  // unpacked_char0 points to the first character of the underlying string.
-  __ B(&allocate_result);
-
-  __ Bind(&sequential_string);
-  // Locate first character of underlying subject string.
-  STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqOneByteString::kHeaderSize);
-  __ Add(unpacked_char0, unpacked_string,
-         SeqOneByteString::kHeaderSize - kHeapObjectTag);
-
-  __ Bind(&allocate_result);
-  // Sequential one-byte string. Allocate the result.
-  STATIC_ASSERT((kOneByteStringTag & kStringEncodingMask) != 0);
-  __ Tbz(input_type, MaskToBit(kStringEncodingMask), &two_byte_sequential);
-
-  // Allocate and copy the resulting one-byte string.
-  __ AllocateOneByteString(result_string, result_length, x3, x4, x5, &runtime);
-
-  // Locate first character of substring to copy.
-  __ Add(substring_char0, unpacked_char0, from);
-
-  // Locate first character of result.
-  __ Add(result_char0, result_string,
-         SeqOneByteString::kHeaderSize - kHeapObjectTag);
-
-  STATIC_ASSERT((SeqOneByteString::kHeaderSize & kObjectAlignmentMask) == 0);
-  __ CopyBytes(result_char0, substring_char0, result_length, x3, kCopyLong);
-  __ B(&return_x0);
-
-  // Allocate and copy the resulting two-byte string.
-  __ Bind(&two_byte_sequential);
-  __ AllocateTwoByteString(result_string, result_length, x3, x4, x5, &runtime);
-
-  // Locate first character of substring to copy.
-  __ Add(substring_char0, unpacked_char0, Operand(from, LSL, 1));
-
-  // Locate first character of result.
-  __ Add(result_char0, result_string,
-         SeqTwoByteString::kHeaderSize - kHeapObjectTag);
-
-  STATIC_ASSERT((SeqTwoByteString::kHeaderSize & kObjectAlignmentMask) == 0);
-  __ Add(result_length, result_length, result_length);
-  __ CopyBytes(result_char0, substring_char0, result_length, x3, kCopyLong);
-
-  __ Bind(&return_x0);
-  Counters* counters = isolate()->counters();
-  __ IncrementCounter(counters->sub_string_native(), 1, x3, x4);
-  __ Drop(3);
-  __ Ret();
-
-  __ Bind(&runtime);
-  __ TailCallRuntime(Runtime::kSubString);
-
-  __ bind(&single_char);
-  // x1: result_length
-  // x10: input_string
-  // x12: input_type
-  // x15: from (untagged)
-  __ SmiTag(from);
-  StringCharAtGenerator generator(input_string, from, result_length, x0,
-                                  &runtime, &runtime, &runtime,
-                                  RECEIVER_IS_STRING);
-  generator.GenerateFast(masm);
-  __ Drop(3);
-  __ Ret();
-  generator.SkipSlow(masm, &runtime);
-}
-
-void ToStringStub::Generate(MacroAssembler* masm) {
-  // The ToString stub takes one argument in x0.
-  Label is_number;
-  __ JumpIfSmi(x0, &is_number);
-
-  Label not_string;
-  __ JumpIfObjectType(x0, x1, x1, FIRST_NONSTRING_TYPE, &not_string, hs);
-  // x0: receiver
-  // x1: receiver instance type
-  __ Ret();
-  __ Bind(&not_string);
-
-  Label not_heap_number;
-  __ Cmp(x1, HEAP_NUMBER_TYPE);
-  __ B(ne, &not_heap_number);
-  __ Bind(&is_number);
-  NumberToStringStub stub(isolate());
-  __ TailCallStub(&stub);
-  __ Bind(&not_heap_number);
-
-  Label not_oddball;
-  __ Cmp(x1, ODDBALL_TYPE);
-  __ B(ne, &not_oddball);
-  __ Ldr(x0, FieldMemOperand(x0, Oddball::kToStringOffset));
-  __ Ret();
-  __ Bind(&not_oddball);
-
-  __ Push(x0);  // Push argument.
-  __ TailCallRuntime(Runtime::kToString);
-}
-
-
-void ToNameStub::Generate(MacroAssembler* masm) {
-  // The ToName stub takes one argument in x0.
-  Label is_number;
-  __ JumpIfSmi(x0, &is_number);
-
-  Label not_name;
-  STATIC_ASSERT(FIRST_NAME_TYPE == FIRST_TYPE);
-  __ JumpIfObjectType(x0, x1, x1, LAST_NAME_TYPE, &not_name, hi);
-  // x0: receiver
-  // x1: receiver instance type
-  __ Ret();
-  __ Bind(&not_name);
-
-  Label not_heap_number;
-  __ Cmp(x1, HEAP_NUMBER_TYPE);
-  __ B(ne, &not_heap_number);
-  __ Bind(&is_number);
-  NumberToStringStub stub(isolate());
-  __ TailCallStub(&stub);
-  __ Bind(&not_heap_number);
-
-  Label not_oddball;
-  __ Cmp(x1, ODDBALL_TYPE);
-  __ B(ne, &not_oddball);
-  __ Ldr(x0, FieldMemOperand(x0, Oddball::kToStringOffset));
-  __ Ret();
-  __ Bind(&not_oddball);
-
-  __ Push(x0);  // Push argument.
-  __ TailCallRuntime(Runtime::kToName);
-}
-
-
 void StringHelper::GenerateFlatOneByteStringEquals(
     MacroAssembler* masm, Register left, Register right, Register scratch1,
     Register scratch2, Register scratch3) {
@@ -3195,16 +2874,6 @@
   Label need_incremental;
   Label need_incremental_pop_scratch;
 
-  Register mem_chunk = regs_.scratch0();
-  Register counter = regs_.scratch1();
-  __ Bic(mem_chunk, regs_.object(), Page::kPageAlignmentMask);
-  __ Ldr(counter,
-         MemOperand(mem_chunk, MemoryChunk::kWriteBarrierCounterOffset));
-  __ Subs(counter, counter, 1);
-  __ Str(counter,
-         MemOperand(mem_chunk, MemoryChunk::kWriteBarrierCounterOffset));
-  __ B(mi, &need_incremental);
-
   // If the object is not black we don't have to inform the incremental marker.
   __ JumpIfBlack(regs_.object(), regs_.scratch0(), regs_.scratch1(), &on_black);
 
@@ -3655,7 +3324,7 @@
 
   __ Ldr(receiver_map, MemOperand(pointer_reg, kPointerSize * 2));
   // Load the map into the correct register.
-  DCHECK(feedback.is(VectorStoreTransitionDescriptor::MapRegister()));
+  DCHECK(feedback.is(StoreTransitionDescriptor::MapRegister()));
   __ mov(feedback, too_far);
   __ Add(receiver_map, receiver_map, Code::kHeaderSize - kHeapObjectTag);
   __ Jump(receiver_map);
@@ -4673,7 +4342,7 @@
     // Fall back to %AllocateInNewSpace (if not too big).
     Label too_big_for_new_space;
     __ Bind(&allocate);
-    __ Cmp(x6, Operand(Page::kMaxRegularHeapObjectSize));
+    __ Cmp(x6, Operand(kMaxRegularHeapObjectSize));
     __ B(gt, &too_big_for_new_space);
     {
       FrameScope scope(masm, StackFrame::INTERNAL);
@@ -5093,7 +4762,7 @@
   // Fall back to %AllocateInNewSpace (if not too big).
   Label too_big_for_new_space;
   __ Bind(&allocate);
-  __ Cmp(x6, Operand(Page::kMaxRegularHeapObjectSize));
+  __ Cmp(x6, Operand(kMaxRegularHeapObjectSize));
   __ B(gt, &too_big_for_new_space);
   {
     FrameScope scope(masm, StackFrame::INTERNAL);
diff --git a/src/arm64/interface-descriptors-arm64.cc b/src/arm64/interface-descriptors-arm64.cc
index 881d2d8..d7bc3de 100644
--- a/src/arm64/interface-descriptors-arm64.cc
+++ b/src/arm64/interface-descriptors-arm64.cc
@@ -42,13 +42,9 @@
 
 const Register StoreWithVectorDescriptor::VectorRegister() { return x3; }
 
-const Register VectorStoreTransitionDescriptor::SlotRegister() { return x4; }
-const Register VectorStoreTransitionDescriptor::VectorRegister() { return x3; }
-const Register VectorStoreTransitionDescriptor::MapRegister() { return x5; }
-
-
-const Register StoreTransitionDescriptor::MapRegister() { return x3; }
-
+const Register StoreTransitionDescriptor::SlotRegister() { return x4; }
+const Register StoreTransitionDescriptor::VectorRegister() { return x3; }
+const Register StoreTransitionDescriptor::MapRegister() { return x5; }
 
 const Register StoreGlobalViaContextDescriptor::SlotRegister() { return x2; }
 const Register StoreGlobalViaContextDescriptor::ValueRegister() { return x0; }
@@ -407,7 +403,7 @@
                                    &default_descriptor);
 }
 
-void ApiCallbackDescriptorBase::InitializePlatformSpecific(
+void ApiCallbackDescriptor::InitializePlatformSpecific(
     CallInterfaceDescriptorData* data) {
   static PlatformInterfaceDescriptor default_descriptor =
       PlatformInterfaceDescriptor(CAN_INLINE_TARGET_ADDRESS);
@@ -446,7 +442,19 @@
       x0,  // argument count (not including receiver)
       x3,  // new target
       x1,  // constructor to call
-      x2   // address of the first argument
+      x2,  // allocation site feedback if available, undefined otherwise
+      x4   // address of the first argument
+  };
+  data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
+void InterpreterPushArgsAndConstructArrayDescriptor::InitializePlatformSpecific(
+    CallInterfaceDescriptorData* data) {
+  Register registers[] = {
+      x0,  // argument count (not including receiver)
+      x1,  // target to call checked to be Array function
+      x2,  // allocation site feedback if available, undefined otherwise
+      x3   // address of the first argument
   };
   data->InitializePlatformSpecific(arraysize(registers), registers);
 }
diff --git a/src/arm64/macro-assembler-arm64.cc b/src/arm64/macro-assembler-arm64.cc
index f674dd5..87ea1eb 100644
--- a/src/arm64/macro-assembler-arm64.cc
+++ b/src/arm64/macro-assembler-arm64.cc
@@ -1571,9 +1571,8 @@
                                 Label* branch) {
   DCHECK(cond == eq || cond == ne);
   UseScratchRegisterScope temps(this);
-  const int mask =
-      (1 << MemoryChunk::IN_FROM_SPACE) | (1 << MemoryChunk::IN_TO_SPACE);
-  CheckPageFlag(object, temps.AcquireSameSizeAs(object), mask, cond, branch);
+  CheckPageFlag(object, temps.AcquireSameSizeAs(object),
+                MemoryChunk::kIsInNewSpaceMask, cond, branch);
 }
 
 
@@ -3037,7 +3036,7 @@
                               Register scratch2,
                               Label* gc_required,
                               AllocationFlags flags) {
-  DCHECK(object_size <= Page::kMaxRegularHeapObjectSize);
+  DCHECK(object_size <= kMaxRegularHeapObjectSize);
   DCHECK((flags & ALLOCATION_FOLDED) == 0);
   if (!FLAG_inline_new) {
     if (emit_debug_code()) {
@@ -3196,7 +3195,7 @@
 void MacroAssembler::FastAllocate(int object_size, Register result,
                                   Register scratch1, Register scratch2,
                                   AllocationFlags flags) {
-  DCHECK(object_size <= Page::kMaxRegularHeapObjectSize);
+  DCHECK(object_size <= kMaxRegularHeapObjectSize);
 
   DCHECK(!AreAliased(result, scratch1, scratch2));
   DCHECK(result.Is64Bits() && scratch1.Is64Bits() && scratch2.Is64Bits());
diff --git a/src/arm64/macro-assembler-arm64.h b/src/arm64/macro-assembler-arm64.h
index 06e9a1d..37e9926 100644
--- a/src/arm64/macro-assembler-arm64.h
+++ b/src/arm64/macro-assembler-arm64.h
@@ -742,6 +742,18 @@
   // csp must be aligned to 16 bytes.
   void PeekPair(const CPURegister& dst1, const CPURegister& dst2, int offset);
 
+  // Emit code that loads |parameter_index|'th parameter from the stack to
+  // the register according to the CallInterfaceDescriptor definition.
+  // |sp_to_caller_sp_offset_in_words| specifies the number of words pushed
+  // below the caller's sp.
+  template <class Descriptor>
+  void LoadParameterFromStack(
+      Register reg, typename Descriptor::ParameterIndices parameter_index,
+      int sp_to_ra_offset_in_words = 0) {
+    DCHECK(Descriptor::kPassLastArgsOnStack);
+    UNIMPLEMENTED();
+  }
+
   // Claim or drop stack space without actually accessing memory.
   //
   // In debug mode, both of these will write invalid data into the claimed or
diff --git a/src/arm64/simulator-arm64.cc b/src/arm64/simulator-arm64.cc
index f5595a8..83b4cf7 100644
--- a/src/arm64/simulator-arm64.cc
+++ b/src/arm64/simulator-arm64.cc
@@ -524,7 +524,8 @@
 
 
 // static
-void Simulator::TearDown(base::HashMap* i_cache, Redirection* first) {
+void Simulator::TearDown(base::CustomMatcherHashMap* i_cache,
+                         Redirection* first) {
   Redirection::DeleteChain(first);
 }
 
diff --git a/src/arm64/simulator-arm64.h b/src/arm64/simulator-arm64.h
index d490109..c8c715a 100644
--- a/src/arm64/simulator-arm64.h
+++ b/src/arm64/simulator-arm64.h
@@ -151,7 +151,8 @@
 
 class Simulator : public DecoderVisitor {
  public:
-  static void FlushICache(base::HashMap* i_cache, void* start, size_t size) {
+  static void FlushICache(base::CustomMatcherHashMap* i_cache, void* start,
+                          size_t size) {
     USE(i_cache);
     USE(start);
     USE(size);
@@ -167,7 +168,7 @@
 
   static void Initialize(Isolate* isolate);
 
-  static void TearDown(base::HashMap* i_cache, Redirection* first);
+  static void TearDown(base::CustomMatcherHashMap* i_cache, Redirection* first);
 
   static Simulator* current(v8::internal::Isolate* isolate);
 
diff --git a/src/asmjs/asm-js.cc b/src/asmjs/asm-js.cc
index e94d917..a1af1af 100644
--- a/src/asmjs/asm-js.cc
+++ b/src/asmjs/asm-js.cc
@@ -16,9 +16,9 @@
 #include "src/objects.h"
 #include "src/parsing/parse-info.h"
 
-#include "src/wasm/encoder.h"
 #include "src/wasm/module-decoder.h"
 #include "src/wasm/wasm-js.h"
+#include "src/wasm/wasm-module-builder.h"
 #include "src/wasm/wasm-module.h"
 #include "src/wasm/wasm-result.h"
 
@@ -30,29 +30,6 @@
 namespace internal {
 
 namespace {
-i::MaybeHandle<i::FixedArray> CompileModule(
-    i::Isolate* isolate, const byte* start, const byte* end,
-    ErrorThrower* thrower,
-    internal::wasm::ModuleOrigin origin = i::wasm::kWasmOrigin) {
-  // Decode but avoid a redundant pass over function bodies for verification.
-  // Verification will happen during compilation.
-  i::Zone zone(isolate->allocator());
-  internal::wasm::ModuleResult result = internal::wasm::DecodeWasmModule(
-      isolate, &zone, start, end, false, origin);
-
-  i::MaybeHandle<i::FixedArray> compiled_module;
-  if (result.failed() && origin == internal::wasm::kAsmJsOrigin) {
-    thrower->Error("Asm.js converted module failed to decode");
-  } else if (result.failed()) {
-    thrower->Failed("", result);
-  } else {
-    compiled_module = result.val->CompileFunctions(isolate, thrower);
-  }
-
-  if (result.val) delete result.val;
-  return compiled_module;
-}
-
 Handle<i::Object> StdlibMathMember(i::Isolate* isolate,
                                    Handle<JSReceiver> stdlib,
                                    Handle<Name> name) {
@@ -187,9 +164,9 @@
   i::Handle<i::FixedArray> foreign_globals;
   auto module = builder.Run(&foreign_globals);
 
-  i::MaybeHandle<i::FixedArray> compiled =
-      CompileModule(info->isolate(), module->begin(), module->end(), &thrower,
-                    internal::wasm::kAsmJsOrigin);
+  i::MaybeHandle<i::JSObject> compiled = wasm::CreateModuleObjectFromBytes(
+      info->isolate(), module->begin(), module->end(), &thrower,
+      internal::wasm::kAsmJsOrigin);
   DCHECK(!compiled.is_null());
 
   wasm::AsmTyper::StdlibSet uses = typer.StdlibUses();
@@ -223,24 +200,25 @@
                                               Handle<FixedArray> wasm_data,
                                               Handle<JSArrayBuffer> memory,
                                               Handle<JSReceiver> foreign) {
-  i::Handle<i::FixedArray> compiled(i::FixedArray::cast(wasm_data->get(0)));
+  i::Handle<i::JSObject> module(i::JSObject::cast(wasm_data->get(0)));
   i::Handle<i::FixedArray> foreign_globals(
       i::FixedArray::cast(wasm_data->get(1)));
 
   ErrorThrower thrower(isolate, "Asm.js -> WebAssembly instantiation");
 
   i::MaybeHandle<i::JSObject> maybe_module_object =
-      i::wasm::WasmModule::Instantiate(isolate, compiled, foreign, memory);
+      i::wasm::WasmModule::Instantiate(isolate, &thrower, module, foreign,
+                                       memory);
   if (maybe_module_object.is_null()) {
     return MaybeHandle<Object>();
   }
 
-  i::Handle<i::Name> name(isolate->factory()->InternalizeOneByteString(
-      STATIC_CHAR_VECTOR("__foreign_init__")));
+  i::Handle<i::Name> init_name(isolate->factory()->InternalizeUtf8String(
+      wasm::AsmWasmBuilder::foreign_init_name));
 
   i::Handle<i::Object> module_object = maybe_module_object.ToHandleChecked();
   i::MaybeHandle<i::Object> maybe_init =
-      i::Object::GetProperty(module_object, name);
+      i::Object::GetProperty(module_object, init_name);
   DCHECK(!maybe_init.is_null());
 
   i::Handle<i::Object> init = maybe_init.ToHandleChecked();
@@ -265,10 +243,18 @@
   i::MaybeHandle<i::Object> retval = i::Execution::Call(
       isolate, init, undefined, foreign_globals->length(), foreign_args_array);
   delete[] foreign_args_array;
-
   DCHECK(!retval.is_null());
 
-  return maybe_module_object;
+  i::Handle<i::Name> single_function_name(
+      isolate->factory()->InternalizeUtf8String(
+          wasm::AsmWasmBuilder::single_function_name));
+  i::MaybeHandle<i::Object> single_function =
+      i::Object::GetProperty(module_object, single_function_name);
+  if (!single_function.is_null() &&
+      !single_function.ToHandleChecked()->IsUndefined(isolate)) {
+    return single_function;
+  }
+  return module_object;
 }
 
 }  // namespace internal
diff --git a/src/asmjs/asm-js.h b/src/asmjs/asm-js.h
index 44bf04d..a2c5cec 100644
--- a/src/asmjs/asm-js.h
+++ b/src/asmjs/asm-js.h
@@ -5,24 +5,21 @@
 #ifndef V8_ASMJS_ASM_JS_H_
 #define V8_ASMJS_ASM_JS_H_
 
-#ifndef V8_SHARED
-#include "src/allocation.h"
-#include "src/base/hashmap.h"
-#else
-#include "include/v8.h"
-#include "src/base/compiler-specific.h"
-#endif  // !V8_SHARED
-#include "src/parsing/parser.h"
+#include "src/globals.h"
 
 namespace v8 {
 namespace internal {
+
+class JSArrayBuffer;
+class ParseInfo;
+
 // Interface to compile and instantiate for asmjs.
 class AsmJs {
  public:
-  static MaybeHandle<FixedArray> ConvertAsmToWasm(i::ParseInfo* info);
-  static bool IsStdlibValid(i::Isolate* isolate, Handle<FixedArray> wasm_data,
+  static MaybeHandle<FixedArray> ConvertAsmToWasm(ParseInfo* info);
+  static bool IsStdlibValid(Isolate* isolate, Handle<FixedArray> wasm_data,
                             Handle<JSReceiver> stdlib);
-  static MaybeHandle<Object> InstantiateAsmWasm(i::Isolate* isolate,
+  static MaybeHandle<Object> InstantiateAsmWasm(Isolate* isolate,
                                                 Handle<FixedArray> wasm_data,
                                                 Handle<JSArrayBuffer> memory,
                                                 Handle<JSReceiver> foreign);
diff --git a/src/asmjs/asm-typer.cc b/src/asmjs/asm-typer.cc
index 1d070a0..94cc4db 100644
--- a/src/asmjs/asm-typer.cc
+++ b/src/asmjs/asm-typer.cc
@@ -17,7 +17,6 @@
 #include "src/base/bits.h"
 #include "src/codegen.h"
 #include "src/globals.h"
-#include "src/type-cache.h"
 #include "src/utils.h"
 
 #define FAIL(node, msg)                                        \
@@ -129,14 +128,13 @@
       script_(script),
       root_(root),
       forward_definitions_(zone),
+      ffi_use_signatures_(zone),
       stdlib_types_(zone),
       stdlib_math_types_(zone),
       module_info_(VariableInfo::ForSpecialSymbol(zone_, kModule)),
-      global_scope_(ZoneHashMap::PointersMatch,
-                    ZoneHashMap::kDefaultHashMapCapacity,
+      global_scope_(ZoneHashMap::kDefaultHashMapCapacity,
                     ZoneAllocationPolicy(zone)),
-      local_scope_(ZoneHashMap::PointersMatch,
-                   ZoneHashMap::kDefaultHashMapCapacity,
+      local_scope_(ZoneHashMap::kDefaultHashMapCapacity,
                    ZoneAllocationPolicy(zone)),
       stack_limit_(isolate->stack_guard()->real_climit()),
       node_types_(zone_),
@@ -330,8 +328,8 @@
   return i->second;
 }
 
-AsmTyper::VariableInfo* AsmTyper::Lookup(Variable* variable) {
-  ZoneHashMap* scope = in_function_ ? &local_scope_ : &global_scope_;
+AsmTyper::VariableInfo* AsmTyper::Lookup(Variable* variable) const {
+  const ZoneHashMap* scope = in_function_ ? &local_scope_ : &global_scope_;
   ZoneHashMap::Entry* entry =
       scope->Lookup(variable, ComputePointerHash(variable));
   if (entry == nullptr && in_function_) {
@@ -424,6 +422,8 @@
   return AsmType::None();
 }
 
+AsmType* AsmTyper::TypeOf(Variable* v) const { return Lookup(v)->type(); }
+
 AsmTyper::StandardMember AsmTyper::VariableAsStandardMember(Variable* var) {
   auto* var_info = Lookup(var);
   if (var_info == nullptr) {
@@ -606,8 +606,10 @@
   if (estatement != nullptr) {
     Assignment* assignment = estatement->expression()->AsAssignment();
     if (assignment != nullptr && assignment->target()->IsVariableProxy() &&
-        assignment->target()->AsVariableProxy()->var()->mode() ==
-            CONST_LEGACY) {
+        assignment->target()
+            ->AsVariableProxy()
+            ->var()
+            ->is_sloppy_function_name()) {
       use_asm_directive = iter.Next();
     }
   }
@@ -760,7 +762,7 @@
   bool global_variable = false;
   if (value->IsLiteral() || value->IsCall()) {
     AsmType* type = nullptr;
-    RECURSE(type = VariableTypeAnnotations(value));
+    RECURSE(type = VariableTypeAnnotations(value, true));
     target_info = new (zone_) VariableInfo(type);
     target_info->set_mutability(VariableInfo::kMutableGlobal);
     global_variable = true;
@@ -1509,7 +1511,7 @@
 }
 
 namespace {
-bool IsNegate(BinaryOperation* binop) {
+bool IsInvert(BinaryOperation* binop) {
   if (binop->op() != Token::BIT_XOR) {
     return false;
   }
@@ -1524,7 +1526,7 @@
 }
 
 bool IsUnaryMinus(BinaryOperation* binop) {
-  // *VIOLATION* The parser replaces uses of +x with x*1.0.
+  // *VIOLATION* The parser replaces uses of -x with x*-1.
   if (binop->op() != Token::MUL) {
     return false;
   }
@@ -1570,7 +1572,7 @@
       }
 
       if (IsUnaryMinus(expr)) {
-        // *VIOLATION* the parser converts -x to x * -1.0.
+        // *VIOLATION* the parser converts -x to x * -1.
         AsmType* left_type;
         RECURSE(left_type = ValidateExpression(expr->left()));
         SetTypeOf(expr->right(), left_type);
@@ -1595,11 +1597,11 @@
     case Token::BIT_AND:
       return ValidateBitwiseANDExpression(expr);
     case Token::BIT_XOR:
-      if (IsNegate(expr)) {
+      if (IsInvert(expr)) {
         auto* left = expr->left();
         auto* left_as_binop = left->AsBinaryOperation();
 
-        if (left_as_binop != nullptr && IsNegate(left_as_binop)) {
+        if (left_as_binop != nullptr && IsInvert(left_as_binop)) {
           // This is the special ~~ operator.
           AsmType* left_type;
           RECURSE(left_type = ValidateExpression(left_as_binop->left()));
@@ -1660,6 +1662,12 @@
     return AsmType::Double();
   }
 
+  // The parser collapses expressions like !0 and !123 to true/false.
+  // We therefore need to permit these as alternate versions of 0 / 1.
+  if (literal->raw_value()->IsTrue() || literal->raw_value()->IsFalse()) {
+    return AsmType::Int();
+  }
+
   uint32_t value;
   if (!literal->value()->ToUint32(&value)) {
     int32_t value;
@@ -2305,9 +2313,20 @@
       FAIL(call, "Calling something that's not a function.");
     }
 
-    if (callee_type->AsFFIType() != nullptr &&
-        return_type == AsmType::Float()) {
-      FAIL(call, "Foreign functions can't return float.");
+    if (callee_type->AsFFIType() != nullptr) {
+      if (return_type == AsmType::Float()) {
+        FAIL(call, "Foreign functions can't return float.");
+      }
+      // Record FFI use signature, since the asm->wasm translator must know
+      // all uses up-front.
+      ffi_use_signatures_.emplace_back(
+          FFIUseSignature(call_var_proxy->var(), zone_));
+      FFIUseSignature* sig = &ffi_use_signatures_.back();
+      sig->return_type_ = return_type;
+      sig->arg_types_.reserve(args.size());
+      for (size_t i = 0; i < args.size(); ++i) {
+        sig->arg_types_.emplace_back(args[i]);
+      }
     }
 
     if (!callee_type->CanBeInvokedWith(return_type, args)) {
@@ -2662,7 +2681,8 @@
 
 // 5.4 VariableTypeAnnotations
 // Also used for 5.5 GlobalVariableTypeAnnotations
-AsmType* AsmTyper::VariableTypeAnnotations(Expression* initializer) {
+AsmType* AsmTyper::VariableTypeAnnotations(Expression* initializer,
+                                           bool global) {
   if (auto* literal = initializer->AsLiteral()) {
     if (literal->raw_value()->ContainsDot()) {
       SetTypeOf(initializer, AsmType::Double());
@@ -2703,10 +2723,13 @@
          "to fround.");
   }
 
-  if (!src_expr->raw_value()->ContainsDot()) {
-    FAIL(initializer,
-         "Invalid float type annotation - expected literal argument to be a "
-         "floating point literal.");
+  // Float constants must contain dots in local, but not in globals.
+  if (!global) {
+    if (!src_expr->raw_value()->ContainsDot()) {
+      FAIL(initializer,
+           "Invalid float type annotation - expected literal argument to be a "
+           "floating point literal.");
+    }
   }
 
   return AsmType::Float();
diff --git a/src/asmjs/asm-typer.h b/src/asmjs/asm-typer.h
index 6b9c70c..942ca21 100644
--- a/src/asmjs/asm-typer.h
+++ b/src/asmjs/asm-typer.h
@@ -12,12 +12,12 @@
 #include "src/allocation.h"
 #include "src/asmjs/asm-types.h"
 #include "src/ast/ast-type-bounds.h"
+#include "src/ast/ast-types.h"
 #include "src/ast/ast.h"
 #include "src/effects.h"
 #include "src/type-info.h"
-#include "src/types.h"
-#include "src/zone-containers.h"
-#include "src/zone.h"
+#include "src/zone/zone-containers.h"
+#include "src/zone/zone.h"
 
 namespace v8 {
 namespace internal {
@@ -73,12 +73,26 @@
   const char* error_message() const { return error_message_; }
 
   AsmType* TypeOf(AstNode* node) const;
+  AsmType* TypeOf(Variable* v) const;
   StandardMember VariableAsStandardMember(Variable* var);
 
   typedef std::unordered_set<StandardMember, std::hash<int> > StdlibSet;
 
   StdlibSet StdlibUses() const { return stdlib_uses_; }
 
+  // Each FFI import has a usage-site signature associated with it.
+  struct FFIUseSignature {
+    Variable* var;
+    ZoneVector<AsmType*> arg_types_;
+    AsmType* return_type_;
+    FFIUseSignature(Variable* v, Zone* zone)
+        : var(v), arg_types_(zone), return_type_(nullptr) {}
+  };
+
+  const ZoneVector<FFIUseSignature>& FFIUseSignatures() {
+    return ffi_use_signatures_;
+  }
+
  private:
   friend class v8::internal::wasm::AsmTyperHarnessBuilder;
 
@@ -192,7 +206,7 @@
   //   Lookup(Delta, Gamma, x)
   //
   // Delta is the global_scope_ member, and Gamma, local_scope_.
-  VariableInfo* Lookup(Variable* variable);
+  VariableInfo* Lookup(Variable* variable) const;
 
   // All of the ValidateXXX methods below return AsmType::None() in case of
   // validation failure.
@@ -292,8 +306,9 @@
   // 5.2 ReturnTypeAnnotations
   AsmType* ReturnTypeAnnotations(ReturnStatement* statement);
   // 5.4 VariableTypeAnnotations
-  AsmType* VariableTypeAnnotations(Expression* initializer);
   // 5.5 GlobalVariableTypeAnnotations
+  AsmType* VariableTypeAnnotations(Expression* initializer,
+                                   bool global = false);
   AsmType* ImportExpression(Property* import);
   AsmType* NewHeapView(CallNew* new_heap_view);
 
@@ -306,6 +321,7 @@
   AsmType* return_type_ = nullptr;
 
   ZoneVector<VariableInfo*> forward_definitions_;
+  ZoneVector<FFIUseSignature> ffi_use_signatures_;
   ObjectTypeMap stdlib_types_;
   ObjectTypeMap stdlib_math_types_;
 
diff --git a/src/asmjs/asm-types.h b/src/asmjs/asm-types.h
index c307bf5..6fe4201 100644
--- a/src/asmjs/asm-types.h
+++ b/src/asmjs/asm-types.h
@@ -8,8 +8,8 @@
 #include <string>
 
 #include "src/base/macros.h"
-#include "src/zone-containers.h"
-#include "src/zone.h"
+#include "src/zone/zone-containers.h"
+#include "src/zone/zone.h"
 
 namespace v8 {
 namespace internal {
diff --git a/src/asmjs/asm-wasm-builder.cc b/src/asmjs/asm-wasm-builder.cc
index 6419459..091f793 100644
--- a/src/asmjs/asm-wasm-builder.cc
+++ b/src/asmjs/asm-wasm-builder.cc
@@ -32,6 +32,7 @@
   } while (false)
 
 enum AsmScope { kModuleScope, kInitScope, kFuncScope, kExportScope };
+enum ValueFate { kDrop, kLeaveOnStack };
 
 struct ForeignVariable {
   Handle<Name> name;
@@ -43,14 +44,11 @@
  public:
   AsmWasmBuilderImpl(Isolate* isolate, Zone* zone, FunctionLiteral* literal,
                      AsmTyper* typer)
-      : local_variables_(base::HashMap::PointersMatch,
-                         ZoneHashMap::kDefaultHashMapCapacity,
+      : local_variables_(ZoneHashMap::kDefaultHashMapCapacity,
                          ZoneAllocationPolicy(zone)),
-        functions_(base::HashMap::PointersMatch,
-                   ZoneHashMap::kDefaultHashMapCapacity,
+        functions_(ZoneHashMap::kDefaultHashMapCapacity,
                    ZoneAllocationPolicy(zone)),
-        global_variables_(base::HashMap::PointersMatch,
-                          ZoneHashMap::kDefaultHashMapCapacity,
+        global_variables_(ZoneHashMap::kDefaultHashMapCapacity,
                           ZoneAllocationPolicy(zone)),
         scope_(kModuleScope),
         builder_(new (zone) WasmModuleBuilder(zone)),
@@ -61,46 +59,43 @@
         typer_(typer),
         breakable_blocks_(zone),
         foreign_variables_(zone),
-        init_function_index_(0),
-        foreign_init_function_index_(0),
+        init_function_(nullptr),
+        foreign_init_function_(nullptr),
         next_table_index_(0),
-        function_tables_(base::HashMap::PointersMatch,
-                         ZoneHashMap::kDefaultHashMapCapacity,
+        function_tables_(ZoneHashMap::kDefaultHashMapCapacity,
                          ZoneAllocationPolicy(zone)),
         imported_function_table_(this) {
     InitializeAstVisitor(isolate);
   }
 
   void InitializeInitFunction() {
-    init_function_index_ = builder_->AddFunction();
     FunctionSig::Builder b(zone(), 0, 0);
-    current_function_builder_ = builder_->FunctionAt(init_function_index_);
-    current_function_builder_->SetSignature(b.Build());
-    builder_->MarkStartFunction(init_function_index_);
-    current_function_builder_ = nullptr;
+    init_function_ = builder_->AddFunction(b.Build());
+    builder_->MarkStartFunction(init_function_);
   }
 
   void BuildForeignInitFunction() {
-    foreign_init_function_index_ = builder_->AddFunction();
+    foreign_init_function_ = builder_->AddFunction();
     FunctionSig::Builder b(zone(), 0, foreign_variables_.size());
     for (auto i = foreign_variables_.begin(); i != foreign_variables_.end();
          ++i) {
       b.AddParam(i->type);
     }
-    current_function_builder_ =
-        builder_->FunctionAt(foreign_init_function_index_);
-    current_function_builder_->SetExported();
+    foreign_init_function_->SetExported();
     std::string raw_name = "__foreign_init__";
-    current_function_builder_->SetName(raw_name.data(),
-                                       static_cast<int>(raw_name.size()));
-    current_function_builder_->SetSignature(b.Build());
+    foreign_init_function_->SetName(
+        AsmWasmBuilder::foreign_init_name,
+        static_cast<int>(strlen(AsmWasmBuilder::foreign_init_name)));
+
+    foreign_init_function_->SetName(raw_name.data(),
+                                    static_cast<int>(raw_name.size()));
+    foreign_init_function_->SetSignature(b.Build());
     for (size_t pos = 0; pos < foreign_variables_.size(); ++pos) {
-      current_function_builder_->EmitGetLocal(static_cast<uint32_t>(pos));
+      foreign_init_function_->EmitGetLocal(static_cast<uint32_t>(pos));
       ForeignVariable* fv = &foreign_variables_[pos];
       uint32_t index = LookupOrInsertGlobal(fv->var, fv->type);
-      current_function_builder_->EmitWithVarInt(kExprSetGlobal, index);
+      foreign_init_function_->EmitWithVarInt(kExprSetGlobal, index);
     }
-    current_function_builder_ = nullptr;
   }
 
   i::Handle<i::FixedArray> GetForeignArgs() {
@@ -124,8 +119,7 @@
   void VisitFunctionDeclaration(FunctionDeclaration* decl) {
     DCHECK_EQ(kModuleScope, scope_);
     DCHECK_NULL(current_function_builder_);
-    uint32_t index = LookupOrInsertFunction(decl->proxy()->var());
-    current_function_builder_ = builder_->FunctionAt(index);
+    current_function_builder_ = LookupOrInsertFunction(decl->proxy()->var());
     scope_ = kFuncScope;
     RECURSE(Visit(decl->fun()));
     scope_ = kModuleScope;
@@ -157,8 +151,7 @@
       }
     }
     if (scope_ == kFuncScope) {
-      BlockVisitor visitor(this, stmt->AsBreakableStatement(), kExprBlock,
-                           false);
+      BlockVisitor visitor(this, stmt->AsBreakableStatement(), kExprBlock);
       RECURSE(VisitStatements(stmt->statements()));
     } else {
       RECURSE(VisitStatements(stmt->statements()));
@@ -171,10 +164,12 @@
 
    public:
     BlockVisitor(AsmWasmBuilderImpl* builder, BreakableStatement* stmt,
-                 WasmOpcode opcode, bool is_loop)
+                 WasmOpcode opcode)
         : builder_(builder) {
-      builder_->breakable_blocks_.push_back(std::make_pair(stmt, is_loop));
-      builder_->current_function_builder_->Emit(opcode);
+      builder_->breakable_blocks_.push_back(
+          std::make_pair(stmt, opcode == kExprLoop));
+      // block and loops have a type immediate.
+      builder_->current_function_builder_->EmitWithU8(opcode, kLocalVoid);
     }
     ~BlockVisitor() {
       builder_->current_function_builder_->Emit(kExprEnd);
@@ -183,7 +178,32 @@
   };
 
   void VisitExpressionStatement(ExpressionStatement* stmt) {
-    RECURSE(Visit(stmt->expression()));
+    VisitForEffect(stmt->expression());
+  }
+
+  void VisitForEffect(Expression* expr) {
+    if (expr->IsAssignment()) {
+      // Don't emit drops for assignments. Instead use SetLocal/GetLocal.
+      VisitAssignment(expr->AsAssignment(), kDrop);
+      return;
+    }
+    if (expr->IsCall()) {
+      // Only emit a drop if the call has a non-void return value.
+      if (VisitCallExpression(expr->AsCall()) && scope_ == kFuncScope) {
+        current_function_builder_->Emit(kExprDrop);
+      }
+      return;
+    }
+    if (expr->IsBinaryOperation()) {
+      BinaryOperation* binop = expr->AsBinaryOperation();
+      if (binop->op() == Token::COMMA) {
+        VisitForEffect(binop->left());
+        VisitForEffect(binop->right());
+        return;
+      }
+    }
+    RECURSE(Visit(expr));
+    if (scope_ == kFuncScope) current_function_builder_->Emit(kExprDrop);
   }
 
   void VisitEmptyStatement(EmptyStatement* stmt) {}
@@ -193,7 +213,7 @@
   void VisitIfStatement(IfStatement* stmt) {
     DCHECK_EQ(kFuncScope, scope_);
     RECURSE(Visit(stmt->condition()));
-    current_function_builder_->Emit(kExprIf);
+    current_function_builder_->EmitWithU8(kExprIf, kLocalVoid);
     // WASM ifs come with implement blocks for both arms.
     breakable_blocks_.push_back(std::make_pair(nullptr, false));
     if (stmt->HasThenStatement()) {
@@ -207,48 +227,26 @@
     breakable_blocks_.pop_back();
   }
 
-  void VisitContinueStatement(ContinueStatement* stmt) {
+  void DoBreakOrContinue(BreakableStatement* target, bool is_continue) {
     DCHECK_EQ(kFuncScope, scope_);
-    DCHECK_NOT_NULL(stmt->target());
-    int i = static_cast<int>(breakable_blocks_.size()) - 1;
-    int block_distance = 0;
-    for (; i >= 0; i--) {
+    for (int i = static_cast<int>(breakable_blocks_.size()) - 1; i >= 0; --i) {
       auto elem = breakable_blocks_.at(i);
-      if (elem.first == stmt->target()) {
-        DCHECK(elem.second);
-        break;
-      } else if (elem.second) {
-        block_distance += 2;
-      } else {
-        block_distance += 1;
+      if (elem.first == target && elem.second == is_continue) {
+        int block_distance = static_cast<int>(breakable_blocks_.size() - i - 1);
+        current_function_builder_->Emit(kExprBr);
+        current_function_builder_->EmitVarInt(block_distance);
+        return;
       }
     }
-    DCHECK(i >= 0);
-    current_function_builder_->EmitWithU8(kExprBr, ARITY_0);
-    current_function_builder_->EmitVarInt(block_distance);
+    UNREACHABLE();  // statement not found
+  }
+
+  void VisitContinueStatement(ContinueStatement* stmt) {
+    DoBreakOrContinue(stmt->target(), true);
   }
 
   void VisitBreakStatement(BreakStatement* stmt) {
-    DCHECK_EQ(kFuncScope, scope_);
-    DCHECK_NOT_NULL(stmt->target());
-    int i = static_cast<int>(breakable_blocks_.size()) - 1;
-    int block_distance = 0;
-    for (; i >= 0; i--) {
-      auto elem = breakable_blocks_.at(i);
-      if (elem.first == stmt->target()) {
-        if (elem.second) {
-          block_distance++;
-        }
-        break;
-      } else if (elem.second) {
-        block_distance += 2;
-      } else {
-        block_distance += 1;
-      }
-    }
-    DCHECK(i >= 0);
-    current_function_builder_->EmitWithU8(kExprBr, ARITY_0);
-    current_function_builder_->EmitVarInt(block_distance);
+    DoBreakOrContinue(stmt->target(), false);
   }
 
   void VisitReturnStatement(ReturnStatement* stmt) {
@@ -258,9 +256,7 @@
       scope_ = kModuleScope;
     } else if (scope_ == kFuncScope) {
       RECURSE(Visit(stmt->expression()));
-      uint8_t arity =
-          TypeOf(stmt->expression()) == kAstStmt ? ARITY_0 : ARITY_1;
-      current_function_builder_->EmitWithU8(kExprReturn, arity);
+      current_function_builder_->Emit(kExprReturn);
     } else {
       UNREACHABLE();
     }
@@ -276,7 +272,7 @@
       VisitVariableProxy(tag);
       current_function_builder_->EmitI32Const(node->begin);
       current_function_builder_->Emit(kExprI32LtS);
-      current_function_builder_->Emit(kExprIf);
+      current_function_builder_->EmitWithU8(kExprIf, kLocalVoid);
       if_depth++;
       breakable_blocks_.push_back(std::make_pair(nullptr, false));
       HandleCase(node->left, case_to_block, tag, default_block, if_depth);
@@ -286,7 +282,7 @@
       VisitVariableProxy(tag);
       current_function_builder_->EmitI32Const(node->end);
       current_function_builder_->Emit(kExprI32GtS);
-      current_function_builder_->Emit(kExprIf);
+      current_function_builder_->EmitWithU8(kExprIf, kLocalVoid);
       if_depth++;
       breakable_blocks_.push_back(std::make_pair(nullptr, false));
       HandleCase(node->right, case_to_block, tag, default_block, if_depth);
@@ -296,9 +292,9 @@
       VisitVariableProxy(tag);
       current_function_builder_->EmitI32Const(node->begin);
       current_function_builder_->Emit(kExprI32Eq);
-      current_function_builder_->Emit(kExprIf);
+      current_function_builder_->EmitWithU8(kExprIf, kLocalVoid);
       DCHECK(case_to_block.find(node->begin) != case_to_block.end());
-      current_function_builder_->EmitWithU8(kExprBr, ARITY_0);
+      current_function_builder_->Emit(kExprBr);
       current_function_builder_->EmitVarInt(1 + if_depth +
                                             case_to_block[node->begin]);
       current_function_builder_->Emit(kExprEnd);
@@ -310,22 +306,22 @@
       } else {
         VisitVariableProxy(tag);
       }
-      current_function_builder_->EmitWithU8(kExprBrTable, ARITY_0);
+      current_function_builder_->Emit(kExprBrTable);
       current_function_builder_->EmitVarInt(node->end - node->begin + 1);
-      for (int v = node->begin; v <= node->end; v++) {
+      for (int v = node->begin; v <= node->end; ++v) {
         if (case_to_block.find(v) != case_to_block.end()) {
-          byte break_code[] = {BR_TARGET(if_depth + case_to_block[v])};
-          current_function_builder_->EmitCode(break_code, sizeof(break_code));
+          uint32_t target = if_depth + case_to_block[v];
+          current_function_builder_->EmitVarInt(target);
         } else {
-          byte break_code[] = {BR_TARGET(if_depth + default_block)};
-          current_function_builder_->EmitCode(break_code, sizeof(break_code));
+          uint32_t target = if_depth + default_block;
+          current_function_builder_->EmitVarInt(target);
         }
         if (v == kMaxInt) {
           break;
         }
       }
-      byte break_code[] = {BR_TARGET(if_depth + default_block)};
-      current_function_builder_->EmitCode(break_code, sizeof(break_code));
+      uint32_t target = if_depth + default_block;
+      current_function_builder_->EmitVarInt(target);
     }
 
     while (if_depth-- != prev_if_depth) {
@@ -342,14 +338,14 @@
     if (case_count == 0) {
       return;
     }
-    BlockVisitor visitor(this, stmt->AsBreakableStatement(), kExprBlock, false);
+    BlockVisitor visitor(this, stmt->AsBreakableStatement(), kExprBlock);
     ZoneVector<BlockVisitor*> blocks(zone_);
     ZoneVector<int32_t> cases(zone_);
     ZoneMap<int, unsigned int> case_to_block(zone_);
     bool has_default = false;
-    for (int i = case_count - 1; i >= 0; i--) {
+    for (int i = case_count - 1; i >= 0; --i) {
       CaseClause* clause = clauses->at(i);
-      blocks.push_back(new BlockVisitor(this, nullptr, kExprBlock, false));
+      blocks.push_back(new BlockVisitor(this, nullptr, kExprBlock));
       if (!clause->is_default()) {
         Literal* label = clause->label()->AsLiteral();
         Handle<Object> value = label->value();
@@ -366,12 +362,12 @@
     }
     if (!has_default || case_count > 1) {
       int default_block = has_default ? case_count - 1 : case_count;
-      BlockVisitor switch_logic_block(this, nullptr, kExprBlock, false);
+      BlockVisitor switch_logic_block(this, nullptr, kExprBlock);
       CaseNode* root = OrderCases(&cases, zone_);
       HandleCase(root, case_to_block, tag, default_block, 0);
       if (root->left != nullptr || root->right != nullptr ||
           root->begin == root->end) {
-        current_function_builder_->EmitWithU8(kExprBr, ARITY_0);
+        current_function_builder_->Emit(kExprBr);
         current_function_builder_->EmitVarInt(default_block);
       }
     }
@@ -388,22 +384,24 @@
 
   void VisitDoWhileStatement(DoWhileStatement* stmt) {
     DCHECK_EQ(kFuncScope, scope_);
-    BlockVisitor visitor(this, stmt->AsBreakableStatement(), kExprLoop, true);
+    BlockVisitor block(this, stmt->AsBreakableStatement(), kExprBlock);
+    BlockVisitor loop(this, stmt->AsBreakableStatement(), kExprLoop);
     RECURSE(Visit(stmt->body()));
     RECURSE(Visit(stmt->cond()));
-    current_function_builder_->Emit(kExprIf);
-    current_function_builder_->EmitWithU8U8(kExprBr, ARITY_0, 1);
+    current_function_builder_->EmitWithU8(kExprIf, kLocalVoid);
+    current_function_builder_->EmitWithU8(kExprBr, 1);
     current_function_builder_->Emit(kExprEnd);
   }
 
   void VisitWhileStatement(WhileStatement* stmt) {
     DCHECK_EQ(kFuncScope, scope_);
-    BlockVisitor visitor(this, stmt->AsBreakableStatement(), kExprLoop, true);
+    BlockVisitor block(this, stmt->AsBreakableStatement(), kExprBlock);
+    BlockVisitor loop(this, stmt->AsBreakableStatement(), kExprLoop);
     RECURSE(Visit(stmt->cond()));
     breakable_blocks_.push_back(std::make_pair(nullptr, false));
-    current_function_builder_->Emit(kExprIf);
+    current_function_builder_->EmitWithU8(kExprIf, kLocalVoid);
     RECURSE(Visit(stmt->body()));
-    current_function_builder_->EmitWithU8U8(kExprBr, ARITY_0, 1);
+    current_function_builder_->EmitWithU8(kExprBr, 1);
     current_function_builder_->Emit(kExprEnd);
     breakable_blocks_.pop_back();
   }
@@ -413,13 +411,13 @@
     if (stmt->init() != nullptr) {
       RECURSE(Visit(stmt->init()));
     }
-    BlockVisitor visitor(this, stmt->AsBreakableStatement(), kExprLoop, true);
+    BlockVisitor block(this, stmt->AsBreakableStatement(), kExprBlock);
+    BlockVisitor loop(this, stmt->AsBreakableStatement(), kExprLoop);
     if (stmt->cond() != nullptr) {
       RECURSE(Visit(stmt->cond()));
       current_function_builder_->Emit(kExprI32Eqz);
-      current_function_builder_->Emit(kExprIf);
-      current_function_builder_->Emit(kExprNop);
-      current_function_builder_->EmitWithU8U8(kExprBr, ARITY_0, 2);
+      current_function_builder_->EmitWithU8(kExprIf, kLocalVoid);
+      current_function_builder_->EmitWithU8(kExprBr, 2);
       current_function_builder_->Emit(kExprEnd);
     }
     if (stmt->body() != nullptr) {
@@ -428,8 +426,7 @@
     if (stmt->next() != nullptr) {
       RECURSE(Visit(stmt->next()));
     }
-    current_function_builder_->Emit(kExprNop);
-    current_function_builder_->EmitWithU8U8(kExprBr, ARITY_0, 0);
+    current_function_builder_->EmitWithU8(kExprBr, 0);
   }
 
   void VisitForInStatement(ForInStatement* stmt) { UNREACHABLE(); }
@@ -446,19 +443,13 @@
     DeclarationScope* scope = expr->scope();
     if (scope_ == kFuncScope) {
       if (auto* func_type = typer_->TypeOf(expr)->AsFunctionType()) {
-        // Build the signature for the function.
-        LocalType return_type = TypeFrom(func_type->ReturnType());
+        // Add the parameters for the function.
         const auto& arguments = func_type->Arguments();
-        FunctionSig::Builder b(zone(), return_type == kAstStmt ? 0 : 1,
-                               arguments.size());
-        if (return_type != kAstStmt) b.AddReturn(return_type);
         for (int i = 0; i < expr->parameter_count(); ++i) {
           LocalType type = TypeFrom(arguments[i]);
           DCHECK_NE(kAstStmt, type);
-          b.AddParam(type);
           InsertParameter(scope->parameter(i), type, i);
         }
-        current_function_builder_->SetSignature(b.Build());
       } else {
         UNREACHABLE();
       }
@@ -476,7 +467,24 @@
     RECURSE(Visit(expr->condition()));
     // WASM ifs come with implicit blocks for both arms.
     breakable_blocks_.push_back(std::make_pair(nullptr, false));
-    current_function_builder_->Emit(kExprIf);
+    LocalTypeCode type;
+    switch (TypeOf(expr)) {
+      case kAstI32:
+        type = kLocalI32;
+        break;
+      case kAstI64:
+        type = kLocalI64;
+        break;
+      case kAstF32:
+        type = kLocalF32;
+        break;
+      case kAstF64:
+        type = kLocalF64;
+        break;
+      default:
+        UNREACHABLE();
+    }
+    current_function_builder_->EmitWithU8(kExprIf, type);
     RECURSE(Visit(expr->then_expression()));
     current_function_builder_->Emit(kExprElse);
     RECURSE(Visit(expr->else_expression()));
@@ -551,12 +559,22 @@
         current_function_builder_->EmitGetLocal(
             LookupOrInsertLocal(var, var_type));
       }
+    } else if (scope_ == kExportScope) {
+      Variable* var = expr->var();
+      DCHECK(var->is_function());
+      WasmFunctionBuilder* function = LookupOrInsertFunction(var);
+      function->SetExported();
+      function->SetName(
+          AsmWasmBuilder::single_function_name,
+          static_cast<int>(strlen(AsmWasmBuilder::single_function_name)));
     }
   }
 
   void VisitLiteral(Literal* expr) {
     Handle<Object> value = expr->value();
-    if (!value->IsNumber() || (scope_ != kFuncScope && scope_ != kInitScope)) {
+    if (!(value->IsNumber() || expr->raw_value()->IsTrue() ||
+          expr->raw_value()->IsFalse()) ||
+        (scope_ != kFuncScope && scope_ != kInitScope)) {
       return;
     }
     AsmType* type = typer_->TypeOf(expr);
@@ -577,10 +595,40 @@
       int32_t i = static_cast<int32_t>(u);
       byte code[] = {WASM_I32V(i)};
       current_function_builder_->EmitCode(code, sizeof(code));
+    } else if (type->IsA(AsmType::Int())) {
+      // The parser can collapse !0, !1 etc to true / false.
+      // Allow these as int literals.
+      if (expr->raw_value()->IsTrue()) {
+        byte code[] = {WASM_I32V(1)};
+        current_function_builder_->EmitCode(code, sizeof(code));
+      } else if (expr->raw_value()->IsFalse()) {
+        byte code[] = {WASM_I32V(0)};
+        current_function_builder_->EmitCode(code, sizeof(code));
+      } else if (expr->raw_value()->IsNumber()) {
+        // This can happen when -x becomes x * -1 (due to the parser).
+        int32_t i = 0;
+        if (!value->ToInt32(&i) || i != -1) {
+          UNREACHABLE();
+        }
+        byte code[] = {WASM_I32V(i)};
+        current_function_builder_->EmitCode(code, sizeof(code));
+      } else {
+        UNREACHABLE();
+      }
     } else if (type->IsA(AsmType::Double())) {
+      // TODO(bradnelson): Pattern match the case where negation occurs and
+      // emit f64.neg instead.
       double val = expr->raw_value()->AsNumber();
       byte code[] = {WASM_F64(val)};
       current_function_builder_->EmitCode(code, sizeof(code));
+    } else if (type->IsA(AsmType::Float())) {
+      // This can happen when -fround(x) becomes fround(x) * 1.0[float]
+      // (due to the parser).
+      // TODO(bradnelson): Pattern match this and emit f32.neg instead.
+      double val = expr->raw_value()->AsNumber();
+      DCHECK_EQ(-1.0, val);
+      byte code[] = {WASM_F32(val)};
+      current_function_builder_->EmitCode(code, sizeof(code));
     } else {
       UNREACHABLE();
     }
@@ -601,11 +649,10 @@
       DCHECK(name->IsPropertyName());
       const AstRawString* raw_name = name->AsRawPropertyName();
       if (var->is_function()) {
-        uint32_t index = LookupOrInsertFunction(var);
-        builder_->FunctionAt(index)->SetExported();
-        builder_->FunctionAt(index)->SetName(
-            reinterpret_cast<const char*>(raw_name->raw_data()),
-            raw_name->length());
+        WasmFunctionBuilder* function = LookupOrInsertFunction(var);
+        function->SetExported();
+        function->SetName(reinterpret_cast<const char*>(raw_name->raw_data()),
+                          raw_name->length());
       }
     }
   }
@@ -613,7 +660,7 @@
   void VisitArrayLiteral(ArrayLiteral* expr) { UNREACHABLE(); }
 
   void LoadInitFunction() {
-    current_function_builder_ = builder_->FunctionAt(init_function_index_);
+    current_function_builder_ = init_function_;
     scope_ = kInitScope;
   }
 
@@ -642,7 +689,8 @@
     for (int i = 0; i < funcs->values()->length(); ++i) {
       VariableProxy* func = funcs->values()->at(i)->AsVariableProxy();
       DCHECK_NOT_NULL(func);
-      builder_->AddIndirectFunction(LookupOrInsertFunction(func->var()));
+      builder_->AddIndirectFunction(
+          LookupOrInsertFunction(func->var())->func_index());
     }
   }
 
@@ -684,20 +732,20 @@
 
    public:
     explicit ImportedFunctionTable(AsmWasmBuilderImpl* builder)
-        : table_(base::HashMap::PointersMatch,
-                 ZoneHashMap::kDefaultHashMapCapacity,
+        : table_(ZoneHashMap::kDefaultHashMapCapacity,
                  ZoneAllocationPolicy(builder->zone())),
           builder_(builder) {}
 
     void AddImport(Variable* v, const char* name, int name_length) {
       ImportedFunctionIndices* indices = new (builder_->zone())
           ImportedFunctionIndices(name, name_length, builder_->zone());
-      ZoneHashMap::Entry* entry = table_.LookupOrInsert(
+      auto* entry = table_.LookupOrInsert(
           v, ComputePointerHash(v), ZoneAllocationPolicy(builder_->zone()));
       entry->value = indices;
     }
 
-    uint32_t GetFunctionIndex(Variable* v, FunctionSig* sig) {
+    // Get a function's index (or allocate if new).
+    uint32_t LookupOrInsertImport(Variable* v, FunctionSig* sig) {
       ZoneHashMap::Entry* entry = table_.Lookup(v, ComputePointerHash(v));
       DCHECK_NOT_NULL(entry);
       ImportedFunctionIndices* indices =
@@ -774,7 +822,7 @@
     RECURSE(Visit(value));
   }
 
-  void EmitAssignment(Assignment* expr, MachineType type) {
+  void EmitAssignment(Assignment* expr, MachineType type, ValueFate fate) {
     // Match the left hand side of the assignment.
     VariableProxy* target_var = expr->target()->AsVariableProxy();
     if (target_var != nullptr) {
@@ -783,11 +831,19 @@
       LocalType var_type = TypeOf(expr);
       DCHECK_NE(kAstStmt, var_type);
       if (var->IsContextSlot()) {
-        current_function_builder_->EmitWithVarInt(
-            kExprSetGlobal, LookupOrInsertGlobal(var, var_type));
+        uint32_t index = LookupOrInsertGlobal(var, var_type);
+        current_function_builder_->EmitWithVarInt(kExprSetGlobal, index);
+        if (fate == kLeaveOnStack) {
+          current_function_builder_->EmitWithVarInt(kExprGetGlobal, index);
+        }
       } else {
-        current_function_builder_->EmitSetLocal(
-            LookupOrInsertLocal(var, var_type));
+        if (fate == kDrop) {
+          current_function_builder_->EmitSetLocal(
+              LookupOrInsertLocal(var, var_type));
+        } else {
+          current_function_builder_->EmitTeeLocal(
+              LookupOrInsertLocal(var, var_type));
+        }
       }
     }
 
@@ -799,6 +855,7 @@
               ->IsA(AsmType::Float32Array())) {
         current_function_builder_->Emit(kExprF32ConvertF64);
       }
+      // Note that unlike StoreMem, AsmjsStoreMem ignores out-of-bounds writes.
       WasmOpcode opcode;
       if (type == MachineType::Int8()) {
         opcode = kExprI32AsmjsStoreMem8;
@@ -820,6 +877,10 @@
         UNREACHABLE();
       }
       current_function_builder_->Emit(opcode);
+      if (fate == kDrop) {
+        // Asm.js stores to memory leave their result on the stack.
+        current_function_builder_->Emit(kExprDrop);
+      }
     }
 
     if (target_var == nullptr && target_prop == nullptr) {
@@ -828,12 +889,16 @@
   }
 
   void VisitAssignment(Assignment* expr) {
+    VisitAssignment(expr, kLeaveOnStack);
+  }
+
+  void VisitAssignment(Assignment* expr, ValueFate fate) {
     bool as_init = false;
     if (scope_ == kModuleScope) {
       // Skip extra assignment inserted by the parser when in this form:
       // (function Module(a, b, c) {... })
       if (expr->target()->IsVariableProxy() &&
-          expr->target()->AsVariableProxy()->var()->mode() == CONST_LEGACY) {
+          expr->target()->AsVariableProxy()->var()->is_sloppy_function_name()) {
         return;
       }
       Property* prop = expr->value()->AsProperty();
@@ -873,12 +938,12 @@
     }
 
     if (as_init) LoadInitFunction();
-    MachineType mtype;
+    MachineType mtype = MachineType::None();
     bool is_nop = false;
     EmitAssignmentLhs(expr->target(), &mtype);
     EmitAssignmentRhs(expr->target(), expr->value(), &is_nop);
     if (!is_nop) {
-      EmitAssignment(expr, mtype);
+      EmitAssignment(expr, mtype, fate);
     }
     if (as_init) UnLoadInitFunction();
   }
@@ -1099,24 +1164,24 @@
       }
       case AsmTyper::kMathAbs: {
         if (call_type == kAstI32) {
-          uint32_t tmp = current_function_builder_->AddLocal(kAstI32);
+          WasmTemporary tmp(current_function_builder_, kAstI32);
 
           // if set_local(tmp, x) < 0
           Visit(call->arguments()->at(0));
-          current_function_builder_->EmitSetLocal(tmp);
+          current_function_builder_->EmitTeeLocal(tmp.index());
           byte code[] = {WASM_I8(0)};
           current_function_builder_->EmitCode(code, sizeof(code));
           current_function_builder_->Emit(kExprI32LtS);
-          current_function_builder_->Emit(kExprIf);
+          current_function_builder_->EmitWithU8(kExprIf, kLocalI32);
 
           // then (0 - tmp)
           current_function_builder_->EmitCode(code, sizeof(code));
-          current_function_builder_->EmitGetLocal(tmp);
+          current_function_builder_->EmitGetLocal(tmp.index());
           current_function_builder_->Emit(kExprI32Sub);
 
           // else tmp
           current_function_builder_->Emit(kExprElse);
-          current_function_builder_->EmitGetLocal(tmp);
+          current_function_builder_->EmitGetLocal(tmp.index());
           // end
           current_function_builder_->Emit(kExprEnd);
 
@@ -1134,25 +1199,25 @@
       case AsmTyper::kMathMin: {
         // TODO(bradnelson): Change wasm to match Math.min in asm.js mode.
         if (call_type == kAstI32) {
-          uint32_t tmp_x = current_function_builder_->AddLocal(kAstI32);
-          uint32_t tmp_y = current_function_builder_->AddLocal(kAstI32);
+          WasmTemporary tmp_x(current_function_builder_, kAstI32);
+          WasmTemporary tmp_y(current_function_builder_, kAstI32);
 
           // if set_local(tmp_x, x) < set_local(tmp_y, y)
           Visit(call->arguments()->at(0));
-          current_function_builder_->EmitSetLocal(tmp_x);
+          current_function_builder_->EmitTeeLocal(tmp_x.index());
 
           Visit(call->arguments()->at(1));
-          current_function_builder_->EmitSetLocal(tmp_y);
+          current_function_builder_->EmitTeeLocal(tmp_y.index());
 
           current_function_builder_->Emit(kExprI32LeS);
-          current_function_builder_->Emit(kExprIf);
+          current_function_builder_->EmitWithU8(kExprIf, kLocalI32);
 
           // then tmp_x
-          current_function_builder_->EmitGetLocal(tmp_x);
+          current_function_builder_->EmitGetLocal(tmp_x.index());
 
           // else tmp_y
           current_function_builder_->Emit(kExprElse);
-          current_function_builder_->EmitGetLocal(tmp_y);
+          current_function_builder_->EmitGetLocal(tmp_y.index());
           current_function_builder_->Emit(kExprEnd);
 
         } else if (call_type == kAstF32) {
@@ -1169,26 +1234,26 @@
       case AsmTyper::kMathMax: {
         // TODO(bradnelson): Change wasm to match Math.max in asm.js mode.
         if (call_type == kAstI32) {
-          uint32_t tmp_x = current_function_builder_->AddLocal(kAstI32);
-          uint32_t tmp_y = current_function_builder_->AddLocal(kAstI32);
+          WasmTemporary tmp_x(current_function_builder_, kAstI32);
+          WasmTemporary tmp_y(current_function_builder_, kAstI32);
 
           // if set_local(tmp_x, x) < set_local(tmp_y, y)
           Visit(call->arguments()->at(0));
 
-          current_function_builder_->EmitSetLocal(tmp_x);
+          current_function_builder_->EmitTeeLocal(tmp_x.index());
 
           Visit(call->arguments()->at(1));
-          current_function_builder_->EmitSetLocal(tmp_y);
+          current_function_builder_->EmitTeeLocal(tmp_y.index());
 
           current_function_builder_->Emit(kExprI32LeS);
-          current_function_builder_->Emit(kExprIf);
+          current_function_builder_->EmitWithU8(kExprIf, kLocalI32);
 
           // then tmp_y
-          current_function_builder_->EmitGetLocal(tmp_y);
+          current_function_builder_->EmitGetLocal(tmp_y.index());
 
           // else tmp_x
           current_function_builder_->Emit(kExprElse);
-          current_function_builder_->EmitGetLocal(tmp_x);
+          current_function_builder_->EmitGetLocal(tmp_x.index());
           current_function_builder_->Emit(kExprEnd);
 
         } else if (call_type == kAstF32) {
@@ -1267,18 +1332,23 @@
     }
   }
 
-  void VisitCall(Call* expr) {
+  void VisitCall(Call* expr) { VisitCallExpression(expr); }
+
+  bool VisitCallExpression(Call* expr) {
     Call::CallType call_type = expr->GetCallType();
+    bool returns_value = true;
     switch (call_type) {
       case Call::OTHER_CALL: {
-        DCHECK_EQ(kFuncScope, scope_);
         VariableProxy* proxy = expr->expression()->AsVariableProxy();
         if (proxy != nullptr) {
+          DCHECK(kFuncScope == scope_ ||
+                 typer_->VariableAsStandardMember(proxy->var()) ==
+                     AsmTyper::kMathFround);
           if (VisitStdlibFunction(expr, proxy)) {
-            return;
+            return true;
           }
         }
-        uint32_t index;
+        DCHECK(kFuncScope == scope_);
         VariableProxy* vp = expr->expression()->AsVariableProxy();
         DCHECK_NOT_NULL(vp);
         if (typer_->TypeOf(vp)->AsFFIType() != nullptr) {
@@ -1288,22 +1358,24 @@
                                    args->length());
           if (return_type != kAstStmt) {
             sig.AddReturn(return_type);
+          } else {
+            returns_value = false;
           }
           for (int i = 0; i < args->length(); ++i) {
             sig.AddParam(TypeOf(args->at(i)));
           }
-          index =
-              imported_function_table_.GetFunctionIndex(vp->var(), sig.Build());
-          VisitCallArgs(expr);
-          current_function_builder_->Emit(kExprCallImport);
-          current_function_builder_->EmitVarInt(expr->arguments()->length());
-          current_function_builder_->EmitVarInt(index);
-        } else {
-          index = LookupOrInsertFunction(vp->var());
+          uint32_t index = imported_function_table_.LookupOrInsertImport(
+              vp->var(), sig.Build());
           VisitCallArgs(expr);
           current_function_builder_->Emit(kExprCallFunction);
-          current_function_builder_->EmitVarInt(expr->arguments()->length());
           current_function_builder_->EmitVarInt(index);
+        } else {
+          WasmFunctionBuilder* function = LookupOrInsertFunction(vp->var());
+          VisitCallArgs(expr);
+          current_function_builder_->Emit(kExprCallFunction);
+          current_function_builder_->EmitDirectCallIndex(
+              function->func_index());
+          returns_value = function->signature()->return_count() > 0;
         }
         break;
       }
@@ -1314,18 +1386,28 @@
         VariableProxy* var = p->obj()->AsVariableProxy();
         DCHECK_NOT_NULL(var);
         FunctionTableIndices* indices = LookupFunctionTable(var->var());
-        RECURSE(Visit(p->key()));
+        Visit(p->key());  // TODO(titzer): should use RECURSE()
+
+        // We have to use a temporary for the correct order of evaluation.
         current_function_builder_->EmitI32Const(indices->start_index);
         current_function_builder_->Emit(kExprI32Add);
+        WasmTemporary tmp(current_function_builder_, kAstI32);
+        current_function_builder_->EmitSetLocal(tmp.index());
+
         VisitCallArgs(expr);
+
+        current_function_builder_->EmitGetLocal(tmp.index());
         current_function_builder_->Emit(kExprCallIndirect);
-        current_function_builder_->EmitVarInt(expr->arguments()->length());
         current_function_builder_->EmitVarInt(indices->signature_index);
+        returns_value =
+            builder_->GetSignature(indices->signature_index)->return_count() >
+            0;
         break;
       }
       default:
         UNREACHABLE();
     }
+    return returns_value;
   }
 
   void VisitCallNew(CallNew* expr) { UNREACHABLE(); }
@@ -1511,16 +1593,13 @@
       RECURSE(Visit(GetLeft(expr)));
     } else {
       if (expr->op() == Token::COMMA) {
-        current_function_builder_->Emit(kExprBlock);
+        RECURSE(VisitForEffect(expr->left()));
+        RECURSE(Visit(expr->right()));
+        return;
       }
-
       RECURSE(Visit(expr->left()));
       RECURSE(Visit(expr->right()));
 
-      if (expr->op() == Token::COMMA) {
-        current_function_builder_->Emit(kExprEnd);
-      }
-
       switch (expr->op()) {
         BINOP_CASE(Token::ADD, Add, NON_SIGNED_BINOP, true);
         BINOP_CASE(Token::SUB, Sub, NON_SIGNED_BINOP, true);
@@ -1720,18 +1799,33 @@
     return (reinterpret_cast<IndexContainer*>(entry->value))->index;
   }
 
-  uint32_t LookupOrInsertFunction(Variable* v) {
+  WasmFunctionBuilder* LookupOrInsertFunction(Variable* v) {
     DCHECK_NOT_NULL(builder_);
     ZoneHashMap::Entry* entry = functions_.Lookup(v, ComputePointerHash(v));
     if (entry == nullptr) {
-      uint32_t index = builder_->AddFunction();
-      IndexContainer* container = new (zone()) IndexContainer();
-      container->index = index;
+      auto* func_type = typer_->TypeOf(v)->AsFunctionType();
+      DCHECK_NOT_NULL(func_type);
+      // Build the signature for the function.
+      LocalType return_type = TypeFrom(func_type->ReturnType());
+      const auto& arguments = func_type->Arguments();
+      FunctionSig::Builder b(zone(), return_type == kAstStmt ? 0 : 1,
+                             arguments.size());
+      if (return_type != kAstStmt) b.AddReturn(return_type);
+      for (int i = 0; i < static_cast<int>(arguments.size()); ++i) {
+        LocalType type = TypeFrom(arguments[i]);
+        DCHECK_NE(kAstStmt, type);
+        b.AddParam(type);
+      }
+
+      WasmFunctionBuilder* function = builder_->AddFunction(b.Build());
       entry = functions_.LookupOrInsert(v, ComputePointerHash(v),
                                         ZoneAllocationPolicy(zone()));
-      entry->value = container;
+      function->SetName(
+          reinterpret_cast<const char*>(v->raw_name()->raw_data()),
+          v->raw_name()->length());
+      entry->value = function;
     }
-    return (reinterpret_cast<IndexContainer*>(entry->value))->index;
+    return (reinterpret_cast<WasmFunctionBuilder*>(entry->value));
   }
 
   LocalType TypeOf(Expression* expr) { return TypeFrom(typer_->TypeOf(expr)); }
@@ -1766,8 +1860,8 @@
   AsmTyper* typer_;
   ZoneVector<std::pair<BreakableStatement*, bool>> breakable_blocks_;
   ZoneVector<ForeignVariable> foreign_variables_;
-  uint32_t init_function_index_;
-  uint32_t foreign_init_function_index_;
+  WasmFunctionBuilder* init_function_;
+  WasmFunctionBuilder* foreign_init_function_;
   uint32_t next_table_index_;
   ZoneHashMap function_tables_;
   ImportedFunctionTable imported_function_table_;
@@ -1792,6 +1886,10 @@
   impl.builder_->WriteTo(*buffer);
   return buffer;
 }
+
+const char* AsmWasmBuilder::foreign_init_name = "__foreign_init__";
+const char* AsmWasmBuilder::single_function_name = "__single_function__";
+
 }  // namespace wasm
 }  // namespace internal
 }  // namespace v8
diff --git a/src/asmjs/asm-wasm-builder.h b/src/asmjs/asm-wasm-builder.h
index 3276c88..9f85dfa 100644
--- a/src/asmjs/asm-wasm-builder.h
+++ b/src/asmjs/asm-wasm-builder.h
@@ -8,8 +8,8 @@
 #include "src/allocation.h"
 #include "src/asmjs/asm-typer.h"
 #include "src/objects.h"
-#include "src/wasm/encoder.h"
-#include "src/zone.h"
+#include "src/wasm/wasm-module-builder.h"
+#include "src/zone/zone.h"
 
 namespace v8 {
 namespace internal {
@@ -24,6 +24,9 @@
                           AsmTyper* typer);
   ZoneBuffer* Run(Handle<FixedArray>* foreign_args);
 
+  static const char* foreign_init_name;
+  static const char* single_function_name;
+
  private:
   Isolate* isolate_;
   Zone* zone_;
diff --git a/src/assembler.cc b/src/assembler.cc
index 83dbbe8..b44bc06 100644
--- a/src/assembler.cc
+++ b/src/assembler.cc
@@ -120,7 +120,7 @@
 double one_half;
 double minus_one_half;
 double negative_infinity;
-double the_hole_nan;
+uint64_t the_hole_nan;
 double uint32_bias;
 };
 
@@ -190,6 +190,7 @@
   if (size == 0) return;
 
 #if defined(USE_SIMULATOR)
+  base::LockGuard<base::Mutex> lock_guard(isolate->simulator_i_cache_mutex());
   Simulator::FlushICache(isolate->simulator_i_cache(), start, size);
 #else
   CpuFeatures::FlushICache(start, size);
@@ -233,22 +234,14 @@
 // Implementation of CpuFeatureScope
 
 #ifdef DEBUG
-CpuFeatureScope::CpuFeatureScope(AssemblerBase* assembler, CpuFeature f)
+CpuFeatureScope::CpuFeatureScope(AssemblerBase* assembler, CpuFeature f,
+                                 CheckPolicy check)
     : assembler_(assembler) {
-  DCHECK(CpuFeatures::IsSupported(f));
+  DCHECK_IMPLIES(check == kCheckSupported, CpuFeatures::IsSupported(f));
   old_enabled_ = assembler_->enabled_cpu_features();
-  uint64_t mask = static_cast<uint64_t>(1) << f;
-  // TODO(svenpanne) This special case below doesn't belong here!
-#if V8_TARGET_ARCH_ARM
-  // ARMv7 is implied by VFP3.
-  if (f == VFP3) {
-    mask |= static_cast<uint64_t>(1) << ARMv7;
-  }
-#endif
-  assembler_->set_enabled_cpu_features(old_enabled_ | mask);
+  assembler_->EnableCpuFeature(f);
 }
 
-
 CpuFeatureScope::~CpuFeatureScope() {
   assembler_->set_enabled_cpu_features(old_enabled_);
 }
@@ -350,19 +343,18 @@
   DCHECK(IsWasmMemoryReference(rmode_) || IsWasmMemorySizeReference(rmode_));
   if (IsWasmMemoryReference(rmode_)) {
     Address updated_reference;
-    DCHECK(old_size == 0 || Memory::IsAddressInRange(
-                                old_base, wasm_memory_reference(), old_size));
+    DCHECK_GE(wasm_memory_reference(), old_base);
     updated_reference = new_base + (wasm_memory_reference() - old_base);
-    DCHECK(new_size == 0 ||
-           Memory::IsAddressInRange(new_base, updated_reference, new_size));
+    // The reference is not checked here but at runtime. Validity of references
+    // may change over time.
     unchecked_update_wasm_memory_reference(updated_reference,
                                            icache_flush_mode);
   } else if (IsWasmMemorySizeReference(rmode_)) {
-    uint32_t updated_size_reference;
-    DCHECK(old_size == 0 || wasm_memory_size_reference() <= old_size);
-    updated_size_reference =
-        new_size + (wasm_memory_size_reference() - old_size);
-    DCHECK(updated_size_reference <= new_size);
+    uint32_t current_size_reference = wasm_memory_size_reference();
+    DCHECK(old_size == 0 || current_size_reference <= old_size);
+    uint32_t offset = old_size - current_size_reference;
+    DCHECK_GE(new_size, offset);
+    uint32_t updated_size_reference = new_size - offset;
     unchecked_update_wasm_memory_size(updated_size_reference,
                                       icache_flush_mode);
   } else {
@@ -930,7 +922,7 @@
   double_constants.min_int = kMinInt;
   double_constants.one_half = 0.5;
   double_constants.minus_one_half = -0.5;
-  double_constants.the_hole_nan = bit_cast<double>(kHoleNanInt64);
+  double_constants.the_hole_nan = kHoleNanInt64;
   double_constants.negative_infinity = -V8_INFINITY;
   double_constants.uint32_bias =
     static_cast<double>(static_cast<uint32_t>(0xFFFFFFFF)) + 1;
@@ -1601,17 +1593,6 @@
 }
 
 
-ExternalReference ExternalReference::virtual_handler_register(
-    Isolate* isolate) {
-  return ExternalReference(isolate->virtual_handler_register_address());
-}
-
-
-ExternalReference ExternalReference::virtual_slot_register(Isolate* isolate) {
-  return ExternalReference(isolate->virtual_slot_register_address());
-}
-
-
 ExternalReference ExternalReference::runtime_function_table_address(
     Isolate* isolate) {
   return ExternalReference(
diff --git a/src/assembler.h b/src/assembler.h
index 77beac1..a925032 100644
--- a/src/assembler.h
+++ b/src/assembler.h
@@ -80,9 +80,14 @@
   void set_enabled_cpu_features(uint64_t features) {
     enabled_cpu_features_ = features;
   }
+  // Features are usually enabled by CpuFeatureScope, which also asserts that
+  // the features are supported before they are enabled.
   bool IsEnabled(CpuFeature f) {
     return (enabled_cpu_features_ & (static_cast<uint64_t>(1) << f)) != 0;
   }
+  void EnableCpuFeature(CpuFeature f) {
+    enabled_cpu_features_ |= (static_cast<uint64_t>(1) << f);
+  }
 
   bool is_constant_pool_available() const {
     if (FLAG_enable_embedded_constant_pool) {
@@ -184,15 +189,22 @@
 // Enable a specified feature within a scope.
 class CpuFeatureScope BASE_EMBEDDED {
  public:
+  enum CheckPolicy {
+    kCheckSupported,
+    kDontCheckSupported,
+  };
+
 #ifdef DEBUG
-  CpuFeatureScope(AssemblerBase* assembler, CpuFeature f);
+  CpuFeatureScope(AssemblerBase* assembler, CpuFeature f,
+                  CheckPolicy check = kCheckSupported);
   ~CpuFeatureScope();
 
  private:
   AssemblerBase* assembler_;
   uint64_t old_enabled_;
 #else
-  CpuFeatureScope(AssemblerBase* assembler, CpuFeature f) {}
+  CpuFeatureScope(AssemblerBase* assembler, CpuFeature f,
+                  CheckPolicy check = kCheckSupported) {}
 #endif
 };
 
@@ -1035,9 +1047,6 @@
   static ExternalReference invoke_function_callback(Isolate* isolate);
   static ExternalReference invoke_accessor_getter_callback(Isolate* isolate);
 
-  static ExternalReference virtual_handler_register(Isolate* isolate);
-  static ExternalReference virtual_slot_register(Isolate* isolate);
-
   static ExternalReference runtime_function_table_address(Isolate* isolate);
 
   Address address() const { return reinterpret_cast<Address>(address_); }
diff --git a/src/assert-scope.h b/src/assert-scope.h
index 84e6990..fde49f8 100644
--- a/src/assert-scope.h
+++ b/src/assert-scope.h
@@ -7,6 +7,7 @@
 
 #include <stdint.h>
 #include "src/base/macros.h"
+#include "src/globals.h"
 
 namespace v8 {
 namespace internal {
@@ -33,14 +34,13 @@
   COMPILATION_ASSERT
 };
 
-
 template <PerThreadAssertType kType, bool kAllow>
 class PerThreadAssertScope {
  public:
-  PerThreadAssertScope();
-  ~PerThreadAssertScope();
+  V8_EXPORT_PRIVATE PerThreadAssertScope();
+  V8_EXPORT_PRIVATE ~PerThreadAssertScope();
 
-  static bool IsAllowed();
+  V8_EXPORT_PRIVATE static bool IsAllowed();
 
  private:
   PerThreadAssertData* data_;
diff --git a/src/ast/OWNERS b/src/ast/OWNERS
index 65a00bc..b4e1473 100644
--- a/src/ast/OWNERS
+++ b/src/ast/OWNERS
@@ -3,6 +3,7 @@
 adamk@chromium.org
 bmeurer@chromium.org
 littledan@chromium.org
+marja@chromium.org
 mstarzinger@chromium.org
 rossberg@chromium.org
 verwaest@chromium.org
diff --git a/src/ast/ast-expression-rewriter.cc b/src/ast/ast-expression-rewriter.cc
index 7bb8f08..c4fa71b 100644
--- a/src/ast/ast-expression-rewriter.cc
+++ b/src/ast/ast-expression-rewriter.cc
@@ -201,11 +201,10 @@
   AST_REWRITE_PROPERTY(FunctionLiteral, node, constructor);
   ZoneList<typename ClassLiteral::Property*>* properties = node->properties();
   for (int i = 0; i < properties->length(); i++) {
-    VisitObjectLiteralProperty(properties->at(i));
+    VisitLiteralProperty(properties->at(i));
   }
 }
 
-
 void AstExpressionRewriter::VisitNativeFunctionLiteral(
     NativeFunctionLiteral* node) {
   REWRITE_THIS(node);
@@ -243,13 +242,11 @@
   REWRITE_THIS(node);
   ZoneList<typename ObjectLiteral::Property*>* properties = node->properties();
   for (int i = 0; i < properties->length(); i++) {
-    VisitObjectLiteralProperty(properties->at(i));
+    VisitLiteralProperty(properties->at(i));
   }
 }
 
-
-void AstExpressionRewriter::VisitObjectLiteralProperty(
-    ObjectLiteralProperty* property) {
+void AstExpressionRewriter::VisitLiteralProperty(LiteralProperty* property) {
   if (property == nullptr) return;
   AST_REWRITE_PROPERTY(Expression, property, key);
   AST_REWRITE_PROPERTY(Expression, property, value);
diff --git a/src/ast/ast-expression-rewriter.h b/src/ast/ast-expression-rewriter.h
index ac45d76..dfed3e1 100644
--- a/src/ast/ast-expression-rewriter.h
+++ b/src/ast/ast-expression-rewriter.h
@@ -9,7 +9,7 @@
 #include "src/ast/ast.h"
 #include "src/ast/scopes.h"
 #include "src/type-info.h"
-#include "src/zone.h"
+#include "src/zone/zone.h"
 
 namespace v8 {
 namespace internal {
@@ -33,7 +33,7 @@
   virtual void VisitStatements(ZoneList<Statement*>* statements);
   virtual void VisitExpressions(ZoneList<Expression*>* expressions);
 
-  virtual void VisitObjectLiteralProperty(ObjectLiteralProperty* property);
+  virtual void VisitLiteralProperty(LiteralProperty* property);
 
  protected:
   virtual bool RewriteExpression(Expression* expr) = 0;
diff --git a/src/ast/ast-literal-reindexer.cc b/src/ast/ast-literal-reindexer.cc
index a349ae0..81a5225 100644
--- a/src/ast/ast-literal-reindexer.cc
+++ b/src/ast/ast-literal-reindexer.cc
@@ -249,21 +249,18 @@
     VisitVariableProxy(node->class_variable_proxy());
   }
   for (int i = 0; i < node->properties()->length(); i++) {
-    VisitObjectLiteralProperty(node->properties()->at(i));
+    VisitLiteralProperty(node->properties()->at(i));
   }
 }
 
-
 void AstLiteralReindexer::VisitObjectLiteral(ObjectLiteral* node) {
   UpdateIndex(node);
   for (int i = 0; i < node->properties()->length(); i++) {
-    VisitObjectLiteralProperty(node->properties()->at(i));
+    VisitLiteralProperty(node->properties()->at(i));
   }
 }
 
-
-void AstLiteralReindexer::VisitObjectLiteralProperty(
-    ObjectLiteralProperty* node) {
+void AstLiteralReindexer::VisitLiteralProperty(LiteralProperty* node) {
   Visit(node->key());
   Visit(node->value());
 }
diff --git a/src/ast/ast-literal-reindexer.h b/src/ast/ast-literal-reindexer.h
index b33e0c5..4e0ca6b 100644
--- a/src/ast/ast-literal-reindexer.h
+++ b/src/ast/ast-literal-reindexer.h
@@ -26,7 +26,7 @@
   void VisitStatements(ZoneList<Statement*>* statements);
   void VisitDeclarations(ZoneList<Declaration*>* declarations);
   void VisitArguments(ZoneList<Expression*>* arguments);
-  void VisitObjectLiteralProperty(ObjectLiteralProperty* property);
+  void VisitLiteralProperty(LiteralProperty* property);
 
   void UpdateIndex(MaterializedLiteral* literal) {
     literal->literal_index_ = next_index_++;
diff --git a/src/ast/ast-numbering.cc b/src/ast/ast-numbering.cc
index 1b9905a..e1b11f6 100644
--- a/src/ast/ast-numbering.cc
+++ b/src/ast/ast-numbering.cc
@@ -39,7 +39,7 @@
   void VisitStatements(ZoneList<Statement*>* statements);
   void VisitDeclarations(ZoneList<Declaration*>* declarations);
   void VisitArguments(ZoneList<Expression*>* arguments);
-  void VisitObjectLiteralProperty(ObjectLiteralProperty* property);
+  void VisitLiteralProperty(LiteralProperty* property);
 
   int ReserveIdRange(int n) {
     int tmp = next_id_;
@@ -233,14 +233,6 @@
 void AstNumberingVisitor::VisitBlock(Block* node) {
   IncrementNodeCount();
   node->set_base_id(ReserveIdRange(Block::num_ids()));
-
-  if (FLAG_ignition && node->scope() != nullptr &&
-      node->scope()->NeedsContext()) {
-    // Create ScopeInfo while on the main thread to avoid allocation during
-    // potentially concurrent bytecode generation.
-    node->scope()->GetScopeInfo(isolate_);
-  }
-
   if (node->scope() != NULL) VisitDeclarations(node->scope()->declarations());
   VisitStatements(node->statements());
 }
@@ -257,6 +249,27 @@
   IncrementNodeCount();
   node->set_base_id(ReserveIdRange(CallRuntime::num_ids()));
   VisitArguments(node->arguments());
+  // To support catch prediction within async/await:
+  //
+  // The AstNumberingVisitor is where catch prediction currently occurs, and it
+  // is the only common point that has access to this information. The parser
+  // just doesn't know yet. Take the following two cases of catch prediction:
+  //
+  // try { await fn(); } catch (e) { }
+  // try { await fn(); } finally { }
+  //
+  // When parsing the await that we want to mark as caught or uncaught, it's
+  // not yet known whether it will be followed by a 'finally' or a 'catch'.
+  // The AstNumberingVisitor is what learns whether it is caught. To make
+  // the information available later to the runtime, the AstNumberingVisitor
+  // has to stash it somewhere. Changing the runtime function into another
+  // one in ast-numbering seemed like a simple and straightforward solution to
+  // that problem.
+  if (node->is_jsruntime() &&
+      node->context_index() == Context::ASYNC_FUNCTION_AWAIT_CAUGHT_INDEX &&
+      catch_prediction_ == HandlerTable::ASYNC_AWAIT) {
+    node->set_context_index(Context::ASYNC_FUNCTION_AWAIT_UNCAUGHT_INDEX);
+  }
 }
 
 
@@ -370,6 +383,7 @@
   node->set_base_id(ReserveIdRange(CompareOperation::num_ids()));
   Visit(node->left());
   Visit(node->right());
+  ReserveFeedbackSlots(node);
 }
 
 
@@ -444,6 +458,7 @@
   node->set_base_id(ReserveIdRange(CaseClause::num_ids()));
   if (!node->is_default()) Visit(node->label());
   VisitStatements(node->statements());
+  ReserveFeedbackSlots(node);
 }
 
 
@@ -470,7 +485,7 @@
     VisitVariableProxy(node->class_variable_proxy());
   }
   for (int i = 0; i < node->properties()->length(); i++) {
-    VisitObjectLiteralProperty(node->properties()->at(i));
+    VisitLiteralProperty(node->properties()->at(i));
   }
   ReserveFeedbackSlots(node);
 }
@@ -480,7 +495,7 @@
   IncrementNodeCount();
   node->set_base_id(ReserveIdRange(node->num_ids()));
   for (int i = 0; i < node->properties()->length(); i++) {
-    VisitObjectLiteralProperty(node->properties()->at(i));
+    VisitLiteralProperty(node->properties()->at(i));
   }
   node->BuildConstantProperties(isolate_);
   // Mark all computed expressions that are bound to a key that
@@ -490,15 +505,12 @@
   ReserveFeedbackSlots(node);
 }
 
-
-void AstNumberingVisitor::VisitObjectLiteralProperty(
-    ObjectLiteralProperty* node) {
+void AstNumberingVisitor::VisitLiteralProperty(LiteralProperty* node) {
   if (node->is_computed_name()) DisableCrankshaft(kComputedPropertyName);
   Visit(node->key());
   Visit(node->value());
 }
 
-
 void AstNumberingVisitor::VisitArrayLiteral(ArrayLiteral* node) {
   IncrementNodeCount();
   node->set_base_id(ReserveIdRange(node->num_ids()));
@@ -570,27 +582,22 @@
 bool AstNumberingVisitor::Renumber(FunctionLiteral* node) {
   DeclarationScope* scope = node->scope();
   if (scope->new_target_var()) DisableCrankshaft(kSuperReference);
-  if (scope->calls_eval()) DisableOptimization(kFunctionCallsEval);
+  if (scope->calls_eval()) DisableCrankshaft(kFunctionCallsEval);
   if (scope->arguments() != NULL && !scope->arguments()->IsStackAllocated()) {
     DisableCrankshaft(kContextAllocatedArguments);
   }
 
-  int rest_index;
-  if (scope->rest_parameter(&rest_index)) {
+  if (scope->rest_parameter() != nullptr) {
     DisableCrankshaft(kRestParameter);
   }
 
-  if (FLAG_ignition && scope->NeedsContext() && scope->is_script_scope()) {
-    // Create ScopeInfo while on the main thread to avoid allocation during
-    // potentially concurrent bytecode generation.
-    node->scope()->GetScopeInfo(isolate_);
-  }
-
   if (IsGeneratorFunction(node->kind()) || IsAsyncFunction(node->kind())) {
-    // TODO(neis): We may want to allow Turbofan optimization here if
-    // --turbo-from-bytecode is set and we know that Ignition is used.
-    // Unfortunately we can't express that here.
-    DisableOptimization(kGenerator);
+    // Generators can be optimized if --turbo-from-bytecode is set.
+    if (FLAG_turbo_from_bytecode) {
+      DisableCrankshaft(kGenerator);
+    } else {
+      DisableOptimization(kGenerator);
+    }
   }
 
   VisitDeclarations(scope->declarations());
diff --git a/src/ast/ast-traversal-visitor.h b/src/ast/ast-traversal-visitor.h
index 0f2976c..e0f88e1 100644
--- a/src/ast/ast-traversal-visitor.h
+++ b/src/ast/ast-traversal-visitor.h
@@ -447,9 +447,9 @@
     RECURSE_EXPRESSION(Visit(expr->extends()));
   }
   RECURSE_EXPRESSION(Visit(expr->constructor()));
-  ZoneList<ObjectLiteralProperty*>* props = expr->properties();
+  ZoneList<ClassLiteralProperty*>* props = expr->properties();
   for (int i = 0; i < props->length(); ++i) {
-    ObjectLiteralProperty* prop = props->at(i);
+    ClassLiteralProperty* prop = props->at(i);
     if (!prop->key()->IsLiteral()) {
       RECURSE_EXPRESSION(Visit(prop->key()));
     }
diff --git a/src/ast/ast-type-bounds.h b/src/ast/ast-type-bounds.h
index ec26fdf..0d1a3c8 100644
--- a/src/ast/ast-type-bounds.h
+++ b/src/ast/ast-type-bounds.h
@@ -7,8 +7,8 @@
 #ifndef V8_AST_AST_TYPE_BOUNDS_H_
 #define V8_AST_AST_TYPE_BOUNDS_H_
 
-#include "src/types.h"
-#include "src/zone-containers.h"
+#include "src/ast/ast-types.h"
+#include "src/zone/zone-containers.h"
 
 namespace v8 {
 namespace internal {
@@ -20,18 +20,18 @@
   explicit AstTypeBounds(Zone* zone) : bounds_map_(zone) {}
   ~AstTypeBounds() {}
 
-  Bounds get(Expression* expression) const {
-    ZoneMap<Expression*, Bounds>::const_iterator i =
+  AstBounds get(Expression* expression) const {
+    ZoneMap<Expression*, AstBounds>::const_iterator i =
         bounds_map_.find(expression);
-    return (i != bounds_map_.end()) ? i->second : Bounds::Unbounded();
+    return (i != bounds_map_.end()) ? i->second : AstBounds::Unbounded();
   }
 
-  void set(Expression* expression, Bounds bounds) {
+  void set(Expression* expression, AstBounds bounds) {
     bounds_map_[expression] = bounds;
   }
 
  private:
-  ZoneMap<Expression*, Bounds> bounds_map_;
+  ZoneMap<Expression*, AstBounds> bounds_map_;
 };
 
 }  // namespace internal
diff --git a/src/types.cc b/src/ast/ast-types.cc
similarity index 70%
rename from src/types.cc
rename to src/ast/ast-types.cc
index c978dac..a075e8e 100644
--- a/src/types.cc
+++ b/src/ast/ast-types.cc
@@ -4,7 +4,7 @@
 
 #include <iomanip>
 
-#include "src/types.h"
+#include "src/ast/ast-types.h"
 
 #include "src/handles-inl.h"
 #include "src/ostreams.h"
@@ -12,21 +12,20 @@
 namespace v8 {
 namespace internal {
 
-
 // NOTE: If code is marked as being a "shortcut", this means that removing
 // the code won't affect the semantics of the surrounding function definition.
 
 // static
-bool Type::IsInteger(i::Object* x) {
-  return x->IsNumber() && Type::IsInteger(x->Number());
+bool AstType::IsInteger(i::Object* x) {
+  return x->IsNumber() && AstType::IsInteger(x->Number());
 }
 
 // -----------------------------------------------------------------------------
 // Range-related helper functions.
 
-bool RangeType::Limits::IsEmpty() { return this->min > this->max; }
+bool AstRangeType::Limits::IsEmpty() { return this->min > this->max; }
 
-RangeType::Limits RangeType::Limits::Intersect(Limits lhs, Limits rhs) {
+AstRangeType::Limits AstRangeType::Limits::Intersect(Limits lhs, Limits rhs) {
   DisallowHeapAllocation no_allocation;
   Limits result(lhs);
   if (lhs.min < rhs.min) result.min = rhs.min;
@@ -34,7 +33,7 @@
   return result;
 }
 
-RangeType::Limits RangeType::Limits::Union(Limits lhs, Limits rhs) {
+AstRangeType::Limits AstRangeType::Limits::Union(Limits lhs, Limits rhs) {
   DisallowHeapAllocation no_allocation;
   if (lhs.IsEmpty()) return rhs;
   if (rhs.IsEmpty()) return lhs;
@@ -44,38 +43,36 @@
   return result;
 }
 
-bool Type::Overlap(RangeType* lhs, RangeType* rhs) {
+bool AstType::Overlap(AstRangeType* lhs, AstRangeType* rhs) {
   DisallowHeapAllocation no_allocation;
-  return !RangeType::Limits::Intersect(RangeType::Limits(lhs),
-                                       RangeType::Limits(rhs))
+  return !AstRangeType::Limits::Intersect(AstRangeType::Limits(lhs),
+                                          AstRangeType::Limits(rhs))
               .IsEmpty();
 }
 
-bool Type::Contains(RangeType* lhs, RangeType* rhs) {
+bool AstType::Contains(AstRangeType* lhs, AstRangeType* rhs) {
   DisallowHeapAllocation no_allocation;
   return lhs->Min() <= rhs->Min() && rhs->Max() <= lhs->Max();
 }
 
-bool Type::Contains(RangeType* lhs, ConstantType* rhs) {
+bool AstType::Contains(AstRangeType* lhs, AstConstantType* rhs) {
   DisallowHeapAllocation no_allocation;
-  return IsInteger(*rhs->Value()) &&
-         lhs->Min() <= rhs->Value()->Number() &&
+  return IsInteger(*rhs->Value()) && lhs->Min() <= rhs->Value()->Number() &&
          rhs->Value()->Number() <= lhs->Max();
 }
 
-bool Type::Contains(RangeType* range, i::Object* val) {
+bool AstType::Contains(AstRangeType* range, i::Object* val) {
   DisallowHeapAllocation no_allocation;
-  return IsInteger(val) &&
-         range->Min() <= val->Number() && val->Number() <= range->Max();
+  return IsInteger(val) && range->Min() <= val->Number() &&
+         val->Number() <= range->Max();
 }
 
-
 // -----------------------------------------------------------------------------
 // Min and Max computation.
 
-double Type::Min() {
+double AstType::Min() {
   DCHECK(this->SemanticIs(Number()));
-  if (this->IsBitset()) return BitsetType::Min(this->AsBitset());
+  if (this->IsBitset()) return AstBitsetType::Min(this->AsBitset());
   if (this->IsUnion()) {
     double min = +V8_INFINITY;
     for (int i = 0, n = this->AsUnion()->Length(); i < n; ++i) {
@@ -89,9 +86,9 @@
   return 0;
 }
 
-double Type::Max() {
+double AstType::Max() {
   DCHECK(this->SemanticIs(Number()));
-  if (this->IsBitset()) return BitsetType::Max(this->AsBitset());
+  if (this->IsBitset()) return AstBitsetType::Max(this->AsBitset());
   if (this->IsUnion()) {
     double max = -V8_INFINITY;
     for (int i = 0, n = this->AsUnion()->Length(); i < n; ++i) {
@@ -105,13 +102,11 @@
   return 0;
 }
 
-
 // -----------------------------------------------------------------------------
 // Glb and lub computation.
 
-
 // The largest bitset subsumed by this type.
-Type::bitset BitsetType::Glb(Type* type) {
+AstType::bitset AstBitsetType::Glb(AstType* type) {
   DisallowHeapAllocation no_allocation;
   // Fast case.
   if (IsBitset(type)) {
@@ -119,19 +114,18 @@
   } else if (type->IsUnion()) {
     SLOW_DCHECK(type->AsUnion()->Wellformed());
     return type->AsUnion()->Get(0)->BitsetGlb() |
-           SEMANTIC(type->AsUnion()->Get(1)->BitsetGlb());  // Shortcut.
+           AST_SEMANTIC(type->AsUnion()->Get(1)->BitsetGlb());  // Shortcut.
   } else if (type->IsRange()) {
-    bitset glb = SEMANTIC(
-        BitsetType::Glb(type->AsRange()->Min(), type->AsRange()->Max()));
-    return glb | REPRESENTATION(type->BitsetLub());
+    bitset glb = AST_SEMANTIC(
+        AstBitsetType::Glb(type->AsRange()->Min(), type->AsRange()->Max()));
+    return glb | AST_REPRESENTATION(type->BitsetLub());
   } else {
     return type->Representation();
   }
 }
 
-
 // The smallest bitset subsuming this type, possibly not a proper one.
-Type::bitset BitsetType::Lub(Type* type) {
+AstType::bitset AstBitsetType::Lub(AstType* type) {
   DisallowHeapAllocation no_allocation;
   if (IsBitset(type)) return type->AsBitset();
   if (type->IsUnion()) {
@@ -140,7 +134,7 @@
     int bitset = type->AsUnion()->Get(0)->BitsetLub();
     for (int i = 0, n = type->AsUnion()->Length(); i < n; ++i) {
       // Other elements only contribute their semantic part.
-      bitset |= SEMANTIC(type->AsUnion()->Get(i)->BitsetLub());
+      bitset |= AST_SEMANTIC(type->AsUnion()->Get(i)->BitsetLub());
     }
     return bitset;
   }
@@ -155,7 +149,7 @@
   return kNone;
 }
 
-Type::bitset BitsetType::Lub(i::Map* map) {
+AstType::bitset AstBitsetType::Lub(i::Map* map) {
   DisallowHeapAllocation no_allocation;
   switch (map->instance_type()) {
     case STRING_TYPE:
@@ -214,7 +208,6 @@
     case JS_DATE_TYPE:
     case JS_CONTEXT_EXTENSION_OBJECT_TYPE:
     case JS_GENERATOR_OBJECT_TYPE:
-    case JS_MODULE_TYPE:
     case JS_ARRAY_BUFFER_TYPE:
     case JS_ARRAY_TYPE:
     case JS_REGEXP_TYPE:  // TODO(rossberg): there should be a RegExp type.
@@ -224,6 +217,7 @@
     case JS_MAP_TYPE:
     case JS_SET_ITERATOR_TYPE:
     case JS_MAP_ITERATOR_TYPE:
+    case JS_STRING_ITERATOR_TYPE:
     case JS_WEAK_MAP_TYPE:
     case JS_WEAK_SET_TYPE:
     case JS_PROMISE_TYPE:
@@ -250,6 +244,7 @@
     case SCRIPT_TYPE:
     case CODE_TYPE:
     case PROPERTY_CELL_TYPE:
+    case MODULE_TYPE:
       return kOtherInternal & kTaggedPointer;
 
     // Remaining instance types are unsupported for now. If any of them do
@@ -265,6 +260,7 @@
     case ACCESS_CHECK_INFO_TYPE:
     case INTERCEPTOR_INFO_TYPE:
     case CALL_HANDLER_INFO_TYPE:
+    case PROMISE_CONTAINER_TYPE:
     case FUNCTION_TEMPLATE_INFO_TYPE:
     case OBJECT_TEMPLATE_INFO_TYPE:
     case SIGNATURE_INFO_TYPE:
@@ -278,7 +274,7 @@
     case CELL_TYPE:
     case WEAK_CELL_TYPE:
     case PROTOTYPE_INFO_TYPE:
-    case SLOPPY_BLOCK_WITH_EVAL_CONTEXT_EXTENSION_TYPE:
+    case CONTEXT_EXTENSION_TYPE:
       UNREACHABLE();
       return kNone;
   }
@@ -286,16 +282,16 @@
   return kNone;
 }
 
-Type::bitset BitsetType::Lub(i::Object* value) {
+AstType::bitset AstBitsetType::Lub(i::Object* value) {
   DisallowHeapAllocation no_allocation;
   if (value->IsNumber()) {
     return Lub(value->Number()) &
-        (value->IsSmi() ? kTaggedSigned : kTaggedPointer);
+           (value->IsSmi() ? kTaggedSigned : kTaggedPointer);
   }
   return Lub(i::HeapObject::cast(value)->map());
 }
 
-Type::bitset BitsetType::Lub(double value) {
+AstType::bitset AstBitsetType::Lub(double value) {
   DisallowHeapAllocation no_allocation;
   if (i::IsMinusZero(value)) return kMinusZero;
   if (std::isnan(value)) return kNaN;
@@ -303,9 +299,8 @@
   return kOtherNumber;
 }
 
-
 // Minimum values of plain numeric bitsets.
-const BitsetType::Boundary BitsetType::BoundariesArray[] = {
+const AstBitsetType::Boundary AstBitsetType::BoundariesArray[] = {
     {kOtherNumber, kPlainNumber, -V8_INFINITY},
     {kOtherSigned32, kNegative32, kMinInt},
     {kNegative31, kNegative31, -0x40000000},
@@ -314,45 +309,47 @@
     {kOtherUnsigned32, kUnsigned32, 0x80000000},
     {kOtherNumber, kPlainNumber, static_cast<double>(kMaxUInt32) + 1}};
 
-const BitsetType::Boundary* BitsetType::Boundaries() { return BoundariesArray; }
+const AstBitsetType::Boundary* AstBitsetType::Boundaries() {
+  return BoundariesArray;
+}
 
-size_t BitsetType::BoundariesSize() {
+size_t AstBitsetType::BoundariesSize() {
   // Windows doesn't like arraysize here.
   // return arraysize(BoundariesArray);
   return 7;
 }
 
-Type::bitset BitsetType::ExpandInternals(Type::bitset bits) {
+AstType::bitset AstBitsetType::ExpandInternals(AstType::bitset bits) {
   DisallowHeapAllocation no_allocation;
-  if (!(bits & SEMANTIC(kPlainNumber))) return bits;  // Shortcut.
+  if (!(bits & AST_SEMANTIC(kPlainNumber))) return bits;  // Shortcut.
   const Boundary* boundaries = Boundaries();
   for (size_t i = 0; i < BoundariesSize(); ++i) {
-    DCHECK(BitsetType::Is(boundaries[i].internal, boundaries[i].external));
-    if (bits & SEMANTIC(boundaries[i].internal))
-      bits |= SEMANTIC(boundaries[i].external);
+    DCHECK(AstBitsetType::Is(boundaries[i].internal, boundaries[i].external));
+    if (bits & AST_SEMANTIC(boundaries[i].internal))
+      bits |= AST_SEMANTIC(boundaries[i].external);
   }
   return bits;
 }
 
-Type::bitset BitsetType::Lub(double min, double max) {
+AstType::bitset AstBitsetType::Lub(double min, double max) {
   DisallowHeapAllocation no_allocation;
   int lub = kNone;
   const Boundary* mins = Boundaries();
 
   for (size_t i = 1; i < BoundariesSize(); ++i) {
     if (min < mins[i].min) {
-      lub |= mins[i-1].internal;
+      lub |= mins[i - 1].internal;
       if (max < mins[i].min) return lub;
     }
   }
   return lub | mins[BoundariesSize() - 1].internal;
 }
 
-Type::bitset BitsetType::NumberBits(bitset bits) {
-  return SEMANTIC(bits & kPlainNumber);
+AstType::bitset AstBitsetType::NumberBits(bitset bits) {
+  return AST_SEMANTIC(bits & kPlainNumber);
 }
 
-Type::bitset BitsetType::Glb(double min, double max) {
+AstType::bitset AstBitsetType::Glb(double min, double max) {
   DisallowHeapAllocation no_allocation;
   int glb = kNone;
   const Boundary* mins = Boundaries();
@@ -368,16 +365,16 @@
   }
   // OtherNumber also contains float numbers, so it can never be
   // in the greatest lower bound.
-  return glb & ~(SEMANTIC(kOtherNumber));
+  return glb & ~(AST_SEMANTIC(kOtherNumber));
 }
 
-double BitsetType::Min(bitset bits) {
+double AstBitsetType::Min(bitset bits) {
   DisallowHeapAllocation no_allocation;
-  DCHECK(Is(SEMANTIC(bits), kNumber));
+  DCHECK(Is(AST_SEMANTIC(bits), kNumber));
   const Boundary* mins = Boundaries();
-  bool mz = SEMANTIC(bits & kMinusZero);
+  bool mz = AST_SEMANTIC(bits & kMinusZero);
   for (size_t i = 0; i < BoundariesSize(); ++i) {
-    if (Is(SEMANTIC(mins[i].internal), bits)) {
+    if (Is(AST_SEMANTIC(mins[i].internal), bits)) {
       return mz ? std::min(0.0, mins[i].min) : mins[i].min;
     }
   }
@@ -385,50 +382,49 @@
   return std::numeric_limits<double>::quiet_NaN();
 }
 
-double BitsetType::Max(bitset bits) {
+double AstBitsetType::Max(bitset bits) {
   DisallowHeapAllocation no_allocation;
-  DCHECK(Is(SEMANTIC(bits), kNumber));
+  DCHECK(Is(AST_SEMANTIC(bits), kNumber));
   const Boundary* mins = Boundaries();
-  bool mz = SEMANTIC(bits & kMinusZero);
-  if (BitsetType::Is(SEMANTIC(mins[BoundariesSize() - 1].internal), bits)) {
+  bool mz = AST_SEMANTIC(bits & kMinusZero);
+  if (AstBitsetType::Is(AST_SEMANTIC(mins[BoundariesSize() - 1].internal),
+                        bits)) {
     return +V8_INFINITY;
   }
   for (size_t i = BoundariesSize() - 1; i-- > 0;) {
-    if (Is(SEMANTIC(mins[i].internal), bits)) {
-      return mz ?
-          std::max(0.0, mins[i+1].min - 1) : mins[i+1].min - 1;
+    if (Is(AST_SEMANTIC(mins[i].internal), bits)) {
+      return mz ? std::max(0.0, mins[i + 1].min - 1) : mins[i + 1].min - 1;
     }
   }
   if (mz) return 0;
   return std::numeric_limits<double>::quiet_NaN();
 }
 
-
 // -----------------------------------------------------------------------------
 // Predicates.
 
-bool Type::SimplyEquals(Type* that) {
+bool AstType::SimplyEquals(AstType* that) {
   DisallowHeapAllocation no_allocation;
   if (this->IsClass()) {
-    return that->IsClass()
-        && *this->AsClass()->Map() == *that->AsClass()->Map();
+    return that->IsClass() &&
+           *this->AsClass()->Map() == *that->AsClass()->Map();
   }
   if (this->IsConstant()) {
-    return that->IsConstant()
-        && *this->AsConstant()->Value() == *that->AsConstant()->Value();
+    return that->IsConstant() &&
+           *this->AsConstant()->Value() == *that->AsConstant()->Value();
   }
   if (this->IsContext()) {
-    return that->IsContext()
-        && this->AsContext()->Outer()->Equals(that->AsContext()->Outer());
+    return that->IsContext() &&
+           this->AsContext()->Outer()->Equals(that->AsContext()->Outer());
   }
   if (this->IsArray()) {
-    return that->IsArray()
-        && this->AsArray()->Element()->Equals(that->AsArray()->Element());
+    return that->IsArray() &&
+           this->AsArray()->Element()->Equals(that->AsArray()->Element());
   }
   if (this->IsFunction()) {
     if (!that->IsFunction()) return false;
-    FunctionType* this_fun = this->AsFunction();
-    FunctionType* that_fun = that->AsFunction();
+    AstFunctionType* this_fun = this->AsFunction();
+    AstFunctionType* that_fun = that->AsFunction();
     if (this_fun->Arity() != that_fun->Arity() ||
         !this_fun->Result()->Equals(that_fun->Result()) ||
         !this_fun->Receiver()->Equals(that_fun->Receiver())) {
@@ -441,8 +437,8 @@
   }
   if (this->IsTuple()) {
     if (!that->IsTuple()) return false;
-    TupleType* this_tuple = this->AsTuple();
-    TupleType* that_tuple = that->AsTuple();
+    AstTupleType* this_tuple = this->AsTuple();
+    AstTupleType* that_tuple = that->AsTuple();
     if (this_tuple->Arity() != that_tuple->Arity()) {
       return false;
     }
@@ -455,26 +451,25 @@
   return false;
 }
 
-Type::bitset Type::Representation() {
-  return REPRESENTATION(this->BitsetLub());
+AstType::bitset AstType::Representation() {
+  return AST_REPRESENTATION(this->BitsetLub());
 }
 
-
 // Check if [this] <= [that].
-bool Type::SlowIs(Type* that) {
+bool AstType::SlowIs(AstType* that) {
   DisallowHeapAllocation no_allocation;
 
   // Fast bitset cases
   if (that->IsBitset()) {
-    return BitsetType::Is(this->BitsetLub(), that->AsBitset());
+    return AstBitsetType::Is(this->BitsetLub(), that->AsBitset());
   }
 
   if (this->IsBitset()) {
-    return BitsetType::Is(this->AsBitset(), that->BitsetGlb());
+    return AstBitsetType::Is(this->AsBitset(), that->BitsetGlb());
   }
 
   // Check the representations.
-  if (!BitsetType::Is(Representation(), that->Representation())) {
+  if (!AstBitsetType::Is(Representation(), that->Representation())) {
     return false;
   }
 
@@ -482,19 +477,19 @@
   return SemanticIs(that);
 }
 
-
-// Check if SEMANTIC([this]) <= SEMANTIC([that]). The result of the method
+// Check if AST_SEMANTIC([this]) <= AST_SEMANTIC([that]).
+// The result of the method should be independent of the
+// representation axis of the types.
-bool Type::SemanticIs(Type* that) {
+bool AstType::SemanticIs(AstType* that) {
   DisallowHeapAllocation no_allocation;
 
   if (this == that) return true;
 
   if (that->IsBitset()) {
-    return BitsetType::Is(SEMANTIC(this->BitsetLub()), that->AsBitset());
+    return AstBitsetType::Is(AST_SEMANTIC(this->BitsetLub()), that->AsBitset());
   }
   if (this->IsBitset()) {
-    return BitsetType::Is(SEMANTIC(this->AsBitset()), that->BitsetGlb());
+    return AstBitsetType::Is(AST_SEMANTIC(this->AsBitset()), that->BitsetGlb());
   }
 
   // (T1 \/ ... \/ Tn) <= T  if  (T1 <= T) /\ ... /\ (Tn <= T)
@@ -525,7 +520,7 @@
 }
 
 // Most precise _current_ type of a value (usually its class).
-Type* Type::NowOf(i::Object* value, Zone* zone) {
+AstType* AstType::NowOf(i::Object* value, Zone* zone) {
   if (value->IsSmi() ||
       i::HeapObject::cast(value)->map()->instance_type() == HEAP_NUMBER_TYPE) {
     return Of(value, zone);
@@ -533,7 +528,7 @@
   return Class(i::handle(i::HeapObject::cast(value)->map()), zone);
 }
 
-bool Type::NowContains(i::Object* value) {
+bool AstType::NowContains(i::Object* value) {
   DisallowHeapAllocation no_allocation;
   if (this->IsAny()) return true;
   if (value->IsHeapObject()) {
@@ -545,7 +540,7 @@
   return this->Contains(value);
 }
 
-bool Type::NowIs(Type* that) {
+bool AstType::NowIs(AstType* that) {
   DisallowHeapAllocation no_allocation;
 
   // TODO(rossberg): this is incorrect for
@@ -563,27 +558,25 @@
   return this->Is(that);
 }
 
-
 // Check if [this] contains only (currently) stable classes.
-bool Type::NowStable() {
+bool AstType::NowStable() {
   DisallowHeapAllocation no_allocation;
   return !this->IsClass() || this->AsClass()->Map()->is_stable();
 }
 
-
 // Check if [this] and [that] overlap.
-bool Type::Maybe(Type* that) {
+bool AstType::Maybe(AstType* that) {
   DisallowHeapAllocation no_allocation;
 
   // Take care of the representation part (and also approximate
   // the semantic part).
-  if (!BitsetType::IsInhabited(this->BitsetLub() & that->BitsetLub()))
+  if (!AstBitsetType::IsInhabited(this->BitsetLub() & that->BitsetLub()))
     return false;
 
   return SemanticMaybe(that);
 }
 
-bool Type::SemanticMaybe(Type* that) {
+bool AstType::SemanticMaybe(AstType* that) {
   DisallowHeapAllocation no_allocation;
 
   // (T1 \/ ... \/ Tn) overlaps T  if  (T1 overlaps T) \/ ... \/ (Tn overlaps T)
@@ -602,7 +595,8 @@
     return false;
   }
 
-  if (!BitsetType::SemanticIsInhabited(this->BitsetLub() & that->BitsetLub()))
+  if (!AstBitsetType::SemanticIsInhabited(this->BitsetLub() &
+                                          that->BitsetLub()))
     return false;
 
   if (this->IsBitset() && that->IsBitset()) return true;
@@ -617,12 +611,12 @@
       return Overlap(this->AsRange(), that->AsRange());
     }
     if (that->IsBitset()) {
-      bitset number_bits = BitsetType::NumberBits(that->AsBitset());
-      if (number_bits == BitsetType::kNone) {
+      bitset number_bits = AstBitsetType::NumberBits(that->AsBitset());
+      if (number_bits == AstBitsetType::kNone) {
         return false;
       }
-      double min = std::max(BitsetType::Min(number_bits), this->Min());
-      double max = std::min(BitsetType::Max(number_bits), this->Max());
+      double min = std::max(AstBitsetType::Min(number_bits), this->Min());
+      double max = std::min(AstBitsetType::Max(number_bits), this->Max());
       return min <= max;
     }
   }
@@ -635,9 +629,8 @@
   return this->SimplyEquals(that);
 }
 
-
 // Return the range in [this], or [NULL].
-Type* Type::GetRange() {
+AstType* AstType::GetRange() {
   DisallowHeapAllocation no_allocation;
   if (this->IsRange()) return this;
   if (this->IsUnion() && this->AsUnion()->Get(1)->IsRange()) {
@@ -646,19 +639,19 @@
   return NULL;
 }
 
-bool Type::Contains(i::Object* value) {
+bool AstType::Contains(i::Object* value) {
   DisallowHeapAllocation no_allocation;
   for (Iterator<i::Object> it = this->Constants(); !it.Done(); it.Advance()) {
     if (*it.Current() == value) return true;
   }
   if (IsInteger(value)) {
-    Type* range = this->GetRange();
+    AstType* range = this->GetRange();
     if (range != NULL && Contains(range->AsRange(), value)) return true;
   }
-  return BitsetType::New(BitsetType::Lub(value))->Is(this);
+  return AstBitsetType::New(AstBitsetType::Lub(value))->Is(this);
 }
 
-bool UnionType::Wellformed() {
+bool AstUnionType::Wellformed() {
   DisallowHeapAllocation no_allocation;
   // This checks the invariants of the union representation:
   // 1. There are at least two elements.
@@ -668,7 +661,7 @@
   // 5. No element (except the bitset) is a subtype of any other.
   // 6. If there is a range, then the bitset type does not contain
   //    plain number bits.
-  DCHECK(this->Length() >= 2);  // (1)
+  DCHECK(this->Length() >= 2);       // (1)
   DCHECK(this->Get(0)->IsBitset());  // (2a)
 
   for (int i = 0; i < this->Length(); ++i) {
@@ -681,26 +674,23 @@
     }
   }
   DCHECK(!this->Get(1)->IsRange() ||
-         (BitsetType::NumberBits(this->Get(0)->AsBitset()) ==
-          BitsetType::kNone));  // (6)
+         (AstBitsetType::NumberBits(this->Get(0)->AsBitset()) ==
+          AstBitsetType::kNone));  // (6)
   return true;
 }
 
-
 // -----------------------------------------------------------------------------
 // Union and intersection
 
-
 static bool AddIsSafe(int x, int y) {
-  return x >= 0 ?
-      y <= std::numeric_limits<int>::max() - x :
-      y >= std::numeric_limits<int>::min() - x;
+  return x >= 0 ? y <= std::numeric_limits<int>::max() - x
+                : y >= std::numeric_limits<int>::min() - x;
 }
 
-Type* Type::Intersect(Type* type1, Type* type2, Zone* zone) {
+AstType* AstType::Intersect(AstType* type1, AstType* type2, Zone* zone) {
   // Fast case: bit sets.
   if (type1->IsBitset() && type2->IsBitset()) {
-    return BitsetType::New(type1->AsBitset() & type2->AsBitset());
+    return AstBitsetType::New(type1->AsBitset() & type2->AsBitset());
   }
 
   // Fast case: top or bottom types.
@@ -731,38 +721,39 @@
   }
 
   bitset bits =
-      SEMANTIC(type1->BitsetGlb() & type2->BitsetGlb()) | representation;
+      AST_SEMANTIC(type1->BitsetGlb() & type2->BitsetGlb()) | representation;
   int size1 = type1->IsUnion() ? type1->AsUnion()->Length() : 1;
   int size2 = type2->IsUnion() ? type2->AsUnion()->Length() : 1;
   if (!AddIsSafe(size1, size2)) return Any();
   int size = size1 + size2;
   if (!AddIsSafe(size, 2)) return Any();
   size += 2;
-  Type* result_type = UnionType::New(size, zone);
-  UnionType* result = result_type->AsUnion();
+  AstType* result_type = AstUnionType::New(size, zone);
+  AstUnionType* result = result_type->AsUnion();
   size = 0;
 
   // Deal with bitsets.
-  result->Set(size++, BitsetType::New(bits));
+  result->Set(size++, AstBitsetType::New(bits));
 
-  RangeType::Limits lims = RangeType::Limits::Empty();
+  AstRangeType::Limits lims = AstRangeType::Limits::Empty();
   size = IntersectAux(type1, type2, result, size, &lims, zone);
 
   // If the range is not empty, then insert it into the union and
   // remove the number bits from the bitset.
   if (!lims.IsEmpty()) {
-    size = UpdateRange(RangeType::New(lims, representation, zone), result, size,
-                       zone);
+    size = UpdateRange(AstRangeType::New(lims, representation, zone), result,
+                       size, zone);
 
     // Remove the number bits.
-    bitset number_bits = BitsetType::NumberBits(bits);
+    bitset number_bits = AstBitsetType::NumberBits(bits);
     bits &= ~number_bits;
-    result->Set(0, BitsetType::New(bits));
+    result->Set(0, AstBitsetType::New(bits));
   }
   return NormalizeUnion(result_type, size, zone);
 }
 
-int Type::UpdateRange(Type* range, UnionType* result, int size, Zone* zone) {
+int AstType::UpdateRange(AstType* range, AstUnionType* result, int size,
+                         Zone* zone) {
   if (size == 1) {
     result->Set(size++, range);
   } else {
@@ -772,7 +763,7 @@
   }
 
   // Remove any components that just got subsumed.
-  for (int i = 2; i < size; ) {
+  for (int i = 2; i < size;) {
     if (result->Get(i)->SemanticIs(range)) {
       result->Set(i, result->Get(--size));
     } else {
@@ -782,26 +773,27 @@
   return size;
 }
 
-RangeType::Limits Type::ToLimits(bitset bits, Zone* zone) {
-  bitset number_bits = BitsetType::NumberBits(bits);
+AstRangeType::Limits AstType::ToLimits(bitset bits, Zone* zone) {
+  bitset number_bits = AstBitsetType::NumberBits(bits);
 
-  if (number_bits == BitsetType::kNone) {
-    return RangeType::Limits::Empty();
+  if (number_bits == AstBitsetType::kNone) {
+    return AstRangeType::Limits::Empty();
   }
 
-  return RangeType::Limits(BitsetType::Min(number_bits),
-                           BitsetType::Max(number_bits));
+  return AstRangeType::Limits(AstBitsetType::Min(number_bits),
+                              AstBitsetType::Max(number_bits));
 }
 
-RangeType::Limits Type::IntersectRangeAndBitset(Type* range, Type* bitset,
-                                                Zone* zone) {
-  RangeType::Limits range_lims(range->AsRange());
-  RangeType::Limits bitset_lims = ToLimits(bitset->AsBitset(), zone);
-  return RangeType::Limits::Intersect(range_lims, bitset_lims);
+AstRangeType::Limits AstType::IntersectRangeAndBitset(AstType* range,
+                                                      AstType* bitset,
+                                                      Zone* zone) {
+  AstRangeType::Limits range_lims(range->AsRange());
+  AstRangeType::Limits bitset_lims = ToLimits(bitset->AsBitset(), zone);
+  return AstRangeType::Limits::Intersect(range_lims, bitset_lims);
 }
 
-int Type::IntersectAux(Type* lhs, Type* rhs, UnionType* result, int size,
-                       RangeType::Limits* lims, Zone* zone) {
+int AstType::IntersectAux(AstType* lhs, AstType* rhs, AstUnionType* result,
+                          int size, AstRangeType::Limits* lims, Zone* zone) {
   if (lhs->IsUnion()) {
     for (int i = 0, n = lhs->AsUnion()->Length(); i < n; ++i) {
       size =
@@ -817,31 +809,33 @@
     return size;
   }
 
-  if (!BitsetType::SemanticIsInhabited(lhs->BitsetLub() & rhs->BitsetLub())) {
+  if (!AstBitsetType::SemanticIsInhabited(lhs->BitsetLub() &
+                                          rhs->BitsetLub())) {
     return size;
   }
 
   if (lhs->IsRange()) {
     if (rhs->IsBitset()) {
-      RangeType::Limits lim = IntersectRangeAndBitset(lhs, rhs, zone);
+      AstRangeType::Limits lim = IntersectRangeAndBitset(lhs, rhs, zone);
 
       if (!lim.IsEmpty()) {
-        *lims = RangeType::Limits::Union(lim, *lims);
+        *lims = AstRangeType::Limits::Union(lim, *lims);
       }
       return size;
     }
     if (rhs->IsClass()) {
-      *lims =
-          RangeType::Limits::Union(RangeType::Limits(lhs->AsRange()), *lims);
+      *lims = AstRangeType::Limits::Union(AstRangeType::Limits(lhs->AsRange()),
+                                          *lims);
     }
     if (rhs->IsConstant() && Contains(lhs->AsRange(), rhs->AsConstant())) {
       return AddToUnion(rhs, result, size, zone);
     }
     if (rhs->IsRange()) {
-      RangeType::Limits lim = RangeType::Limits::Intersect(
-          RangeType::Limits(lhs->AsRange()), RangeType::Limits(rhs->AsRange()));
+      AstRangeType::Limits lim =
+          AstRangeType::Limits::Intersect(AstRangeType::Limits(lhs->AsRange()),
+                                          AstRangeType::Limits(rhs->AsRange()));
       if (!lim.IsEmpty()) {
-        *lims = RangeType::Limits::Union(lim, *lims);
+        *lims = AstRangeType::Limits::Union(lim, *lims);
       }
     }
     return size;
@@ -862,29 +856,29 @@
   return size;
 }
 
-
 // Make sure that we produce a well-formed range and bitset:
 // If the range is non-empty, the number bits in the bitset should be
 // clear. Moreover, if we have a canonical range (such as Signed32),
 // we want to produce a bitset rather than a range.
-Type* Type::NormalizeRangeAndBitset(Type* range, bitset* bits, Zone* zone) {
+AstType* AstType::NormalizeRangeAndBitset(AstType* range, bitset* bits,
+                                          Zone* zone) {
   // Fast path: If the bitset does not mention numbers, we can just keep the
   // range.
-  bitset number_bits = BitsetType::NumberBits(*bits);
+  bitset number_bits = AstBitsetType::NumberBits(*bits);
   if (number_bits == 0) {
     return range;
   }
 
   // If the range is semantically contained within the bitset, return None and
   // leave the bitset untouched.
-  bitset range_lub = SEMANTIC(range->BitsetLub());
-  if (BitsetType::Is(range_lub, *bits)) {
+  bitset range_lub = AST_SEMANTIC(range->BitsetLub());
+  if (AstBitsetType::Is(range_lub, *bits)) {
     return None();
   }
 
   // Slow path: reconcile the bitset range and the range.
-  double bitset_min = BitsetType::Min(number_bits);
-  double bitset_max = BitsetType::Max(number_bits);
+  double bitset_min = AstBitsetType::Min(number_bits);
+  double bitset_max = AstBitsetType::Max(number_bits);
 
   double range_min = range->Min();
   double range_max = range->Max();
@@ -905,13 +899,13 @@
   if (bitset_max > range_max) {
     range_max = bitset_max;
   }
-  return RangeType::New(range_min, range_max, BitsetType::kNone, zone);
+  return AstRangeType::New(range_min, range_max, AstBitsetType::kNone, zone);
 }
 
-Type* Type::Union(Type* type1, Type* type2, Zone* zone) {
+AstType* AstType::Union(AstType* type1, AstType* type2, Zone* zone) {
   // Fast case: bit sets.
   if (type1->IsBitset() && type2->IsBitset()) {
-    return BitsetType::New(type1->AsBitset() | type2->AsBitset());
+    return AstBitsetType::New(type1->AsBitset() | type2->AsBitset());
   }
 
   // Fast case: top or bottom types.
@@ -936,30 +930,30 @@
   int size = size1 + size2;
   if (!AddIsSafe(size, 2)) return Any();
   size += 2;
-  Type* result_type = UnionType::New(size, zone);
-  UnionType* result = result_type->AsUnion();
+  AstType* result_type = AstUnionType::New(size, zone);
+  AstUnionType* result = result_type->AsUnion();
   size = 0;
 
   // Compute the new bitset.
-  bitset new_bitset = SEMANTIC(type1->BitsetGlb() | type2->BitsetGlb());
+  bitset new_bitset = AST_SEMANTIC(type1->BitsetGlb() | type2->BitsetGlb());
 
   // Deal with ranges.
-  Type* range = None();
-  Type* range1 = type1->GetRange();
-  Type* range2 = type2->GetRange();
+  AstType* range = None();
+  AstType* range1 = type1->GetRange();
+  AstType* range2 = type2->GetRange();
   if (range1 != NULL && range2 != NULL) {
-    RangeType::Limits lims =
-        RangeType::Limits::Union(RangeType::Limits(range1->AsRange()),
-                                 RangeType::Limits(range2->AsRange()));
-    Type* union_range = RangeType::New(lims, representation, zone);
+    AstRangeType::Limits lims =
+        AstRangeType::Limits::Union(AstRangeType::Limits(range1->AsRange()),
+                                    AstRangeType::Limits(range2->AsRange()));
+    AstType* union_range = AstRangeType::New(lims, representation, zone);
     range = NormalizeRangeAndBitset(union_range, &new_bitset, zone);
   } else if (range1 != NULL) {
     range = NormalizeRangeAndBitset(range1, &new_bitset, zone);
   } else if (range2 != NULL) {
     range = NormalizeRangeAndBitset(range2, &new_bitset, zone);
   }
-  new_bitset = SEMANTIC(new_bitset) | representation;
-  Type* bits = BitsetType::New(new_bitset);
+  new_bitset = AST_SEMANTIC(new_bitset) | representation;
+  AstType* bits = AstBitsetType::New(new_bitset);
   result->Set(size++, bits);
   if (!range->IsNone()) result->Set(size++, range);
 
@@ -968,10 +962,10 @@
   return NormalizeUnion(result_type, size, zone);
 }
 
-
 // Add [type] to [result] unless [type] is bitset, range, or already subsumed.
 // Return new size of [result].
-int Type::AddToUnion(Type* type, UnionType* result, int size, Zone* zone) {
+int AstType::AddToUnion(AstType* type, AstUnionType* result, int size,
+                        Zone* zone) {
   if (type->IsBitset() || type->IsRange()) return size;
   if (type->IsUnion()) {
     for (int i = 0, n = type->AsUnion()->Length(); i < n; ++i) {
@@ -986,8 +980,8 @@
   return size;
 }
 
-Type* Type::NormalizeUnion(Type* union_type, int size, Zone* zone) {
-  UnionType* unioned = union_type->AsUnion();
+AstType* AstType::NormalizeUnion(AstType* union_type, int size, Zone* zone) {
+  AstUnionType* unioned = union_type->AsUnion();
   DCHECK(size >= 1);
   DCHECK(unioned->Get(0)->IsBitset());
   // If the union has just one element, return it.
@@ -996,15 +990,15 @@
   }
   bitset bits = unioned->Get(0)->AsBitset();
   // If the union only consists of a range, we can get rid of the union.
-  if (size == 2 && SEMANTIC(bits) == BitsetType::kNone) {
-    bitset representation = REPRESENTATION(bits);
+  if (size == 2 && AST_SEMANTIC(bits) == AstBitsetType::kNone) {
+    bitset representation = AST_REPRESENTATION(bits);
     if (representation == unioned->Get(1)->Representation()) {
       return unioned->Get(1);
     }
     if (unioned->Get(1)->IsRange()) {
-      return RangeType::New(unioned->Get(1)->AsRange()->Min(),
-                            unioned->Get(1)->AsRange()->Max(),
-                            unioned->Get(0)->AsBitset(), zone);
+      return AstRangeType::New(unioned->Get(1)->AsRange()->Min(),
+                               unioned->Get(1)->AsRange()->Max(),
+                               unioned->Get(0)->AsBitset(), zone);
     }
   }
   unioned->Shrink(size);
@@ -1012,26 +1006,23 @@
   return union_type;
 }
 
-
 // -----------------------------------------------------------------------------
 // Component extraction
 
 // static
-Type* Type::Representation(Type* t, Zone* zone) {
-  return BitsetType::New(t->Representation());
+AstType* AstType::Representation(AstType* t, Zone* zone) {
+  return AstBitsetType::New(t->Representation());
 }
 
-
 // static
-Type* Type::Semantic(Type* t, Zone* zone) {
-  return Intersect(t, BitsetType::New(BitsetType::kSemantic), zone);
+AstType* AstType::Semantic(AstType* t, Zone* zone) {
+  return Intersect(t, AstBitsetType::New(AstBitsetType::kSemantic), zone);
 }
 
-
 // -----------------------------------------------------------------------------
 // Iteration.
 
-int Type::NumClasses() {
+int AstType::NumClasses() {
   DisallowHeapAllocation no_allocation;
   if (this->IsClass()) {
     return 1;
@@ -1046,7 +1037,7 @@
   }
 }
 
-int Type::NumConstants() {
+int AstType::NumConstants() {
   DisallowHeapAllocation no_allocation;
   if (this->IsConstant()) {
     return 1;
@@ -1062,48 +1053,47 @@
 }
 
 template <class T>
-Type* Type::Iterator<T>::get_type() {
+AstType* AstType::Iterator<T>::get_type() {
   DCHECK(!Done());
   return type_->IsUnion() ? type_->AsUnion()->Get(index_) : type_;
 }
 
-
 // C++ cannot specialise nested templates, so we have to go through this
 // contortion with an auxiliary template to simulate it.
 template <class T>
 struct TypeImplIteratorAux {
-  static bool matches(Type* type);
-  static i::Handle<T> current(Type* type);
+  static bool matches(AstType* type);
+  static i::Handle<T> current(AstType* type);
 };
 
 template <>
 struct TypeImplIteratorAux<i::Map> {
-  static bool matches(Type* type) { return type->IsClass(); }
-  static i::Handle<i::Map> current(Type* type) {
+  static bool matches(AstType* type) { return type->IsClass(); }
+  static i::Handle<i::Map> current(AstType* type) {
     return type->AsClass()->Map();
   }
 };
 
 template <>
 struct TypeImplIteratorAux<i::Object> {
-  static bool matches(Type* type) { return type->IsConstant(); }
-  static i::Handle<i::Object> current(Type* type) {
+  static bool matches(AstType* type) { return type->IsConstant(); }
+  static i::Handle<i::Object> current(AstType* type) {
     return type->AsConstant()->Value();
   }
 };
 
 template <class T>
-bool Type::Iterator<T>::matches(Type* type) {
+bool AstType::Iterator<T>::matches(AstType* type) {
   return TypeImplIteratorAux<T>::matches(type);
 }
 
 template <class T>
-i::Handle<T> Type::Iterator<T>::Current() {
+i::Handle<T> AstType::Iterator<T>::Current() {
   return TypeImplIteratorAux<T>::current(get_type());
 }
 
 template <class T>
-void Type::Iterator<T>::Advance() {
+void AstType::Iterator<T>::Advance() {
   DisallowHeapAllocation no_allocation;
   ++index_;
   if (type_->IsUnion()) {
@@ -1116,31 +1106,33 @@
   index_ = -1;
 }
 
-
 // -----------------------------------------------------------------------------
 // Printing.
 
-const char* BitsetType::Name(bitset bits) {
+const char* AstBitsetType::Name(bitset bits) {
   switch (bits) {
-    case REPRESENTATION(kAny): return "Any";
-    #define RETURN_NAMED_REPRESENTATION_TYPE(type, value) \
-    case REPRESENTATION(k##type): return #type;
-    REPRESENTATION_BITSET_TYPE_LIST(RETURN_NAMED_REPRESENTATION_TYPE)
-    #undef RETURN_NAMED_REPRESENTATION_TYPE
+    case AST_REPRESENTATION(kAny):
+      return "Any";
+#define RETURN_NAMED_REPRESENTATION_TYPE(type, value) \
+  case AST_REPRESENTATION(k##type):                   \
+    return #type;
+      AST_REPRESENTATION_BITSET_TYPE_LIST(RETURN_NAMED_REPRESENTATION_TYPE)
+#undef RETURN_NAMED_REPRESENTATION_TYPE
 
-    #define RETURN_NAMED_SEMANTIC_TYPE(type, value) \
-    case SEMANTIC(k##type): return #type;
-    SEMANTIC_BITSET_TYPE_LIST(RETURN_NAMED_SEMANTIC_TYPE)
-    INTERNAL_BITSET_TYPE_LIST(RETURN_NAMED_SEMANTIC_TYPE)
-    #undef RETURN_NAMED_SEMANTIC_TYPE
+#define RETURN_NAMED_SEMANTIC_TYPE(type, value) \
+  case AST_SEMANTIC(k##type):                   \
+    return #type;
+      AST_SEMANTIC_BITSET_TYPE_LIST(RETURN_NAMED_SEMANTIC_TYPE)
+      AST_INTERNAL_BITSET_TYPE_LIST(RETURN_NAMED_SEMANTIC_TYPE)
+#undef RETURN_NAMED_SEMANTIC_TYPE
 
     default:
       return NULL;
   }
 }
 
-void BitsetType::Print(std::ostream& os,  // NOLINT
-                       bitset bits) {
+void AstBitsetType::Print(std::ostream& os,  // NOLINT
+                          bitset bits) {
   DisallowHeapAllocation no_allocation;
   const char* name = Name(bits);
   if (name != NULL) {
@@ -1150,13 +1142,13 @@
 
   // clang-format off
   static const bitset named_bitsets[] = {
-#define BITSET_CONSTANT(type, value) REPRESENTATION(k##type),
-    REPRESENTATION_BITSET_TYPE_LIST(BITSET_CONSTANT)
+#define BITSET_CONSTANT(type, value) AST_REPRESENTATION(k##type),
+    AST_REPRESENTATION_BITSET_TYPE_LIST(BITSET_CONSTANT)
 #undef BITSET_CONSTANT
 
-#define BITSET_CONSTANT(type, value) SEMANTIC(k##type),
-    INTERNAL_BITSET_TYPE_LIST(BITSET_CONSTANT)
-    SEMANTIC_BITSET_TYPE_LIST(BITSET_CONSTANT)
+#define BITSET_CONSTANT(type, value) AST_SEMANTIC(k##type),
+    AST_INTERNAL_BITSET_TYPE_LIST(BITSET_CONSTANT)
+    AST_SEMANTIC_BITSET_TYPE_LIST(BITSET_CONSTANT)
 #undef BITSET_CONSTANT
   };
   // clang-format on
@@ -1176,14 +1168,14 @@
   os << ")";
 }
 
-void Type::PrintTo(std::ostream& os, PrintDimension dim) {
+void AstType::PrintTo(std::ostream& os, PrintDimension dim) {
   DisallowHeapAllocation no_allocation;
   if (dim != REPRESENTATION_DIM) {
     if (this->IsBitset()) {
-      BitsetType::Print(os, SEMANTIC(this->AsBitset()));
+      AstBitsetType::Print(os, AST_SEMANTIC(this->AsBitset()));
     } else if (this->IsClass()) {
       os << "Class(" << static_cast<void*>(*this->AsClass()->Map()) << " < ";
-      BitsetType::New(BitsetType::Lub(this))->PrintTo(os, dim);
+      AstBitsetType::New(AstBitsetType::Lub(this))->PrintTo(os, dim);
       os << ")";
     } else if (this->IsConstant()) {
       os << "Constant(" << Brief(*this->AsConstant()->Value()) << ")";
@@ -1201,7 +1193,7 @@
     } else if (this->IsUnion()) {
       os << "(";
       for (int i = 0, n = this->AsUnion()->Length(); i < n; ++i) {
-        Type* type_i = this->AsUnion()->Get(i);
+        AstType* type_i = this->AsUnion()->Get(i);
         if (i > 0) os << " | ";
         type_i->PrintTo(os, dim);
       }
@@ -1225,7 +1217,7 @@
     } else if (this->IsTuple()) {
       os << "<";
       for (int i = 0, n = this->AsTuple()->Arity(); i < n; ++i) {
-        Type* type_i = this->AsTuple()->Element(i);
+        AstType* type_i = this->AsTuple()->Element(i);
         if (i > 0) os << ", ";
         type_i->PrintTo(os, dim);
       }
@@ -1236,34 +1228,33 @@
   }
   if (dim == BOTH_DIMS) os << "/";
   if (dim != SEMANTIC_DIM) {
-    BitsetType::Print(os, REPRESENTATION(this->BitsetLub()));
+    AstBitsetType::Print(os, AST_REPRESENTATION(this->BitsetLub()));
   }
 }
 
-
 #ifdef DEBUG
-void Type::Print() {
+void AstType::Print() {
   OFStream os(stdout);
   PrintTo(os);
   os << std::endl;
 }
-void BitsetType::Print(bitset bits) {
+void AstBitsetType::Print(bitset bits) {
   OFStream os(stdout);
   Print(os, bits);
   os << std::endl;
 }
 #endif
 
-BitsetType::bitset BitsetType::SignedSmall() {
+AstBitsetType::bitset AstBitsetType::SignedSmall() {
   return i::SmiValuesAre31Bits() ? kSigned31 : kSigned32;
 }
 
-BitsetType::bitset BitsetType::UnsignedSmall() {
+AstBitsetType::bitset AstBitsetType::UnsignedSmall() {
   return i::SmiValuesAre31Bits() ? kUnsigned30 : kUnsigned31;
 }
 
 #define CONSTRUCT_SIMD_TYPE(NAME, Name, name, lane_count, lane_type) \
-  Type* Type::Name(Isolate* isolate, Zone* zone) {                   \
+  AstType* AstType::Name(Isolate* isolate, Zone* zone) {             \
     return Class(i::handle(isolate->heap()->name##_map()), zone);    \
   }
 SIMD128_TYPES(CONSTRUCT_SIMD_TYPE)
@@ -1272,8 +1263,8 @@
 // -----------------------------------------------------------------------------
 // Instantiations.
 
-template class Type::Iterator<i::Map>;
-template class Type::Iterator<i::Object>;
+template class AstType::Iterator<i::Map>;
+template class AstType::Iterator<i::Object>;
 
 }  // namespace internal
 }  // namespace v8
diff --git a/src/ast/ast-types.h b/src/ast/ast-types.h
new file mode 100644
index 0000000..0b6e23f
--- /dev/null
+++ b/src/ast/ast-types.h
@@ -0,0 +1,1024 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_AST_AST_TYPES_H_
+#define V8_AST_AST_TYPES_H_
+
+#include "src/conversions.h"
+#include "src/handles.h"
+#include "src/objects.h"
+#include "src/ostreams.h"
+
+namespace v8 {
+namespace internal {
+
+// SUMMARY
+//
+// A simple type system for compiler-internal use. It is based entirely on
+// union types, and all subtyping hence amounts to set inclusion. Besides the
+// obvious primitive types and some predefined unions, the type language also
+// can express class types (a.k.a. specific maps) and singleton types (i.e.,
+// concrete constants).
+//
+// Types consist of two dimensions: semantic (value range) and representation.
+// Both are related through subtyping.
+//
+//
+// SEMANTIC DIMENSION
+//
+// The following equations and inequations hold for the semantic axis:
+//
+//   None <= T
+//   T <= Any
+//
+//   Number = Signed32 \/ Unsigned32 \/ Double
+//   Smi <= Signed32
+//   Name = String \/ Symbol
+//   UniqueName = InternalizedString \/ Symbol
+//   InternalizedString < String
+//
+//   Receiver = Object \/ Proxy
+//   Array < Object
+//   Function < Object
+//   RegExp < Object
+//   OtherUndetectable < Object
+//   DetectableReceiver = Receiver - OtherUndetectable
+//
+//   Class(map) < T   iff instance_type(map) < T
+//   Constant(x) < T  iff instance_type(map(x)) < T
+//   Array(T) < Array
+//   Function(R, S, T0, T1, ...) < Function
+//   Context(T) < Internal
+//
+// Both structural Array and Function types are invariant in all parameters;
+// relaxing this would make Union and Intersect operations more involved.
+// There is no subtyping relation between Array, Function, or Context types
+// and respective Constant types, since these types cannot be reconstructed
+// for arbitrary heap values.
+// Note also that Constant(x) < Class(map(x)) does _not_ hold, since x's map can
+// change! (Its instance type cannot, however.)
+// TODO(rossberg): the latter is not currently true for proxies, because of fix,
+// but will hold once we implement direct proxies.
+// However, we also define a 'temporal' variant of the subtyping relation that
+// considers the _current_ state only, i.e., Constant(x) <_now Class(map(x)).
+//
+//
+// REPRESENTATIONAL DIMENSION
+//
+// For the representation axis, the following holds:
+//
+//   None <= R
+//   R <= Any
+//
+//   UntaggedInt = UntaggedInt1 \/ UntaggedInt8 \/
+//                 UntaggedInt16 \/ UntaggedInt32
+//   UntaggedFloat = UntaggedFloat32 \/ UntaggedFloat64
+//   UntaggedNumber = UntaggedInt \/ UntaggedFloat
+//   Untagged = UntaggedNumber \/ UntaggedPtr
+//   Tagged = TaggedInt \/ TaggedPtr
+//
+// Subtyping relates the two dimensions, for example:
+//
+//   Number <= Tagged \/ UntaggedNumber
+//   Object <= TaggedPtr \/ UntaggedPtr
+//
+// That holds because the semantic type constructors defined by the API create
+// types that allow for all possible representations, and dually, the ones for
+// representation types initially include all semantic ranges. Representations
+// can then e.g. be narrowed for a given semantic type using intersection:
+//
+//   SignedSmall /\ TaggedInt       (a 'smi')
+//   Number /\ TaggedPtr            (a heap number)
+//
+//
+// RANGE TYPES
+//
+// A range type represents a continuous integer interval by its minimum and
+// maximum value.  Either value may be an infinity, in which case that infinity
+// itself is also included in the range.   A range never contains NaN or -0.
+//
+// If a value v happens to be an integer n, then Constant(v) is considered a
+// subtype of Range(n, n) (and therefore also a subtype of any larger range).
+// In order to avoid large unions, however, it is usually a good idea to use
+// Range rather than Constant.
+//
+//
+// PREDICATES
+//
+// There are two main functions for testing types:
+//
+//   T1->Is(T2)     -- tests whether T1 is included in T2 (i.e., T1 <= T2)
+//   T1->Maybe(T2)  -- tests whether T1 and T2 overlap (i.e., T1 /\ T2 =/= 0)
+//
+// Typically, the former is to be used to select representations (e.g., via
+// T->Is(SignedSmall())), and the latter to check whether a specific case needs
+// handling (e.g., via T->Maybe(Number())).
+//
+// There is no functionality to discover whether a type is a leaf in the
+// lattice. That is intentional. It should always be possible to refine the
+// lattice (e.g., splitting up number types further) without invalidating any
+// existing assumptions or tests.
+// Consequently, do not normally use Equals for type tests, always use Is!
+//
+// The NowIs operator implements state-sensitive subtyping, as described above.
+// Any compilation decision based on such temporary properties requires runtime
+// guarding!
+//
+//
+// PROPERTIES
+//
+// Various formal properties hold for constructors, operators, and predicates
+// over types. For example, constructors are injective and subtyping is a
+// complete partial order.
+//
+// See test/cctest/test-types.cc for a comprehensive executable specification,
+// especially with respect to the properties of the more exotic 'temporal'
+// constructors and predicates (those prefixed 'Now').
+//
+//
+// IMPLEMENTATION
+//
+// Internally, all 'primitive' types, and their unions, are represented as
+// bitsets. Bit 0 is reserved for tagging. Class is a heap pointer to the
+// respective map. Only structured types require allocation.
+// Note that the bitset representation is closed under both Union and Intersect.
+
+// -----------------------------------------------------------------------------
+// Values for bitset types
+
+// clang-format off
+
+#define AST_MASK_BITSET_TYPE_LIST(V) \
+  V(Representation, 0xffc00000u) \
+  V(Semantic,       0x003ffffeu)
+
+#define AST_REPRESENTATION(k) ((k) & AstBitsetType::kRepresentation)
+#define AST_SEMANTIC(k)       ((k) & AstBitsetType::kSemantic)
+
+#define AST_REPRESENTATION_BITSET_TYPE_LIST(V)    \
+  V(None,               0)                    \
+  V(UntaggedBit,        1u << 22 | kSemantic) \
+  V(UntaggedIntegral8,  1u << 23 | kSemantic) \
+  V(UntaggedIntegral16, 1u << 24 | kSemantic) \
+  V(UntaggedIntegral32, 1u << 25 | kSemantic) \
+  V(UntaggedFloat32,    1u << 26 | kSemantic) \
+  V(UntaggedFloat64,    1u << 27 | kSemantic) \
+  V(UntaggedSimd128,    1u << 28 | kSemantic) \
+  V(UntaggedPointer,    1u << 29 | kSemantic) \
+  V(TaggedSigned,       1u << 30 | kSemantic) \
+  V(TaggedPointer,      1u << 31 | kSemantic) \
+  \
+  V(UntaggedIntegral,   kUntaggedBit | kUntaggedIntegral8 |        \
+                        kUntaggedIntegral16 | kUntaggedIntegral32) \
+  V(UntaggedFloat,      kUntaggedFloat32 | kUntaggedFloat64)       \
+  V(UntaggedNumber,     kUntaggedIntegral | kUntaggedFloat)        \
+  V(Untagged,           kUntaggedNumber | kUntaggedPointer)        \
+  V(Tagged,             kTaggedSigned | kTaggedPointer)
+
+#define AST_INTERNAL_BITSET_TYPE_LIST(V)                                      \
+  V(OtherUnsigned31, 1u << 1 | AST_REPRESENTATION(kTagged | kUntaggedNumber)) \
+  V(OtherUnsigned32, 1u << 2 | AST_REPRESENTATION(kTagged | kUntaggedNumber)) \
+  V(OtherSigned32,   1u << 3 | AST_REPRESENTATION(kTagged | kUntaggedNumber)) \
+  V(OtherNumber,     1u << 4 | AST_REPRESENTATION(kTagged | kUntaggedNumber))
+
+#define AST_SEMANTIC_BITSET_TYPE_LIST(V)                                \
+  V(Negative31,          1u << 5  |                                     \
+                         AST_REPRESENTATION(kTagged | kUntaggedNumber)) \
+  V(Null,                1u << 6  | AST_REPRESENTATION(kTaggedPointer)) \
+  V(Undefined,           1u << 7  | AST_REPRESENTATION(kTaggedPointer)) \
+  V(Boolean,             1u << 8  | AST_REPRESENTATION(kTaggedPointer)) \
+  V(Unsigned30,          1u << 9  |                                     \
+                         AST_REPRESENTATION(kTagged | kUntaggedNumber)) \
+  V(MinusZero,           1u << 10 |                                     \
+                         AST_REPRESENTATION(kTagged | kUntaggedNumber)) \
+  V(NaN,                 1u << 11 |                                     \
+                         AST_REPRESENTATION(kTagged | kUntaggedNumber)) \
+  V(Symbol,              1u << 12 | AST_REPRESENTATION(kTaggedPointer)) \
+  V(InternalizedString,  1u << 13 | AST_REPRESENTATION(kTaggedPointer)) \
+  V(OtherString,         1u << 14 | AST_REPRESENTATION(kTaggedPointer)) \
+  V(Simd,                1u << 15 | AST_REPRESENTATION(kTaggedPointer)) \
+  V(OtherObject,         1u << 17 | AST_REPRESENTATION(kTaggedPointer)) \
+  V(OtherUndetectable,   1u << 16 | AST_REPRESENTATION(kTaggedPointer)) \
+  V(Proxy,               1u << 18 | AST_REPRESENTATION(kTaggedPointer)) \
+  V(Function,            1u << 19 | AST_REPRESENTATION(kTaggedPointer)) \
+  V(Hole,                1u << 20 | AST_REPRESENTATION(kTaggedPointer)) \
+  V(OtherInternal,       1u << 21 |                                     \
+                         AST_REPRESENTATION(kTagged | kUntagged))       \
+  \
+  V(Signed31,                   kUnsigned30 | kNegative31) \
+  V(Signed32,                   kSigned31 | kOtherUnsigned31 |          \
+                                kOtherSigned32)                         \
+  V(Signed32OrMinusZero,        kSigned32 | kMinusZero) \
+  V(Signed32OrMinusZeroOrNaN,   kSigned32 | kMinusZero | kNaN) \
+  V(Negative32,                 kNegative31 | kOtherSigned32) \
+  V(Unsigned31,                 kUnsigned30 | kOtherUnsigned31) \
+  V(Unsigned32,                 kUnsigned30 | kOtherUnsigned31 | \
+                                kOtherUnsigned32) \
+  V(Unsigned32OrMinusZero,      kUnsigned32 | kMinusZero) \
+  V(Unsigned32OrMinusZeroOrNaN, kUnsigned32 | kMinusZero | kNaN) \
+  V(Integral32,                 kSigned32 | kUnsigned32) \
+  V(PlainNumber,                kIntegral32 | kOtherNumber) \
+  V(OrderedNumber,              kPlainNumber | kMinusZero) \
+  V(MinusZeroOrNaN,             kMinusZero | kNaN) \
+  V(Number,                     kOrderedNumber | kNaN) \
+  V(String,                     kInternalizedString | kOtherString) \
+  V(UniqueName,                 kSymbol | kInternalizedString) \
+  V(Name,                       kSymbol | kString) \
+  V(BooleanOrNumber,            kBoolean | kNumber) \
+  V(BooleanOrNullOrNumber,      kBooleanOrNumber | kNull) \
+  V(BooleanOrNullOrUndefined,   kBoolean | kNull | kUndefined) \
+  V(NullOrNumber,               kNull | kNumber) \
+  V(NullOrUndefined,            kNull | kUndefined) \
+  V(Undetectable,               kNullOrUndefined | kOtherUndetectable) \
+  V(NumberOrOddball,            kNumber | kNullOrUndefined | kBoolean | kHole) \
+  V(NumberOrSimdOrString,       kNumber | kSimd | kString) \
+  V(NumberOrString,             kNumber | kString) \
+  V(NumberOrUndefined,          kNumber | kUndefined) \
+  V(PlainPrimitive,             kNumberOrString | kBoolean | kNullOrUndefined) \
+  V(Primitive,                  kSymbol | kSimd | kPlainPrimitive) \
+  V(DetectableReceiver,         kFunction | kOtherObject | kProxy) \
+  V(Object,                     kFunction | kOtherObject | kOtherUndetectable) \
+  V(Receiver,                   kObject | kProxy) \
+  V(StringOrReceiver,           kString | kReceiver) \
+  V(Unique,                     kBoolean | kUniqueName | kNull | kUndefined | \
+                                kReceiver) \
+  V(Internal,                   kHole | kOtherInternal) \
+  V(NonInternal,                kPrimitive | kReceiver) \
+  V(NonNumber,                  kUnique | kString | kInternal) \
+  V(Any,                        0xfffffffeu)
+
+// clang-format on
+
+/*
+ * The following diagrams show how integers (in the mathematical sense) are
+ * divided among the different atomic numerical types.
+ *
+ *   ON    OS32     N31     U30     OU31    OU32     ON
+ * ______[_______[_______[_______[_______[_______[_______
+ *     -2^31   -2^30     0      2^30    2^31    2^32
+ *
+ * E.g., OtherUnsigned32 (OU32) covers all integers from 2^31 to 2^32-1.
+ *
+ * Some of the atomic numerical bitsets are internal only (see
+ * INTERNAL_BITSET_TYPE_LIST).  To a types user, they should only occur in
+ * union with certain other bitsets.  For instance, OtherNumber should only
+ * occur as part of PlainNumber.
+ */
+
+#define AST_PROPER_BITSET_TYPE_LIST(V)   \
+  AST_REPRESENTATION_BITSET_TYPE_LIST(V) \
+  AST_SEMANTIC_BITSET_TYPE_LIST(V)
+
+#define AST_BITSET_TYPE_LIST(V)          \
+  AST_MASK_BITSET_TYPE_LIST(V)           \
+  AST_REPRESENTATION_BITSET_TYPE_LIST(V) \
+  AST_INTERNAL_BITSET_TYPE_LIST(V)       \
+  AST_SEMANTIC_BITSET_TYPE_LIST(V)
+
+class AstType;
+
+// -----------------------------------------------------------------------------
+// Bitset types (internal).
+
+class AstBitsetType {
+ public:
+  typedef uint32_t bitset;  // Internal
+
+  enum : uint32_t {
+#define DECLARE_TYPE(type, value) k##type = (value),
+    AST_BITSET_TYPE_LIST(DECLARE_TYPE)
+#undef DECLARE_TYPE
+        kUnusedEOL = 0
+  };
+
+  static bitset SignedSmall();
+  static bitset UnsignedSmall();
+
+  bitset Bitset() {
+    return static_cast<bitset>(reinterpret_cast<uintptr_t>(this) ^ 1u);
+  }
+
+  static bool IsInhabited(bitset bits) {
+    return AST_SEMANTIC(bits) != kNone && AST_REPRESENTATION(bits) != kNone;
+  }
+
+  static bool SemanticIsInhabited(bitset bits) {
+    return AST_SEMANTIC(bits) != kNone;
+  }
+
+  static bool Is(bitset bits1, bitset bits2) {
+    return (bits1 | bits2) == bits2;
+  }
+
+  static double Min(bitset);
+  static double Max(bitset);
+
+  static bitset Glb(AstType* type);  // greatest lower bound that's a bitset
+  static bitset Glb(double min, double max);
+  static bitset Lub(AstType* type);  // least upper bound that's a bitset
+  static bitset Lub(i::Map* map);
+  static bitset Lub(i::Object* value);
+  static bitset Lub(double value);
+  static bitset Lub(double min, double max);
+  static bitset ExpandInternals(bitset bits);
+
+  static const char* Name(bitset);
+  static void Print(std::ostream& os, bitset);  // NOLINT
+#ifdef DEBUG
+  static void Print(bitset);
+#endif
+
+  static bitset NumberBits(bitset bits);
+
+  static bool IsBitset(AstType* type) {
+    return reinterpret_cast<uintptr_t>(type) & 1;
+  }
+
+  static AstType* NewForTesting(bitset bits) { return New(bits); }
+
+ private:
+  friend class AstType;
+
+  static AstType* New(bitset bits) {
+    return reinterpret_cast<AstType*>(static_cast<uintptr_t>(bits | 1u));
+  }
+
+  struct Boundary {
+    bitset internal;
+    bitset external;
+    double min;
+  };
+  static const Boundary BoundariesArray[];
+  static inline const Boundary* Boundaries();
+  static inline size_t BoundariesSize();
+};
+
+// -----------------------------------------------------------------------------
+// Superclass for non-bitset types (internal).
+class AstTypeBase {
+ protected:
+  friend class AstType;
+
+  enum Kind {
+    kClass,
+    kConstant,
+    kContext,
+    kArray,
+    kFunction,
+    kTuple,
+    kUnion,
+    kRange
+  };
+
+  Kind kind() const { return kind_; }
+  explicit AstTypeBase(Kind kind) : kind_(kind) {}
+
+  static bool IsKind(AstType* type, Kind kind) {
+    if (AstBitsetType::IsBitset(type)) return false;
+    AstTypeBase* base = reinterpret_cast<AstTypeBase*>(type);
+    return base->kind() == kind;
+  }
+
+  // The hacky conversion to/from AstType*.
+  static AstType* AsType(AstTypeBase* type) {
+    return reinterpret_cast<AstType*>(type);
+  }
+  static AstTypeBase* FromType(AstType* type) {
+    return reinterpret_cast<AstTypeBase*>(type);
+  }
+
+ private:
+  Kind kind_;
+};
+
+// -----------------------------------------------------------------------------
+// Class types.
+
+class AstClassType : public AstTypeBase {
+ public:
+  i::Handle<i::Map> Map() { return map_; }
+
+ private:
+  friend class AstType;
+  friend class AstBitsetType;
+
+  static AstType* New(i::Handle<i::Map> map, Zone* zone) {
+    return AsType(new (zone->New(sizeof(AstClassType)))
+                      AstClassType(AstBitsetType::Lub(*map), map));
+  }
+
+  static AstClassType* cast(AstType* type) {
+    DCHECK(IsKind(type, kClass));
+    return static_cast<AstClassType*>(FromType(type));
+  }
+
+  AstClassType(AstBitsetType::bitset bitset, i::Handle<i::Map> map)
+      : AstTypeBase(kClass), bitset_(bitset), map_(map) {}
+
+  AstBitsetType::bitset Lub() { return bitset_; }
+
+  AstBitsetType::bitset bitset_;
+  Handle<i::Map> map_;
+};
+
+// -----------------------------------------------------------------------------
+// Constant types.
+
+class AstConstantType : public AstTypeBase {
+ public:
+  i::Handle<i::Object> Value() { return object_; }
+
+ private:
+  friend class AstType;
+  friend class AstBitsetType;
+
+  static AstType* New(i::Handle<i::Object> value, Zone* zone) {
+    AstBitsetType::bitset bitset = AstBitsetType::Lub(*value);
+    return AsType(new (zone->New(sizeof(AstConstantType)))
+                      AstConstantType(bitset, value));
+  }
+
+  static AstConstantType* cast(AstType* type) {
+    DCHECK(IsKind(type, kConstant));
+    return static_cast<AstConstantType*>(FromType(type));
+  }
+
+  AstConstantType(AstBitsetType::bitset bitset, i::Handle<i::Object> object)
+      : AstTypeBase(kConstant), bitset_(bitset), object_(object) {}
+
+  AstBitsetType::bitset Lub() { return bitset_; }
+
+  AstBitsetType::bitset bitset_;
+  Handle<i::Object> object_;
+};
+// TODO(neis): Also cache value if numerical.
+// TODO(neis): Allow restricting the representation.
+
+// -----------------------------------------------------------------------------
+// Range types.
+
+class AstRangeType : public AstTypeBase {
+ public:
+  struct Limits {
+    double min;
+    double max;
+    Limits(double min, double max) : min(min), max(max) {}
+    explicit Limits(AstRangeType* range)
+        : min(range->Min()), max(range->Max()) {}
+    bool IsEmpty();
+    static Limits Empty() { return Limits(1, 0); }
+    static Limits Intersect(Limits lhs, Limits rhs);
+    static Limits Union(Limits lhs, Limits rhs);
+  };
+
+  double Min() { return limits_.min; }
+  double Max() { return limits_.max; }
+
+ private:
+  friend class AstType;
+  friend class AstBitsetType;
+  friend class AstUnionType;
+
+  static AstType* New(double min, double max,
+                      AstBitsetType::bitset representation, Zone* zone) {
+    return New(Limits(min, max), representation, zone);
+  }
+
+  static bool IsInteger(double x) {
+    return nearbyint(x) == x && !i::IsMinusZero(x);  // Allows for infinities.
+  }
+
+  static AstType* New(Limits lim, AstBitsetType::bitset representation,
+                      Zone* zone) {
+    DCHECK(IsInteger(lim.min) && IsInteger(lim.max));
+    DCHECK(lim.min <= lim.max);
+    DCHECK(AST_REPRESENTATION(representation) == representation);
+    AstBitsetType::bitset bits =
+        AST_SEMANTIC(AstBitsetType::Lub(lim.min, lim.max)) | representation;
+
+    return AsType(new (zone->New(sizeof(AstRangeType)))
+                      AstRangeType(bits, lim));
+  }
+
+  static AstRangeType* cast(AstType* type) {
+    DCHECK(IsKind(type, kRange));
+    return static_cast<AstRangeType*>(FromType(type));
+  }
+
+  AstRangeType(AstBitsetType::bitset bitset, Limits limits)
+      : AstTypeBase(kRange), bitset_(bitset), limits_(limits) {}
+
+  AstBitsetType::bitset Lub() { return bitset_; }
+
+  AstBitsetType::bitset bitset_;
+  Limits limits_;
+};
+
+// -----------------------------------------------------------------------------
+// Context types.
+
+class AstContextType : public AstTypeBase {
+ public:
+  AstType* Outer() { return outer_; }
+
+ private:
+  friend class AstType;
+
+  static AstType* New(AstType* outer, Zone* zone) {
+    return AsType(new (zone->New(sizeof(AstContextType)))
+                      AstContextType(outer));  // NOLINT
+  }
+
+  static AstContextType* cast(AstType* type) {
+    DCHECK(IsKind(type, kContext));
+    return static_cast<AstContextType*>(FromType(type));
+  }
+
+  explicit AstContextType(AstType* outer)
+      : AstTypeBase(kContext), outer_(outer) {}
+
+  AstType* outer_;
+};
+
+// -----------------------------------------------------------------------------
+// Array types.
+
+class AstArrayType : public AstTypeBase {
+ public:
+  AstType* Element() { return element_; }
+
+ private:
+  friend class AstType;
+
+  explicit AstArrayType(AstType* element)
+      : AstTypeBase(kArray), element_(element) {}
+
+  static AstType* New(AstType* element, Zone* zone) {
+    return AsType(new (zone->New(sizeof(AstArrayType))) AstArrayType(element));
+  }
+
+  static AstArrayType* cast(AstType* type) {
+    DCHECK(IsKind(type, kArray));
+    return static_cast<AstArrayType*>(FromType(type));
+  }
+
+  AstType* element_;
+};
+
+// -----------------------------------------------------------------------------
+// Superclass for types with variable number of type fields.
+class AstStructuralType : public AstTypeBase {
+ public:
+  int LengthForTesting() { return Length(); }
+
+ protected:
+  friend class AstType;
+
+  int Length() { return length_; }
+
+  AstType* Get(int i) {
+    DCHECK(0 <= i && i < this->Length());
+    return elements_[i];
+  }
+
+  void Set(int i, AstType* type) {
+    DCHECK(0 <= i && i < this->Length());
+    elements_[i] = type;
+  }
+
+  void Shrink(int length) {
+    DCHECK(2 <= length && length <= this->Length());
+    length_ = length;
+  }
+
+  AstStructuralType(Kind kind, int length, i::Zone* zone)
+      : AstTypeBase(kind), length_(length) {
+    elements_ =
+        reinterpret_cast<AstType**>(zone->New(sizeof(AstType*) * length));
+  }
+
+ private:
+  int length_;
+  AstType** elements_;
+};
+
+// -----------------------------------------------------------------------------
+// Function types.
+
+class AstFunctionType : public AstStructuralType {
+ public:
+  int Arity() { return this->Length() - 2; }
+  AstType* Result() { return this->Get(0); }
+  AstType* Receiver() { return this->Get(1); }
+  AstType* Parameter(int i) { return this->Get(2 + i); }
+
+  void InitParameter(int i, AstType* type) { this->Set(2 + i, type); }
+
+ private:
+  friend class AstType;
+
+  AstFunctionType(AstType* result, AstType* receiver, int arity, Zone* zone)
+      : AstStructuralType(kFunction, 2 + arity, zone) {
+    Set(0, result);
+    Set(1, receiver);
+  }
+
+  static AstType* New(AstType* result, AstType* receiver, int arity,
+                      Zone* zone) {
+    return AsType(new (zone->New(sizeof(AstFunctionType)))
+                      AstFunctionType(result, receiver, arity, zone));
+  }
+
+  static AstFunctionType* cast(AstType* type) {
+    DCHECK(IsKind(type, kFunction));
+    return static_cast<AstFunctionType*>(FromType(type));
+  }
+};
+
+// -----------------------------------------------------------------------------
+// Tuple types.
+
+class AstTupleType : public AstStructuralType {
+ public:
+  int Arity() { return this->Length(); }
+  AstType* Element(int i) { return this->Get(i); }
+
+  void InitElement(int i, AstType* type) { this->Set(i, type); }
+
+ private:
+  friend class AstType;
+
+  AstTupleType(int length, Zone* zone)
+      : AstStructuralType(kTuple, length, zone) {}
+
+  static AstType* New(int length, Zone* zone) {
+    return AsType(new (zone->New(sizeof(AstTupleType)))
+                      AstTupleType(length, zone));
+  }
+
+  static AstTupleType* cast(AstType* type) {
+    DCHECK(IsKind(type, kTuple));
+    return static_cast<AstTupleType*>(FromType(type));
+  }
+};
+
+// -----------------------------------------------------------------------------
+// Union types (internal).
+// A union is a structured type with the following invariants:
+// - its length is at least 2
+// - at most one field is a bitset, and it must go into index 0
+// - no field is a union
+// - no field is a subtype of any other field
+class AstUnionType : public AstStructuralType {
+ private:
+  friend AstType;
+  friend AstBitsetType;
+
+  AstUnionType(int length, Zone* zone)
+      : AstStructuralType(kUnion, length, zone) {}
+
+  static AstType* New(int length, Zone* zone) {
+    return AsType(new (zone->New(sizeof(AstUnionType)))
+                      AstUnionType(length, zone));
+  }
+
+  static AstUnionType* cast(AstType* type) {
+    DCHECK(IsKind(type, kUnion));
+    return static_cast<AstUnionType*>(FromType(type));
+  }
+
+  bool Wellformed();
+};
+
+class AstType {
+ public:
+  typedef AstBitsetType::bitset bitset;  // Internal
+
+// Constructors.
+#define DEFINE_TYPE_CONSTRUCTOR(type, value) \
+  static AstType* type() { return AstBitsetType::New(AstBitsetType::k##type); }
+  AST_PROPER_BITSET_TYPE_LIST(DEFINE_TYPE_CONSTRUCTOR)
+#undef DEFINE_TYPE_CONSTRUCTOR
+
+  static AstType* SignedSmall() {
+    return AstBitsetType::New(AstBitsetType::SignedSmall());
+  }
+  static AstType* UnsignedSmall() {
+    return AstBitsetType::New(AstBitsetType::UnsignedSmall());
+  }
+
+  static AstType* Class(i::Handle<i::Map> map, Zone* zone) {
+    return AstClassType::New(map, zone);
+  }
+  static AstType* Constant(i::Handle<i::Object> value, Zone* zone) {
+    return AstConstantType::New(value, zone);
+  }
+  static AstType* Range(double min, double max, Zone* zone) {
+    return AstRangeType::New(min, max,
+                             AST_REPRESENTATION(AstBitsetType::kTagged |
+                                                AstBitsetType::kUntaggedNumber),
+                             zone);
+  }
+  static AstType* Context(AstType* outer, Zone* zone) {
+    return AstContextType::New(outer, zone);
+  }
+  static AstType* Array(AstType* element, Zone* zone) {
+    return AstArrayType::New(element, zone);
+  }
+  static AstType* Function(AstType* result, AstType* receiver, int arity,
+                           Zone* zone) {
+    return AstFunctionType::New(result, receiver, arity, zone);
+  }
+  static AstType* Function(AstType* result, Zone* zone) {
+    return Function(result, Any(), 0, zone);
+  }
+  static AstType* Function(AstType* result, AstType* param0, Zone* zone) {
+    AstType* function = Function(result, Any(), 1, zone);
+    function->AsFunction()->InitParameter(0, param0);
+    return function;
+  }
+  static AstType* Function(AstType* result, AstType* param0, AstType* param1,
+                           Zone* zone) {
+    AstType* function = Function(result, Any(), 2, zone);
+    function->AsFunction()->InitParameter(0, param0);
+    function->AsFunction()->InitParameter(1, param1);
+    return function;
+  }
+  static AstType* Function(AstType* result, AstType* param0, AstType* param1,
+                           AstType* param2, Zone* zone) {
+    AstType* function = Function(result, Any(), 3, zone);
+    function->AsFunction()->InitParameter(0, param0);
+    function->AsFunction()->InitParameter(1, param1);
+    function->AsFunction()->InitParameter(2, param2);
+    return function;
+  }
+  static AstType* Function(AstType* result, int arity, AstType** params,
+                           Zone* zone) {
+    AstType* function = Function(result, Any(), arity, zone);
+    for (int i = 0; i < arity; ++i) {
+      function->AsFunction()->InitParameter(i, params[i]);
+    }
+    return function;
+  }
+  static AstType* Tuple(AstType* first, AstType* second, AstType* third,
+                        Zone* zone) {
+    AstType* tuple = AstTupleType::New(3, zone);
+    tuple->AsTuple()->InitElement(0, first);
+    tuple->AsTuple()->InitElement(1, second);
+    tuple->AsTuple()->InitElement(2, third);
+    return tuple;
+  }
+
+#define CONSTRUCT_SIMD_TYPE(NAME, Name, name, lane_count, lane_type) \
+  static AstType* Name(Isolate* isolate, Zone* zone);
+  SIMD128_TYPES(CONSTRUCT_SIMD_TYPE)
+#undef CONSTRUCT_SIMD_TYPE
+
+  static AstType* Union(AstType* type1, AstType* type2, Zone* zone);
+  static AstType* Intersect(AstType* type1, AstType* type2, Zone* zone);
+
+  static AstType* Of(double value, Zone* zone) {
+    return AstBitsetType::New(
+        AstBitsetType::ExpandInternals(AstBitsetType::Lub(value)));
+  }
+  static AstType* Of(i::Object* value, Zone* zone) {
+    return AstBitsetType::New(
+        AstBitsetType::ExpandInternals(AstBitsetType::Lub(value)));
+  }
+  static AstType* Of(i::Handle<i::Object> value, Zone* zone) {
+    return Of(*value, zone);
+  }
+
+  static AstType* For(i::Map* map) {
+    return AstBitsetType::New(
+        AstBitsetType::ExpandInternals(AstBitsetType::Lub(map)));
+  }
+  static AstType* For(i::Handle<i::Map> map) { return For(*map); }
+
+  // Extraction of components.
+  static AstType* Representation(AstType* t, Zone* zone);
+  static AstType* Semantic(AstType* t, Zone* zone);
+
+  // Predicates.
+  bool IsInhabited() { return AstBitsetType::IsInhabited(this->BitsetLub()); }
+
+  bool Is(AstType* that) { return this == that || this->SlowIs(that); }
+  bool Maybe(AstType* that);
+  bool Equals(AstType* that) { return this->Is(that) && that->Is(this); }
+
+  // Equivalent to Constant(val)->Is(this), but avoiding allocation.
+  bool Contains(i::Object* val);
+  bool Contains(i::Handle<i::Object> val) { return this->Contains(*val); }
+
+  // State-dependent versions of the above that consider subtyping between
+  // a constant and its map class.
+  static AstType* NowOf(i::Object* value, Zone* zone);
+  static AstType* NowOf(i::Handle<i::Object> value, Zone* zone) {
+    return NowOf(*value, zone);
+  }
+  bool NowIs(AstType* that);
+  bool NowContains(i::Object* val);
+  bool NowContains(i::Handle<i::Object> val) { return this->NowContains(*val); }
+
+  bool NowStable();
+
+  // Inspection.
+  bool IsRange() { return IsKind(AstTypeBase::kRange); }
+  bool IsClass() { return IsKind(AstTypeBase::kClass); }
+  bool IsConstant() { return IsKind(AstTypeBase::kConstant); }
+  bool IsContext() { return IsKind(AstTypeBase::kContext); }
+  bool IsArray() { return IsKind(AstTypeBase::kArray); }
+  bool IsFunction() { return IsKind(AstTypeBase::kFunction); }
+  bool IsTuple() { return IsKind(AstTypeBase::kTuple); }
+
+  AstClassType* AsClass() { return AstClassType::cast(this); }
+  AstConstantType* AsConstant() { return AstConstantType::cast(this); }
+  AstRangeType* AsRange() { return AstRangeType::cast(this); }
+  AstContextType* AsContext() { return AstContextType::cast(this); }
+  AstArrayType* AsArray() { return AstArrayType::cast(this); }
+  AstFunctionType* AsFunction() { return AstFunctionType::cast(this); }
+  AstTupleType* AsTuple() { return AstTupleType::cast(this); }
+
+  // Minimum and maximum of a numeric type.
+  // These functions do not distinguish between -0 and +0.  If the type equals
+  // kNaN, they return NaN; otherwise kNaN is ignored.  Only call these
+  // functions on subtypes of Number.
+  double Min();
+  double Max();
+
+  // Extracts a range from the type: if the type is a range or a union
+  // containing a range, that range is returned; otherwise, NULL is returned.
+  AstType* GetRange();
+
+  static bool IsInteger(i::Object* x);
+  static bool IsInteger(double x) {
+    return nearbyint(x) == x && !i::IsMinusZero(x);  // Allows for infinities.
+  }
+
+  int NumClasses();
+  int NumConstants();
+
+  template <class T>
+  class Iterator {
+   public:
+    bool Done() const { return index_ < 0; }
+    i::Handle<T> Current();
+    void Advance();
+
+   private:
+    friend class AstType;
+
+    Iterator() : index_(-1) {}
+    explicit Iterator(AstType* type) : type_(type), index_(-1) { Advance(); }
+
+    inline bool matches(AstType* type);
+    inline AstType* get_type();
+
+    AstType* type_;
+    int index_;
+  };
+
+  Iterator<i::Map> Classes() {
+    if (this->IsBitset()) return Iterator<i::Map>();
+    return Iterator<i::Map>(this);
+  }
+  Iterator<i::Object> Constants() {
+    if (this->IsBitset()) return Iterator<i::Object>();
+    return Iterator<i::Object>(this);
+  }
+
+  // Printing.
+
+  enum PrintDimension { BOTH_DIMS, SEMANTIC_DIM, REPRESENTATION_DIM };
+
+  void PrintTo(std::ostream& os, PrintDimension dim = BOTH_DIMS);  // NOLINT
+
+#ifdef DEBUG
+  void Print();
+#endif
+
+  // Helpers for testing.
+  bool IsBitsetForTesting() { return IsBitset(); }
+  bool IsUnionForTesting() { return IsUnion(); }
+  bitset AsBitsetForTesting() { return AsBitset(); }
+  AstUnionType* AsUnionForTesting() { return AsUnion(); }
+
+ private:
+  // Friends.
+  template <class>
+  friend class Iterator;
+  friend AstBitsetType;
+  friend AstUnionType;
+
+  // Internal inspection.
+  bool IsKind(AstTypeBase::Kind kind) {
+    return AstTypeBase::IsKind(this, kind);
+  }
+
+  bool IsNone() { return this == None(); }
+  bool IsAny() { return this == Any(); }
+  bool IsBitset() { return AstBitsetType::IsBitset(this); }
+  bool IsUnion() { return IsKind(AstTypeBase::kUnion); }
+
+  bitset AsBitset() {
+    DCHECK(this->IsBitset());
+    return reinterpret_cast<AstBitsetType*>(this)->Bitset();
+  }
+  AstUnionType* AsUnion() { return AstUnionType::cast(this); }
+
+  bitset Representation();
+
+  // Auxiliary functions.
+  bool SemanticMaybe(AstType* that);
+
+  bitset BitsetGlb() { return AstBitsetType::Glb(this); }
+  bitset BitsetLub() { return AstBitsetType::Lub(this); }
+
+  bool SlowIs(AstType* that);
+  bool SemanticIs(AstType* that);
+
+  static bool Overlap(AstRangeType* lhs, AstRangeType* rhs);
+  static bool Contains(AstRangeType* lhs, AstRangeType* rhs);
+  static bool Contains(AstRangeType* range, AstConstantType* constant);
+  static bool Contains(AstRangeType* range, i::Object* val);
+
+  static int UpdateRange(AstType* type, AstUnionType* result, int size,
+                         Zone* zone);
+
+  static AstRangeType::Limits IntersectRangeAndBitset(AstType* range,
+                                                      AstType* bits,
+                                                      Zone* zone);
+  static AstRangeType::Limits ToLimits(bitset bits, Zone* zone);
+
+  bool SimplyEquals(AstType* that);
+
+  static int AddToUnion(AstType* type, AstUnionType* result, int size,
+                        Zone* zone);
+  static int IntersectAux(AstType* type, AstType* other, AstUnionType* result,
+                          int size, AstRangeType::Limits* limits, Zone* zone);
+  static AstType* NormalizeUnion(AstType* unioned, int size, Zone* zone);
+  static AstType* NormalizeRangeAndBitset(AstType* range, bitset* bits,
+                                          Zone* zone);
+};
+
+// -----------------------------------------------------------------------------
+// Type bounds. A simple struct to represent a pair of lower/upper types.
+
+struct AstBounds {
+  AstType* lower;
+  AstType* upper;
+
+  AstBounds()
+      :  // Make sure accessing uninitialized bounds crashes big-time.
+        lower(nullptr),
+        upper(nullptr) {}
+  explicit AstBounds(AstType* t) : lower(t), upper(t) {}
+  AstBounds(AstType* l, AstType* u) : lower(l), upper(u) {
+    DCHECK(lower->Is(upper));
+  }
+
+  // Unrestricted bounds.
+  static AstBounds Unbounded() {
+    return AstBounds(AstType::None(), AstType::Any());
+  }
+
+  // Meet: both b1 and b2 are known to hold.
+  static AstBounds Both(AstBounds b1, AstBounds b2, Zone* zone) {
+    AstType* lower = AstType::Union(b1.lower, b2.lower, zone);
+    AstType* upper = AstType::Intersect(b1.upper, b2.upper, zone);
+    // Lower bounds are considered approximate, correct as necessary.
+    if (!lower->Is(upper)) lower = upper;
+    return AstBounds(lower, upper);
+  }
+
+  // Join: either b1 or b2 is known to hold.
+  static AstBounds Either(AstBounds b1, AstBounds b2, Zone* zone) {
+    AstType* lower = AstType::Intersect(b1.lower, b2.lower, zone);
+    AstType* upper = AstType::Union(b1.upper, b2.upper, zone);
+    return AstBounds(lower, upper);
+  }
+
+  static AstBounds NarrowLower(AstBounds b, AstType* t, Zone* zone) {
+    AstType* lower = AstType::Union(b.lower, t, zone);
+    // Lower bounds are considered approximate, correct as necessary.
+    if (!lower->Is(b.upper)) lower = b.upper;
+    return AstBounds(lower, b.upper);
+  }
+  static AstBounds NarrowUpper(AstBounds b, AstType* t, Zone* zone) {
+    AstType* lower = b.lower;
+    AstType* upper = AstType::Intersect(b.upper, t, zone);
+    // Lower bounds are considered approximate, correct as necessary.
+    if (!lower->Is(upper)) lower = upper;
+    return AstBounds(lower, upper);
+  }
+
+  bool Narrows(AstBounds that) {
+    return that.lower->Is(this->lower) && this->upper->Is(that.upper);
+  }
+};
+
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_AST_AST_TYPES_H_
diff --git a/src/ast/ast-value-factory.cc b/src/ast/ast-value-factory.cc
index a271751..33ccec7 100644
--- a/src/ast/ast-value-factory.cc
+++ b/src/ast/ast-value-factory.cc
@@ -237,28 +237,14 @@
 
 
 const AstRawString* AstValueFactory::GetString(Handle<String> literal) {
-  // For the FlatContent to stay valid, we shouldn't do any heap
-  // allocation. Make sure we won't try to internalize the string in GetString.
   AstRawString* result = NULL;
-  Isolate* saved_isolate = isolate_;
-  isolate_ = NULL;
-  {
-    DisallowHeapAllocation no_gc;
-    String::FlatContent content = literal->GetFlatContent();
-    if (content.IsOneByte()) {
-      result = GetOneByteStringInternal(content.ToOneByteVector());
-    } else {
-      DCHECK(content.IsTwoByte());
-      result = GetTwoByteStringInternal(content.ToUC16Vector());
-    }
-  }
-  isolate_ = saved_isolate;
-  if (strings_ != nullptr && isolate_) {
-    // Only the string we are creating is uninternalized at this point.
-    DCHECK_EQ(result, strings_);
-    DCHECK_NULL(strings_->next());
-    result->Internalize(isolate_);
-    ResetStrings();
+  DisallowHeapAllocation no_gc;
+  String::FlatContent content = literal->GetFlatContent();
+  if (content.IsOneByte()) {
+    result = GetOneByteStringInternal(content.ToOneByteVector());
+  } else {
+    DCHECK(content.IsTwoByte());
+    result = GetTwoByteStringInternal(content.ToUC16Vector());
   }
   return result;
 }
@@ -274,15 +260,40 @@
   return new_string;
 }
 
+const AstRawString* AstValueFactory::ConcatStrings(const AstRawString* left,
+                                                   const AstRawString* right) {
+  int left_length = left->length();
+  int right_length = right->length();
+  const unsigned char* left_data = left->raw_data();
+  const unsigned char* right_data = right->raw_data();
+  if (left->is_one_byte() && right->is_one_byte()) {
+    uint8_t* buffer = zone_->NewArray<uint8_t>(left_length + right_length);
+    memcpy(buffer, left_data, left_length);
+    memcpy(buffer + left_length, right_data, right_length);
+    Vector<const uint8_t> literal(buffer, left_length + right_length);
+    return GetOneByteStringInternal(literal);
+  } else {
+    uint16_t* buffer = zone_->NewArray<uint16_t>(left_length + right_length);
+    if (left->is_one_byte()) {
+      for (int i = 0; i < left_length; ++i) {
+        buffer[i] = left_data[i];
+      }
+    } else {
+      memcpy(buffer, left_data, 2 * left_length);
+    }
+    if (right->is_one_byte()) {
+      for (int i = 0; i < right_length; ++i) {
+        buffer[i + left_length] = right_data[i];
+      }
+    } else {
+      memcpy(buffer + left_length, right_data, 2 * right_length);
+    }
+    Vector<const uint16_t> literal(buffer, left_length + right_length);
+    return GetTwoByteStringInternal(literal);
+  }
+}
 
 void AstValueFactory::Internalize(Isolate* isolate) {
-  if (isolate_) {
-    DCHECK_NULL(strings_);
-    DCHECK_NULL(values_);
-    // Everything is already internalized.
-    return;
-  }
-
   // Strings need to be internalized before values, because values refer to
   // strings.
   for (AstString* current = strings_; current != nullptr;) {
@@ -295,7 +306,6 @@
     current->Internalize(isolate);
     current = next;
   }
-  isolate_ = isolate;
   ResetStrings();
   values_ = nullptr;
 }
diff --git a/src/ast/ast-value-factory.h b/src/ast/ast-value-factory.h
index da209e1..bc3eca2 100644
--- a/src/ast/ast-value-factory.h
+++ b/src/ast/ast-value-factory.h
@@ -283,8 +283,8 @@
   F(default, "default")                         \
   F(done, "done")                               \
   F(dot, ".")                                   \
+  F(dot_class_field_init, ".class-field-init")  \
   F(dot_for, ".for")                            \
-  F(dot_generator, ".generator")                \
   F(dot_generator_object, ".generator_object")  \
   F(dot_iterator, ".iterator")                  \
   F(dot_result, ".result")                      \
@@ -326,7 +326,6 @@
         values_(nullptr),
         strings_end_(&strings_),
         zone_(zone),
-        isolate_(NULL),
         hash_seed_(hash_seed) {
     ResetStrings();
 #define F(name, str) name##_string_ = NULL;
@@ -352,11 +351,10 @@
   const AstRawString* GetString(Handle<String> literal);
   const AstConsString* NewConsString(const AstString* left,
                                      const AstString* right);
+  const AstRawString* ConcatStrings(const AstRawString* left,
+                                    const AstRawString* right);
 
   void Internalize(Isolate* isolate);
-  bool IsInternalized() {
-    return isolate_ != NULL;
-  }
 
 #define F(name, str)                                                    \
   const AstRawString* name##_string() {                                 \
@@ -384,21 +382,13 @@
 
  private:
   AstValue* AddValue(AstValue* value) {
-    if (isolate_) {
-      value->Internalize(isolate_);
-    } else {
-      value->set_next(values_);
-      values_ = value;
-    }
+    value->set_next(values_);
+    values_ = value;
     return value;
   }
   AstString* AddString(AstString* string) {
-    if (isolate_) {
-      string->Internalize(isolate_);
-    } else {
-      *strings_end_ = string;
-      strings_end_ = string->next_location();
-    }
+    *strings_end_ = string;
+    strings_end_ = string->next_location();
     return string;
   }
   void ResetStrings() {
@@ -413,7 +403,7 @@
   static bool AstRawStringCompare(void* a, void* b);
 
   // All strings are copied here, one after another (no NULLs inbetween).
-  base::HashMap string_table_;
+  base::CustomMatcherHashMap string_table_;
   // For keeping track of all AstValues and AstRawStrings we've created (so that
   // they can be internalized later).
   AstValue* values_;
@@ -422,7 +412,6 @@
   AstString* strings_;
   AstString** strings_end_;
   Zone* zone_;
-  Isolate* isolate_;
 
   uint32_t hash_seed_;
 
diff --git a/src/ast/ast.cc b/src/ast/ast.cc
index 06037f4..97d1f9d 100644
--- a/src/ast/ast.cc
+++ b/src/ast/ast.cc
@@ -6,6 +6,7 @@
 
 #include <cmath>  // For isfinite.
 
+#include "src/ast/compile-time-value.h"
 #include "src/ast/prettyprinter.h"
 #include "src/ast/scopes.h"
 #include "src/base/hashmap.h"
@@ -13,7 +14,6 @@
 #include "src/code-stubs.h"
 #include "src/contexts.h"
 #include "src/conversions.h"
-#include "src/parsing/parser.h"
 #include "src/property-details.h"
 #include "src/property.h"
 #include "src/string-stream.h"
@@ -83,18 +83,14 @@
 }
 
 bool Expression::IsUndefinedLiteral() const {
-  if (IsLiteral()) {
-    if (AsLiteral()->raw_value()->IsUndefined()) {
-      return true;
-    }
-  }
+  if (IsLiteral() && AsLiteral()->raw_value()->IsUndefined()) return true;
 
   const VariableProxy* var_proxy = AsVariableProxy();
-  if (var_proxy == NULL) return false;
+  if (var_proxy == nullptr) return false;
   Variable* var = var_proxy->var();
   // The global identifier "undefined" is immutable. Everything
   // else could be reassigned.
-  return var != NULL && var->IsUnallocatedOrGlobalSlot() &&
+  return var != NULL && var->IsUnallocated() &&
          var_proxy->raw_name()->IsOneByteEqualTo("undefined");
 }
 
@@ -166,36 +162,32 @@
 VariableProxy::VariableProxy(Variable* var, int start_position,
                              int end_position)
     : Expression(start_position, kVariableProxy),
-      bit_field_(IsThisField::encode(var->is_this()) |
-                 IsAssignedField::encode(false) |
-                 IsResolvedField::encode(false)),
       end_position_(end_position),
       raw_name_(var->raw_name()),
       next_unresolved_(nullptr) {
+  bit_field_ |= IsThisField::encode(var->is_this()) |
+                IsAssignedField::encode(false) | IsResolvedField::encode(false);
   BindTo(var);
 }
 
 VariableProxy::VariableProxy(const AstRawString* name,
-                             Variable::Kind variable_kind, int start_position,
+                             VariableKind variable_kind, int start_position,
                              int end_position)
     : Expression(start_position, kVariableProxy),
-      bit_field_(IsThisField::encode(variable_kind == Variable::THIS) |
-                 IsAssignedField::encode(false) |
-                 IsResolvedField::encode(false)),
       end_position_(end_position),
       raw_name_(name),
-      next_unresolved_(nullptr) {}
+      next_unresolved_(nullptr) {
+  bit_field_ |= IsThisField::encode(variable_kind == THIS_VARIABLE) |
+                IsAssignedField::encode(false) | IsResolvedField::encode(false);
+}
 
 VariableProxy::VariableProxy(const VariableProxy* copy_from)
     : Expression(copy_from->position(), kVariableProxy),
-      bit_field_(copy_from->bit_field_),
       end_position_(copy_from->end_position_),
       next_unresolved_(nullptr) {
-  if (copy_from->is_resolved()) {
-    var_ = copy_from->var_;
-  } else {
-    raw_name_ = copy_from->raw_name_;
-  }
+  bit_field_ = copy_from->bit_field_;
+  DCHECK(!copy_from->is_resolved());
+  raw_name_ = copy_from->raw_name_;
 }
 
 void VariableProxy::BindTo(Variable* var) {
@@ -253,12 +245,13 @@
 Assignment::Assignment(Token::Value op, Expression* target, Expression* value,
                        int pos)
     : Expression(pos, kAssignment),
-      bit_field_(
-          IsUninitializedField::encode(false) | KeyTypeField::encode(ELEMENT) |
-          StoreModeField::encode(STANDARD_STORE) | TokenField::encode(op)),
       target_(target),
       value_(value),
-      binary_operation_(NULL) {}
+      binary_operation_(NULL) {
+  bit_field_ |= IsUninitializedField::encode(false) |
+                KeyTypeField::encode(ELEMENT) |
+                StoreModeField::encode(STANDARD_STORE) | TokenField::encode(op);
+}
 
 void Assignment::AssignFeedbackVectorSlots(Isolate* isolate,
                                            FeedbackVectorSpec* spec,
@@ -273,7 +266,7 @@
   AssignVectorSlots(expression(), spec, &slot_);
   // Assign a slot to collect feedback about binary operations. Used only in
   // ignition. Fullcodegen uses AstId to record type feedback.
-  binary_operation_slot_ = spec->AddGeneralSlot();
+  binary_operation_slot_ = spec->AddInterpreterBinaryOpICSlot();
 }
 
 
@@ -320,6 +313,7 @@
   return scope()->language_mode();
 }
 
+FunctionKind FunctionLiteral::kind() const { return scope()->function_kind(); }
 
 bool FunctionLiteral::NeedsHomeObject(Expression* expr) {
   if (expr == nullptr || !expr->IsFunctionLiteral()) return false;
@@ -327,27 +321,16 @@
   return expr->AsFunctionLiteral()->scope()->NeedsHomeObject();
 }
 
-
 ObjectLiteralProperty::ObjectLiteralProperty(Expression* key, Expression* value,
-                                             Kind kind, bool is_static,
-                                             bool is_computed_name)
-    : key_(key),
-      value_(value),
+                                             Kind kind, bool is_computed_name)
+    : LiteralProperty(key, value, is_computed_name),
       kind_(kind),
-      emit_store_(true),
-      is_static_(is_static),
-      is_computed_name_(is_computed_name) {}
-
+      emit_store_(true) {}
 
 ObjectLiteralProperty::ObjectLiteralProperty(AstValueFactory* ast_value_factory,
                                              Expression* key, Expression* value,
-                                             bool is_static,
                                              bool is_computed_name)
-    : key_(key),
-      value_(value),
-      emit_store_(true),
-      is_static_(is_static),
-      is_computed_name_(is_computed_name) {
+    : LiteralProperty(key, value, is_computed_name), emit_store_(true) {
   if (!is_computed_name &&
       key->AsLiteral()->raw_value()->EqualsString(
           ast_value_factory->proto_string())) {
@@ -361,13 +344,20 @@
   }
 }
 
-bool ObjectLiteralProperty::NeedsSetFunctionName() const {
+bool LiteralProperty::NeedsSetFunctionName() const {
   return is_computed_name_ &&
          (value_->IsAnonymousFunctionDefinition() ||
           (value_->IsFunctionLiteral() &&
            IsConciseMethod(value_->AsFunctionLiteral()->kind())));
 }
 
+ClassLiteralProperty::ClassLiteralProperty(Expression* key, Expression* value,
+                                           Kind kind, bool is_static,
+                                           bool is_computed_name)
+    : LiteralProperty(key, value, is_computed_name),
+      kind_(kind),
+      is_static_(is_static) {}
+
 void ClassLiteral::AssignFeedbackVectorSlots(Isolate* isolate,
                                              FeedbackVectorSpec* spec,
                                              FeedbackVectorSlotCache* cache) {
@@ -379,7 +369,7 @@
   }
 
   for (int i = 0; i < properties()->length(); i++) {
-    ObjectLiteral::Property* property = properties()->at(i);
+    ClassLiteral::Property* property = properties()->at(i);
     Expression* value = property->value();
     if (FunctionLiteral::NeedsHomeObject(value)) {
       property->SetSlot(spec->AddStoreICSlot());
@@ -387,8 +377,7 @@
   }
 }
 
-
-bool ObjectLiteral::Property::IsCompileTimeValue() {
+bool ObjectLiteral::Property::IsCompileTimeValue() const {
   return kind_ == CONSTANT ||
       (kind_ == MATERIALIZED_LITERAL &&
        CompileTimeValue::IsCompileTimeValue(value_));
@@ -399,11 +388,7 @@
   emit_store_ = emit_store;
 }
 
-
-bool ObjectLiteral::Property::emit_store() {
-  return emit_store_;
-}
-
+bool ObjectLiteral::Property::emit_store() const { return emit_store_; }
 
 void ObjectLiteral::AssignFeedbackVectorSlots(Isolate* isolate,
                                               FeedbackVectorSpec* spec,
@@ -473,8 +458,8 @@
 
   ZoneAllocationPolicy allocator(zone);
 
-  ZoneHashMap table(Literal::Match, ZoneHashMap::kDefaultHashMapCapacity,
-                    allocator);
+  CustomMatcherZoneHashMap table(
+      Literal::Match, ZoneHashMap::kDefaultHashMapCapacity, allocator);
   for (int i = properties()->length() - 1; i >= 0; i--) {
     ObjectLiteral::Property* property = properties()->at(i);
     if (property->is_computed_name()) continue;
@@ -551,7 +536,7 @@
     // TODO(verwaest): Remove once we can store them inline.
     if (FLAG_track_double_fields &&
         (value->IsNumber() || value->IsUninitialized(isolate))) {
-      may_store_doubles_ = true;
+      bit_field_ = MayStoreDoublesField::update(bit_field_, true);
     }
 
     is_simple = is_simple && !value->IsUninitialized(isolate);
@@ -578,9 +563,11 @@
   }
 
   constant_properties_ = constant_properties;
-  fast_elements_ =
-      (max_element_index <= 32) || ((2 * elements) >= max_element_index);
-  has_elements_ = elements > 0;
+  bit_field_ = FastElementsField::update(
+      bit_field_,
+      (max_element_index <= 32) || ((2 * elements) >= max_element_index));
+  bit_field_ = HasElementsField::update(bit_field_, elements > 0);
+
   set_is_simple(is_simple);
   set_depth(depth_acc);
 }
@@ -662,8 +649,7 @@
                                              FeedbackVectorSlotCache* cache) {
   // This logic that computes the number of slots needed for vector store
   // ics must mirror FullCodeGenerator::VisitArrayLiteral.
-  int array_index = 0;
-  for (; array_index < values()->length(); array_index++) {
+  for (int array_index = 0; array_index < values()->length(); array_index++) {
     Expression* subexpr = values()->at(array_index);
     DCHECK(!subexpr->IsSpread());
     if (CompileTimeValue::IsCompileTimeValue(subexpr)) continue;
@@ -731,7 +717,7 @@
     case Token::OR:
       return;
     default:
-      type_feedback_slot_ = spec->AddGeneralSlot();
+      type_feedback_slot_ = spec->AddInterpreterBinaryOpICSlot();
       return;
   }
 }
@@ -741,6 +727,20 @@
   return maybe_unary != NULL && maybe_unary->op() == Token::TYPEOF;
 }
 
+void CompareOperation::AssignFeedbackVectorSlots(
+    Isolate* isolate, FeedbackVectorSpec* spec,
+    FeedbackVectorSlotCache* cache_) {
+  // Feedback vector slot is only used by interpreter for binary operations.
+  // Full-codegen uses AstId to record type feedback.
+  switch (op()) {
+    // instanceof and in do not collect type feedback.
+    case Token::INSTANCEOF:
+    case Token::IN:
+      return;
+    default:
+      type_feedback_slot_ = spec->AddInterpreterCompareICSlot();
+  }
+}
 
 // Check for the pattern: typeof <expression> equals <string literal>.
 static bool MatchLiteralCompareTypeof(Expression* left,
@@ -759,8 +759,8 @@
 
 bool CompareOperation::IsLiteralCompareTypeof(Expression** expr,
                                               Handle<String>* check) {
-  return MatchLiteralCompareTypeof(left_, op_, right_, expr, check) ||
-      MatchLiteralCompareTypeof(right_, op_, left_, expr, check);
+  return MatchLiteralCompareTypeof(left_, op(), right_, expr, check) ||
+         MatchLiteralCompareTypeof(right_, op(), left_, expr, check);
 }
 
 
@@ -790,8 +790,8 @@
 }
 
 bool CompareOperation::IsLiteralCompareUndefined(Expression** expr) {
-  return MatchLiteralCompareUndefined(left_, op_, right_, expr) ||
-         MatchLiteralCompareUndefined(right_, op_, left_, expr);
+  return MatchLiteralCompareUndefined(left_, op(), right_, expr) ||
+         MatchLiteralCompareUndefined(right_, op(), left_, expr);
 }
 
 
@@ -809,8 +809,8 @@
 
 
 bool CompareOperation::IsLiteralCompareNull(Expression** expr) {
-  return MatchLiteralCompareNull(left_, op_, right_, expr) ||
-      MatchLiteralCompareNull(right_, op_, left_, expr);
+  return MatchLiteralCompareNull(left_, op(), right_, expr) ||
+         MatchLiteralCompareNull(right_, op(), left_, expr);
 }
 
 
@@ -913,7 +913,7 @@
   if (proxy != NULL) {
     if (is_possibly_eval()) {
       return POSSIBLY_EVAL_CALL;
-    } else if (proxy->var()->IsUnallocatedOrGlobalSlot()) {
+    } else if (proxy->var()->IsUnallocated()) {
       return GLOBAL_CALL;
     } else if (proxy->var()->IsLookupSlot()) {
       return LOOKUP_SLOT_CALL;
@@ -940,7 +940,13 @@
     : Expression(pos, kCaseClause),
       label_(label),
       statements_(statements),
-      compare_type_(Type::None()) {}
+      compare_type_(AstType::None()) {}
+
+void CaseClause::AssignFeedbackVectorSlots(Isolate* isolate,
+                                           FeedbackVectorSpec* spec,
+                                           FeedbackVectorSlotCache* cache) {
+  type_feedback_slot_ = spec->AddInterpreterCompareICSlot();
+}
 
 uint32_t Literal::Hash() {
   return raw_value()->IsString()
diff --git a/src/ast/ast.h b/src/ast/ast.h
index 1b80d3f..a6661be 100644
--- a/src/ast/ast.h
+++ b/src/ast/ast.h
@@ -5,6 +5,7 @@
 #ifndef V8_AST_AST_H_
 #define V8_AST_AST_H_
 
+#include "src/ast/ast-types.h"
 #include "src/ast/ast-value-factory.h"
 #include "src/ast/modules.h"
 #include "src/ast/variables.h"
@@ -17,7 +18,6 @@
 #include "src/parsing/token.h"
 #include "src/runtime/runtime.h"
 #include "src/small-pointer-list.h"
-#include "src/types.h"
 #include "src/utils.h"
 
 namespace v8 {
@@ -126,17 +126,11 @@
 #undef DEF_FORWARD_DECLARATION
 
 
-// Typedef only introduced to avoid unreadable code.
-typedef ZoneList<Handle<String>> ZoneStringList;
-typedef ZoneList<Handle<Object>> ZoneObjectList;
-
-
 class FeedbackVectorSlotCache {
  public:
   explicit FeedbackVectorSlotCache(Zone* zone)
       : zone_(zone),
-        hash_map_(base::HashMap::PointersMatch,
-                  ZoneHashMap::kDefaultHashMapCapacity,
+        hash_map_(ZoneHashMap::kDefaultHashMapCapacity,
                   ZoneAllocationPolicy(zone)) {}
 
   void Put(Variable* variable, FeedbackVectorSlot slot) {
@@ -192,7 +186,7 @@
 
   void* operator new(size_t size, Zone* zone) { return zone->New(size); }
 
-  NodeType node_type() const { return node_type_; }
+  NodeType node_type() const { return NodeTypeField::decode(bit_field_); }
   int position() const { return position_; }
 
 #ifdef DEBUG
@@ -211,19 +205,20 @@
   IterationStatement* AsIterationStatement();
   MaterializedLiteral* AsMaterializedLiteral();
 
- protected:
-  AstNode(int position, NodeType type)
-      : position_(position), node_type_(type) {}
-
  private:
   // Hidden to prevent accidental usage. It would have to load the
   // current zone from the TLS.
   void* operator new(size_t size);
 
   int position_;
-  NodeType node_type_;
-  // Ends with NodeType which is uint8_t sized. Deriving classes in turn begin
-  // sub-int32_t-sized fields for optimum packing efficiency.
+  class NodeTypeField : public BitField<NodeType, 0, 6> {};
+
+ protected:
+  uint32_t bit_field_;
+  static const uint8_t kNextBitFieldIndex = NodeTypeField::kNext;
+
+  AstNode(int position, NodeType type)
+      : position_(position), bit_field_(NodeTypeField::encode(type)) {}
 };
 
 
@@ -234,6 +229,8 @@
 
  protected:
   Statement(int position, NodeType type) : AstNode(position, type) {}
+
+  static const uint8_t kNextBitFieldIndex = AstNode::kNextBitFieldIndex;
 };
 
 
@@ -349,11 +346,18 @@
   BailoutId id() const { return BailoutId(local_id(0)); }
   TypeFeedbackId test_id() const { return TypeFeedbackId(local_id(1)); }
 
+ private:
+  int local_id(int n) const { return base_id() + parent_num_ids() + n; }
+
+  int base_id_;
+  class ToBooleanTypesField
+      : public BitField<uint16_t, AstNode::kNextBitFieldIndex, 9> {};
+
  protected:
   Expression(int pos, NodeType type)
-      : AstNode(pos, type),
-        bit_field_(0),
-        base_id_(BailoutId::None().ToInt()) {}
+      : AstNode(pos, type), base_id_(BailoutId::None().ToInt()) {
+    bit_field_ = ToBooleanTypesField::update(bit_field_, 0);
+  }
 
   static int parent_num_ids() { return 0; }
   void set_to_boolean_types(uint16_t types) {
@@ -364,12 +368,7 @@
     return base_id_;
   }
 
- private:
-  int local_id(int n) const { return base_id() + parent_num_ids() + n; }
-
-  uint16_t bit_field_;
-  int base_id_;
-  class ToBooleanTypesField : public BitField16<uint16_t, 0, 9> {};
+  static const uint8_t kNextBitFieldIndex = ToBooleanTypesField::kNext;
 };
 
 
@@ -389,7 +388,7 @@
 
   // Testers.
   bool is_target_for_anonymous() const {
-    return breakable_type_ == TARGET_FOR_ANONYMOUS;
+    return BreakableTypeField::decode(bit_field_) == TARGET_FOR_ANONYMOUS;
   }
 
   void set_base_id(int id) { base_id_ = id; }
@@ -397,14 +396,28 @@
   BailoutId EntryId() const { return BailoutId(local_id(0)); }
   BailoutId ExitId() const { return BailoutId(local_id(1)); }
 
+ private:
+  int local_id(int n) const { return base_id() + parent_num_ids() + n; }
+
+  BreakableType breakableType() const {
+    return BreakableTypeField::decode(bit_field_);
+  }
+
+  int base_id_;
+  Label break_target_;
+  ZoneList<const AstRawString*>* labels_;
+
+  class BreakableTypeField
+      : public BitField<BreakableType, Statement::kNextBitFieldIndex, 1> {};
+
  protected:
   BreakableStatement(ZoneList<const AstRawString*>* labels,
                      BreakableType breakable_type, int position, NodeType type)
       : Statement(position, type),
-        breakable_type_(breakable_type),
         base_id_(BailoutId::None().ToInt()),
         labels_(labels) {
     DCHECK(labels == NULL || labels->length() > 0);
+    bit_field_ |= BreakableTypeField::encode(breakable_type);
   }
   static int parent_num_ids() { return 0; }
 
@@ -413,20 +426,16 @@
     return base_id_;
   }
 
- private:
-  int local_id(int n) const { return base_id() + parent_num_ids() + n; }
-
-  BreakableType breakable_type_;
-  int base_id_;
-  Label break_target_;
-  ZoneList<const AstRawString*>* labels_;
+  static const uint8_t kNextBitFieldIndex = BreakableTypeField::kNext;
 };
 
 
 class Block final : public BreakableStatement {
  public:
   ZoneList<Statement*>* statements() { return &statements_; }
-  bool ignore_completion_value() const { return ignore_completion_value_; }
+  bool ignore_completion_value() const {
+    return IgnoreCompletionField::decode(bit_field_);
+  }
 
   static int num_ids() { return parent_num_ids() + 1; }
   BailoutId DeclsId() const { return BailoutId(local_id(0)); }
@@ -446,14 +455,20 @@
         bool ignore_completion_value, int pos)
       : BreakableStatement(labels, TARGET_FOR_NAMED_ONLY, pos, kBlock),
         statements_(capacity, zone),
-        ignore_completion_value_(ignore_completion_value),
-        scope_(NULL) {}
+        scope_(NULL) {
+    bit_field_ |= IgnoreCompletionField::encode(ignore_completion_value);
+  }
   static int parent_num_ids() { return BreakableStatement::num_ids(); }
   int local_id(int n) const { return base_id() + parent_num_ids() + n; }
 
   ZoneList<Statement*> statements_;
-  bool ignore_completion_value_;
   Scope* scope_;
+
+  class IgnoreCompletionField
+      : public BitField<bool, BreakableStatement::kNextBitFieldIndex, 1> {};
+
+ protected:
+  static const uint8_t kNextBitFieldIndex = IgnoreCompletionField::kNext;
 };
 
 
@@ -469,6 +484,9 @@
   }
   bool IsAnonymousFunctionDefinition() const;
 
+ protected:
+  static const uint8_t kNextBitFieldIndex = Expression::kNextBitFieldIndex;
+
  private:
   friend class AstNodeFactory;
 
@@ -498,6 +516,8 @@
   Declaration(VariableProxy* proxy, Scope* scope, int pos, NodeType type)
       : AstNode(pos, type), proxy_(proxy), scope_(scope) {}
 
+  static const uint8_t kNextBitFieldIndex = AstNode::kNextBitFieldIndex;
+
  private:
   VariableProxy* proxy_;
 
@@ -561,6 +581,9 @@
   static int parent_num_ids() { return BreakableStatement::num_ids(); }
   void Initialize(Statement* body) { body_ = body; }
 
+  static const uint8_t kNextBitFieldIndex =
+      BreakableStatement::kNextBitFieldIndex;
+
  private:
   int local_id(int n) const { return base_id() + parent_num_ids() + n; }
 
@@ -715,16 +738,19 @@
   }
 
   enum ForInType { FAST_FOR_IN, SLOW_FOR_IN };
-  ForInType for_in_type() const { return for_in_type_; }
-  void set_for_in_type(ForInType type) { for_in_type_ = type; }
+  ForInType for_in_type() const { return ForInTypeField::decode(bit_field_); }
+  void set_for_in_type(ForInType type) {
+    bit_field_ = ForInTypeField::update(bit_field_, type);
+  }
 
-  static int num_ids() { return parent_num_ids() + 6; }
+  static int num_ids() { return parent_num_ids() + 7; }
   BailoutId BodyId() const { return BailoutId(local_id(0)); }
   BailoutId EnumId() const { return BailoutId(local_id(1)); }
   BailoutId ToObjectId() const { return BailoutId(local_id(2)); }
   BailoutId PrepareId() const { return BailoutId(local_id(3)); }
   BailoutId FilterId() const { return BailoutId(local_id(4)); }
   BailoutId AssignmentId() const { return BailoutId(local_id(5)); }
+  BailoutId IncrementId() const { return BailoutId(local_id(6)); }
   BailoutId ContinueId() const { return EntryId(); }
   BailoutId StackCheckId() const { return BodyId(); }
 
@@ -734,16 +760,23 @@
   ForInStatement(ZoneList<const AstRawString*>* labels, int pos)
       : ForEachStatement(labels, pos, kForInStatement),
         each_(nullptr),
-        subject_(nullptr),
-        for_in_type_(SLOW_FOR_IN) {}
+        subject_(nullptr) {
+    bit_field_ = ForInTypeField::update(bit_field_, SLOW_FOR_IN);
+  }
+
   static int parent_num_ids() { return ForEachStatement::num_ids(); }
   int local_id(int n) const { return base_id() + parent_num_ids() + n; }
 
   Expression* each_;
   Expression* subject_;
-  ForInType for_in_type_;
   FeedbackVectorSlot each_slot_;
   FeedbackVectorSlot for_in_feedback_slot_;
+
+  class ForInTypeField
+      : public BitField<ForInType, ForEachStatement::kNextBitFieldIndex, 1> {};
+
+ protected:
+  static const uint8_t kNextBitFieldIndex = ForInTypeField::kNext;
 };
 
 
@@ -938,8 +971,18 @@
   BailoutId EntryId() const { return BailoutId(local_id(0)); }
   TypeFeedbackId CompareId() { return TypeFeedbackId(local_id(1)); }
 
-  Type* compare_type() { return compare_type_; }
-  void set_compare_type(Type* type) { compare_type_ = type; }
+  AstType* compare_type() { return compare_type_; }
+  void set_compare_type(AstType* type) { compare_type_ = type; }
+
+  // CaseClause will have both a slot in the feedback vector and the
+  // TypeFeedbackId to record the type information. TypeFeedbackId is used by
+  // full codegen and the feedback vector slot is used by interpreter.
+  void AssignFeedbackVectorSlots(Isolate* isolate, FeedbackVectorSpec* spec,
+                                 FeedbackVectorSlotCache* cache);
+
+  FeedbackVectorSlot CompareOperationFeedbackSlot() {
+    return type_feedback_slot_;
+  }
 
  private:
   friend class AstNodeFactory;
@@ -951,7 +994,8 @@
   Expression* label_;
   Label body_target_;
   ZoneList<Statement*>* statements_;
-  Type* compare_type_;
+  AstType* compare_type_;
+  FeedbackVectorSlot type_feedback_slot_;
 };
 
 
@@ -1241,17 +1285,27 @@
     return depth_;
   }
 
+ private:
+  int depth_ : 31;
+  int literal_index_;
+
+  friend class AstLiteralReindexer;
+
+  class IsSimpleField
+      : public BitField<bool, Expression::kNextBitFieldIndex, 1> {};
+
  protected:
   MaterializedLiteral(int literal_index, int pos, NodeType type)
-      : Expression(pos, type),
-        is_simple_(false),
-        depth_(0),
-        literal_index_(literal_index) {}
+      : Expression(pos, type), depth_(0), literal_index_(literal_index) {
+    bit_field_ |= IsSimpleField::encode(false);
+  }
 
   // A materialized literal is simple if the values consist of only
   // constants and simple object and array literals.
-  bool is_simple() const { return is_simple_; }
-  void set_is_simple(bool is_simple) { is_simple_ = is_simple; }
+  bool is_simple() const { return IsSimpleField::decode(bit_field_); }
+  void set_is_simple(bool is_simple) {
+    bit_field_ = IsSimpleField::update(bit_field_, is_simple);
+  }
   friend class CompileTimeValue;
 
   void set_depth(int depth) {
@@ -1271,19 +1325,45 @@
   // in the object literal boilerplate.
   Handle<Object> GetBoilerplateValue(Expression* expression, Isolate* isolate);
 
- private:
-  bool is_simple_ : 1;
-  int depth_ : 31;
-  int literal_index_;
-
-  friend class AstLiteralReindexer;
+  static const uint8_t kNextBitFieldIndex = IsSimpleField::kNext;
 };
 
+// Common supertype for ObjectLiteralProperty and ClassLiteralProperty
+class LiteralProperty : public ZoneObject {
+ public:
+  Expression* key() const { return key_; }
+  Expression* value() const { return value_; }
+  void set_key(Expression* e) { key_ = e; }
+  void set_value(Expression* e) { value_ = e; }
+
+  bool is_computed_name() const { return is_computed_name_; }
+
+  FeedbackVectorSlot GetSlot(int offset = 0) const {
+    DCHECK_LT(offset, static_cast<int>(arraysize(slots_)));
+    return slots_[offset];
+  }
+
+  void SetSlot(FeedbackVectorSlot slot, int offset = 0) {
+    DCHECK_LT(offset, static_cast<int>(arraysize(slots_)));
+    slots_[offset] = slot;
+  }
+
+  bool NeedsSetFunctionName() const;
+
+ protected:
+  LiteralProperty(Expression* key, Expression* value, bool is_computed_name)
+      : key_(key), value_(value), is_computed_name_(is_computed_name) {}
+
+  Expression* key_;
+  Expression* value_;
+  FeedbackVectorSlot slots_[2];
+  bool is_computed_name_;
+};
 
 // Property is used for passing information
 // about an object literal's properties from the parser
 // to the code generator.
-class ObjectLiteralProperty final : public ZoneObject {
+class ObjectLiteralProperty final : public LiteralProperty {
  public:
   enum Kind : uint8_t {
     CONSTANT,              // Property with constant value (compile time).
@@ -1294,54 +1374,29 @@
     PROTOTYPE  // Property is __proto__.
   };
 
-  Expression* key() { return key_; }
-  Expression* value() { return value_; }
-  Kind kind() { return kind_; }
-
-  void set_key(Expression* e) { key_ = e; }
-  void set_value(Expression* e) { value_ = e; }
+  Kind kind() const { return kind_; }
 
   // Type feedback information.
-  bool IsMonomorphic() { return !receiver_type_.is_null(); }
-  Handle<Map> GetReceiverType() { return receiver_type_; }
+  bool IsMonomorphic() const { return !receiver_type_.is_null(); }
+  Handle<Map> GetReceiverType() const { return receiver_type_; }
 
-  bool IsCompileTimeValue();
+  bool IsCompileTimeValue() const;
 
   void set_emit_store(bool emit_store);
-  bool emit_store();
-
-  bool is_static() const { return is_static_; }
-  bool is_computed_name() const { return is_computed_name_; }
-
-  FeedbackVectorSlot GetSlot(int offset = 0) const {
-    DCHECK_LT(offset, static_cast<int>(arraysize(slots_)));
-    return slots_[offset];
-  }
-  void SetSlot(FeedbackVectorSlot slot, int offset = 0) {
-    DCHECK_LT(offset, static_cast<int>(arraysize(slots_)));
-    slots_[offset] = slot;
-  }
+  bool emit_store() const;
 
   void set_receiver_type(Handle<Map> map) { receiver_type_ = map; }
 
-  bool NeedsSetFunctionName() const;
-
  private:
   friend class AstNodeFactory;
 
   ObjectLiteralProperty(Expression* key, Expression* value, Kind kind,
-                        bool is_static, bool is_computed_name);
-  ObjectLiteralProperty(AstValueFactory* ast_value_factory, Expression* key,
-                        Expression* value, bool is_static,
                         bool is_computed_name);
+  ObjectLiteralProperty(AstValueFactory* ast_value_factory, Expression* key,
+                        Expression* value, bool is_computed_name);
 
-  Expression* key_;
-  Expression* value_;
-  FeedbackVectorSlot slots_[2];
   Kind kind_;
   bool emit_store_;
-  bool is_static_;
-  bool is_computed_name_;
   Handle<Map> receiver_type_;
 };
 
@@ -1357,9 +1412,11 @@
   }
   int properties_count() const { return boilerplate_properties_; }
   ZoneList<Property*>* properties() const { return properties_; }
-  bool fast_elements() const { return fast_elements_; }
-  bool may_store_doubles() const { return may_store_doubles_; }
-  bool has_elements() const { return has_elements_; }
+  bool fast_elements() const { return FastElementsField::decode(bit_field_); }
+  bool may_store_doubles() const {
+    return MayStoreDoublesField::decode(bit_field_);
+  }
+  bool has_elements() const { return HasElementsField::decode(bit_field_); }
   bool has_shallow_properties() const {
     return depth() == 1 && !has_elements() && !may_store_doubles();
   }
@@ -1429,33 +1486,42 @@
                 uint32_t boilerplate_properties, int pos)
       : MaterializedLiteral(literal_index, pos, kObjectLiteral),
         boilerplate_properties_(boilerplate_properties),
-        fast_elements_(false),
-        has_elements_(false),
-        may_store_doubles_(false),
-        properties_(properties) {}
+        properties_(properties) {
+    bit_field_ |= FastElementsField::encode(false) |
+                  HasElementsField::encode(false) |
+                  MayStoreDoublesField::encode(false);
+  }
 
   static int parent_num_ids() { return MaterializedLiteral::num_ids(); }
   int local_id(int n) const { return base_id() + parent_num_ids() + n; }
 
-  uint32_t boilerplate_properties_ : 29;
-  bool fast_elements_ : 1;
-  bool has_elements_ : 1;
-  bool may_store_doubles_ : 1;
+  uint32_t boilerplate_properties_;
   FeedbackVectorSlot slot_;
   Handle<FixedArray> constant_properties_;
   ZoneList<Property*>* properties_;
+
+  class FastElementsField
+      : public BitField<bool, MaterializedLiteral::kNextBitFieldIndex, 1> {};
+  class HasElementsField : public BitField<bool, FastElementsField::kNext, 1> {
+  };
+  class MayStoreDoublesField
+      : public BitField<bool, HasElementsField::kNext, 1> {};
+
+ protected:
+  static const uint8_t kNextBitFieldIndex = MayStoreDoublesField::kNext;
 };
 
 
 // A map from property names to getter/setter pairs allocated in the zone.
 class AccessorTable
     : public base::TemplateHashMap<Literal, ObjectLiteral::Accessors,
+                                   bool (*)(void*, void*),
                                    ZoneAllocationPolicy> {
  public:
   explicit AccessorTable(Zone* zone)
       : base::TemplateHashMap<Literal, ObjectLiteral::Accessors,
-                              ZoneAllocationPolicy>(Literal::Match,
-                                                    ZoneAllocationPolicy(zone)),
+                              bool (*)(void*, void*), ZoneAllocationPolicy>(
+            Literal::Match, ZoneAllocationPolicy(zone)),
         zone_(zone) {}
 
   Iterator lookup(Literal* literal) {
@@ -1628,19 +1694,19 @@
   friend class AstNodeFactory;
 
   VariableProxy(Variable* var, int start_position, int end_position);
-  VariableProxy(const AstRawString* name, Variable::Kind variable_kind,
+  VariableProxy(const AstRawString* name, VariableKind variable_kind,
                 int start_position, int end_position);
   explicit VariableProxy(const VariableProxy* copy_from);
 
   static int parent_num_ids() { return Expression::num_ids(); }
   int local_id(int n) const { return base_id() + parent_num_ids() + n; }
 
-  class IsThisField : public BitField8<bool, 0, 1> {};
-  class IsAssignedField : public BitField8<bool, 1, 1> {};
-  class IsResolvedField : public BitField8<bool, 2, 1> {};
-  class IsNewTargetField : public BitField8<bool, 3, 1> {};
+  class IsThisField : public BitField<bool, Expression::kNextBitFieldIndex, 1> {
+  };
+  class IsAssignedField : public BitField<bool, IsThisField::kNext, 1> {};
+  class IsResolvedField : public BitField<bool, IsAssignedField::kNext, 1> {};
+  class IsNewTargetField : public BitField<bool, IsResolvedField::kNext, 1> {};
 
-  uint8_t bit_field_;
   // Position is stored in the AstNode superclass, but VariableProxy needs to
   // know its end position too (for error messages). It cannot be inferred from
   // the variable name length because it can contain escapes.
@@ -1737,22 +1803,24 @@
   friend class AstNodeFactory;
 
   Property(Expression* obj, Expression* key, int pos)
-      : Expression(pos, kProperty),
-        bit_field_(IsForCallField::encode(false) |
-                   IsStringAccessField::encode(false) |
-                   InlineCacheStateField::encode(UNINITIALIZED)),
-        obj_(obj),
-        key_(key) {}
+      : Expression(pos, kProperty), obj_(obj), key_(key) {
+    bit_field_ |= IsForCallField::encode(false) |
+                  IsStringAccessField::encode(false) |
+                  InlineCacheStateField::encode(UNINITIALIZED);
+  }
 
   static int parent_num_ids() { return Expression::num_ids(); }
   int local_id(int n) const { return base_id() + parent_num_ids() + n; }
 
-  class IsForCallField : public BitField8<bool, 0, 1> {};
-  class IsStringAccessField : public BitField8<bool, 1, 1> {};
-  class KeyTypeField : public BitField8<IcCheckType, 2, 1> {};
-  class InlineCacheStateField : public BitField8<InlineCacheState, 3, 4> {};
+  class IsForCallField
+      : public BitField<bool, Expression::kNextBitFieldIndex, 1> {};
+  class IsStringAccessField : public BitField<bool, IsForCallField::kNext, 1> {
+  };
+  class KeyTypeField
+      : public BitField<IcCheckType, IsStringAccessField::kNext, 1> {};
+  class InlineCacheStateField
+      : public BitField<InlineCacheState, KeyTypeField::kNext, 4> {};
 
-  uint8_t bit_field_;
   FeedbackVectorSlot property_feedback_slot_;
   Expression* obj_;
   Expression* key_;
@@ -1789,15 +1857,6 @@
     return !target_.is_null();
   }
 
-  bool global_call() const {
-    VariableProxy* proxy = expression_->AsVariableProxy();
-    return proxy != NULL && proxy->var()->IsUnallocatedOrGlobalSlot();
-  }
-
-  bool known_global_function() const {
-    return global_call() && !target_.is_null();
-  }
-
   Handle<JSFunction> target() { return target_; }
 
   Handle<AllocationSite> allocation_site() { return allocation_site_; }
@@ -1867,11 +1926,12 @@
   Call(Expression* expression, ZoneList<Expression*>* arguments, int pos,
        PossiblyEval possibly_eval)
       : Expression(pos, kCall),
-        bit_field_(
-            IsUninitializedField::encode(false) |
-            IsPossiblyEvalField::encode(possibly_eval == IS_POSSIBLY_EVAL)),
         expression_(expression),
         arguments_(arguments) {
+    bit_field_ |=
+        IsUninitializedField::encode(false) |
+        IsPossiblyEvalField::encode(possibly_eval == IS_POSSIBLY_EVAL);
+
     if (expression->IsProperty()) {
       expression->AsProperty()->mark_for_call();
     }
@@ -1880,11 +1940,11 @@
   static int parent_num_ids() { return Expression::num_ids(); }
   int local_id(int n) const { return base_id() + parent_num_ids() + n; }
 
-  class IsUninitializedField : public BitField8<bool, 0, 1> {};
-  class IsTailField : public BitField8<bool, 1, 1> {};
-  class IsPossiblyEvalField : public BitField8<bool, 2, 1> {};
+  class IsUninitializedField
+      : public BitField<bool, Expression::kNextBitFieldIndex, 1> {};
+  class IsTailField : public BitField<bool, IsUninitializedField::kNext, 1> {};
+  class IsPossiblyEvalField : public BitField<bool, IsTailField::kNext, 1> {};
 
-  uint8_t bit_field_;
   FeedbackVectorSlot ic_slot_;
   FeedbackVectorSlot stub_slot_;
   Expression* expression_;
@@ -1904,10 +1964,9 @@
   // Type feedback information.
   void AssignFeedbackVectorSlots(Isolate* isolate, FeedbackVectorSpec* spec,
                                  FeedbackVectorSlotCache* cache) {
-    callnew_feedback_slot_ = spec->AddGeneralSlot();
-    // Construct calls have two slots, one right after the other.
-    // The second slot stores the call count for monomorphic calls.
-    spec->AddGeneralSlot();
+    // CallNew stores feedback in the exact same way as Call. We can
+    // piggyback on the type feedback infrastructure for calls.
+    callnew_feedback_slot_ = spec->AddCallICSlot();
   }
 
   FeedbackVectorSlot CallNewFeedbackSlot() {
@@ -1915,7 +1974,7 @@
     return callnew_feedback_slot_;
   }
 
-  bool IsMonomorphic() const { return is_monomorphic_; }
+  bool IsMonomorphic() const { return IsMonomorphicField::decode(bit_field_); }
   Handle<JSFunction> target() const { return target_; }
   Handle<AllocationSite> allocation_site() const {
     return allocation_site_;
@@ -1928,11 +1987,13 @@
   void set_allocation_site(Handle<AllocationSite> site) {
     allocation_site_ = site;
   }
-  void set_is_monomorphic(bool monomorphic) { is_monomorphic_ = monomorphic; }
+  void set_is_monomorphic(bool monomorphic) {
+    bit_field_ = IsMonomorphicField::update(bit_field_, monomorphic);
+  }
   void set_target(Handle<JSFunction> target) { target_ = target; }
   void SetKnownGlobalTarget(Handle<JSFunction> target) {
     target_ = target;
-    is_monomorphic_ = true;
+    set_is_monomorphic(true);
   }
 
  private:
@@ -1940,19 +2001,22 @@
 
   CallNew(Expression* expression, ZoneList<Expression*>* arguments, int pos)
       : Expression(pos, kCallNew),
-        is_monomorphic_(false),
         expression_(expression),
-        arguments_(arguments) {}
+        arguments_(arguments) {
+    bit_field_ |= IsMonomorphicField::encode(false);
+  }
 
   static int parent_num_ids() { return Expression::num_ids(); }
   int local_id(int n) const { return base_id() + parent_num_ids() + n; }
 
-  bool is_monomorphic_;
   FeedbackVectorSlot callnew_feedback_slot_;
   Expression* expression_;
   ZoneList<Expression*>* arguments_;
   Handle<JSFunction> target_;
   Handle<AllocationSite> allocation_site_;
+
+  class IsMonomorphicField
+      : public BitField<bool, Expression::kNextBitFieldIndex, 1> {};
 };
 
 
@@ -1969,6 +2033,10 @@
     DCHECK(is_jsruntime());
     return context_index_;
   }
+  void set_context_index(int index) {
+    DCHECK(is_jsruntime());
+    context_index_ = index;
+  }
   const Runtime::Function* function() const {
     DCHECK(!is_jsruntime());
     return function_;
@@ -2006,7 +2074,7 @@
 
 class UnaryOperation final : public Expression {
  public:
-  Token::Value op() const { return op_; }
+  Token::Value op() const { return OperatorField::decode(bit_field_); }
   Expression* expression() const { return expression_; }
   void set_expression(Expression* e) { expression_ = e; }
 
@@ -2022,21 +2090,24 @@
   friend class AstNodeFactory;
 
   UnaryOperation(Token::Value op, Expression* expression, int pos)
-      : Expression(pos, kUnaryOperation), op_(op), expression_(expression) {
+      : Expression(pos, kUnaryOperation), expression_(expression) {
+    bit_field_ |= OperatorField::encode(op);
     DCHECK(Token::IsUnaryOp(op));
   }
 
   static int parent_num_ids() { return Expression::num_ids(); }
   int local_id(int n) const { return base_id() + parent_num_ids() + n; }
 
-  Token::Value op_;
   Expression* expression_;
+
+  class OperatorField
+      : public BitField<Token::Value, Expression::kNextBitFieldIndex, 7> {};
 };
 
 
 class BinaryOperation final : public Expression {
  public:
-  Token::Value op() const { return static_cast<Token::Value>(op_); }
+  Token::Value op() const { return OperatorField::decode(bit_field_); }
   Expression* left() const { return left_; }
   void set_left(Expression* e) { left_ = e; }
   Expression* right() const { return right_; }
@@ -2090,18 +2161,17 @@
 
   BinaryOperation(Token::Value op, Expression* left, Expression* right, int pos)
       : Expression(pos, kBinaryOperation),
-        op_(static_cast<byte>(op)),
         has_fixed_right_arg_(false),
         fixed_right_arg_value_(0),
         left_(left),
         right_(right) {
+    bit_field_ |= OperatorField::encode(op);
     DCHECK(Token::IsBinaryOp(op));
   }
 
   static int parent_num_ids() { return Expression::num_ids(); }
   int local_id(int n) const { return base_id() + parent_num_ids() + n; }
 
-  const byte op_;  // actually Token::Value
   // TODO(rossberg): the fixed arg should probably be represented as a Constant
   // type for the RHS. Currenty it's actually a Maybe<int>
   bool has_fixed_right_arg_;
@@ -2110,6 +2180,9 @@
   Expression* right_;
   Handle<AllocationSite> allocation_site_;
   FeedbackVectorSlot type_feedback_slot_;
+
+  class OperatorField
+      : public BitField<Token::Value, Expression::kNextBitFieldIndex, 7> {};
 };
 
 
@@ -2132,14 +2205,14 @@
   KeyedAccessStoreMode GetStoreMode() const {
     return StoreModeField::decode(bit_field_);
   }
-  Type* type() const { return type_; }
+  AstType* type() const { return type_; }
   void set_key_type(IcCheckType type) {
     bit_field_ = KeyTypeField::update(bit_field_, type);
   }
   void set_store_mode(KeyedAccessStoreMode mode) {
     bit_field_ = StoreModeField::update(bit_field_, mode);
   }
-  void set_type(Type* type) { type_ = type; }
+  void set_type(AstType* type) { type_ = type; }
 
   static int num_ids() { return parent_num_ids() + 4; }
   BailoutId AssignmentId() const { return BailoutId(local_id(0)); }
@@ -2164,27 +2237,25 @@
   friend class AstNodeFactory;
 
   CountOperation(Token::Value op, bool is_prefix, Expression* expr, int pos)
-      : Expression(pos, kCountOperation),
-        bit_field_(
-            IsPrefixField::encode(is_prefix) | KeyTypeField::encode(ELEMENT) |
-            StoreModeField::encode(STANDARD_STORE) | TokenField::encode(op)),
-        type_(NULL),
-        expression_(expr) {}
+      : Expression(pos, kCountOperation), type_(NULL), expression_(expr) {
+    bit_field_ |=
+        IsPrefixField::encode(is_prefix) | KeyTypeField::encode(ELEMENT) |
+        StoreModeField::encode(STANDARD_STORE) | TokenField::encode(op);
+  }
 
   static int parent_num_ids() { return Expression::num_ids(); }
   int local_id(int n) const { return base_id() + parent_num_ids() + n; }
 
-  class IsPrefixField : public BitField16<bool, 0, 1> {};
-  class KeyTypeField : public BitField16<IcCheckType, 1, 1> {};
-  class StoreModeField : public BitField16<KeyedAccessStoreMode, 2, 3> {};
-  class TokenField : public BitField16<Token::Value, 5, 8> {};
+  class IsPrefixField
+      : public BitField<bool, Expression::kNextBitFieldIndex, 1> {};
+  class KeyTypeField : public BitField<IcCheckType, IsPrefixField::kNext, 1> {};
+  class StoreModeField
+      : public BitField<KeyedAccessStoreMode, KeyTypeField::kNext, 3> {};
+  class TokenField : public BitField<Token::Value, StoreModeField::kNext, 7> {};
 
-  // Starts with 16-bit field, which should get packed together with
-  // Expression's trailing 16-bit field.
-  uint16_t bit_field_;
   FeedbackVectorSlot slot_;
   FeedbackVectorSlot binary_operation_slot_;
-  Type* type_;
+  AstType* type_;
   Expression* expression_;
   SmallMapList receiver_types_;
 };
@@ -2192,7 +2263,7 @@
 
 class CompareOperation final : public Expression {
  public:
-  Token::Value op() const { return op_; }
+  Token::Value op() const { return OperatorField::decode(bit_field_); }
   Expression* left() const { return left_; }
   Expression* right() const { return right_; }
 
@@ -2204,8 +2275,18 @@
   TypeFeedbackId CompareOperationFeedbackId() const {
     return TypeFeedbackId(local_id(0));
   }
-  Type* combined_type() const { return combined_type_; }
-  void set_combined_type(Type* type) { combined_type_ = type; }
+  AstType* combined_type() const { return combined_type_; }
+  void set_combined_type(AstType* type) { combined_type_ = type; }
+
+  // CompareOperation will have both a slot in the feedback vector and the
+  // TypeFeedbackId to record the type information. TypeFeedbackId is used
+  // by full codegen and the feedback vector slot is used by interpreter.
+  void AssignFeedbackVectorSlots(Isolate* isolate, FeedbackVectorSpec* spec,
+                                 FeedbackVectorSlotCache* cache);
+
+  FeedbackVectorSlot CompareOperationFeedbackSlot() const {
+    return type_feedback_slot_;
+  }
 
   // Match special cases.
   bool IsLiteralCompareTypeof(Expression** expr, Handle<String>* check);
@@ -2218,21 +2299,23 @@
   CompareOperation(Token::Value op, Expression* left, Expression* right,
                    int pos)
       : Expression(pos, kCompareOperation),
-        op_(op),
         left_(left),
         right_(right),
-        combined_type_(Type::None()) {
+        combined_type_(AstType::None()) {
+    bit_field_ |= OperatorField::encode(op);
     DCHECK(Token::IsCompareOp(op));
   }
 
   static int parent_num_ids() { return Expression::num_ids(); }
   int local_id(int n) const { return base_id() + parent_num_ids() + n; }
 
-  Token::Value op_;
   Expression* left_;
   Expression* right_;
 
-  Type* combined_type_;
+  AstType* combined_type_;
+  FeedbackVectorSlot type_feedback_slot_;
+  class OperatorField
+      : public BitField<Token::Value, Expression::kNextBitFieldIndex, 7> {};
 };
 
 
@@ -2356,17 +2439,14 @@
   static int parent_num_ids() { return Expression::num_ids(); }
   int local_id(int n) const { return base_id() + parent_num_ids() + n; }
 
-  class IsUninitializedField : public BitField16<bool, 0, 1> {};
+  class IsUninitializedField
+      : public BitField<bool, Expression::kNextBitFieldIndex, 1> {};
   class KeyTypeField
-      : public BitField16<IcCheckType, IsUninitializedField::kNext, 1> {};
+      : public BitField<IcCheckType, IsUninitializedField::kNext, 1> {};
   class StoreModeField
-      : public BitField16<KeyedAccessStoreMode, KeyTypeField::kNext, 3> {};
-  class TokenField : public BitField16<Token::Value, StoreModeField::kNext, 8> {
-  };
+      : public BitField<KeyedAccessStoreMode, KeyTypeField::kNext, 3> {};
+  class TokenField : public BitField<Token::Value, StoreModeField::kNext, 7> {};
 
-  // Starts with 16-bit field, which should get packed together with
-  // Expression's trailing 16-bit field.
-  uint16_t bit_field_;
   FeedbackVectorSlot slot_;
   Expression* target_;
   Expression* value_;
@@ -2393,14 +2473,14 @@
 class RewritableExpression final : public Expression {
  public:
   Expression* expression() const { return expr_; }
-  bool is_rewritten() const { return is_rewritten_; }
+  bool is_rewritten() const { return IsRewrittenField::decode(bit_field_); }
 
   void Rewrite(Expression* new_expression) {
     DCHECK(!is_rewritten());
     DCHECK_NOT_NULL(new_expression);
     DCHECK(!new_expression->IsRewritableExpression());
     expr_ = new_expression;
-    is_rewritten_ = true;
+    bit_field_ = IsRewrittenField::update(bit_field_, true);
   }
 
   static int num_ids() { return parent_num_ids(); }
@@ -2410,15 +2490,17 @@
 
   explicit RewritableExpression(Expression* expression)
       : Expression(expression->position(), kRewritableExpression),
-        is_rewritten_(false),
         expr_(expression) {
+    bit_field_ |= IsRewrittenField::encode(false);
     DCHECK(!expression->IsRewritableExpression());
   }
 
   int local_id(int n) const { return base_id() + parent_num_ids() + n; }
 
-  bool is_rewritten_;
   Expression* expr_;
+
+  class IsRewrittenField
+      : public BitField<bool, Expression::kNextBitFieldIndex, 1> {};
 };
 
 // Our Yield is different from the JS yield in that it "returns" its argument as
@@ -2430,8 +2512,11 @@
 
   Expression* generator_object() const { return generator_object_; }
   Expression* expression() const { return expression_; }
+  OnException on_exception() const {
+    return OnExceptionField::decode(bit_field_);
+  }
   bool rethrow_on_exception() const {
-    return on_exception_ == kOnExceptionRethrow;
+    return on_exception() == kOnExceptionRethrow;
   }
   int yield_id() const { return yield_id_; }
 
@@ -2445,15 +2530,18 @@
   Yield(Expression* generator_object, Expression* expression, int pos,
         OnException on_exception)
       : Expression(pos, kYield),
-        on_exception_(on_exception),
         yield_id_(-1),
         generator_object_(generator_object),
-        expression_(expression) {}
+        expression_(expression) {
+    bit_field_ |= OnExceptionField::encode(on_exception);
+  }
 
-  OnException on_exception_;
   int yield_id_;
   Expression* generator_object_;
   Expression* expression_;
+
+  class OnExceptionField
+      : public BitField<OnException, Expression::kNextBitFieldIndex, 1> {};
 };
 
 
@@ -2547,14 +2635,14 @@
     inferred_name_ = Handle<String>();
   }
 
-  bool pretenure() const { return Pretenure::decode(bitfield_); }
-  void set_pretenure() { bitfield_ = Pretenure::update(bitfield_, true); }
+  bool pretenure() const { return Pretenure::decode(bit_field_); }
+  void set_pretenure() { bit_field_ = Pretenure::update(bit_field_, true); }
 
   bool has_duplicate_parameters() const {
-    return HasDuplicateParameters::decode(bitfield_);
+    return HasDuplicateParameters::decode(bit_field_);
   }
 
-  bool is_function() const { return IsFunction::decode(bitfield_); }
+  bool is_function() const { return IsFunction::decode(bit_field_); }
 
   // This is used as a heuristic on when to eagerly compile a function
   // literal. We consider the following constructs as hints that the
@@ -2562,25 +2650,25 @@
   // - (function() { ... })();
   // - var x = function() { ... }();
   bool should_eager_compile() const {
-    return ShouldEagerCompile::decode(bitfield_);
+    return ShouldEagerCompile::decode(bit_field_);
   }
   void set_should_eager_compile() {
-    bitfield_ = ShouldEagerCompile::update(bitfield_, true);
+    bit_field_ = ShouldEagerCompile::update(bit_field_, true);
   }
 
   // A hint that we expect this function to be called (exactly) once,
   // i.e. we suspect it's an initialization function.
   bool should_be_used_once_hint() const {
-    return ShouldBeUsedOnceHint::decode(bitfield_);
+    return ShouldNotBeUsedOnceHintField::decode(bit_field_);
   }
   void set_should_be_used_once_hint() {
-    bitfield_ = ShouldBeUsedOnceHint::update(bitfield_, true);
+    bit_field_ = ShouldNotBeUsedOnceHintField::update(bit_field_, true);
   }
 
   FunctionType function_type() const {
-    return FunctionTypeBits::decode(bitfield_);
+    return FunctionTypeBits::decode(bit_field_);
   }
-  FunctionKind kind() const { return FunctionKindBits::decode(bitfield_); }
+  FunctionKind kind() const;
 
   int ast_node_count() { return ast_properties_.node_count(); }
   AstProperties::Flags flags() const { return ast_properties_.flags(); }
@@ -2590,10 +2678,12 @@
   const FeedbackVectorSpec* feedback_vector_spec() const {
     return ast_properties_.get_spec();
   }
-  bool dont_optimize() { return dont_optimize_reason_ != kNoReason; }
-  BailoutReason dont_optimize_reason() { return dont_optimize_reason_; }
+  bool dont_optimize() { return dont_optimize_reason() != kNoReason; }
+  BailoutReason dont_optimize_reason() {
+    return DontOptimizeReasonField::decode(bit_field_);
+  }
   void set_dont_optimize_reason(BailoutReason reason) {
-    dont_optimize_reason_ = reason;
+    bit_field_ = DontOptimizeReasonField::update(bit_field_, reason);
   }
 
   bool IsAnonymousFunctionDefinition() const {
@@ -2603,6 +2693,21 @@
   int yield_count() { return yield_count_; }
   void set_yield_count(int yield_count) { yield_count_ = yield_count; }
 
+  bool requires_class_field_init() {
+    return RequiresClassFieldInit::decode(bit_field_);
+  }
+  void set_requires_class_field_init(bool requires_class_field_init) {
+    bit_field_ =
+        RequiresClassFieldInit::update(bit_field_, requires_class_field_init);
+  }
+  bool is_class_field_initializer() {
+    return IsClassFieldInitializer::decode(bit_field_);
+  }
+  void set_is_class_field_initializer(bool is_class_field_initializer) {
+    bit_field_ =
+        IsClassFieldInitializer::update(bit_field_, is_class_field_initializer);
+  }
+
  private:
   friend class AstNodeFactory;
 
@@ -2612,10 +2717,9 @@
                   int expected_property_count, int parameter_count,
                   FunctionType function_type,
                   ParameterFlag has_duplicate_parameters,
-                  EagerCompileHint eager_compile_hint, FunctionKind kind,
-                  int position, bool is_function)
+                  EagerCompileHint eager_compile_hint, int position,
+                  bool is_function)
       : Expression(position, kFunctionLiteral),
-        dont_optimize_reason_(kNoReason),
         materialized_literal_count_(materialized_literal_count),
         expected_property_count_(expected_property_count),
         parameter_count_(parameter_count),
@@ -2626,29 +2730,32 @@
         body_(body),
         raw_inferred_name_(ast_value_factory->empty_string()),
         ast_properties_(zone) {
-    bitfield_ =
+    bit_field_ |=
         FunctionTypeBits::encode(function_type) | Pretenure::encode(false) |
         HasDuplicateParameters::encode(has_duplicate_parameters ==
                                        kHasDuplicateParameters) |
         IsFunction::encode(is_function) |
         ShouldEagerCompile::encode(eager_compile_hint == kShouldEagerCompile) |
-        FunctionKindBits::encode(kind) | ShouldBeUsedOnceHint::encode(false);
-    DCHECK(IsValidFunctionKind(kind));
+        RequiresClassFieldInit::encode(false) |
+        ShouldNotBeUsedOnceHintField::encode(false) |
+        DontOptimizeReasonField::encode(kNoReason) |
+        IsClassFieldInitializer::encode(false);
   }
 
-  class FunctionTypeBits : public BitField16<FunctionType, 0, 2> {};
-  class Pretenure : public BitField16<bool, 2, 1> {};
-  class HasDuplicateParameters : public BitField16<bool, 3, 1> {};
-  class IsFunction : public BitField16<bool, 4, 1> {};
-  class ShouldEagerCompile : public BitField16<bool, 5, 1> {};
-  class ShouldBeUsedOnceHint : public BitField16<bool, 6, 1> {};
-  class FunctionKindBits : public BitField16<FunctionKind, 7, 9> {};
-
-  // Start with 16-bit field, which should get packed together
-  // with Expression's trailing 16-bit field.
-  uint16_t bitfield_;
-
-  BailoutReason dont_optimize_reason_;
+  class FunctionTypeBits
+      : public BitField<FunctionType, Expression::kNextBitFieldIndex, 2> {};
+  class Pretenure : public BitField<bool, FunctionTypeBits::kNext, 1> {};
+  class HasDuplicateParameters : public BitField<bool, Pretenure::kNext, 1> {};
+  class IsFunction : public BitField<bool, HasDuplicateParameters::kNext, 1> {};
+  class ShouldEagerCompile : public BitField<bool, IsFunction::kNext, 1> {};
+  class ShouldNotBeUsedOnceHintField
+      : public BitField<bool, ShouldEagerCompile::kNext, 1> {};
+  class RequiresClassFieldInit
+      : public BitField<bool, ShouldNotBeUsedOnceHintField::kNext, 1> {};
+  class IsClassFieldInitializer
+      : public BitField<bool, RequiresClassFieldInit::kNext, 1> {};
+  class DontOptimizeReasonField
+      : public BitField<BailoutReason, IsClassFieldInitializer::kNext, 8> {};
 
   int materialized_literal_count_;
   int expected_property_count_;
@@ -2664,10 +2771,29 @@
   AstProperties ast_properties_;
 };
 
+// Property is used for passing information
+// about a class literal's properties from the parser to the code generator.
+class ClassLiteralProperty final : public LiteralProperty {
+ public:
+  enum Kind : uint8_t { METHOD, GETTER, SETTER, FIELD };
+
+  Kind kind() const { return kind_; }
+
+  bool is_static() const { return is_static_; }
+
+ private:
+  friend class AstNodeFactory;
+
+  ClassLiteralProperty(Expression* key, Expression* value, Kind kind,
+                       bool is_static, bool is_computed_name);
+
+  Kind kind_;
+  bool is_static_;
+};
 
 class ClassLiteral final : public Expression {
  public:
-  typedef ObjectLiteralProperty Property;
+  typedef ClassLiteralProperty Property;
 
   VariableProxy* class_variable_proxy() const { return class_variable_proxy_; }
   Expression* extends() const { return extends_; }
@@ -2678,6 +2804,13 @@
   int start_position() const { return position(); }
   int end_position() const { return end_position_; }
 
+  VariableProxy* static_initializer_proxy() const {
+    return static_initializer_proxy_;
+  }
+  void set_static_initializer_proxy(VariableProxy* proxy) {
+    static_initializer_proxy_ = proxy;
+  }
+
   BailoutId CreateLiteralId() const { return BailoutId(local_id(0)); }
   BailoutId PrototypeId() { return BailoutId(local_id(1)); }
 
@@ -2712,7 +2845,8 @@
         class_variable_proxy_(class_variable_proxy),
         extends_(extends),
         constructor_(constructor),
-        properties_(properties) {}
+        properties_(properties),
+        static_initializer_proxy_(nullptr) {}
 
   static int parent_num_ids() { return Expression::num_ids(); }
   int local_id(int n) const { return base_id() + parent_num_ids() + n; }
@@ -2724,6 +2858,7 @@
   Expression* extends_;
   FunctionLiteral* constructor_;
   ZoneList<Property*>* properties_;
+  VariableProxy* static_initializer_proxy_;
 };
 
 
@@ -3097,6 +3232,16 @@
         try_block, scope, variable, catch_block, HandlerTable::DESUGARING, pos);
   }
 
+  TryCatchStatement* NewTryCatchStatementForAsyncAwait(Block* try_block,
+                                                       Scope* scope,
+                                                       Variable* variable,
+                                                       Block* catch_block,
+                                                       int pos) {
+    return new (zone_)
+        TryCatchStatement(try_block, scope, variable, catch_block,
+                          HandlerTable::ASYNC_AWAIT, pos);
+  }
+
   TryFinallyStatement* NewTryFinallyStatement(Block* try_block,
                                               Block* finally_block, int pos) {
     return new (zone_) TryFinallyStatement(try_block, finally_block, pos);
@@ -3110,9 +3255,9 @@
     return new (zone_) EmptyStatement(pos);
   }
 
-  SloppyBlockFunctionStatement* NewSloppyBlockFunctionStatement(
-      Statement* statement, Scope* scope) {
-    return new (zone_) SloppyBlockFunctionStatement(statement, scope);
+  SloppyBlockFunctionStatement* NewSloppyBlockFunctionStatement(Scope* scope) {
+    return new (zone_) SloppyBlockFunctionStatement(
+        NewEmptyStatement(kNoSourcePosition), scope);
   }
 
   CaseClause* NewCaseClause(
@@ -3163,17 +3308,16 @@
 
   ObjectLiteral::Property* NewObjectLiteralProperty(
       Expression* key, Expression* value, ObjectLiteralProperty::Kind kind,
-      bool is_static, bool is_computed_name) {
+      bool is_computed_name) {
     return new (zone_)
-        ObjectLiteral::Property(key, value, kind, is_static, is_computed_name);
+        ObjectLiteral::Property(key, value, kind, is_computed_name);
   }
 
   ObjectLiteral::Property* NewObjectLiteralProperty(Expression* key,
                                                     Expression* value,
-                                                    bool is_static,
                                                     bool is_computed_name) {
     return new (zone_) ObjectLiteral::Property(ast_value_factory_, key, value,
-                                               is_static, is_computed_name);
+                                               is_computed_name);
   }
 
   RegExpLiteral* NewRegExpLiteral(const AstRawString* pattern, int flags,
@@ -3201,7 +3345,7 @@
   }
 
   VariableProxy* NewVariableProxy(const AstRawString* name,
-                                  Variable::Kind variable_kind,
+                                  VariableKind variable_kind,
                                   int start_position = kNoSourcePosition,
                                   int end_position = kNoSourcePosition) {
     DCHECK_NOT_NULL(name);
@@ -3318,13 +3462,12 @@
       int expected_property_count, int parameter_count,
       FunctionLiteral::ParameterFlag has_duplicate_parameters,
       FunctionLiteral::FunctionType function_type,
-      FunctionLiteral::EagerCompileHint eager_compile_hint, FunctionKind kind,
-      int position) {
-    return new (zone_) FunctionLiteral(
-        zone_, name, ast_value_factory_, scope, body,
-        materialized_literal_count, expected_property_count, parameter_count,
-        function_type, has_duplicate_parameters, eager_compile_hint, kind,
-        position, true);
+      FunctionLiteral::EagerCompileHint eager_compile_hint, int position) {
+    return new (zone_) FunctionLiteral(zone_, name, ast_value_factory_, scope,
+                                       body, materialized_literal_count,
+                                       expected_property_count, parameter_count,
+                                       function_type, has_duplicate_parameters,
+                                       eager_compile_hint, position, true);
   }
 
   // Creates a FunctionLiteral representing a top-level script, the
@@ -3332,19 +3475,26 @@
   // the Function constructor.
   FunctionLiteral* NewScriptOrEvalFunctionLiteral(
       DeclarationScope* scope, ZoneList<Statement*>* body,
-      int materialized_literal_count, int expected_property_count) {
+      int materialized_literal_count, int expected_property_count,
+      int parameter_count) {
     return new (zone_) FunctionLiteral(
         zone_, ast_value_factory_->empty_string(), ast_value_factory_, scope,
-        body, materialized_literal_count, expected_property_count, 0,
-        FunctionLiteral::kAnonymousExpression,
+        body, materialized_literal_count, expected_property_count,
+        parameter_count, FunctionLiteral::kAnonymousExpression,
         FunctionLiteral::kNoDuplicateParameters,
-        FunctionLiteral::kShouldLazyCompile, FunctionKind::kNormalFunction, 0,
-        false);
+        FunctionLiteral::kShouldLazyCompile, 0, false);
+  }
+
+  ClassLiteral::Property* NewClassLiteralProperty(
+      Expression* key, Expression* value, ClassLiteralProperty::Kind kind,
+      bool is_static, bool is_computed_name) {
+    return new (zone_)
+        ClassLiteral::Property(key, value, kind, is_static, is_computed_name);
   }
 
   ClassLiteral* NewClassLiteral(VariableProxy* proxy, Expression* extends,
                                 FunctionLiteral* constructor,
-                                ZoneList<ObjectLiteral::Property*>* properties,
+                                ZoneList<ClassLiteral::Property*>* properties,
                                 int start_position, int end_position) {
     return new (zone_) ClassLiteral(proxy, extends, constructor, properties,
                                     start_position, end_position);
@@ -3396,7 +3546,8 @@
       }
     }
 
-    ~BodyScope() { factory_->zone_ = prev_zone_; }
+    void Reset() { factory_->zone_ = prev_zone_; }
+    ~BodyScope() { Reset(); }
 
    private:
     AstNodeFactory* factory_;
diff --git a/src/ast/compile-time-value.cc b/src/ast/compile-time-value.cc
new file mode 100644
index 0000000..eda536b
--- /dev/null
+++ b/src/ast/compile-time-value.cc
@@ -0,0 +1,56 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/ast/compile-time-value.h"
+
+#include "src/ast/ast.h"
+#include "src/factory.h"
+#include "src/handles-inl.h"
+#include "src/isolate.h"
+#include "src/objects-inl.h"
+
+namespace v8 {
+namespace internal {
+
+bool CompileTimeValue::IsCompileTimeValue(Expression* expression) {
+  if (expression->IsLiteral()) return true;
+  MaterializedLiteral* lit = expression->AsMaterializedLiteral();
+  return lit != NULL && lit->is_simple();
+}
+
+Handle<FixedArray> CompileTimeValue::GetValue(Isolate* isolate,
+                                              Expression* expression) {
+  Factory* factory = isolate->factory();
+  DCHECK(IsCompileTimeValue(expression));
+  Handle<FixedArray> result = factory->NewFixedArray(2, TENURED);
+  ObjectLiteral* object_literal = expression->AsObjectLiteral();
+  if (object_literal != NULL) {
+    DCHECK(object_literal->is_simple());
+    if (object_literal->fast_elements()) {
+      result->set(kLiteralTypeSlot, Smi::FromInt(OBJECT_LITERAL_FAST_ELEMENTS));
+    } else {
+      result->set(kLiteralTypeSlot, Smi::FromInt(OBJECT_LITERAL_SLOW_ELEMENTS));
+    }
+    result->set(kElementsSlot, *object_literal->constant_properties());
+  } else {
+    ArrayLiteral* array_literal = expression->AsArrayLiteral();
+    DCHECK(array_literal != NULL && array_literal->is_simple());
+    result->set(kLiteralTypeSlot, Smi::FromInt(ARRAY_LITERAL));
+    result->set(kElementsSlot, *array_literal->constant_elements());
+  }
+  return result;
+}
+
+CompileTimeValue::LiteralType CompileTimeValue::GetLiteralType(
+    Handle<FixedArray> value) {
+  Smi* literal_type = Smi::cast(value->get(kLiteralTypeSlot));
+  return static_cast<LiteralType>(literal_type->value());
+}
+
+Handle<FixedArray> CompileTimeValue::GetElements(Handle<FixedArray> value) {
+  return Handle<FixedArray>(FixedArray::cast(value->get(kElementsSlot)));
+}
+
+}  // namespace internal
+}  // namespace v8
diff --git a/src/ast/compile-time-value.h b/src/ast/compile-time-value.h
new file mode 100644
index 0000000..27351b7
--- /dev/null
+++ b/src/ast/compile-time-value.h
@@ -0,0 +1,45 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_AST_COMPILE_TIME_VALUE
+#define V8_AST_COMPILE_TIME_VALUE
+
+#include "src/allocation.h"
+#include "src/globals.h"
+
+namespace v8 {
+namespace internal {
+
+class Expression;
+
+// Support for handling complex values (array and object literals) that
+// can be fully handled at compile time.
+class CompileTimeValue : public AllStatic {
+ public:
+  enum LiteralType {
+    OBJECT_LITERAL_FAST_ELEMENTS,
+    OBJECT_LITERAL_SLOW_ELEMENTS,
+    ARRAY_LITERAL
+  };
+
+  static bool IsCompileTimeValue(Expression* expression);
+
+  // Get the value as a compile time value.
+  static Handle<FixedArray> GetValue(Isolate* isolate, Expression* expression);
+
+  // Get the type of a compile time value returned by GetValue().
+  static LiteralType GetLiteralType(Handle<FixedArray> value);
+
+  // Get the elements array of a compile time value returned by GetValue().
+  static Handle<FixedArray> GetElements(Handle<FixedArray> value);
+
+ private:
+  static const int kLiteralTypeSlot = 0;
+  static const int kElementsSlot = 1;
+};
+
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_AST_COMPILE_TIME_VALUE
diff --git a/src/ast/context-slot-cache.cc b/src/ast/context-slot-cache.cc
index 43bd6d6..b1387e1 100644
--- a/src/ast/context-slot-cache.cc
+++ b/src/ast/context-slot-cache.cc
@@ -8,6 +8,13 @@
 
 #include "src/ast/scopes.h"
 #include "src/bootstrapper.h"
+// FIXME(mstarzinger, marja): This is weird, but required because of the missing
+// (disallowed) include: src/factory.h -> src/objects-inl.h
+#include "src/objects-inl.h"
+// FIXME(mstarzinger, marja): This is weird, but required because of the missing
+// (disallowed) include: src/type-feedback-vector.h ->
+// src/type-feedback-vector-inl.h
+#include "src/type-feedback-vector-inl.h"
 
 namespace v8 {
 namespace internal {
diff --git a/src/ast/context-slot-cache.h b/src/ast/context-slot-cache.h
index 8e9d1f7..4345a65 100644
--- a/src/ast/context-slot-cache.h
+++ b/src/ast/context-slot-cache.h
@@ -7,7 +7,6 @@
 
 #include "src/allocation.h"
 #include "src/ast/modules.h"
-#include "src/ast/variables.h"
 
 namespace v8 {
 namespace internal {
diff --git a/src/ast/modules.cc b/src/ast/modules.cc
index cd47c00..2d28d55 100644
--- a/src/ast/modules.cc
+++ b/src/ast/modules.cc
@@ -12,49 +12,35 @@
 void ModuleDescriptor::AddImport(
     const AstRawString* import_name, const AstRawString* local_name,
     const AstRawString* module_request, Scanner::Location loc, Zone* zone) {
-  DCHECK_NOT_NULL(import_name);
-  DCHECK_NOT_NULL(local_name);
-  DCHECK_NOT_NULL(module_request);
-  ModuleEntry* entry = new (zone) ModuleEntry(loc);
+  Entry* entry = new (zone) Entry(loc);
   entry->local_name = local_name;
   entry->import_name = import_name;
-  entry->module_request = module_request;
-  regular_imports_.insert(std::make_pair(entry->local_name, entry));
-  // We don't care if there's already an entry for this local name, as in that
-  // case we will report an error when declaring the variable.
+  entry->module_request = AddModuleRequest(module_request);
+  AddRegularImport(entry);
 }
 
 
 void ModuleDescriptor::AddStarImport(
     const AstRawString* local_name, const AstRawString* module_request,
     Scanner::Location loc, Zone* zone) {
-  DCHECK_NOT_NULL(local_name);
-  DCHECK_NOT_NULL(module_request);
-  ModuleEntry* entry = new (zone) ModuleEntry(loc);
+  Entry* entry = new (zone) Entry(loc);
   entry->local_name = local_name;
-  entry->module_request = module_request;
-  special_imports_.Add(entry, zone);
+  entry->module_request = AddModuleRequest(module_request);
+  AddNamespaceImport(entry, zone);
 }
 
-
-void ModuleDescriptor::AddEmptyImport(
-    const AstRawString* module_request, Scanner::Location loc, Zone* zone) {
-  DCHECK_NOT_NULL(module_request);
-  ModuleEntry* entry = new (zone) ModuleEntry(loc);
-  entry->module_request = module_request;
-  special_imports_.Add(entry, zone);
+void ModuleDescriptor::AddEmptyImport(const AstRawString* module_request) {
+  AddModuleRequest(module_request);
 }
 
 
 void ModuleDescriptor::AddExport(
     const AstRawString* local_name, const AstRawString* export_name,
     Scanner::Location loc, Zone* zone) {
-  DCHECK_NOT_NULL(local_name);
-  DCHECK_NOT_NULL(export_name);
-  ModuleEntry* entry = new (zone) ModuleEntry(loc);
+  Entry* entry = new (zone) Entry(loc);
   entry->export_name = export_name;
   entry->local_name = local_name;
-  exports_.Add(entry, zone);
+  AddRegularExport(entry);
 }
 
 
@@ -63,40 +49,186 @@
     const AstRawString* module_request, Scanner::Location loc, Zone* zone) {
   DCHECK_NOT_NULL(import_name);
   DCHECK_NOT_NULL(export_name);
-  DCHECK_NOT_NULL(module_request);
-  ModuleEntry* entry = new (zone) ModuleEntry(loc);
+  Entry* entry = new (zone) Entry(loc);
   entry->export_name = export_name;
   entry->import_name = import_name;
-  entry->module_request = module_request;
-  exports_.Add(entry, zone);
+  entry->module_request = AddModuleRequest(module_request);
+  AddSpecialExport(entry, zone);
 }
 
 
 void ModuleDescriptor::AddStarExport(
     const AstRawString* module_request, Scanner::Location loc, Zone* zone) {
-  DCHECK_NOT_NULL(module_request);
-  ModuleEntry* entry = new (zone) ModuleEntry(loc);
-  entry->module_request = module_request;
-  exports_.Add(entry, zone);
+  Entry* entry = new (zone) Entry(loc);
+  entry->module_request = AddModuleRequest(module_request);
+  AddSpecialExport(entry, zone);
 }
 
-void ModuleDescriptor::MakeIndirectExportsExplicit() {
-  for (auto entry : exports_) {
-    if (entry->export_name == nullptr) continue;
-    if (entry->import_name != nullptr) continue;
-    DCHECK_NOT_NULL(entry->local_name);
-    auto it = regular_imports_.find(entry->local_name);
-    if (it != regular_imports_.end()) {
-      // Found an indirect export.
-      DCHECK_NOT_NULL(it->second->module_request);
-      DCHECK_NOT_NULL(it->second->import_name);
-      entry->import_name = it->second->import_name;
-      entry->module_request = it->second->module_request;
-      entry->local_name = nullptr;
+namespace {
+
+Handle<Object> ToStringOrUndefined(Isolate* isolate, const AstRawString* s) {
+  return (s == nullptr)
+             ? Handle<Object>::cast(isolate->factory()->undefined_value())
+             : Handle<Object>::cast(s->string());
+}
+
+const AstRawString* FromStringOrUndefined(Isolate* isolate,
+                                          AstValueFactory* avfactory,
+                                          Handle<Object> object) {
+  if (object->IsUndefined(isolate)) return nullptr;
+  return avfactory->GetString(Handle<String>::cast(object));
+}
+
+}  // namespace
+
+Handle<ModuleInfoEntry> ModuleDescriptor::Entry::Serialize(
+    Isolate* isolate) const {
+  CHECK(Smi::IsValid(module_request));  // TODO(neis): Check earlier?
+  return ModuleInfoEntry::New(
+      isolate, ToStringOrUndefined(isolate, export_name),
+      ToStringOrUndefined(isolate, local_name),
+      ToStringOrUndefined(isolate, import_name),
+      Handle<Object>(Smi::FromInt(module_request), isolate));
+}
+
+ModuleDescriptor::Entry* ModuleDescriptor::Entry::Deserialize(
+    Isolate* isolate, AstValueFactory* avfactory,
+    Handle<ModuleInfoEntry> entry) {
+  Entry* result = new (avfactory->zone()) Entry(Scanner::Location::invalid());
+  result->export_name = FromStringOrUndefined(
+      isolate, avfactory, handle(entry->export_name(), isolate));
+  result->local_name = FromStringOrUndefined(
+      isolate, avfactory, handle(entry->local_name(), isolate));
+  result->import_name = FromStringOrUndefined(
+      isolate, avfactory, handle(entry->import_name(), isolate));
+  result->module_request = Smi::cast(entry->module_request())->value();
+  return result;
+}
+
+Handle<FixedArray> ModuleDescriptor::SerializeRegularExports(Isolate* isolate,
+                                                             Zone* zone) const {
+  // We serialize regular exports in a way that lets us later iterate over their
+  // local names and for each local name immediately access all its export
+  // names.  (Regular exports have neither import name nor module request.)
+
+  ZoneVector<Handle<Object>> data(zone);
+  data.reserve(2 * regular_exports_.size());
+
+  for (auto it = regular_exports_.begin(); it != regular_exports_.end();) {
+    // Find out how many export names this local name has.
+    auto next = it;
+    int size = 0;
+    do {
+      ++next;
+      ++size;
+    } while (next != regular_exports_.end() && next->first == it->first);
+
+    Handle<FixedArray> export_names = isolate->factory()->NewFixedArray(size);
+    data.push_back(it->second->local_name->string());
+    data.push_back(export_names);
+
+    // Collect the export names.
+    int i = 0;
+    for (; it != next; ++it) {
+      export_names->set(i++, *it->second->export_name->string());
+    }
+    DCHECK_EQ(i, size);
+
+    // Continue with the next distinct key.
+    DCHECK(it == next);
+  }
+
+  // We cannot create the FixedArray earlier because we only now know the
+  // precise size (the number of unique keys in regular_exports).
+  int size = static_cast<int>(data.size());
+  Handle<FixedArray> result = isolate->factory()->NewFixedArray(size);
+  for (int i = 0; i < size; ++i) {
+    result->set(i, *data[i]);
+  }
+  return result;
+}
+
+void ModuleDescriptor::DeserializeRegularExports(Isolate* isolate,
+                                                 AstValueFactory* avfactory,
+                                                 Handle<FixedArray> data) {
+  for (int i = 0, length_i = data->length(); i < length_i;) {
+    Handle<String> local_name(String::cast(data->get(i++)), isolate);
+    Handle<FixedArray> export_names(FixedArray::cast(data->get(i++)), isolate);
+
+    for (int j = 0, length_j = export_names->length(); j < length_j; ++j) {
+      Handle<String> export_name(String::cast(export_names->get(j)), isolate);
+
+      Entry* entry =
+          new (avfactory->zone()) Entry(Scanner::Location::invalid());
+      entry->local_name = avfactory->GetString(local_name);
+      entry->export_name = avfactory->GetString(export_name);
+
+      AddRegularExport(entry);
     }
   }
 }
 
+void ModuleDescriptor::MakeIndirectExportsExplicit(Zone* zone) {
+  for (auto it = regular_exports_.begin(); it != regular_exports_.end();) {
+    Entry* entry = it->second;
+    DCHECK_NOT_NULL(entry->local_name);
+    auto import = regular_imports_.find(entry->local_name);
+    if (import != regular_imports_.end()) {
+      // Found an indirect export.  Patch export entry and move it from regular
+      // to special.
+      DCHECK_NULL(entry->import_name);
+      DCHECK_LT(entry->module_request, 0);
+      DCHECK_NOT_NULL(import->second->import_name);
+      DCHECK_LE(0, import->second->module_request);
+      DCHECK_LT(import->second->module_request,
+                static_cast<int>(module_requests_.size()));
+      entry->import_name = import->second->import_name;
+      entry->module_request = import->second->module_request;
+      entry->local_name = nullptr;
+      AddSpecialExport(entry, zone);
+      it = regular_exports_.erase(it);
+    } else {
+      it++;
+    }
+  }
+}
+
+namespace {
+
+const ModuleDescriptor::Entry* BetterDuplicate(
+    const ModuleDescriptor::Entry* candidate,
+    ZoneMap<const AstRawString*, const ModuleDescriptor::Entry*>& export_names,
+    const ModuleDescriptor::Entry* current_duplicate) {
+  DCHECK_NOT_NULL(candidate->export_name);
+  DCHECK(candidate->location.IsValid());
+  auto insert_result =
+      export_names.insert(std::make_pair(candidate->export_name, candidate));
+  if (insert_result.second) return current_duplicate;
+  if (current_duplicate == nullptr) {
+    current_duplicate = insert_result.first->second;
+  }
+  return (candidate->location.beg_pos > current_duplicate->location.beg_pos)
+             ? candidate
+             : current_duplicate;
+}
+
+}  // namespace
+
+const ModuleDescriptor::Entry* ModuleDescriptor::FindDuplicateExport(
+    Zone* zone) const {
+  const ModuleDescriptor::Entry* duplicate = nullptr;
+  ZoneMap<const AstRawString*, const ModuleDescriptor::Entry*> export_names(
+      zone);
+  for (const auto& elem : regular_exports_) {
+    duplicate = BetterDuplicate(elem.second, export_names, duplicate);
+  }
+  for (auto entry : special_exports_) {
+    if (entry->export_name == nullptr) continue;  // Star export.
+    duplicate = BetterDuplicate(entry, export_names, duplicate);
+  }
+  return duplicate;
+}
+
 bool ModuleDescriptor::Validate(ModuleScope* module_scope,
                                 PendingCompilationErrorHandler* error_handler,
                                 Zone* zone) {
@@ -105,29 +237,19 @@
 
   // Report error iff there are duplicate exports.
   {
-    ZoneAllocationPolicy allocator(zone);
-    ZoneHashMap* export_names = new (zone->New(sizeof(ZoneHashMap)))
-        ZoneHashMap(ZoneHashMap::PointersMatch,
-                    ZoneHashMap::kDefaultHashMapCapacity, allocator);
-    for (auto entry : exports_) {
-      if (entry->export_name == nullptr) continue;
-      AstRawString* key = const_cast<AstRawString*>(entry->export_name);
-      ZoneHashMap::Entry* p =
-          export_names->LookupOrInsert(key, key->hash(), allocator);
-      DCHECK_NOT_NULL(p);
-      if (p->value != nullptr) {
-        error_handler->ReportMessageAt(
-            entry->location.beg_pos, entry->location.end_pos,
-            MessageTemplate::kDuplicateExport, entry->export_name);
-        return false;
-      }
-      p->value = key;  // Anything but nullptr.
+    const Entry* entry = FindDuplicateExport(zone);
+    if (entry != nullptr) {
+      error_handler->ReportMessageAt(
+          entry->location.beg_pos, entry->location.end_pos,
+          MessageTemplate::kDuplicateExport, entry->export_name);
+      return false;
     }
   }
 
   // Report error iff there are exports of non-existent local names.
-  for (auto entry : exports_) {
-    if (entry->local_name == nullptr) continue;
+  for (const auto& elem : regular_exports_) {
+    const Entry* entry = elem.second;
+    DCHECK_NOT_NULL(entry->local_name);
     if (module_scope->LookupLocal(entry->local_name) == nullptr) {
       error_handler->ReportMessageAt(
           entry->location.beg_pos, entry->location.end_pos,
@@ -136,7 +258,7 @@
     }
   }
 
-  MakeIndirectExportsExplicit();
+  MakeIndirectExportsExplicit(zone);
   return true;
 }
 
diff --git a/src/ast/modules.h b/src/ast/modules.h
index c8f7aa3..4d36735 100644
--- a/src/ast/modules.h
+++ b/src/ast/modules.h
@@ -7,19 +7,26 @@
 
 #include "src/parsing/scanner.h"  // Only for Scanner::Location.
 #include "src/pending-compilation-error-handler.h"
-#include "src/zone-containers.h"
+#include "src/zone/zone-containers.h"
 
 namespace v8 {
 namespace internal {
 
 
 class AstRawString;
-
+class ModuleInfoEntry;
 
 class ModuleDescriptor : public ZoneObject {
  public:
   explicit ModuleDescriptor(Zone* zone)
-      : exports_(1, zone), special_imports_(1, zone), regular_imports_(zone) {}
+      : module_requests_(zone),
+        special_exports_(1, zone),
+        namespace_imports_(1, zone),
+        regular_exports_(zone),
+        regular_imports_(zone) {}
+
+  // The following Add* methods are high-level convenience functions for use by
+  // the parser.
 
   // import x from "foo.js";
   // import {x} from "foo.js";
@@ -37,9 +44,7 @@
   // import "foo.js";
   // import {} from "foo.js";
   // export {} from "foo.js";  (sic!)
-  void AddEmptyImport(
-      const AstRawString* module_request, const Scanner::Location loc,
-      Zone* zone);
+  void AddEmptyImport(const AstRawString* module_request);
 
   // export {x};
   // export {x as y};
@@ -67,38 +72,107 @@
   bool Validate(ModuleScope* module_scope,
                 PendingCompilationErrorHandler* error_handler, Zone* zone);
 
-  struct ModuleEntry : public ZoneObject {
+  struct Entry : public ZoneObject {
     const Scanner::Location location;
     const AstRawString* export_name;
     const AstRawString* local_name;
     const AstRawString* import_name;
-    const AstRawString* module_request;
+    // The module_request value records the order in which modules are
+    // requested. It also functions as an index into the ModuleInfo's array of
+    // module specifiers and into the Module's array of requested modules.  A
+    // negative value means no module request.
+    int module_request;
 
-    explicit ModuleEntry(Scanner::Location loc)
+    // TODO(neis): Remove local_name component?
+    explicit Entry(Scanner::Location loc)
         : location(loc),
           export_name(nullptr),
           local_name(nullptr),
           import_name(nullptr),
-          module_request(nullptr) {}
+          module_request(-1) {}
+
+    // (De-)serialization support.
+    // Note that the location value is not preserved as it's only needed by the
+    // parser.  (A Deserialize'd entry has an invalid location.)
+    Handle<ModuleInfoEntry> Serialize(Isolate* isolate) const;
+    static Entry* Deserialize(Isolate* isolate, AstValueFactory* avfactory,
+                              Handle<ModuleInfoEntry> entry);
   };
 
-  const ZoneList<ModuleEntry*>& exports() const { return exports_; }
+  // Module requests.
+  const ZoneMap<const AstRawString*, int>& module_requests() const {
+    return module_requests_;
+  }
 
-  // Empty imports and namespace imports.
-  const ZoneList<const ModuleEntry*>& special_imports() const {
-    return special_imports_;
+  // Namespace imports.
+  const ZoneList<const Entry*>& namespace_imports() const {
+    return namespace_imports_;
   }
 
   // All the remaining imports, indexed by local name.
-  const ZoneMap<const AstRawString*, const ModuleEntry*>& regular_imports()
-      const {
+  const ZoneMap<const AstRawString*, const Entry*>& regular_imports() const {
     return regular_imports_;
   }
 
+  // Star exports and explicitly indirect exports.
+  const ZoneList<const Entry*>& special_exports() const {
+    return special_exports_;
+  }
+
+  // All the remaining exports, indexed by local name.
+  // After canonicalization (see Validate), these are exactly the local exports.
+  const ZoneMultimap<const AstRawString*, Entry*>& regular_exports() const {
+    return regular_exports_;
+  }
+
+  void AddRegularExport(Entry* entry) {
+    DCHECK_NOT_NULL(entry->export_name);
+    DCHECK_NOT_NULL(entry->local_name);
+    DCHECK_NULL(entry->import_name);
+    DCHECK_LT(entry->module_request, 0);
+    regular_exports_.insert(std::make_pair(entry->local_name, entry));
+  }
+
+  void AddSpecialExport(const Entry* entry, Zone* zone) {
+    DCHECK_NULL(entry->local_name);
+    DCHECK_LE(0, entry->module_request);
+    special_exports_.Add(entry, zone);
+  }
+
+  void AddRegularImport(const Entry* entry) {
+    DCHECK_NOT_NULL(entry->import_name);
+    DCHECK_NOT_NULL(entry->local_name);
+    DCHECK_NULL(entry->export_name);
+    DCHECK_LE(0, entry->module_request);
+    regular_imports_.insert(std::make_pair(entry->local_name, entry));
+    // We don't care if there's already an entry for this local name, as in that
+    // case we will report an error when declaring the variable.
+  }
+
+  void AddNamespaceImport(const Entry* entry, Zone* zone) {
+    DCHECK_NULL(entry->import_name);
+    DCHECK_NULL(entry->export_name);
+    DCHECK_NOT_NULL(entry->local_name);
+    DCHECK_LE(0, entry->module_request);
+    namespace_imports_.Add(entry, zone);
+  }
+
+  Handle<FixedArray> SerializeRegularExports(Isolate* isolate,
+                                             Zone* zone) const;
+  void DeserializeRegularExports(Isolate* isolate, AstValueFactory* avfactory,
+                                 Handle<FixedArray> data);
+
  private:
-  ZoneList<ModuleEntry*> exports_;
-  ZoneList<const ModuleEntry*> special_imports_;
-  ZoneMap<const AstRawString*, const ModuleEntry*> regular_imports_;
+  // TODO(neis): Use STL datastructure instead of ZoneList?
+  ZoneMap<const AstRawString*, int> module_requests_;
+  ZoneList<const Entry*> special_exports_;
+  ZoneList<const Entry*> namespace_imports_;
+  ZoneMultimap<const AstRawString*, Entry*> regular_exports_;
+  ZoneMap<const AstRawString*, const Entry*> regular_imports_;
+
+  // If there are multiple export entries with the same export name, return the
+  // last of them (in source order).  Otherwise return nullptr.
+  const Entry* FindDuplicateExport(Zone* zone) const;
 
   // Find any implicitly indirect exports and make them explicit.
   //
@@ -116,7 +190,15 @@
   // into:
   //   import {a as b} from "X"; export {a as c} from "X";
   // (The import entry is never deleted.)
-  void MakeIndirectExportsExplicit();
+  void MakeIndirectExportsExplicit(Zone* zone);
+
+  int AddModuleRequest(const AstRawString* specifier) {
+    DCHECK_NOT_NULL(specifier);
+    auto it = module_requests_
+                  .insert(std::make_pair(specifier, module_requests_.size()))
+                  .first;
+    return it->second;
+  }
 };
 
 }  // namespace internal
diff --git a/src/ast/prettyprinter.cc b/src/ast/prettyprinter.cc
index f19ee23..874c159 100644
--- a/src/ast/prettyprinter.cc
+++ b/src/ast/prettyprinter.cc
@@ -9,6 +9,7 @@
 #include "src/ast/ast-value-factory.h"
 #include "src/ast/scopes.h"
 #include "src/base/platform/platform.h"
+#include "src/globals.h"
 
 namespace v8 {
 namespace internal {
@@ -603,8 +604,8 @@
     PrintLiteralIndented(info, value, true);
   } else {
     EmbeddedVector<char, 256> buf;
-    int pos = SNPrintF(buf, "%s (mode = %s", info,
-                       Variable::Mode2String(var->mode()));
+    int pos =
+        SNPrintF(buf, "%s (mode = %s", info, VariableMode2String(var->mode()));
     SNPrintF(buf + pos, ")");
     PrintLiteralIndented(buf.start(), value, true);
   }
@@ -870,6 +871,9 @@
     case HandlerTable::DESUGARING:
       prediction = "DESUGARING";
       break;
+    case HandlerTable::ASYNC_AWAIT:
+      prediction = "ASYNC_AWAIT";
+      break;
   }
   Print(" %s\n", prediction);
 }
@@ -897,34 +901,27 @@
   if (node->extends() != nullptr) {
     PrintIndentedVisit("EXTENDS", node->extends());
   }
-  PrintProperties(node->properties());
+  PrintClassProperties(node->properties());
 }
 
-
-void AstPrinter::PrintProperties(
-    ZoneList<ObjectLiteral::Property*>* properties) {
+void AstPrinter::PrintClassProperties(
+    ZoneList<ClassLiteral::Property*>* properties) {
   for (int i = 0; i < properties->length(); i++) {
-    ObjectLiteral::Property* property = properties->at(i);
+    ClassLiteral::Property* property = properties->at(i);
     const char* prop_kind = nullptr;
     switch (property->kind()) {
-      case ObjectLiteral::Property::CONSTANT:
-        prop_kind = "CONSTANT";
+      case ClassLiteral::Property::METHOD:
+        prop_kind = "METHOD";
         break;
-      case ObjectLiteral::Property::COMPUTED:
-        prop_kind = "COMPUTED";
-        break;
-      case ObjectLiteral::Property::MATERIALIZED_LITERAL:
-        prop_kind = "MATERIALIZED_LITERAL";
-        break;
-      case ObjectLiteral::Property::PROTOTYPE:
-        prop_kind = "PROTOTYPE";
-        break;
-      case ObjectLiteral::Property::GETTER:
+      case ClassLiteral::Property::GETTER:
         prop_kind = "GETTER";
         break;
-      case ObjectLiteral::Property::SETTER:
+      case ClassLiteral::Property::SETTER:
         prop_kind = "SETTER";
         break;
+      case ClassLiteral::Property::FIELD:
+        prop_kind = "FIELD";
+        break;
     }
     EmbeddedVector<char, 128> buf;
     SNPrintF(buf, "PROPERTY%s - %s", property->is_static() ? " - STATIC" : "",
@@ -986,7 +983,40 @@
   EmbeddedVector<char, 128> buf;
   SNPrintF(buf, "literal_index = %d\n", node->literal_index());
   PrintIndented(buf.start());
-  PrintProperties(node->properties());
+  PrintObjectProperties(node->properties());
+}
+
+void AstPrinter::PrintObjectProperties(
+    ZoneList<ObjectLiteral::Property*>* properties) {
+  for (int i = 0; i < properties->length(); i++) {
+    ObjectLiteral::Property* property = properties->at(i);
+    const char* prop_kind = nullptr;
+    switch (property->kind()) {
+      case ObjectLiteral::Property::CONSTANT:
+        prop_kind = "CONSTANT";
+        break;
+      case ObjectLiteral::Property::COMPUTED:
+        prop_kind = "COMPUTED";
+        break;
+      case ObjectLiteral::Property::MATERIALIZED_LITERAL:
+        prop_kind = "MATERIALIZED_LITERAL";
+        break;
+      case ObjectLiteral::Property::PROTOTYPE:
+        prop_kind = "PROTOTYPE";
+        break;
+      case ObjectLiteral::Property::GETTER:
+        prop_kind = "GETTER";
+        break;
+      case ObjectLiteral::Property::SETTER:
+        prop_kind = "SETTER";
+        break;
+    }
+    EmbeddedVector<char, 128> buf;
+    SNPrintF(buf, "PROPERTY - %s", prop_kind);
+    IndentedScope prop(this, buf.start());
+    PrintIndentedVisit("KEY", properties->at(i)->key());
+    PrintIndentedVisit("VALUE", properties->at(i)->value());
+  }
 }
 
 
@@ -1028,9 +1058,6 @@
       case VariableLocation::CONTEXT:
         SNPrintF(buf + pos, " context[%d]", var->index());
         break;
-      case VariableLocation::GLOBAL:
-        SNPrintF(buf + pos, " global[%d]", var->index());
-        break;
       case VariableLocation::LOOKUP:
         SNPrintF(buf + pos, " lookup");
         break;
diff --git a/src/ast/prettyprinter.h b/src/ast/prettyprinter.h
index 9b0e22a..2d553ba 100644
--- a/src/ast/prettyprinter.h
+++ b/src/ast/prettyprinter.h
@@ -93,7 +93,8 @@
                                     Variable* var,
                                     Handle<Object> value);
   void PrintLabelsIndented(ZoneList<const AstRawString*>* labels);
-  void PrintProperties(ZoneList<ObjectLiteral::Property*>* properties);
+  void PrintObjectProperties(ZoneList<ObjectLiteral::Property*>* properties);
+  void PrintClassProperties(ZoneList<ClassLiteral::Property*>* properties);
   void PrintTryStatement(TryStatement* try_statement);
 
   void inc_indent() { indent_++; }
diff --git a/src/ast/scopeinfo.cc b/src/ast/scopeinfo.cc
index 7189de3..5354b8d 100644
--- a/src/ast/scopeinfo.cc
+++ b/src/ast/scopeinfo.cc
@@ -2,33 +2,92 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
-#include "src/ast/scopeinfo.h"
-
 #include <stdlib.h>
 
 #include "src/ast/context-slot-cache.h"
 #include "src/ast/scopes.h"
+#include "src/ast/variables.h"
 #include "src/bootstrapper.h"
 
 namespace v8 {
 namespace internal {
 
+// An entry in ModuleVariableEntries consists of several slots:
+enum ModuleVariableEntryOffset {
+  kModuleVariableNameOffset,
+  kModuleVariableIndexOffset,
+  kModuleVariablePropertiesOffset,
+  kModuleVariableEntryLength  // Sentinel value.
+};
 
-Handle<ScopeInfo> ScopeInfo::Create(Isolate* isolate, Zone* zone,
-                                    Scope* scope) {
-  // Collect stack and context locals.
-  ZoneList<Variable*> stack_locals(scope->StackLocalCount(), zone);
-  ZoneList<Variable*> context_locals(scope->ContextLocalCount(), zone);
-  ZoneList<Variable*> context_globals(scope->ContextGlobalCount(), zone);
+#ifdef DEBUG
+bool ScopeInfo::Equals(ScopeInfo* other) const {
+  if (length() != other->length()) return false;
+  for (int index = 0; index < length(); ++index) {
+    Object* entry = get(index);
+    Object* other_entry = other->get(index);
+    if (entry->IsSmi()) {
+      if (entry != other_entry) return false;
+    } else {
+      if (HeapObject::cast(entry)->map()->instance_type() !=
+          HeapObject::cast(other_entry)->map()->instance_type()) {
+        return false;
+      }
+      if (entry->IsString()) {
+        if (!String::cast(entry)->Equals(String::cast(other_entry))) {
+          return false;
+        }
+      } else if (entry->IsScopeInfo()) {
+        if (!ScopeInfo::cast(entry)->Equals(ScopeInfo::cast(other_entry))) {
+          return false;
+        }
+      } else if (entry->IsModuleInfo()) {
+        if (!ModuleInfo::cast(entry)->Equals(ModuleInfo::cast(other_entry))) {
+          return false;
+        }
+      } else {
+        UNREACHABLE();
+        return false;
+      }
+    }
+  }
+  return true;
+}
+#endif
 
-  scope->CollectStackAndContextLocals(&stack_locals, &context_locals,
-                                      &context_globals);
-  const int stack_local_count = stack_locals.length();
-  const int context_local_count = context_locals.length();
-  const int context_global_count = context_globals.length();
+Handle<ScopeInfo> ScopeInfo::Create(Isolate* isolate, Zone* zone, Scope* scope,
+                                    MaybeHandle<ScopeInfo> outer_scope) {
+  // Collect variables.
+  ZoneList<Variable*>* locals = scope->locals();
+  int stack_local_count = 0;
+  int context_local_count = 0;
+  int module_vars_count = 0;
+  // Stack allocated block scope variables are allocated in the parent
+  // declaration scope, but are recorded in the block scope's scope info. First
+  // slot index indicates at which offset a particular scope starts in the
+  // parent declaration scope.
+  int first_slot_index = 0;
+  for (int i = 0; i < locals->length(); i++) {
+    Variable* var = locals->at(i);
+    switch (var->location()) {
+      case VariableLocation::LOCAL:
+        if (stack_local_count == 0) first_slot_index = var->index();
+        stack_local_count++;
+        break;
+      case VariableLocation::CONTEXT:
+        context_local_count++;
+        break;
+      case VariableLocation::MODULE:
+        module_vars_count++;
+        break;
+      default:
+        break;
+    }
+  }
+  DCHECK(module_vars_count == 0 || scope->is_module_scope());
+
   // Make sure we allocate the correct amount.
   DCHECK_EQ(scope->ContextLocalCount(), context_local_count);
-  DCHECK_EQ(scope->ContextGlobalCount(), context_global_count);
 
   // Determine use and location of the "this" binding if it is present.
   VariableAllocationInfo receiver_info;
@@ -53,7 +112,6 @@
 
   // Determine use and location of the function variable if it is present.
   VariableAllocationInfo function_name_info;
-  VariableMode function_variable_mode;
   if (scope->is_function_scope() &&
       scope->AsDeclarationScope()->function_var() != nullptr) {
     Variable* var = scope->AsDeclarationScope()->function_var();
@@ -65,20 +123,21 @@
       DCHECK(var->IsStackLocal());
       function_name_info = STACK;
     }
-    function_variable_mode = var->mode();
   } else {
     function_name_info = NONE;
-    function_variable_mode = VAR;
   }
-  DCHECK(context_global_count == 0 || scope->scope_type() == SCRIPT_SCOPE);
 
   const bool has_function_name = function_name_info != NONE;
   const bool has_receiver = receiver_info == STACK || receiver_info == CONTEXT;
   const int parameter_count = scope->num_parameters();
+  const bool has_outer_scope_info = !outer_scope.is_null();
   const int length = kVariablePartIndex + parameter_count +
                      (1 + stack_local_count) + 2 * context_local_count +
-                     2 * context_global_count +
-                     (has_receiver ? 1 : 0) + (has_function_name ? 2 : 0);
+                     (has_receiver ? 1 : 0) + (has_function_name ? 2 : 0) +
+                     (has_outer_scope_info ? 1 : 0) +
+                     (scope->is_module_scope()
+                          ? 2 + kModuleVariableEntryLength * module_vars_count
+                          : 0);
 
   Factory* factory = isolate->factory();
   Handle<ScopeInfo> scope_info = factory->NewScopeInfo(length);
@@ -96,27 +155,29 @@
   }
 
   // Encode the flags.
-  int flags = ScopeTypeField::encode(scope->scope_type()) |
-              CallsEvalField::encode(scope->calls_eval()) |
-              LanguageModeField::encode(scope->language_mode()) |
-              DeclarationScopeField::encode(scope->is_declaration_scope()) |
-              ReceiverVariableField::encode(receiver_info) |
-              HasNewTargetField::encode(has_new_target) |
-              FunctionVariableField::encode(function_name_info) |
-              FunctionVariableMode::encode(function_variable_mode) |
-              AsmModuleField::encode(asm_module) |
-              AsmFunctionField::encode(asm_function) |
-              HasSimpleParametersField::encode(has_simple_parameters) |
-              FunctionKindField::encode(function_kind);
+  int flags =
+      ScopeTypeField::encode(scope->scope_type()) |
+      CallsEvalField::encode(scope->calls_eval()) |
+      LanguageModeField::encode(scope->language_mode()) |
+      DeclarationScopeField::encode(scope->is_declaration_scope()) |
+      ReceiverVariableField::encode(receiver_info) |
+      HasNewTargetField::encode(has_new_target) |
+      FunctionVariableField::encode(function_name_info) |
+      AsmModuleField::encode(asm_module) |
+      AsmFunctionField::encode(asm_function) |
+      HasSimpleParametersField::encode(has_simple_parameters) |
+      FunctionKindField::encode(function_kind) |
+      HasOuterScopeInfoField::encode(has_outer_scope_info) |
+      IsDebugEvaluateScopeField::encode(scope->is_debug_evaluate_scope());
   scope_info->SetFlags(flags);
+
   scope_info->SetParameterCount(parameter_count);
   scope_info->SetStackLocalCount(stack_local_count);
   scope_info->SetContextLocalCount(context_local_count);
-  scope_info->SetContextGlobalCount(context_global_count);
 
   int index = kVariablePartIndex;
   // Add parameters.
-  DCHECK(index == scope_info->ParameterEntriesIndex());
+  DCHECK_EQ(index, scope_info->ParameterNamesIndex());
   if (scope->is_declaration_scope()) {
     for (int i = 0; i < parameter_count; ++i) {
       scope_info->set(index++,
@@ -124,68 +185,66 @@
     }
   }
 
-  // Add stack locals' names. We are assuming that the stack locals'
-  // slots are allocated in increasing order, so we can simply add
-  // them to the ScopeInfo object.
-  int first_slot_index;
-  if (stack_local_count > 0) {
-    first_slot_index = stack_locals[0]->index();
-  } else {
-    first_slot_index = 0;
-  }
-  DCHECK(index == scope_info->StackLocalFirstSlotIndex());
+  // Add stack locals' names, context locals' names and info, module variables'
+  // names and info. We are assuming that the stack locals' slots are allocated
+  // in increasing order, so we can simply add them to the ScopeInfo object.
+  // Context locals are added using their index.
+  DCHECK_EQ(index, scope_info->StackLocalFirstSlotIndex());
   scope_info->set(index++, Smi::FromInt(first_slot_index));
-  DCHECK(index == scope_info->StackLocalEntriesIndex());
-  for (int i = 0; i < stack_local_count; ++i) {
-    DCHECK(stack_locals[i]->index() == first_slot_index + i);
-    scope_info->set(index++, *stack_locals[i]->name());
+  DCHECK_EQ(index, scope_info->StackLocalNamesIndex());
+
+  int stack_local_base = index;
+  int context_local_base = stack_local_base + stack_local_count;
+  int context_local_info_base = context_local_base + context_local_count;
+  int module_var_entry = scope_info->ModuleVariablesIndex();
+
+  for (int i = 0; i < locals->length(); ++i) {
+    Variable* var = locals->at(i);
+    switch (var->location()) {
+      case VariableLocation::LOCAL: {
+        int local_index = var->index() - first_slot_index;
+        DCHECK_LE(0, local_index);
+        DCHECK_LT(local_index, stack_local_count);
+        scope_info->set(stack_local_base + local_index, *var->name());
+        break;
+      }
+      case VariableLocation::CONTEXT: {
+        // Due to duplicate parameters, context locals aren't guaranteed to come
+        // in order.
+        int local_index = var->index() - Context::MIN_CONTEXT_SLOTS;
+        DCHECK_LE(0, local_index);
+        DCHECK_LT(local_index, context_local_count);
+        uint32_t info = VariableModeField::encode(var->mode()) |
+                        InitFlagField::encode(var->initialization_flag()) |
+                        MaybeAssignedFlagField::encode(var->maybe_assigned());
+        scope_info->set(context_local_base + local_index, *var->name());
+        scope_info->set(context_local_info_base + local_index,
+                        Smi::FromInt(info));
+        break;
+      }
+      case VariableLocation::MODULE: {
+        scope_info->set(module_var_entry + kModuleVariableNameOffset,
+                        *var->name());
+        scope_info->set(module_var_entry + kModuleVariableIndexOffset,
+                        Smi::FromInt(var->index()));
+        uint32_t properties =
+            VariableModeField::encode(var->mode()) |
+            InitFlagField::encode(var->initialization_flag()) |
+            MaybeAssignedFlagField::encode(var->maybe_assigned());
+        scope_info->set(module_var_entry + kModuleVariablePropertiesOffset,
+                        Smi::FromInt(properties));
+        module_var_entry += kModuleVariableEntryLength;
+        break;
+      }
+      default:
+        break;
+    }
   }
 
-  // Due to usage analysis, context-allocated locals are not necessarily in
-  // increasing order: Some of them may be parameters which are allocated before
-  // the non-parameter locals. When the non-parameter locals are sorted
-  // according to usage, the allocated slot indices may not be in increasing
-  // order with the variable list anymore. Thus, we first need to sort them by
-  // context slot index before adding them to the ScopeInfo object.
-  context_locals.Sort(&Variable::CompareIndex);
-
-  // Add context locals' names.
-  DCHECK(index == scope_info->ContextLocalNameEntriesIndex());
-  for (int i = 0; i < context_local_count; ++i) {
-    scope_info->set(index++, *context_locals[i]->name());
-  }
-
-  // Add context globals' names.
-  DCHECK(index == scope_info->ContextGlobalNameEntriesIndex());
-  for (int i = 0; i < context_global_count; ++i) {
-    scope_info->set(index++, *context_globals[i]->name());
-  }
-
-  // Add context locals' info.
-  DCHECK(index == scope_info->ContextLocalInfoEntriesIndex());
-  for (int i = 0; i < context_local_count; ++i) {
-    Variable* var = context_locals[i];
-    uint32_t value =
-        ContextLocalMode::encode(var->mode()) |
-        ContextLocalInitFlag::encode(var->initialization_flag()) |
-        ContextLocalMaybeAssignedFlag::encode(var->maybe_assigned());
-    scope_info->set(index++, Smi::FromInt(value));
-  }
-
-  // Add context globals' info.
-  DCHECK(index == scope_info->ContextGlobalInfoEntriesIndex());
-  for (int i = 0; i < context_global_count; ++i) {
-    Variable* var = context_globals[i];
-    // TODO(ishell): do we need this kind of info for globals here?
-    uint32_t value =
-        ContextLocalMode::encode(var->mode()) |
-        ContextLocalInitFlag::encode(var->initialization_flag()) |
-        ContextLocalMaybeAssignedFlag::encode(var->maybe_assigned());
-    scope_info->set(index++, Smi::FromInt(value));
-  }
+  index += stack_local_count + 2 * context_local_count;
 
   // If the receiver is allocated, add its index.
-  DCHECK(index == scope_info->ReceiverEntryIndex());
+  DCHECK_EQ(index, scope_info->ReceiverInfoIndex());
   if (has_receiver) {
     int var_index = scope->AsDeclarationScope()->receiver()->index();
     scope_info->set(index++, Smi::FromInt(var_index));
@@ -194,7 +253,7 @@
   }
 
   // If present, add the function variable name and its index.
-  DCHECK(index == scope_info->FunctionNameEntryIndex());
+  DCHECK_EQ(index, scope_info->FunctionNameInfoIndex());
   if (has_function_name) {
     int var_index = scope->AsDeclarationScope()->function_var()->index();
     scope_info->set(index++,
@@ -204,75 +263,130 @@
            var_index == scope_info->ContextLength() - 1);
   }
 
-  DCHECK(index == scope_info->length());
-  DCHECK(scope->num_parameters() == scope_info->ParameterCount());
-  DCHECK(scope->num_heap_slots() == scope_info->ContextLength() ||
-         (scope->num_heap_slots() == kVariablePartIndex &&
-          scope_info->ContextLength() == 0));
+  // If present, add the outer scope info.
+  DCHECK(index == scope_info->OuterScopeInfoIndex());
+  if (has_outer_scope_info) {
+    scope_info->set(index++, *outer_scope.ToHandleChecked());
+  }
+
+  // Module-specific information (only for module scopes).
+  if (scope->is_module_scope()) {
+    Handle<ModuleInfo> module_info =
+        ModuleInfo::New(isolate, zone, scope->AsModuleScope()->module());
+    DCHECK_EQ(index, scope_info->ModuleInfoIndex());
+    scope_info->set(index++, *module_info);
+    DCHECK_EQ(index, scope_info->ModuleVariableCountIndex());
+    scope_info->set(index++, Smi::FromInt(module_vars_count));
+    DCHECK_EQ(index, scope_info->ModuleVariablesIndex());
+    // The variable entries themselves have already been written above.
+    index += kModuleVariableEntryLength * module_vars_count;
+  }
+
+  DCHECK_EQ(index, scope_info->length());
+  DCHECK_EQ(scope->num_parameters(), scope_info->ParameterCount());
+  DCHECK_EQ(scope->num_heap_slots(), scope_info->ContextLength());
   return scope_info;
 }
 
+Handle<ScopeInfo> ScopeInfo::CreateForWithScope(
+    Isolate* isolate, MaybeHandle<ScopeInfo> outer_scope) {
+  const bool has_outer_scope_info = !outer_scope.is_null();
+  const int length = kVariablePartIndex + 1 + (has_outer_scope_info ? 1 : 0);
+
+  Factory* factory = isolate->factory();
+  Handle<ScopeInfo> scope_info = factory->NewScopeInfo(length);
+
+  // Encode the flags.
+  int flags =
+      ScopeTypeField::encode(WITH_SCOPE) | CallsEvalField::encode(false) |
+      LanguageModeField::encode(SLOPPY) | DeclarationScopeField::encode(false) |
+      ReceiverVariableField::encode(NONE) | HasNewTargetField::encode(false) |
+      FunctionVariableField::encode(NONE) | AsmModuleField::encode(false) |
+      AsmFunctionField::encode(false) | HasSimpleParametersField::encode(true) |
+      FunctionKindField::encode(kNormalFunction) |
+      HasOuterScopeInfoField::encode(has_outer_scope_info) |
+      IsDebugEvaluateScopeField::encode(false);
+  scope_info->SetFlags(flags);
+
+  scope_info->SetParameterCount(0);
+  scope_info->SetStackLocalCount(0);
+  scope_info->SetContextLocalCount(0);
+
+  int index = kVariablePartIndex;
+  DCHECK_EQ(index, scope_info->ParameterNamesIndex());
+  DCHECK_EQ(index, scope_info->StackLocalFirstSlotIndex());
+  scope_info->set(index++, Smi::FromInt(0));
+  DCHECK_EQ(index, scope_info->StackLocalNamesIndex());
+  DCHECK_EQ(index, scope_info->ReceiverInfoIndex());
+  DCHECK_EQ(index, scope_info->FunctionNameInfoIndex());
+  DCHECK(index == scope_info->OuterScopeInfoIndex());
+  if (has_outer_scope_info) {
+    scope_info->set(index++, *outer_scope.ToHandleChecked());
+  }
+  DCHECK_EQ(index, scope_info->length());
+  DCHECK_EQ(0, scope_info->ParameterCount());
+  DCHECK_EQ(Context::MIN_CONTEXT_SLOTS, scope_info->ContextLength());
+  return scope_info;
+}
 
 Handle<ScopeInfo> ScopeInfo::CreateGlobalThisBinding(Isolate* isolate) {
   DCHECK(isolate->bootstrapper()->IsActive());
 
   const int stack_local_count = 0;
   const int context_local_count = 1;
-  const int context_global_count = 0;
   const bool has_simple_parameters = true;
   const VariableAllocationInfo receiver_info = CONTEXT;
   const VariableAllocationInfo function_name_info = NONE;
-  const VariableMode function_variable_mode = VAR;
   const bool has_function_name = false;
   const bool has_receiver = true;
+  const bool has_outer_scope_info = false;
   const int parameter_count = 0;
   const int length = kVariablePartIndex + parameter_count +
                      (1 + stack_local_count) + 2 * context_local_count +
-                     2 * context_global_count +
-                     (has_receiver ? 1 : 0) + (has_function_name ? 2 : 0);
+                     (has_receiver ? 1 : 0) + (has_function_name ? 2 : 0) +
+                     (has_outer_scope_info ? 1 : 0);
 
   Factory* factory = isolate->factory();
   Handle<ScopeInfo> scope_info = factory->NewScopeInfo(length);
 
   // Encode the flags.
-  int flags = ScopeTypeField::encode(SCRIPT_SCOPE) |
-              CallsEvalField::encode(false) |
-              LanguageModeField::encode(SLOPPY) |
-              DeclarationScopeField::encode(true) |
-              ReceiverVariableField::encode(receiver_info) |
-              FunctionVariableField::encode(function_name_info) |
-              FunctionVariableMode::encode(function_variable_mode) |
-              AsmModuleField::encode(false) | AsmFunctionField::encode(false) |
-              HasSimpleParametersField::encode(has_simple_parameters) |
-              FunctionKindField::encode(FunctionKind::kNormalFunction);
+  int flags =
+      ScopeTypeField::encode(SCRIPT_SCOPE) | CallsEvalField::encode(false) |
+      LanguageModeField::encode(SLOPPY) | DeclarationScopeField::encode(true) |
+      ReceiverVariableField::encode(receiver_info) |
+      FunctionVariableField::encode(function_name_info) |
+      AsmModuleField::encode(false) | AsmFunctionField::encode(false) |
+      HasSimpleParametersField::encode(has_simple_parameters) |
+      FunctionKindField::encode(FunctionKind::kNormalFunction) |
+      HasOuterScopeInfoField::encode(has_outer_scope_info) |
+      IsDebugEvaluateScopeField::encode(false);
   scope_info->SetFlags(flags);
   scope_info->SetParameterCount(parameter_count);
   scope_info->SetStackLocalCount(stack_local_count);
   scope_info->SetContextLocalCount(context_local_count);
-  scope_info->SetContextGlobalCount(context_global_count);
 
   int index = kVariablePartIndex;
   const int first_slot_index = 0;
-  DCHECK(index == scope_info->StackLocalFirstSlotIndex());
+  DCHECK_EQ(index, scope_info->StackLocalFirstSlotIndex());
   scope_info->set(index++, Smi::FromInt(first_slot_index));
-  DCHECK(index == scope_info->StackLocalEntriesIndex());
+  DCHECK_EQ(index, scope_info->StackLocalNamesIndex());
 
   // Here we add info for context-allocated "this".
-  DCHECK(index == scope_info->ContextLocalNameEntriesIndex());
+  DCHECK_EQ(index, scope_info->ContextLocalNamesIndex());
   scope_info->set(index++, *isolate->factory()->this_string());
-  DCHECK(index == scope_info->ContextLocalInfoEntriesIndex());
-  const uint32_t value = ContextLocalMode::encode(CONST) |
-                         ContextLocalInitFlag::encode(kCreatedInitialized) |
-                         ContextLocalMaybeAssignedFlag::encode(kNotAssigned);
+  DCHECK_EQ(index, scope_info->ContextLocalInfosIndex());
+  const uint32_t value = VariableModeField::encode(CONST) |
+                         InitFlagField::encode(kCreatedInitialized) |
+                         MaybeAssignedFlagField::encode(kNotAssigned);
   scope_info->set(index++, Smi::FromInt(value));
 
   // And here we record that this scopeinfo binds a receiver.
-  DCHECK(index == scope_info->ReceiverEntryIndex());
+  DCHECK_EQ(index, scope_info->ReceiverInfoIndex());
   const int receiver_index = Context::MIN_CONTEXT_SLOTS + 0;
   scope_info->set(index++, Smi::FromInt(receiver_index));
 
-  DCHECK(index == scope_info->FunctionNameEntryIndex());
-
+  DCHECK_EQ(index, scope_info->FunctionNameInfoIndex());
+  DCHECK_EQ(index, scope_info->OuterScopeInfoIndex());
   DCHECK_EQ(index, scope_info->length());
   DCHECK_EQ(scope_info->ParameterCount(), 0);
   DCHECK_EQ(scope_info->ContextLength(), Context::MIN_CONTEXT_SLOTS + 1);
@@ -282,12 +396,12 @@
 
 
 ScopeInfo* ScopeInfo::Empty(Isolate* isolate) {
-  return reinterpret_cast<ScopeInfo*>(isolate->heap()->empty_fixed_array());
+  return isolate->heap()->empty_scope_info();
 }
 
 
 ScopeType ScopeInfo::scope_type() {
-  DCHECK(length() > 0);
+  DCHECK_LT(0, length());
   return ScopeTypeField::decode(Flags());
 }
 
@@ -325,19 +439,17 @@
 int ScopeInfo::ContextLength() {
   if (length() > 0) {
     int context_locals = ContextLocalCount();
-    int context_globals = ContextGlobalCount();
     bool function_name_context_slot =
         FunctionVariableField::decode(Flags()) == CONTEXT;
-    bool has_context = context_locals > 0 || context_globals > 0 ||
-                       function_name_context_slot ||
+    bool has_context = context_locals > 0 || function_name_context_slot ||
                        scope_type() == WITH_SCOPE ||
                        (scope_type() == BLOCK_SCOPE && CallsSloppyEval() &&
-                           is_declaration_scope()) ||
+                        is_declaration_scope()) ||
                        (scope_type() == FUNCTION_SCOPE && CallsSloppyEval()) ||
                        scope_type() == MODULE_SCOPE;
 
     if (has_context) {
-      return Context::MIN_CONTEXT_SLOTS + context_locals + context_globals +
+      return Context::MIN_CONTEXT_SLOTS + context_locals +
              (function_name_context_slot ? 1 : 0);
     }
   }
@@ -375,6 +487,30 @@
   }
 }
 
+bool ScopeInfo::HasOuterScopeInfo() {
+  if (length() > 0) {
+    return HasOuterScopeInfoField::decode(Flags());
+  } else {
+    return false;
+  }
+}
+
+bool ScopeInfo::IsDebugEvaluateScope() {
+  if (length() > 0) {
+    return IsDebugEvaluateScopeField::decode(Flags());
+  } else {
+    return false;
+  }
+}
+
+void ScopeInfo::SetIsDebugEvaluateScope() {
+  if (length() > 0) {
+    DCHECK_EQ(scope_type(), WITH_SCOPE);
+    SetFlags(Flags() | IsDebugEvaluateScopeField::encode(true));
+  } else {
+    UNREACHABLE();
+  }
+}
 
 bool ScopeInfo::HasHeapAllocatedLocals() {
   if (length() > 0) {
@@ -392,68 +528,85 @@
 
 String* ScopeInfo::FunctionName() {
   DCHECK(HasFunctionName());
-  return String::cast(get(FunctionNameEntryIndex()));
+  return String::cast(get(FunctionNameInfoIndex()));
 }
 
+ScopeInfo* ScopeInfo::OuterScopeInfo() {
+  DCHECK(HasOuterScopeInfo());
+  return ScopeInfo::cast(get(OuterScopeInfoIndex()));
+}
+
+ModuleInfo* ScopeInfo::ModuleDescriptorInfo() {
+  DCHECK(scope_type() == MODULE_SCOPE);
+  return ModuleInfo::cast(get(ModuleInfoIndex()));
+}
 
 String* ScopeInfo::ParameterName(int var) {
-  DCHECK(0 <= var && var < ParameterCount());
-  int info_index = ParameterEntriesIndex() + var;
+  DCHECK_LE(0, var);
+  DCHECK_LT(var, ParameterCount());
+  int info_index = ParameterNamesIndex() + var;
   return String::cast(get(info_index));
 }
 
 
 String* ScopeInfo::LocalName(int var) {
-  DCHECK(0 <= var && var < LocalCount());
-  DCHECK(StackLocalEntriesIndex() + StackLocalCount() ==
-         ContextLocalNameEntriesIndex());
-  int info_index = StackLocalEntriesIndex() + var;
+  DCHECK_LE(0, var);
+  DCHECK_LT(var, LocalCount());
+  DCHECK(StackLocalNamesIndex() + StackLocalCount() ==
+         ContextLocalNamesIndex());
+  int info_index = StackLocalNamesIndex() + var;
   return String::cast(get(info_index));
 }
 
 
 String* ScopeInfo::StackLocalName(int var) {
-  DCHECK(0 <= var && var < StackLocalCount());
-  int info_index = StackLocalEntriesIndex() + var;
+  DCHECK_LE(0, var);
+  DCHECK_LT(var, StackLocalCount());
+  int info_index = StackLocalNamesIndex() + var;
   return String::cast(get(info_index));
 }
 
 
 int ScopeInfo::StackLocalIndex(int var) {
-  DCHECK(0 <= var && var < StackLocalCount());
+  DCHECK_LE(0, var);
+  DCHECK_LT(var, StackLocalCount());
   int first_slot_index = Smi::cast(get(StackLocalFirstSlotIndex()))->value();
   return first_slot_index + var;
 }
 
 
 String* ScopeInfo::ContextLocalName(int var) {
-  DCHECK(0 <= var && var < ContextLocalCount() + ContextGlobalCount());
-  int info_index = ContextLocalNameEntriesIndex() + var;
+  DCHECK_LE(0, var);
+  DCHECK_LT(var, ContextLocalCount());
+  int info_index = ContextLocalNamesIndex() + var;
   return String::cast(get(info_index));
 }
 
 
 VariableMode ScopeInfo::ContextLocalMode(int var) {
-  DCHECK(0 <= var && var < ContextLocalCount() + ContextGlobalCount());
-  int info_index = ContextLocalInfoEntriesIndex() + var;
+  DCHECK_LE(0, var);
+  DCHECK_LT(var, ContextLocalCount());
+  int info_index = ContextLocalInfosIndex() + var;
   int value = Smi::cast(get(info_index))->value();
-  return ContextLocalMode::decode(value);
+  return VariableModeField::decode(value);
 }
 
 
 InitializationFlag ScopeInfo::ContextLocalInitFlag(int var) {
-  DCHECK(0 <= var && var < ContextLocalCount() + ContextGlobalCount());
-  int info_index = ContextLocalInfoEntriesIndex() + var;
+  DCHECK_LE(0, var);
+  DCHECK_LT(var, ContextLocalCount());
+  int info_index = ContextLocalInfosIndex() + var;
   int value = Smi::cast(get(info_index))->value();
-  return ContextLocalInitFlag::decode(value);
+  return InitFlagField::decode(value);
 }
 
 
 MaybeAssignedFlag ScopeInfo::ContextLocalMaybeAssignedFlag(int var) {
-  DCHECK(0 <= var && var < ContextLocalCount() + ContextGlobalCount());
-  int info_index = ContextLocalInfoEntriesIndex() + var;
+  DCHECK_LE(0, var);
+  DCHECK_LT(var, ContextLocalCount());
+  int info_index = ContextLocalInfosIndex() + var;
   int value = Smi::cast(get(info_index))->value();
-  return ContextLocalMaybeAssignedFlag::decode(value);
+  return MaybeAssignedFlagField::decode(value);
 }
 
 bool ScopeInfo::VariableIsSynthetic(String* name) {
@@ -470,8 +623,8 @@
   DCHECK(name->IsInternalizedString());
   if (length() > 0) {
     int first_slot_index = Smi::cast(get(StackLocalFirstSlotIndex()))->value();
-    int start = StackLocalEntriesIndex();
-    int end = StackLocalEntriesIndex() + StackLocalCount();
+    int start = StackLocalNamesIndex();
+    int end = start + StackLocalCount();
     for (int i = start; i < end; ++i) {
       if (name == get(i)) {
         return i - start + first_slot_index;
@@ -481,27 +634,54 @@
   return -1;
 }
 
+int ScopeInfo::ModuleIndex(Handle<String> name, VariableMode* mode,
+                           InitializationFlag* init_flag,
+                           MaybeAssignedFlag* maybe_assigned_flag) {
+  DCHECK_EQ(scope_type(), MODULE_SCOPE);
+  DCHECK(name->IsInternalizedString());
+  DCHECK_NOT_NULL(mode);
+  DCHECK_NOT_NULL(init_flag);
+  DCHECK_NOT_NULL(maybe_assigned_flag);
+
+  int module_vars_count = Smi::cast(get(ModuleVariableCountIndex()))->value();
+  int entry = ModuleVariablesIndex();
+  for (int i = 0; i < module_vars_count; ++i) {
+    if (*name == get(entry + kModuleVariableNameOffset)) {
+      int index = Smi::cast(get(entry + kModuleVariableIndexOffset))->value();
+      int properties =
+          Smi::cast(get(entry + kModuleVariablePropertiesOffset))->value();
+      *mode = VariableModeField::decode(properties);
+      *init_flag = InitFlagField::decode(properties);
+      *maybe_assigned_flag = MaybeAssignedFlagField::decode(properties);
+      return index;
+    }
+    entry += kModuleVariableEntryLength;
+  }
+
+  return -1;
+}
 
 int ScopeInfo::ContextSlotIndex(Handle<ScopeInfo> scope_info,
                                 Handle<String> name, VariableMode* mode,
                                 InitializationFlag* init_flag,
                                 MaybeAssignedFlag* maybe_assigned_flag) {
   DCHECK(name->IsInternalizedString());
-  DCHECK(mode != NULL);
-  DCHECK(init_flag != NULL);
+  DCHECK_NOT_NULL(mode);
+  DCHECK_NOT_NULL(init_flag);
+  DCHECK_NOT_NULL(maybe_assigned_flag);
+
   if (scope_info->length() > 0) {
     ContextSlotCache* context_slot_cache =
         scope_info->GetIsolate()->context_slot_cache();
     int result = context_slot_cache->Lookup(*scope_info, *name, mode, init_flag,
                                             maybe_assigned_flag);
     if (result != ContextSlotCache::kNotFound) {
-      DCHECK(result < scope_info->ContextLength());
+      DCHECK_LT(result, scope_info->ContextLength());
       return result;
     }
 
-    int start = scope_info->ContextLocalNameEntriesIndex();
-    int end = scope_info->ContextLocalNameEntriesIndex() +
-              scope_info->ContextLocalCount();
+    int start = scope_info->ContextLocalNamesIndex();
+    int end = start + scope_info->ContextLocalCount();
     for (int i = start; i < end; ++i) {
       if (*name == scope_info->get(i)) {
         int var = i - start;
@@ -512,7 +692,7 @@
 
         context_slot_cache->Update(scope_info, name, *mode, *init_flag,
                                    *maybe_assigned_flag, result);
-        DCHECK(result < scope_info->ContextLength());
+        DCHECK_LT(result, scope_info->ContextLength());
         return result;
       }
     }
@@ -520,46 +700,14 @@
     context_slot_cache->Update(scope_info, name, TEMPORARY,
                                kNeedsInitialization, kNotAssigned, -1);
   }
+
   return -1;
 }
 
-
-int ScopeInfo::ContextGlobalSlotIndex(Handle<ScopeInfo> scope_info,
-                                      Handle<String> name, VariableMode* mode,
-                                      InitializationFlag* init_flag,
-                                      MaybeAssignedFlag* maybe_assigned_flag) {
-  DCHECK(name->IsInternalizedString());
-  DCHECK(mode != NULL);
-  DCHECK(init_flag != NULL);
-  if (scope_info->length() > 0) {
-    // This is to ensure that ContextLocalMode() and co. queries would work.
-    DCHECK_EQ(scope_info->ContextGlobalNameEntriesIndex(),
-              scope_info->ContextLocalNameEntriesIndex() +
-                  scope_info->ContextLocalCount());
-    int base = scope_info->ContextLocalNameEntriesIndex();
-    int start = scope_info->ContextGlobalNameEntriesIndex();
-    int end = scope_info->ContextGlobalNameEntriesIndex() +
-              scope_info->ContextGlobalCount();
-    for (int i = start; i < end; ++i) {
-      if (*name == scope_info->get(i)) {
-        int var = i - base;
-        *mode = scope_info->ContextLocalMode(var);
-        *init_flag = scope_info->ContextLocalInitFlag(var);
-        *maybe_assigned_flag = scope_info->ContextLocalMaybeAssignedFlag(var);
-        int result = Context::MIN_CONTEXT_SLOTS + var;
-        DCHECK(result < scope_info->ContextLength());
-        return result;
-      }
-    }
-  }
-  return -1;
-}
-
-
 String* ScopeInfo::ContextSlotName(int slot_index) {
   int const var = slot_index - Context::MIN_CONTEXT_SLOTS;
   DCHECK_LE(0, var);
-  DCHECK_LT(var, ContextLocalCount() + ContextGlobalCount());
+  DCHECK_LT(var, ContextLocalCount());
   return ContextLocalName(var);
 }
 
@@ -572,8 +720,8 @@
     // last declaration of that parameter is used
     // inside a function (and thus we need to look
     // at the last index). Was bug# 1110337.
-    int start = ParameterEntriesIndex();
-    int end = ParameterEntriesIndex() + ParameterCount();
+    int start = ParameterNamesIndex();
+    int end = start + ParameterCount();
     for (int i = end - 1; i >= start; --i) {
       if (name == get(i)) {
         return i - start;
@@ -586,19 +734,16 @@
 
 int ScopeInfo::ReceiverContextSlotIndex() {
   if (length() > 0 && ReceiverVariableField::decode(Flags()) == CONTEXT)
-    return Smi::cast(get(ReceiverEntryIndex()))->value();
+    return Smi::cast(get(ReceiverInfoIndex()))->value();
   return -1;
 }
 
-
-int ScopeInfo::FunctionContextSlotIndex(String* name, VariableMode* mode) {
+int ScopeInfo::FunctionContextSlotIndex(String* name) {
   DCHECK(name->IsInternalizedString());
-  DCHECK(mode != NULL);
   if (length() > 0) {
     if (FunctionVariableField::decode(Flags()) == CONTEXT &&
         FunctionName() == name) {
-      *mode = FunctionVariableMode::decode(Flags());
-      return Smi::cast(get(FunctionNameEntryIndex() + 1))->value();
+      return Smi::cast(get(FunctionNameInfoIndex() + 1))->value();
     }
   }
   return -1;
@@ -609,51 +754,45 @@
   return FunctionKindField::decode(Flags());
 }
 
-
-int ScopeInfo::ParameterEntriesIndex() {
-  DCHECK(length() > 0);
+int ScopeInfo::ParameterNamesIndex() {
+  DCHECK_LT(0, length());
   return kVariablePartIndex;
 }
 
 
 int ScopeInfo::StackLocalFirstSlotIndex() {
-  return ParameterEntriesIndex() + ParameterCount();
+  return ParameterNamesIndex() + ParameterCount();
 }
 
+int ScopeInfo::StackLocalNamesIndex() { return StackLocalFirstSlotIndex() + 1; }
 
-int ScopeInfo::StackLocalEntriesIndex() {
-  return StackLocalFirstSlotIndex() + 1;
+int ScopeInfo::ContextLocalNamesIndex() {
+  return StackLocalNamesIndex() + StackLocalCount();
 }
 
-
-int ScopeInfo::ContextLocalNameEntriesIndex() {
-  return StackLocalEntriesIndex() + StackLocalCount();
+int ScopeInfo::ContextLocalInfosIndex() {
+  return ContextLocalNamesIndex() + ContextLocalCount();
 }
 
-
-int ScopeInfo::ContextGlobalNameEntriesIndex() {
-  return ContextLocalNameEntriesIndex() + ContextLocalCount();
+int ScopeInfo::ReceiverInfoIndex() {
+  return ContextLocalInfosIndex() + ContextLocalCount();
 }
 
-
-int ScopeInfo::ContextLocalInfoEntriesIndex() {
-  return ContextGlobalNameEntriesIndex() + ContextGlobalCount();
+int ScopeInfo::FunctionNameInfoIndex() {
+  return ReceiverInfoIndex() + (HasAllocatedReceiver() ? 1 : 0);
 }
 
-
-int ScopeInfo::ContextGlobalInfoEntriesIndex() {
-  return ContextLocalInfoEntriesIndex() + ContextLocalCount();
+int ScopeInfo::OuterScopeInfoIndex() {
+  return FunctionNameInfoIndex() + (HasFunctionName() ? 2 : 0);
 }
 
-
-int ScopeInfo::ReceiverEntryIndex() {
-  return ContextGlobalInfoEntriesIndex() + ContextGlobalCount();
+int ScopeInfo::ModuleInfoIndex() {
+  return OuterScopeInfoIndex() + (HasOuterScopeInfo() ? 1 : 0);
 }
 
+int ScopeInfo::ModuleVariableCountIndex() { return ModuleInfoIndex() + 1; }
 
-int ScopeInfo::FunctionNameEntryIndex() {
-  return ReceiverEntryIndex() + (HasAllocatedReceiver() ? 1 : 0);
-}
+int ScopeInfo::ModuleVariablesIndex() { return ModuleVariableCountIndex() + 1; }
 
 #ifdef DEBUG
 
@@ -686,19 +825,84 @@
   PrintF("{");
 
   if (length() > 0) {
-    PrintList("parameters", 0, ParameterEntriesIndex(),
-              ParameterEntriesIndex() + ParameterCount(), this);
-    PrintList("stack slots", 0, StackLocalEntriesIndex(),
-              StackLocalEntriesIndex() + StackLocalCount(), this);
+    PrintList("parameters", 0, ParameterNamesIndex(),
+              ParameterNamesIndex() + ParameterCount(), this);
+    PrintList("stack slots", 0, StackLocalNamesIndex(),
+              StackLocalNamesIndex() + StackLocalCount(), this);
     PrintList("context slots", Context::MIN_CONTEXT_SLOTS,
-              ContextLocalNameEntriesIndex(),
-              ContextLocalNameEntriesIndex() + ContextLocalCount(), this);
+              ContextLocalNamesIndex(),
+              ContextLocalNamesIndex() + ContextLocalCount(), this);
+    // TODO(neis): Print module stuff if present.
   }
 
   PrintF("}\n");
 }
 #endif  // DEBUG
 
+Handle<ModuleInfoEntry> ModuleInfoEntry::New(Isolate* isolate,
+                                             Handle<Object> export_name,
+                                             Handle<Object> local_name,
+                                             Handle<Object> import_name,
+                                             Handle<Object> module_request) {
+  Handle<ModuleInfoEntry> result = isolate->factory()->NewModuleInfoEntry();
+  result->set(kExportNameIndex, *export_name);
+  result->set(kLocalNameIndex, *local_name);
+  result->set(kImportNameIndex, *import_name);
+  result->set(kModuleRequestIndex, *module_request);
+  return result;
+}
+
+Handle<ModuleInfo> ModuleInfo::New(Isolate* isolate, Zone* zone,
+                                   ModuleDescriptor* descr) {
+  // Serialize module requests.
+  Handle<FixedArray> module_requests = isolate->factory()->NewFixedArray(
+      static_cast<int>(descr->module_requests().size()));
+  for (const auto& elem : descr->module_requests()) {
+    module_requests->set(elem.second, *elem.first->string());
+  }
+
+  // Serialize special exports.
+  Handle<FixedArray> special_exports =
+      isolate->factory()->NewFixedArray(descr->special_exports().length());
+  {
+    int i = 0;
+    for (auto entry : descr->special_exports()) {
+      special_exports->set(i++, *entry->Serialize(isolate));
+    }
+  }
+
+  // Serialize namespace imports.
+  Handle<FixedArray> namespace_imports =
+      isolate->factory()->NewFixedArray(descr->namespace_imports().length());
+  {
+    int i = 0;
+    for (auto entry : descr->namespace_imports()) {
+      namespace_imports->set(i++, *entry->Serialize(isolate));
+    }
+  }
+
+  // Serialize regular exports.
+  Handle<FixedArray> regular_exports =
+      descr->SerializeRegularExports(isolate, zone);
+
+  // Serialize regular imports.
+  Handle<FixedArray> regular_imports = isolate->factory()->NewFixedArray(
+      static_cast<int>(descr->regular_imports().size()));
+  {
+    int i = 0;
+    for (const auto& elem : descr->regular_imports()) {
+      regular_imports->set(i++, *elem.second->Serialize(isolate));
+    }
+  }
+
+  Handle<ModuleInfo> result = isolate->factory()->NewModuleInfo();
+  result->set(kModuleRequestsIndex, *module_requests);
+  result->set(kSpecialExportsIndex, *special_exports);
+  result->set(kRegularExportsIndex, *regular_exports);
+  result->set(kNamespaceImportsIndex, *namespace_imports);
+  result->set(kRegularImportsIndex, *regular_imports);
+  return result;
+}
 
 }  // namespace internal
 }  // namespace v8
diff --git a/src/ast/scopeinfo.h b/src/ast/scopeinfo.h
deleted file mode 100644
index 515c88b..0000000
--- a/src/ast/scopeinfo.h
+++ /dev/null
@@ -1,18 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_AST_SCOPEINFO_H_
-#define V8_AST_SCOPEINFO_H_
-
-#include "src/allocation.h"
-#include "src/ast/modules.h"
-#include "src/ast/variables.h"
-
-namespace v8 {
-namespace internal {
-
-}  // namespace internal
-}  // namespace v8
-
-#endif  // V8_AST_SCOPEINFO_H_
diff --git a/src/ast/scopes.cc b/src/ast/scopes.cc
index 7689786..c531ef5 100644
--- a/src/ast/scopes.cc
+++ b/src/ast/scopes.cc
@@ -7,6 +7,7 @@
 #include <set>
 
 #include "src/accessors.h"
+#include "src/ast/ast.h"
 #include "src/bootstrapper.h"
 #include "src/messages.h"
 #include "src/parsing/parse-info.h"
@@ -24,11 +25,11 @@
 //       this is ensured.
 
 VariableMap::VariableMap(Zone* zone)
-    : ZoneHashMap(ZoneHashMap::PointersMatch, 8, ZoneAllocationPolicy(zone)) {}
+    : ZoneHashMap(8, ZoneAllocationPolicy(zone)) {}
 
 Variable* VariableMap::Declare(Zone* zone, Scope* scope,
                                const AstRawString* name, VariableMode mode,
-                               Variable::Kind kind,
+                               VariableKind kind,
                                InitializationFlag initialization_flag,
                                MaybeAssignedFlag maybe_assigned_flag,
                                bool* added) {
@@ -41,13 +42,27 @@
   if (added) *added = p->value == nullptr;
   if (p->value == nullptr) {
     // The variable has not been declared yet -> insert it.
-    DCHECK(p->key == name);
+    DCHECK_EQ(name, p->key);
     p->value = new (zone) Variable(scope, name, mode, kind, initialization_flag,
                                    maybe_assigned_flag);
   }
   return reinterpret_cast<Variable*>(p->value);
 }
 
+void VariableMap::Remove(Variable* var) {
+  const AstRawString* name = var->raw_name();
+  ZoneHashMap::Remove(const_cast<AstRawString*>(name), name->hash());
+}
+
+void VariableMap::Add(Zone* zone, Variable* var) {
+  const AstRawString* name = var->raw_name();
+  Entry* p =
+      ZoneHashMap::LookupOrInsert(const_cast<AstRawString*>(name), name->hash(),
+                                  ZoneAllocationPolicy(zone));
+  DCHECK_NULL(p->value);
+  DCHECK_EQ(name, p->key);
+  p->value = var;
+}
 
 Variable* VariableMap::Lookup(const AstRawString* name) {
   Entry* p = ZoneHashMap::Lookup(const_cast<AstRawString*>(name), name->hash());
@@ -60,7 +75,7 @@
 }
 
 SloppyBlockFunctionMap::SloppyBlockFunctionMap(Zone* zone)
-    : ZoneHashMap(ZoneHashMap::PointersMatch, 8, ZoneAllocationPolicy(zone)) {}
+    : ZoneHashMap(8, ZoneAllocationPolicy(zone)) {}
 
 void SloppyBlockFunctionMap::Declare(Zone* zone, const AstRawString* name,
                                      SloppyBlockFunctionStatement* stmt) {
@@ -81,7 +96,7 @@
     : zone_(zone),
       outer_scope_(nullptr),
       variables_(zone),
-      ordered_variables_(4, zone),
+      locals_(4, zone),
       decls_(4, zone),
       scope_type_(SCRIPT_SCOPE) {
   SetDefaults();
@@ -91,7 +106,7 @@
     : zone_(zone),
       outer_scope_(outer_scope),
       variables_(zone),
-      ordered_variables_(4, zone),
+      locals_(4, zone),
       decls_(4, zone),
       scope_type_(scope_type) {
   DCHECK_NE(SCRIPT_SCOPE, scope_type);
@@ -106,15 +121,21 @@
     : outer_scope_(scope),
       top_inner_scope_(scope->inner_scope_),
       top_unresolved_(scope->unresolved_),
-      top_temp_(scope->GetClosureScope()->temps()->length()) {}
+      top_local_(scope->GetClosureScope()->locals_.length()),
+      top_decl_(scope->GetClosureScope()->decls_.length()) {}
 
-DeclarationScope::DeclarationScope(Zone* zone)
+DeclarationScope::DeclarationScope(Zone* zone,
+                                   AstValueFactory* ast_value_factory)
     : Scope(zone),
       function_kind_(kNormalFunction),
-      temps_(4, zone),
       params_(4, zone),
       sloppy_block_function_map_(zone) {
+  DCHECK_EQ(scope_type_, SCRIPT_SCOPE);
   SetDefaults();
+
+  // Make sure that if we don't find the global 'this', it won't be declared as
+  // a regular dynamic global by predeclaring it with the right variable kind.
+  DeclareDynamicGlobal(ast_value_factory->this_string(), THIS_VARIABLE);
 }
 
 DeclarationScope::DeclarationScope(Zone* zone, Scope* outer_scope,
@@ -122,73 +143,117 @@
                                    FunctionKind function_kind)
     : Scope(zone, outer_scope, scope_type),
       function_kind_(function_kind),
-      temps_(4, zone),
       params_(4, zone),
       sloppy_block_function_map_(zone) {
+  DCHECK_NE(scope_type, SCRIPT_SCOPE);
   SetDefaults();
   asm_function_ = outer_scope_->IsAsmModule();
 }
 
-ModuleScope::ModuleScope(Zone* zone, DeclarationScope* script_scope,
+ModuleScope::ModuleScope(DeclarationScope* script_scope,
                          AstValueFactory* ast_value_factory)
-    : DeclarationScope(zone, script_scope, MODULE_SCOPE) {
+    : DeclarationScope(ast_value_factory->zone(), script_scope, MODULE_SCOPE,
+                       kModule) {
+  Zone* zone = ast_value_factory->zone();
   module_descriptor_ = new (zone) ModuleDescriptor(zone);
   set_language_mode(STRICT);
   DeclareThis(ast_value_factory);
 }
 
-Scope::Scope(Zone* zone, Scope* inner_scope, ScopeType scope_type,
-             Handle<ScopeInfo> scope_info)
+ModuleScope::ModuleScope(Isolate* isolate, Handle<ScopeInfo> scope_info,
+                         AstValueFactory* avfactory)
+    : DeclarationScope(avfactory->zone(), MODULE_SCOPE, scope_info) {
+  Zone* zone = avfactory->zone();
+  ModuleInfo* module_info = scope_info->ModuleDescriptorInfo();
+
+  set_language_mode(STRICT);
+  module_descriptor_ = new (zone) ModuleDescriptor(zone);
+
+  // Deserialize special exports.
+  Handle<FixedArray> special_exports(module_info->special_exports(), isolate);
+  for (int i = 0, n = special_exports->length(); i < n; ++i) {
+    Handle<ModuleInfoEntry> serialized_entry(
+        ModuleInfoEntry::cast(special_exports->get(i)), isolate);
+    module_descriptor_->AddSpecialExport(
+        ModuleDescriptor::Entry::Deserialize(isolate, avfactory,
+                                             serialized_entry),
+        avfactory->zone());
+  }
+
+  // Deserialize regular exports.
+  Handle<FixedArray> regular_exports(module_info->regular_exports(), isolate);
+  module_descriptor_->DeserializeRegularExports(isolate, avfactory,
+                                                regular_exports);
+
+  // Deserialize namespace imports.
+  Handle<FixedArray> namespace_imports(module_info->namespace_imports(),
+                                       isolate);
+  for (int i = 0, n = namespace_imports->length(); i < n; ++i) {
+    Handle<ModuleInfoEntry> serialized_entry(
+        ModuleInfoEntry::cast(namespace_imports->get(i)), isolate);
+    module_descriptor_->AddNamespaceImport(
+        ModuleDescriptor::Entry::Deserialize(isolate, avfactory,
+                                             serialized_entry),
+        avfactory->zone());
+  }
+
+  // Deserialize regular imports.
+  Handle<FixedArray> regular_imports(module_info->regular_imports(), isolate);
+  for (int i = 0, n = regular_imports->length(); i < n; ++i) {
+    Handle<ModuleInfoEntry> serialized_entry(
+        ModuleInfoEntry::cast(regular_imports->get(i)), isolate);
+    module_descriptor_->AddRegularImport(ModuleDescriptor::Entry::Deserialize(
+        isolate, avfactory, serialized_entry));
+  }
+}
+
+Scope::Scope(Zone* zone, ScopeType scope_type, Handle<ScopeInfo> scope_info)
     : zone_(zone),
       outer_scope_(nullptr),
       variables_(zone),
-      ordered_variables_(0, zone),
+      locals_(0, zone),
       decls_(0, zone),
       scope_info_(scope_info),
       scope_type_(scope_type) {
+  DCHECK(!scope_info.is_null());
   SetDefaults();
 #ifdef DEBUG
   already_resolved_ = true;
 #endif
-  if (scope_type == WITH_SCOPE) {
-    DCHECK(scope_info.is_null());
-  } else {
-    if (scope_info->CallsEval()) RecordEvalCall();
-    set_language_mode(scope_info->language_mode());
-    num_heap_slots_ = scope_info->ContextLength();
-  }
+  if (scope_info->CallsEval()) RecordEvalCall();
+  set_language_mode(scope_info->language_mode());
+  num_heap_slots_ = scope_info->ContextLength();
   DCHECK_LE(Context::MIN_CONTEXT_SLOTS, num_heap_slots_);
-
-  if (inner_scope != nullptr) AddInnerScope(inner_scope);
 }
 
-DeclarationScope::DeclarationScope(Zone* zone, Scope* inner_scope,
-                                   ScopeType scope_type,
+DeclarationScope::DeclarationScope(Zone* zone, ScopeType scope_type,
                                    Handle<ScopeInfo> scope_info)
-    : Scope(zone, inner_scope, scope_type, scope_info),
+    : Scope(zone, scope_type, scope_info),
       function_kind_(scope_info->function_kind()),
-      temps_(0, zone),
       params_(0, zone),
       sloppy_block_function_map_(zone) {
+  DCHECK_NE(scope_type, SCRIPT_SCOPE);
   SetDefaults();
 }
 
-Scope::Scope(Zone* zone, Scope* inner_scope,
-             const AstRawString* catch_variable_name)
+Scope::Scope(Zone* zone, const AstRawString* catch_variable_name,
+             Handle<ScopeInfo> scope_info)
     : zone_(zone),
       outer_scope_(nullptr),
       variables_(zone),
-      ordered_variables_(0, zone),
+      locals_(0, zone),
       decls_(0, zone),
+      scope_info_(scope_info),
       scope_type_(CATCH_SCOPE) {
   SetDefaults();
 #ifdef DEBUG
   already_resolved_ = true;
 #endif
-  if (inner_scope != nullptr) AddInnerScope(inner_scope);
-  Variable* variable =
-      variables_.Declare(zone, this, catch_variable_name, VAR, Variable::NORMAL,
-                         kCreatedInitialized);
+  // Cache the catch variable, even though it's also available via the
+  // scope_info, as the parser expects that a catch scope always has the catch
+  // variable as first and only variable.
+  Variable* variable = Declare(zone, this, catch_variable_name, VAR,
+                               NORMAL_VARIABLE, kCreatedInitialized);
   AllocateHeapSlot(variable);
 }
 
@@ -200,31 +265,30 @@
   force_eager_compilation_ = false;
   has_arguments_parameter_ = false;
   scope_uses_super_property_ = false;
+  has_rest_ = false;
   receiver_ = nullptr;
   new_target_ = nullptr;
   function_ = nullptr;
   arguments_ = nullptr;
   this_function_ = nullptr;
   arity_ = 0;
-  rest_index_ = -1;
 }
 
 void Scope::SetDefaults() {
 #ifdef DEBUG
   scope_name_ = nullptr;
   already_resolved_ = false;
+  needs_migration_ = false;
 #endif
   inner_scope_ = nullptr;
   sibling_ = nullptr;
   unresolved_ = nullptr;
-  dynamics_ = nullptr;
 
   start_position_ = kNoSourcePosition;
   end_position_ = kNoSourcePosition;
 
   num_stack_slots_ = 0;
   num_heap_slots_ = Context::MIN_CONTEXT_SLOTS;
-  num_global_slots_ = 0;
 
   set_language_mode(SLOPPY);
 
@@ -237,6 +301,8 @@
   force_context_allocation_ = false;
 
   is_declaration_scope_ = false;
+
+  is_lazily_parsed_ = false;
 }
 
 bool Scope::HasSimpleParameters() {
@@ -244,6 +310,16 @@
   return !scope->is_function_scope() || scope->has_simple_parameters();
 }
 
+void DeclarationScope::set_asm_module() {
+  asm_module_ = true;
+  // Mark any existing inner function scopes as asm function scopes.
+  for (Scope* inner = inner_scope_; inner != nullptr; inner = inner->sibling_) {
+    if (inner->is_function_scope()) {
+      inner->AsDeclarationScope()->set_asm_function();
+    }
+  }
+}
+
 bool Scope::IsAsmModule() const {
   return is_function_scope() && AsDeclarationScope()->asm_module();
 }
@@ -253,137 +329,77 @@
 }
 
 Scope* Scope::DeserializeScopeChain(Isolate* isolate, Zone* zone,
-                                    Context* context,
+                                    ScopeInfo* scope_info,
                                     DeclarationScope* script_scope,
                                     AstValueFactory* ast_value_factory,
                                     DeserializationMode deserialization_mode) {
   // Reconstruct the outer scope chain from a closure's context chain.
   Scope* current_scope = nullptr;
   Scope* innermost_scope = nullptr;
-  while (!context->IsNativeContext()) {
-    if (context->IsWithContext() || context->IsDebugEvaluateContext()) {
+  Scope* outer_scope = nullptr;
+  while (scope_info) {
+    if (scope_info->scope_type() == WITH_SCOPE) {
       // For scope analysis, debug-evaluate is equivalent to a with scope.
-      Scope* with_scope = new (zone)
-          Scope(zone, current_scope, WITH_SCOPE, Handle<ScopeInfo>());
+      outer_scope = new (zone) Scope(zone, WITH_SCOPE, handle(scope_info));
+
       // TODO(yangguo): Remove once debug-evaluate properly keeps track of the
       // function scope in which we are evaluating.
-      if (context->IsDebugEvaluateContext()) {
-        with_scope->set_is_debug_evaluate_scope();
+      if (scope_info->IsDebugEvaluateScope()) {
+        outer_scope->set_is_debug_evaluate_scope();
       }
-      current_scope = with_scope;
-    } else if (context->IsScriptContext()) {
-      Handle<ScopeInfo> scope_info(context->scope_info(), isolate);
-      DCHECK_EQ(scope_info->scope_type(), SCRIPT_SCOPE);
-      current_scope = new (zone)
-          DeclarationScope(zone, current_scope, SCRIPT_SCOPE, scope_info);
-    } else if (context->IsFunctionContext()) {
-      Handle<ScopeInfo> scope_info(context->closure()->shared()->scope_info(),
-                                   isolate);
+    } else if (scope_info->scope_type() == SCRIPT_SCOPE) {
+      // If we reach a script scope, it's the outermost scope. Install the
+      // scope info of this script context onto the existing script scope to
+      // avoid nesting script scopes.
+      if (deserialization_mode == DeserializationMode::kIncludingVariables) {
+        script_scope->SetScriptScopeInfo(handle(scope_info));
+      }
+      DCHECK(!scope_info->HasOuterScopeInfo());
+      break;
+    } else if (scope_info->scope_type() == FUNCTION_SCOPE ||
+               scope_info->scope_type() == EVAL_SCOPE) {
       // TODO(neis): For an eval scope, we currently create an ordinary function
       // context.  This is wrong and needs to be fixed.
       // https://bugs.chromium.org/p/v8/issues/detail?id=5295
-      DCHECK(scope_info->scope_type() == FUNCTION_SCOPE ||
-             scope_info->scope_type() == EVAL_SCOPE);
-      DeclarationScope* function_scope = new (zone)
-          DeclarationScope(zone, current_scope, FUNCTION_SCOPE, scope_info);
-      if (scope_info->IsAsmFunction()) function_scope->set_asm_function();
-      if (scope_info->IsAsmModule()) function_scope->set_asm_module();
-      current_scope = function_scope;
-    } else if (context->IsBlockContext()) {
-      Handle<ScopeInfo> scope_info(context->scope_info(), isolate);
-      DCHECK_EQ(scope_info->scope_type(), BLOCK_SCOPE);
+      outer_scope =
+          new (zone) DeclarationScope(zone, FUNCTION_SCOPE, handle(scope_info));
+      if (scope_info->IsAsmFunction())
+        outer_scope->AsDeclarationScope()->set_asm_function();
+      if (scope_info->IsAsmModule())
+        outer_scope->AsDeclarationScope()->set_asm_module();
+    } else if (scope_info->scope_type() == BLOCK_SCOPE) {
       if (scope_info->is_declaration_scope()) {
-        current_scope = new (zone)
-            DeclarationScope(zone, current_scope, BLOCK_SCOPE, scope_info);
+        outer_scope =
+            new (zone) DeclarationScope(zone, BLOCK_SCOPE, handle(scope_info));
       } else {
-        current_scope =
-            new (zone) Scope(zone, current_scope, BLOCK_SCOPE, scope_info);
+        outer_scope = new (zone) Scope(zone, BLOCK_SCOPE, handle(scope_info));
       }
+    } else if (scope_info->scope_type() == MODULE_SCOPE) {
+      outer_scope = new (zone)
+          ModuleScope(isolate, handle(scope_info), ast_value_factory);
     } else {
-      DCHECK(context->IsCatchContext());
-      String* name = context->catch_name();
-      current_scope =
-          new (zone) Scope(zone, current_scope,
-                           ast_value_factory->GetString(handle(name, isolate)));
+      DCHECK_EQ(scope_info->scope_type(), CATCH_SCOPE);
+      DCHECK_EQ(scope_info->LocalCount(), 1);
+      String* name = scope_info->LocalName(0);
+      outer_scope = new (zone)
+          Scope(zone, ast_value_factory->GetString(handle(name, isolate)),
+                handle(scope_info));
     }
-    if (deserialization_mode == DeserializationMode::kDeserializeOffHeap) {
-      current_scope->DeserializeScopeInfo(isolate, ast_value_factory);
+    if (deserialization_mode == DeserializationMode::kScopesOnly) {
+      outer_scope->scope_info_ = Handle<ScopeInfo>::null();
     }
+    if (current_scope != nullptr) {
+      outer_scope->AddInnerScope(current_scope);
+    }
+    current_scope = outer_scope;
     if (innermost_scope == nullptr) innermost_scope = current_scope;
-    context = context->previous();
+    scope_info = scope_info->HasOuterScopeInfo() ? scope_info->OuterScopeInfo()
+                                                 : nullptr;
   }
 
+  if (innermost_scope == nullptr) return script_scope;
   script_scope->AddInnerScope(current_scope);
-  script_scope->PropagateScopeInfo();
-  return (innermost_scope == NULL) ? script_scope : innermost_scope;
-}
-
-void Scope::DeserializeScopeInfo(Isolate* isolate,
-                                 AstValueFactory* ast_value_factory) {
-  if (scope_info_.is_null()) return;
-
-  DCHECK(ThreadId::Current().Equals(isolate->thread_id()));
-
-  std::set<const AstRawString*> names_seen;
-  // Internalize context local & globals variables.
-  for (int var = 0; var < scope_info_->ContextLocalCount() +
-                              scope_info_->ContextGlobalCount();
-       ++var) {
-    Handle<String> name_handle(scope_info_->ContextLocalName(var), isolate);
-    const AstRawString* name = ast_value_factory->GetString(name_handle);
-    if (!names_seen.insert(name).second) continue;
-    int index = Context::MIN_CONTEXT_SLOTS + var;
-    VariableMode mode = scope_info_->ContextLocalMode(var);
-    InitializationFlag init_flag = scope_info_->ContextLocalInitFlag(var);
-    MaybeAssignedFlag maybe_assigned_flag =
-        scope_info_->ContextLocalMaybeAssignedFlag(var);
-    VariableLocation location = var < scope_info_->ContextLocalCount()
-                                    ? VariableLocation::CONTEXT
-                                    : VariableLocation::GLOBAL;
-    Variable::Kind kind = Variable::NORMAL;
-    if (index == scope_info_->ReceiverContextSlotIndex()) {
-      kind = Variable::THIS;
-    }
-
-    Variable* result = variables_.Declare(zone(), this, name, mode, kind,
-                                          init_flag, maybe_assigned_flag);
-    result->AllocateTo(location, index);
-  }
-
-  // We must read parameters from the end since for multiply declared
-  // parameters the value of the last declaration of that parameter is used
-  // inside a function (and thus we need to look at the last index). Was bug#
-  // 1110337.
-  for (int index = scope_info_->ParameterCount() - 1; index >= 0; --index) {
-    Handle<String> name_handle(scope_info_->ParameterName(index), isolate);
-    const AstRawString* name = ast_value_factory->GetString(name_handle);
-    if (!names_seen.insert(name).second) continue;
-
-    VariableMode mode = DYNAMIC;
-    InitializationFlag init_flag = kCreatedInitialized;
-    MaybeAssignedFlag maybe_assigned_flag = kMaybeAssigned;
-    VariableLocation location = VariableLocation::LOOKUP;
-    Variable::Kind kind = Variable::NORMAL;
-
-    Variable* result = variables_.Declare(zone(), this, name, mode, kind,
-                                          init_flag, maybe_assigned_flag);
-    result->AllocateTo(location, index);
-  }
-
-  // Internalize function proxy for this scope.
-  if (scope_info_->HasFunctionName()) {
-    Handle<String> name_handle(scope_info_->FunctionName(), isolate);
-    const AstRawString* name = ast_value_factory->GetString(name_handle);
-    VariableMode mode;
-    int index = scope_info_->FunctionContextSlotIndex(*name_handle, &mode);
-    if (index >= 0) {
-      Variable* result = AsDeclarationScope()->DeclareFunctionVar(name);
-      DCHECK_EQ(mode, result->mode());
-      result->AllocateTo(VariableLocation::CONTEXT, index);
-    }
-  }
-
-  scope_info_ = Handle<ScopeInfo>::null();
+  return innermost_scope;
 }
 
 DeclarationScope* Scope::AsDeclarationScope() {
@@ -410,10 +426,124 @@
   return is_declaration_scope() ? AsDeclarationScope()->num_parameters() : 0;
 }
 
-void Scope::Analyze(ParseInfo* info) {
+void DeclarationScope::HoistSloppyBlockFunctions(AstNodeFactory* factory) {
+  DCHECK(is_sloppy(language_mode()));
+  DCHECK(is_function_scope() || is_eval_scope() || is_script_scope() ||
+         (is_block_scope() && outer_scope()->is_function_scope()));
+  DCHECK(HasSimpleParameters() || is_block_scope());
+  bool has_simple_parameters = HasSimpleParameters();
+  // For each variable which is used as a function declaration in a sloppy
+  // block,
+  SloppyBlockFunctionMap* map = sloppy_block_function_map();
+  for (ZoneHashMap::Entry* p = map->Start(); p != nullptr; p = map->Next(p)) {
+    AstRawString* name = static_cast<AstRawString*>(p->key);
+
+    // If the variable wouldn't conflict with a lexical declaration
+    // or parameter,
+
+    // Check if there's a conflict with a parameter.
+    // This depends on the fact that functions always have a scope solely to
+    // hold complex parameters, and the names local to that scope are
+    // precisely the names of the parameters. IsDeclaredParameter(name) does
+    // not hold for names declared by complex parameters, nor are those
+    // bindings necessarily declared lexically, so we have to check for them
+    // explicitly. On the other hand, if there are not complex parameters,
+    // it is sufficient to just check IsDeclaredParameter.
+    if (!has_simple_parameters) {
+      if (outer_scope_->LookupLocal(name) != nullptr) {
+        continue;
+      }
+    } else {
+      if (IsDeclaredParameter(name)) {
+        continue;
+      }
+    }
+
+    bool var_created = false;
+
+    // Write in assignments to var for each block-scoped function declaration
+    auto delegates = static_cast<SloppyBlockFunctionStatement*>(p->value);
+
+    DeclarationScope* decl_scope = this;
+    while (decl_scope->is_eval_scope()) {
+      decl_scope = decl_scope->outer_scope()->GetDeclarationScope();
+    }
+    Scope* outer_scope = decl_scope->outer_scope();
+
+    for (SloppyBlockFunctionStatement* delegate = delegates;
+         delegate != nullptr; delegate = delegate->next()) {
+      // Check if there's a conflict with a lexical declaration
+      Scope* query_scope = delegate->scope()->outer_scope();
+      Variable* var = nullptr;
+      bool should_hoist = true;
+
+      // Note that we perform this loop for each delegate named 'name',
+      // which may duplicate work if those delegates share scopes.
+      // It is not sufficient to just do a Lookup on query_scope: for
+      // example, that does not prevent hoisting of the function in
+      // `{ let e; try {} catch (e) { function e(){} } }`
+      do {
+        var = query_scope->LookupLocal(name);
+        if (var != nullptr && IsLexicalVariableMode(var->mode())) {
+          should_hoist = false;
+          break;
+        }
+        query_scope = query_scope->outer_scope();
+      } while (query_scope != outer_scope);
+
+      if (!should_hoist) continue;
+
+      // Declare a var-style binding for the function in the outer scope
+      if (!var_created) {
+        var_created = true;
+        VariableProxy* proxy = factory->NewVariableProxy(name, NORMAL_VARIABLE);
+        Declaration* declaration =
+            factory->NewVariableDeclaration(proxy, this, kNoSourcePosition);
+        // Based on the preceding check, it doesn't matter what we pass as
+        // allow_harmony_restrictive_generators and
+        // sloppy_mode_block_scope_function_redefinition.
+        bool ok = true;
+        DeclareVariable(declaration, VAR,
+                        Variable::DefaultInitializationFlag(VAR), false,
+                        nullptr, &ok);
+        CHECK(ok);  // Based on the preceding check, this should not fail
+      }
+
+      Expression* assignment = factory->NewAssignment(
+          Token::ASSIGN, NewUnresolved(factory, name),
+          delegate->scope()->NewUnresolved(factory, name), kNoSourcePosition);
+      Statement* statement =
+          factory->NewExpressionStatement(assignment, kNoSourcePosition);
+      delegate->set_statement(statement);
+    }
+  }
+}
+
+void DeclarationScope::Analyze(ParseInfo* info, AnalyzeMode mode) {
   DCHECK(info->literal() != NULL);
   DeclarationScope* scope = info->literal()->scope();
 
+  Handle<ScopeInfo> outer_scope_info;
+  if (info->maybe_outer_scope_info().ToHandle(&outer_scope_info)) {
+    if (scope->outer_scope()) {
+      DeclarationScope* script_scope = new (info->zone())
+          DeclarationScope(info->zone(), info->ast_value_factory());
+      info->set_script_scope(script_scope);
+      scope->ReplaceOuterScope(Scope::DeserializeScopeChain(
+          info->isolate(), info->zone(), *outer_scope_info, script_scope,
+          info->ast_value_factory(),
+          Scope::DeserializationMode::kIncludingVariables));
+    } else {
+      DCHECK_EQ(outer_scope_info->scope_type(), SCRIPT_SCOPE);
+      scope->SetScriptScopeInfo(outer_scope_info);
+    }
+  }
+
+  if (scope->is_eval_scope() && is_sloppy(scope->language_mode())) {
+    AstNodeFactory factory(info->ast_value_factory());
+    scope->HoistSloppyBlockFunctions(&factory);
+  }
+
   // We are compiling one of three cases:
   // 1) top-level code,
   // 2) a function/eval/module on the top-level
@@ -422,10 +552,13 @@
          scope->outer_scope()->scope_type() == SCRIPT_SCOPE ||
          scope->outer_scope()->already_resolved_);
 
-  // Allocate the variables.
-  {
-    AstNodeFactory ast_node_factory(info->ast_value_factory());
-    scope->AllocateVariables(info, &ast_node_factory);
+  scope->AllocateVariables(info, mode);
+
+  // Ensuring that the outer script scope has a scope info avoids having
+  // special case for native contexts vs other contexts.
+  if (info->script_scope()->scope_info_.is_null()) {
+    info->script_scope()->scope_info_ =
+        handle(ScopeInfo::Empty(info->isolate()));
   }
 
 #ifdef DEBUG
@@ -446,41 +579,91 @@
   bool subclass_constructor = IsSubclassConstructor(function_kind_);
   Variable* var = Declare(
       zone(), this, ast_value_factory->this_string(),
-      subclass_constructor ? CONST : VAR, Variable::THIS,
+      subclass_constructor ? CONST : VAR, THIS_VARIABLE,
       subclass_constructor ? kNeedsInitialization : kCreatedInitialized);
   receiver_ = var;
 }
 
+void DeclarationScope::DeclareArguments(AstValueFactory* ast_value_factory) {
+  DCHECK(is_function_scope());
+  DCHECK(!is_arrow_scope());
+
+  arguments_ = LookupLocal(ast_value_factory->arguments_string());
+  if (arguments_ == nullptr) {
+    // Declare 'arguments' variable which exists in all non arrow functions.
+    // Note that it might never be accessed, in which case it won't be
+    // allocated during variable allocation.
+    arguments_ = Declare(zone(), this, ast_value_factory->arguments_string(),
+                         VAR, NORMAL_VARIABLE, kCreatedInitialized);
+  } else if (IsLexicalVariableMode(arguments_->mode())) {
+    // Check if there's lexically declared variable named arguments to avoid
+    // redeclaration. See ES#sec-functiondeclarationinstantiation, step 20.
+    arguments_ = nullptr;
+  }
+}
+
 void DeclarationScope::DeclareDefaultFunctionVariables(
     AstValueFactory* ast_value_factory) {
   DCHECK(is_function_scope());
   DCHECK(!is_arrow_scope());
-  // Declare 'arguments' variable which exists in all non arrow functions.
-  // Note that it might never be accessed, in which case it won't be
-  // allocated during variable allocation.
-  arguments_ = Declare(zone(), this, ast_value_factory->arguments_string(), VAR,
-                       Variable::ARGUMENTS, kCreatedInitialized);
 
+  DeclareThis(ast_value_factory);
   new_target_ = Declare(zone(), this, ast_value_factory->new_target_string(),
-                        CONST, Variable::NORMAL, kCreatedInitialized);
+                        CONST, NORMAL_VARIABLE, kCreatedInitialized);
 
   if (IsConciseMethod(function_kind_) || IsClassConstructor(function_kind_) ||
       IsAccessorFunction(function_kind_)) {
     this_function_ =
         Declare(zone(), this, ast_value_factory->this_function_string(), CONST,
-                Variable::NORMAL, kCreatedInitialized);
+                NORMAL_VARIABLE, kCreatedInitialized);
   }
 }
 
 Variable* DeclarationScope::DeclareFunctionVar(const AstRawString* name) {
   DCHECK(is_function_scope());
   DCHECK_NULL(function_);
-  VariableMode mode = is_strict(language_mode()) ? CONST : CONST_LEGACY;
-  function_ = new (zone())
-      Variable(this, name, mode, Variable::NORMAL, kCreatedInitialized);
+  DCHECK_NULL(variables_.Lookup(name));
+  VariableKind kind = is_sloppy(language_mode()) ? SLOPPY_FUNCTION_NAME_VARIABLE
+                                                 : NORMAL_VARIABLE;
+  function_ =
+      new (zone()) Variable(this, name, CONST, kind, kCreatedInitialized);
+  if (calls_sloppy_eval()) {
+    NonLocal(name, DYNAMIC);
+  } else {
+    variables_.Add(zone(), function_);
+  }
   return function_;
 }
 
+bool Scope::HasBeenRemoved() const {
+  // TODO(neis): Store this information somewhere instead of calculating it.
+
+  if (!is_block_scope()) return false;  // Shortcut.
+
+  Scope* parent = outer_scope();
+  if (parent == nullptr) {
+    DCHECK(is_script_scope());
+    return false;
+  }
+
+  Scope* sibling = parent->inner_scope();
+  for (; sibling != nullptr; sibling = sibling->sibling()) {
+    if (sibling == this) return false;
+  }
+
+  DCHECK_NULL(inner_scope_);
+  return true;
+}
+
+Scope* Scope::GetUnremovedScope() {
+  Scope* scope = this;
+  while (scope != nullptr && scope->HasBeenRemoved()) {
+    scope = scope->outer_scope();
+  }
+  DCHECK_NOT_NULL(scope);
+  return scope;
+}
+
 Scope* Scope::FinalizeBlockScope() {
   DCHECK(is_block_scope());
 
@@ -530,7 +713,7 @@
   DCHECK_EQ(new_parent, new_parent->GetClosureScope());
   DCHECK_NULL(new_parent->inner_scope_);
   DCHECK_NULL(new_parent->unresolved_);
-  DCHECK_EQ(0, new_parent->temps()->length());
+  DCHECK_EQ(0, new_parent->locals_.length());
   Scope* inner_scope = new_parent->sibling_;
   if (inner_scope != top_inner_scope_) {
     for (; inner_scope->sibling() != top_inner_scope_;
@@ -557,25 +740,31 @@
     outer_scope_->unresolved_ = top_unresolved_;
   }
 
-  if (outer_scope_->GetClosureScope()->temps()->length() != top_temp_) {
-    ZoneList<Variable*>* temps = outer_scope_->GetClosureScope()->temps();
-    for (int i = top_temp_; i < temps->length(); i++) {
-      Variable* temp = temps->at(i);
-      DCHECK_EQ(temp->scope(), temp->scope()->GetClosureScope());
-      DCHECK_NE(temp->scope(), new_parent);
-      temp->set_scope(new_parent);
-      new_parent->AddTemporary(temp);
+  // TODO(verwaest): This currently only moves do-expression declared variables
+  // in default arguments that weren't already previously declared with the same
+  // name in the closure-scope. See
+  // test/mjsunit/harmony/default-parameter-do-expression.js.
+  DeclarationScope* outer_closure = outer_scope_->GetClosureScope();
+  for (int i = top_local_; i < outer_closure->locals_.length(); i++) {
+    Variable* local = outer_closure->locals_.at(i);
+    DCHECK(local->mode() == TEMPORARY || local->mode() == VAR);
+    DCHECK_EQ(local->scope(), local->scope()->GetClosureScope());
+    DCHECK_NE(local->scope(), new_parent);
+    local->set_scope(new_parent);
+    new_parent->AddLocal(local);
+    if (local->mode() == VAR) {
+      outer_closure->variables_.Remove(local);
+      new_parent->variables_.Add(new_parent->zone(), local);
     }
-    temps->Rewind(top_temp_);
   }
+  outer_closure->locals_.Rewind(top_local_);
+  outer_closure->decls_.Rewind(top_decl_);
 }
 
 void Scope::ReplaceOuterScope(Scope* outer) {
   DCHECK_NOT_NULL(outer);
   DCHECK_NOT_NULL(outer_scope_);
   DCHECK(!already_resolved_);
-  DCHECK(!outer->already_resolved_);
-  DCHECK(!outer_scope_->already_resolved_);
   outer_scope_->RemoveInnerScope(this);
   outer->AddInnerScope(this);
   outer_scope_ = outer;
@@ -589,57 +778,44 @@
   if (calls_eval()) other->RecordEvalCall();
 }
 
-
-Variable* Scope::LookupLocal(const AstRawString* name) {
-  Variable* result = variables_.Lookup(name);
-  if (result != NULL || scope_info_.is_null()) {
-    return result;
-  }
+Variable* Scope::LookupInScopeInfo(const AstRawString* name) {
   Handle<String> name_handle = name->string();
   // The Scope is backed up by ScopeInfo. This means it cannot operate in a
   // heap-independent mode, and all strings must be internalized immediately. So
   // it's ok to get the Handle<String> here.
   // If we have a serialized scope info, we might find the variable there.
   // There should be no local slot with the given name.
-  DCHECK(scope_info_->StackSlotIndex(*name_handle) < 0);
+  DCHECK_LT(scope_info_->StackSlotIndex(*name_handle), 0);
 
-  // Check context slot lookup.
   VariableMode mode;
-  VariableLocation location = VariableLocation::CONTEXT;
   InitializationFlag init_flag;
   MaybeAssignedFlag maybe_assigned_flag;
+
+  VariableLocation location = VariableLocation::CONTEXT;
   int index = ScopeInfo::ContextSlotIndex(scope_info_, name_handle, &mode,
                                           &init_flag, &maybe_assigned_flag);
-  if (index < 0) {
-    location = VariableLocation::GLOBAL;
-    index = ScopeInfo::ContextGlobalSlotIndex(scope_info_, name_handle, &mode,
-                                              &init_flag, &maybe_assigned_flag);
-  }
-  if (index < 0) {
-    // Check parameters.
-    index = scope_info_->ParameterIndex(*name_handle);
-    if (index < 0) return NULL;
-
-    mode = DYNAMIC;
-    location = VariableLocation::LOOKUP;
-    init_flag = kCreatedInitialized;
-    // Be conservative and flag parameters as maybe assigned. Better information
-    // would require ScopeInfo to serialize the maybe_assigned bit also for
-    // parameters.
-    maybe_assigned_flag = kMaybeAssigned;
-  } else {
-    DCHECK(location != VariableLocation::GLOBAL ||
-           (is_script_scope() && IsDeclaredVariableMode(mode) &&
-            !IsLexicalVariableMode(mode)));
+  if (index < 0 && scope_type() == MODULE_SCOPE) {
+    location = VariableLocation::MODULE;
+    index = scope_info_->ModuleIndex(name_handle, &mode, &init_flag,
+                                     &maybe_assigned_flag);
   }
 
-  Variable::Kind kind = Variable::NORMAL;
+  if (index < 0) {
+    index = scope_info_->FunctionContextSlotIndex(*name_handle);
+    if (index < 0) return nullptr;  // Nowhere found.
+    Variable* var = AsDeclarationScope()->DeclareFunctionVar(name);
+    DCHECK_EQ(CONST, var->mode());
+    var->AllocateTo(VariableLocation::CONTEXT, index);
+    return variables_.Lookup(name);
+  }
+
+  VariableKind kind = NORMAL_VARIABLE;
   if (location == VariableLocation::CONTEXT &&
       index == scope_info_->ReceiverContextSlotIndex()) {
-    kind = Variable::THIS;
+    kind = THIS_VARIABLE;
   }
   // TODO(marja, rossberg): Correctly declare FUNCTION, CLASS, NEW_TARGET, and
-  // ARGUMENTS bindings as their corresponding Variable::Kind.
+  // ARGUMENTS bindings as their corresponding VariableKind.
 
   Variable* var = variables_.Declare(zone(), this, name, mode, kind, init_flag,
                                      maybe_assigned_flag);
@@ -647,24 +823,6 @@
   return var;
 }
 
-Variable* DeclarationScope::LookupFunctionVar(const AstRawString* name) {
-  if (function_ != nullptr && function_->raw_name() == name) {
-    return function_;
-  } else if (!scope_info_.is_null()) {
-    // If we are backed by a scope info, try to lookup the variable there.
-    VariableMode mode;
-    int index = scope_info_->FunctionContextSlotIndex(*(name->string()), &mode);
-    if (index < 0) return nullptr;
-    Variable* var = DeclareFunctionVar(name);
-    DCHECK_EQ(mode, var->mode());
-    var->AllocateTo(VariableLocation::CONTEXT, index);
-    return var;
-  } else {
-    return nullptr;
-  }
-}
-
-
 Variable* Scope::Lookup(const AstRawString* name) {
   for (Scope* scope = this;
        scope != NULL;
@@ -679,21 +837,22 @@
     const AstRawString* name, VariableMode mode, bool is_optional, bool is_rest,
     bool* is_duplicate, AstValueFactory* ast_value_factory) {
   DCHECK(!already_resolved_);
-  DCHECK(is_function_scope());
+  DCHECK(is_function_scope() || is_module_scope());
+  DCHECK(!has_rest_);
   DCHECK(!is_optional || !is_rest);
   Variable* var;
   if (mode == TEMPORARY) {
     var = NewTemporary(name);
   } else {
-    var = Declare(zone(), this, name, mode, Variable::NORMAL,
-                  kCreatedInitialized);
+    var =
+        Declare(zone(), this, name, mode, NORMAL_VARIABLE, kCreatedInitialized);
     // TODO(wingo): Avoid O(n^2) check.
     *is_duplicate = IsDeclaredParameter(name);
   }
   if (!is_optional && !is_rest && arity_ == params_.length()) {
     ++arity_;
   }
-  if (is_rest) rest_index_ = num_parameters();
+  has_rest_ = is_rest;
   params_.Add(var, zone());
   if (name == ast_value_factory->arguments_string()) {
     has_arguments_parameter_ = true;
@@ -702,7 +861,7 @@
 }
 
 Variable* Scope::DeclareLocal(const AstRawString* name, VariableMode mode,
-                              InitializationFlag init_flag, Variable::Kind kind,
+                              InitializationFlag init_flag, VariableKind kind,
                               MaybeAssignedFlag maybe_assigned_flag) {
   DCHECK(!already_resolved_);
   // This function handles VAR, LET, and CONST modes.  DYNAMIC variables are
@@ -713,10 +872,138 @@
                  maybe_assigned_flag);
 }
 
+Variable* Scope::DeclareVariable(
+    Declaration* declaration, VariableMode mode, InitializationFlag init,
+    bool allow_harmony_restrictive_generators,
+    bool* sloppy_mode_block_scope_function_redefinition, bool* ok) {
+  DCHECK(IsDeclaredVariableMode(mode));
+  DCHECK(!already_resolved_);
+
+  if (mode == VAR && !is_declaration_scope()) {
+    return GetDeclarationScope()->DeclareVariable(
+        declaration, mode, init, allow_harmony_restrictive_generators,
+        sloppy_mode_block_scope_function_redefinition, ok);
+  }
+  DCHECK(!is_catch_scope());
+  DCHECK(!is_with_scope());
+  DCHECK(is_declaration_scope() ||
+         (IsLexicalVariableMode(mode) && is_block_scope()));
+
+  VariableProxy* proxy = declaration->proxy();
+  DCHECK(proxy->raw_name() != NULL);
+  const AstRawString* name = proxy->raw_name();
+  bool is_function_declaration = declaration->IsFunctionDeclaration();
+
+  Variable* var = nullptr;
+  if (is_eval_scope() && is_sloppy(language_mode()) && mode == VAR) {
+    // In a var binding in a sloppy direct eval, pollute the enclosing scope
+    // with this new binding by doing the following:
+    // The proxy is bound to a lookup variable to force a dynamic declaration
+    // using the DeclareEvalVar or DeclareEvalFunction runtime functions.
+    VariableKind kind = NORMAL_VARIABLE;
+    // TODO(sigurds) figure out if kNotAssigned is OK here
+    var = new (zone()) Variable(this, name, mode, kind, init, kNotAssigned);
+    var->AllocateTo(VariableLocation::LOOKUP, -1);
+  } else {
+    // Declare the variable in the declaration scope.
+    var = LookupLocal(name);
+    if (var == NULL) {
+      // Declare the name.
+      VariableKind kind = NORMAL_VARIABLE;
+      if (is_function_declaration) {
+        kind = FUNCTION_VARIABLE;
+      }
+      var = DeclareLocal(name, mode, init, kind, kNotAssigned);
+    } else if (IsLexicalVariableMode(mode) ||
+               IsLexicalVariableMode(var->mode())) {
+      // Allow duplicate function decls for web compat, see bug 4693.
+      bool duplicate_allowed = false;
+      if (is_sloppy(language_mode()) && is_function_declaration &&
+          var->is_function()) {
+        DCHECK(IsLexicalVariableMode(mode) &&
+               IsLexicalVariableMode(var->mode()));
+        // If the duplication is allowed, then the var will show up
+        // in the SloppyBlockFunctionMap and the new FunctionKind
+        // will be a permitted duplicate.
+        FunctionKind function_kind =
+            declaration->AsFunctionDeclaration()->fun()->kind();
+        duplicate_allowed =
+            GetDeclarationScope()->sloppy_block_function_map()->Lookup(
+                const_cast<AstRawString*>(name), name->hash()) != nullptr &&
+            !IsAsyncFunction(function_kind) &&
+            !(allow_harmony_restrictive_generators &&
+              IsGeneratorFunction(function_kind));
+      }
+      if (duplicate_allowed) {
+        *sloppy_mode_block_scope_function_redefinition = true;
+      } else {
+        // The name was declared in this scope before; check for conflicting
+        // re-declarations. We have a conflict if either of the declarations
+        // is not a var (in script scope, we also have to ignore legacy const
+        // for compatibility). There is similar code in runtime.cc in the
+        // Declare functions. The function CheckConflictingVarDeclarations
+        // checks for var and let bindings from different scopes whereas this
+        // is a check for conflicting declarations within the same scope. This
+        // check also covers the special case
+        //
+        // function () { let x; { var x; } }
+        //
+        // because the var declaration is hoisted to the function scope where
+        // 'x' is already bound.
+        DCHECK(IsDeclaredVariableMode(var->mode()));
+        // In harmony we treat re-declarations as early errors. See
+        // ES5 16 for a definition of early errors.
+        *ok = false;
+        return nullptr;
+      }
+    } else if (mode == VAR) {
+      var->set_maybe_assigned();
+    }
+  }
+  DCHECK_NOT_NULL(var);
+
+  // We add a declaration node for every declaration. The compiler
+  // will only generate code if necessary. In particular, declarations
+  // for inner local variables that do not represent functions won't
+  // result in any generated code.
+  //
+  // This will lead to multiple declaration nodes for the
+  // same variable if it is declared several times. This is not a
+  // semantic issue, but it may be a performance issue since it may
+  // lead to repeated DeclareEvalVar or DeclareEvalFunction calls.
+  decls_.Add(declaration, zone());
+  proxy->BindTo(var);
+  return var;
+}
+
+VariableProxy* Scope::NewUnresolved(AstNodeFactory* factory,
+                                    const AstRawString* name,
+                                    int start_position, int end_position,
+                                    VariableKind kind) {
+  // Note that we must not share the unresolved variables with
+  // the same name because they may be removed selectively via
+  // RemoveUnresolved().
+  DCHECK(!already_resolved_);
+  DCHECK_EQ(!needs_migration_, factory->zone() == zone());
+  VariableProxy* proxy =
+      factory->NewVariableProxy(name, kind, start_position, end_position);
+  proxy->set_next_unresolved(unresolved_);
+  unresolved_ = proxy;
+  return proxy;
+}
+
+void Scope::AddUnresolved(VariableProxy* proxy) {
+  DCHECK(!already_resolved_);
+  DCHECK(!proxy->is_resolved());
+  proxy->set_next_unresolved(unresolved_);
+  unresolved_ = proxy;
+}
+
 Variable* DeclarationScope::DeclareDynamicGlobal(const AstRawString* name,
-                                                 Variable::Kind kind) {
+                                                 VariableKind kind) {
   DCHECK(is_script_scope());
-  return Declare(zone(), this, name, DYNAMIC_GLOBAL, kind, kCreatedInitialized);
+  return variables_.Declare(zone(), this, name, DYNAMIC_GLOBAL, kind,
+                            kCreatedInitialized);
 }
 
 
@@ -739,24 +1026,34 @@
   return false;
 }
 
+bool Scope::RemoveUnresolved(const AstRawString* name) {
+  if (unresolved_->raw_name() == name) {
+    VariableProxy* removed = unresolved_;
+    unresolved_ = unresolved_->next_unresolved();
+    removed->set_next_unresolved(nullptr);
+    return true;
+  }
+  VariableProxy* current = unresolved_;
+  while (current != nullptr) {
+    VariableProxy* next = current->next_unresolved();
+    if (next->raw_name() == name) {
+      current->set_next_unresolved(next->next_unresolved());
+      next->set_next_unresolved(nullptr);
+      return true;
+    }
+    current = next;
+  }
+  return false;
+}
 
 Variable* Scope::NewTemporary(const AstRawString* name) {
   DeclarationScope* scope = GetClosureScope();
-  Variable* var = new(zone()) Variable(scope,
-                                       name,
-                                       TEMPORARY,
-                                       Variable::NORMAL,
-                                       kCreatedInitialized);
-  scope->AddTemporary(var);
+  Variable* var = new (zone())
+      Variable(scope, name, TEMPORARY, NORMAL_VARIABLE, kCreatedInitialized);
+  scope->AddLocal(var);
   return var;
 }
 
-void Scope::AddDeclaration(Declaration* declaration) {
-  DCHECK(!already_resolved_);
-  decls_.Add(declaration, zone());
-}
-
-
 Declaration* Scope::CheckConflictingVarDeclarations() {
   int length = decls_.length();
   for (int i = 0; i < length; i++) {
@@ -806,63 +1103,34 @@
   return nullptr;
 }
 
-void Scope::CollectStackAndContextLocals(ZoneList<Variable*>* stack_locals,
-                                         ZoneList<Variable*>* context_locals,
-                                         ZoneList<Variable*>* context_globals) {
-  DCHECK(stack_locals != NULL);
-  DCHECK(context_locals != NULL);
-  DCHECK(context_globals != NULL);
-
-  // Collect temporaries which are always allocated on the stack, unless the
-  // context as a whole has forced context allocation.
-  if (is_declaration_scope()) {
-    ZoneList<Variable*>* temps = AsDeclarationScope()->temps();
-    for (int i = 0; i < temps->length(); i++) {
-      Variable* var = (*temps)[i];
-      if (var->is_used()) {
-        if (var->IsContextSlot()) {
-          DCHECK(has_forced_context_allocation());
-          context_locals->Add(var, zone());
-        } else if (var->IsStackLocal()) {
-          stack_locals->Add(var, zone());
-        } else {
-          DCHECK(var->IsParameter());
-        }
-      }
-    }
-  }
-
-  for (int i = 0; i < ordered_variables_.length(); i++) {
-    Variable* var = ordered_variables_[i];
-    if (var->IsStackLocal()) {
-      stack_locals->Add(var, zone());
-    } else if (var->IsContextSlot()) {
-      context_locals->Add(var, zone());
-    } else if (var->IsGlobalSlot()) {
-      context_globals->Add(var, zone());
-    }
-  }
-}
-
-void DeclarationScope::AllocateVariables(ParseInfo* info,
-                                         AstNodeFactory* factory) {
-  // 1) Propagate scope information.
-  PropagateScopeInfo();
-
-  // 2) Resolve variables.
-  ResolveVariablesRecursively(info, factory);
-
-  // 3) Allocate variables.
+void DeclarationScope::AllocateVariables(ParseInfo* info, AnalyzeMode mode) {
+  ResolveVariablesRecursively(info);
   AllocateVariablesRecursively();
+
+  MaybeHandle<ScopeInfo> outer_scope;
+  for (const Scope* s = outer_scope_; s != nullptr; s = s->outer_scope_) {
+    if (s->scope_info_.is_null()) continue;
+    outer_scope = s->scope_info_;
+    break;
+  }
+  AllocateScopeInfosRecursively(info->isolate(), mode, outer_scope);
+  // The debugger expects all shared function infos to contain a scope info.
+  // Since the top-most scope will end up in a shared function info, make sure
+  // it has one, even if it doesn't need a scope info.
+  // TODO(jochen|yangguo): Remove this requirement.
+  if (scope_info_.is_null()) {
+    scope_info_ = ScopeInfo::Create(info->isolate(), zone(), this, outer_scope);
+  }
 }
 
-
-bool Scope::AllowsLazyParsing() const {
-  // If we are inside a block scope, we must parse eagerly to find out how
-  // to allocate variables on the block scope. At this point, declarations may
-  // not have yet been parsed.
+bool Scope::AllowsLazyParsingWithoutUnresolvedVariables() const {
+  // If we are inside a block scope, we must find unresolved variables in the
+  // inner scopes to find out how to allocate variables on the block scope. At
+  // this point, declarations may not have yet been parsed.
   for (const Scope* s = this; s != nullptr; s = s->outer_scope_) {
     if (s->is_block_scope()) return false;
+    // TODO(marja): Refactor parsing modes: also add s->is_function_scope()
+    // here.
   }
   return true;
 }
@@ -932,6 +1200,16 @@
   return scope->AsDeclarationScope();
 }
 
+ModuleScope* Scope::GetModuleScope() {
+  Scope* scope = this;
+  DCHECK(!scope->is_script_scope());
+  while (!scope->is_module_scope()) {
+    scope = scope->outer_scope();
+    DCHECK_NOT_NULL(scope);
+  }
+  return scope->AsModuleScope();
+}
+
 DeclarationScope* Scope::GetReceiverScope() {
   Scope* scope = this;
   while (!scope->is_script_scope() &&
@@ -942,18 +1220,17 @@
   return scope->AsDeclarationScope();
 }
 
-
-
-Handle<ScopeInfo> Scope::GetScopeInfo(Isolate* isolate) {
-  if (scope_info_.is_null()) {
-    scope_info_ = ScopeInfo::Create(isolate, zone(), this);
+Scope* Scope::GetOuterScopeWithContext() {
+  Scope* scope = outer_scope_;
+  while (scope && !scope->NeedsContext()) {
+    scope = scope->outer_scope();
   }
-  return scope_info_;
+  return scope;
 }
 
 Handle<StringSet> DeclarationScope::CollectNonLocals(
     ParseInfo* info, Handle<StringSet> non_locals) {
-  VariableProxy* free_variables = FetchFreeVariables(this, info);
+  VariableProxy* free_variables = FetchFreeVariables(this, true, info);
   for (VariableProxy* proxy = free_variables; proxy != nullptr;
        proxy = proxy->next_unresolved()) {
     non_locals = StringSet::Add(non_locals, proxy->name());
@@ -961,38 +1238,73 @@
   return non_locals;
 }
 
-void DeclarationScope::AnalyzePartially(DeclarationScope* migrate_to,
-                                        AstNodeFactory* ast_node_factory) {
-  // Gather info from inner scopes.
-  PropagateScopeInfo();
+void DeclarationScope::ResetAfterPreparsing(AstValueFactory* ast_value_factory,
+                                            bool aborted) {
+  DCHECK(is_function_scope());
 
-  // Try to resolve unresolved variables for this Scope and migrate those which
-  // cannot be resolved inside. It doesn't make sense to try to resolve them in
-  // the outer Scopes here, because they are incomplete.
-  for (VariableProxy* proxy = FetchFreeVariables(this); proxy != nullptr;
-       proxy = proxy->next_unresolved()) {
-    DCHECK(!proxy->is_resolved());
-    VariableProxy* copy = ast_node_factory->CopyVariableProxy(proxy);
-    migrate_to->AddUnresolved(copy);
+  // Reset all non-trivial members.
+  decls_.Rewind(0);
+  locals_.Rewind(0);
+  sloppy_block_function_map_.Clear();
+  variables_.Clear();
+  // Make sure we won't walk the scope tree from here on.
+  inner_scope_ = nullptr;
+  unresolved_ = nullptr;
+
+  // TODO(verwaest): We should properly preparse the parameters (no declarations
+  // should be created), and reparse on abort.
+  if (aborted) {
+    if (!IsArrowFunction(function_kind_)) {
+      DeclareDefaultFunctionVariables(ast_value_factory);
+    }
+    // Recreate declarations for parameters.
+    for (int i = 0; i < params_.length(); i++) {
+      Variable* var = params_[i];
+      if (var->mode() == TEMPORARY) {
+        locals_.Add(var, zone());
+      } else if (variables_.Lookup(var->raw_name()) == nullptr) {
+        variables_.Add(zone(), var);
+        locals_.Add(var, zone());
+      }
+    }
+  } else {
+    params_.Rewind(0);
   }
 
-  // Push scope data up to migrate_to. Note that migrate_to and this Scope
-  // describe the same Scope, just in different Zones.
-  PropagateUsageFlagsToScope(migrate_to);
-  if (scope_uses_super_property_) migrate_to->scope_uses_super_property_ = true;
-  if (inner_scope_calls_eval_) migrate_to->inner_scope_calls_eval_ = true;
+#ifdef DEBUG
+  needs_migration_ = false;
+#endif
+
+  is_lazily_parsed_ = !aborted;
+}
+
+void DeclarationScope::AnalyzePartially(AstNodeFactory* ast_node_factory) {
   DCHECK(!force_eager_compilation_);
-  migrate_to->set_start_position(start_position_);
-  migrate_to->set_end_position(end_position_);
-  migrate_to->set_language_mode(language_mode());
-  migrate_to->arity_ = arity_;
-  migrate_to->force_context_allocation_ = force_context_allocation_;
-  outer_scope_->RemoveInnerScope(this);
-  DCHECK_EQ(outer_scope_, migrate_to->outer_scope_);
-  DCHECK_EQ(outer_scope_->zone(), migrate_to->zone());
-  DCHECK_EQ(NeedsHomeObject(), migrate_to->NeedsHomeObject());
-  DCHECK_EQ(asm_function_, migrate_to->asm_function_);
-  DCHECK_EQ(arguments() != nullptr, migrate_to->arguments() != nullptr);
+  VariableProxy* unresolved = nullptr;
+
+  if (!outer_scope_->is_script_scope()) {
+    // Try to resolve unresolved variables for this Scope and migrate those
+    // which cannot be resolved inside. It doesn't make sense to try to resolve
+    // them in the outer Scopes here, because they are incomplete.
+    for (VariableProxy* proxy =
+             FetchFreeVariables(this, !FLAG_lazy_inner_functions);
+         proxy != nullptr; proxy = proxy->next_unresolved()) {
+      DCHECK(!proxy->is_resolved());
+      VariableProxy* copy = ast_node_factory->CopyVariableProxy(proxy);
+      copy->set_next_unresolved(unresolved);
+      unresolved = copy;
+    }
+
+    // Clear arguments_ if unused. This is used as a signal for optimization.
+    if (arguments_ != nullptr &&
+        !(MustAllocate(arguments_) && !has_arguments_parameter_)) {
+      arguments_ = nullptr;
+    }
+  }
+
+  ResetAfterPreparsing(ast_node_factory->ast_value_factory(), false);
+
+  unresolved_ = unresolved;
 }
 
 #ifdef DEBUG
@@ -1040,9 +1352,6 @@
     case VariableLocation::CONTEXT:
       PrintF("context[%d]", var->index());
       break;
-    case VariableLocation::GLOBAL:
-      PrintF("global[%d]", var->index());
-      break;
     case VariableLocation::LOOKUP:
       PrintF("lookup");
       break;
@@ -1055,7 +1364,7 @@
 
 static void PrintVar(int indent, Variable* var) {
   if (var->is_used() || !var->IsUnallocated()) {
-    Indent(indent, Variable::Mode2String(var->mode()));
+    Indent(indent, VariableMode2String(var->mode()));
     PrintF(" ");
     if (var->raw_name()->IsEmpty())
       PrintF(".%p", reinterpret_cast<void*>(var));
@@ -1077,14 +1386,16 @@
   }
 }
 
-
-static void PrintMap(int indent, VariableMap* map) {
-  for (VariableMap::Entry* p = map->Start(); p != NULL; p = map->Next(p)) {
+static void PrintMap(int indent, VariableMap* map, bool locals) {
+  for (VariableMap::Entry* p = map->Start(); p != nullptr; p = map->Next(p)) {
     Variable* var = reinterpret_cast<Variable*>(p->value);
-    if (var == NULL) {
-      Indent(indent, "<?>\n");
-    } else {
-      PrintVar(indent, var);
+    bool local = !IsDynamicVariableMode(var->mode());
+    if (locals ? local : !local) {
+      if (var == nullptr) {
+        Indent(indent, "<?>\n");
+      } else {
+        PrintVar(indent, var);
+      }
     }
   }
 }
@@ -1143,14 +1454,14 @@
     Indent(n1, "// scope uses 'super' property\n");
   }
   if (inner_scope_calls_eval_) Indent(n1, "// inner scope calls 'eval'\n");
+  if (is_lazily_parsed_) Indent(n1, "// lazily parsed\n");
   if (num_stack_slots_ > 0) {
     Indent(n1, "// ");
     PrintF("%d stack slots\n", num_stack_slots_);
   }
   if (num_heap_slots_ > 0) {
     Indent(n1, "// ");
-    PrintF("%d heap slots (including %d global slots)\n", num_heap_slots_,
-           num_global_slots_);
+    PrintF("%d heap slots\n", num_heap_slots_);
   }
 
   // Print locals.
@@ -1159,28 +1470,12 @@
     PrintVar(n1, function);
   }
 
-  if (is_declaration_scope()) {
-    bool printed_header = false;
-    ZoneList<Variable*>* temps = AsDeclarationScope()->temps();
-    for (int i = 0; i < temps->length(); i++) {
-      if (!printed_header) {
-        printed_header = true;
-        Indent(n1, "// temporary vars:\n");
-      }
-      PrintVar(n1, (*temps)[i]);
-    }
-  }
-
   if (variables_.Start() != NULL) {
     Indent(n1, "// local vars:\n");
-    PrintMap(n1, &variables_);
-  }
+    PrintMap(n1, &variables_, true);
 
-  if (dynamics_ != NULL) {
     Indent(n1, "// dynamic vars:\n");
-    PrintMap(n1, dynamics_->GetMap(DYNAMIC));
-    PrintMap(n1, dynamics_->GetMap(DYNAMIC_LOCAL));
-    PrintMap(n1, dynamics_->GetMap(DYNAMIC_GLOBAL));
+    PrintMap(n1, &variables_, false);
   }
 
   // Print inner scopes (disable by providing negative n).
@@ -1208,34 +1503,26 @@
 }
 
 void Scope::CheckZones() {
+  DCHECK(!needs_migration_);
   for (Scope* scope = inner_scope_; scope != nullptr; scope = scope->sibling_) {
     CHECK_EQ(scope->zone(), zone());
+    scope->CheckZones();
   }
 }
 #endif  // DEBUG
 
 Variable* Scope::NonLocal(const AstRawString* name, VariableMode mode) {
-  if (dynamics_ == NULL) dynamics_ = new (zone()) DynamicScopePart(zone());
-  VariableMap* map = dynamics_->GetMap(mode);
-  Variable* var = map->Lookup(name);
-  if (var == NULL) {
-    // Declare a new non-local.
-    DCHECK(!IsLexicalVariableMode(mode));
-    var = map->Declare(zone(), NULL, name, mode, Variable::NORMAL,
-                       kCreatedInitialized);
-    // Allocate it by giving it a dynamic lookup.
-    var->AllocateTo(VariableLocation::LOOKUP, -1);
-  }
+  // Declare a new non-local.
+  DCHECK(IsDynamicVariableMode(mode));
+  Variable* var = variables_.Declare(zone(), NULL, name, mode, NORMAL_VARIABLE,
+                                     kCreatedInitialized);
+  // Allocate it by giving it a dynamic lookup.
+  var->AllocateTo(VariableLocation::LOOKUP, -1);
   return var;
 }
 
-Variable* Scope::LookupRecursive(VariableProxy* proxy,
-                                 BindingKind* binding_kind,
-                                 AstNodeFactory* factory,
-                                 Scope* outer_scope_end) {
+Variable* Scope::LookupRecursive(VariableProxy* proxy, Scope* outer_scope_end) {
   DCHECK_NE(outer_scope_end, this);
-  DCHECK_NOT_NULL(binding_kind);
-  DCHECK_EQ(UNBOUND, *binding_kind);
   // Short-cut: whenever we find a debug-evaluate scope, just look everything up
   // dynamically. Debug-evaluate doesn't properly create scope info for the
   // lookups it does. It may not have a valid 'this' declaration, and anything
@@ -1243,10 +1530,7 @@
   // variables.
   // TODO(yangguo): Remove once debug-evaluate creates proper ScopeInfo for the
   // scopes in which it's evaluating.
-  if (is_debug_evaluate_scope_) {
-    *binding_kind = DYNAMIC_LOOKUP;
-    return nullptr;
-  }
+  if (is_debug_evaluate_scope_) return NonLocal(proxy->raw_name(), DYNAMIC);
 
   // Try to find the variable in this scope.
   Variable* var = LookupLocal(proxy->raw_name());
@@ -1254,54 +1538,49 @@
   // We found a variable and we are done. (Even if there is an 'eval' in this
   // scope which introduces the same variable again, the resulting variable
   // remains the same.)
-  if (var != nullptr) {
-    *binding_kind = BOUND;
-    return var;
+  if (var != nullptr) return var;
+
+  if (outer_scope_ == outer_scope_end) {
+    // We may just be trying to find all free variables. In that case, don't
+    // declare them in the outer scope.
+    if (!is_script_scope()) return nullptr;
+    // No binding has been found. Declare a variable on the global object.
+    return AsDeclarationScope()->DeclareDynamicGlobal(proxy->raw_name(),
+                                                      NORMAL_VARIABLE);
   }
 
-  // We did not find a variable locally. Check against the function variable, if
-  // any.
-  if (is_function_scope()) {
-    var = AsDeclarationScope()->LookupFunctionVar(proxy->raw_name());
-    if (var != nullptr) {
-      *binding_kind = calls_sloppy_eval() ? BOUND_EVAL_SHADOWED : BOUND;
-      return var;
-    }
-  }
+  DCHECK(!is_script_scope());
 
-  if (outer_scope_ != outer_scope_end) {
-    var = outer_scope_->LookupRecursive(proxy, binding_kind, factory,
-                                        outer_scope_end);
-    if (*binding_kind == BOUND && is_function_scope()) {
+  var = outer_scope_->LookupRecursive(proxy, outer_scope_end);
+
+  // The variable could not be resolved statically.
+  if (var == nullptr) return var;
+
+  if (is_function_scope() && !var->is_dynamic()) {
+    var->ForceContextAllocation();
+  }
+  // "this" can't be shadowed by "eval"-introduced bindings or by "with"
+  // scopes.
+  // TODO(wingo): There are other variables in this category; add them.
+  if (var->is_this()) return var;
+
+  if (is_with_scope()) {
+    // The current scope is a with scope, so the variable binding can not be
+    // statically resolved. However, note that it was necessary to do a lookup
+    // in the outer scope anyway, because if a binding exists in an outer
+    // scope, the associated variable has to be marked as potentially being
+    // accessed from inside of an inner with scope (the property may not be in
+    // the 'with' object).
+    if (!var->is_dynamic() && var->IsUnallocated()) {
+      DCHECK(!already_resolved_);
+      var->set_is_used();
       var->ForceContextAllocation();
+      if (proxy->is_assigned()) var->set_maybe_assigned();
     }
-    // "this" can't be shadowed by "eval"-introduced bindings or by "with"
-    // scopes.
-    // TODO(wingo): There are other variables in this category; add them.
-    if (var != nullptr && var->is_this()) return var;
-
-    if (is_with_scope()) {
-      // The current scope is a with scope, so the variable binding can not be
-      // statically resolved. However, note that it was necessary to do a lookup
-      // in the outer scope anyway, because if a binding exists in an outer
-      // scope, the associated variable has to be marked as potentially being
-      // accessed from inside of an inner with scope (the property may not be in
-      // the 'with' object).
-      if (var != nullptr && var->IsUnallocated()) {
-        DCHECK(!already_resolved_);
-        var->set_is_used();
-        var->ForceContextAllocation();
-        if (proxy->is_assigned()) var->set_maybe_assigned();
-      }
-      *binding_kind = DYNAMIC_LOOKUP;
-      return nullptr;
-    }
-  } else {
-    DCHECK(!is_with_scope());
-    DCHECK(is_function_scope() || is_script_scope() || is_eval_scope());
+    return NonLocal(proxy->raw_name(), DYNAMIC);
   }
 
-  if (calls_sloppy_eval() && is_declaration_scope() && !is_script_scope()) {
+  if (calls_sloppy_eval() && is_declaration_scope()) {
     // A variable binding may have been found in an outer scope, but the current
     // scope makes a sloppy 'eval' call, so the found variable may not be the
     // correct one (the 'eval' may introduce a binding with the same name). In
@@ -1309,40 +1588,58 @@
     // scopes that can host var bindings (declaration scopes) need be considered
     // here (this excludes block and catch scopes), and variable lookups at
     // script scope are always dynamic.
-    if (*binding_kind == BOUND) {
-      *binding_kind = BOUND_EVAL_SHADOWED;
-    } else if (*binding_kind == UNBOUND) {
-      *binding_kind = UNBOUND_EVAL_SHADOWED;
+    if (var->IsGlobalObjectProperty()) {
+      return NonLocal(proxy->raw_name(), DYNAMIC_GLOBAL);
     }
+
+    if (var->is_dynamic()) return var;
+
+    Variable* invalidated = var;
+    var = NonLocal(proxy->raw_name(), DYNAMIC_LOCAL);
+    var->set_local_if_not_shadowed(invalidated);
   }
 
   return var;
 }
 
-void Scope::ResolveVariable(ParseInfo* info, VariableProxy* proxy,
-                            AstNodeFactory* factory) {
+void Scope::ResolveVariable(ParseInfo* info, VariableProxy* proxy) {
   DCHECK(info->script_scope()->is_script_scope());
+  DCHECK(!proxy->is_resolved());
+  Variable* var = LookupRecursive(proxy, nullptr);
+  ResolveTo(info, proxy, var);
 
-  // If the proxy is already resolved there's nothing to do
-  // (functions and consts may be resolved by the parser).
-  if (proxy->is_resolved()) return;
-
-  // Otherwise, try to resolve the variable.
-  BindingKind binding_kind = UNBOUND;
-  Variable* var = LookupRecursive(proxy, &binding_kind, factory);
-
-  ResolveTo(info, binding_kind, proxy, var);
+  if (FLAG_lazy_inner_functions) {
+    if (info != nullptr && info->is_native()) return;
+    // Pessimistically force context allocation for all variables to which inner
+    // scope variables could potentially resolve to.
+    Scope* scope = GetClosureScope()->outer_scope_;
+    while (scope != nullptr && scope->scope_info_.is_null()) {
+      var = scope->LookupLocal(proxy->raw_name());
+      if (var != nullptr) {
+        // Since we don't lazy parse inner arrow functions, inner functions
+        // cannot refer to the outer "this".
+        if (!var->is_dynamic() && !var->is_this() &&
+            !var->has_forced_context_allocation()) {
+          var->ForceContextAllocation();
+          var->set_is_used();
+          // We don't know what the (potentially lazy parsed) inner function
+          // does with the variable; pessimistically assume that it's assigned.
+          var->set_maybe_assigned();
+        }
+      }
+      scope = scope->outer_scope_;
+    }
+  }
 }
 
-void Scope::ResolveTo(ParseInfo* info, BindingKind binding_kind,
-                      VariableProxy* proxy, Variable* var) {
+void Scope::ResolveTo(ParseInfo* info, VariableProxy* proxy, Variable* var) {
 #ifdef DEBUG
   if (info->script_is_native()) {
     // To avoid polluting the global object in native scripts
     //  - Variables must not be allocated to the global scope.
     CHECK_NOT_NULL(outer_scope());
     //  - Variables must be bound locally or unallocated.
-    if (BOUND != binding_kind) {
+    if (var->IsGlobalObjectProperty()) {
       // The following variable name may be minified. If so, disable
       // minification in js2c.py for better output.
       Handle<String> name = proxy->raw_name()->string();
@@ -1357,85 +1654,44 @@
   }
 #endif
 
-  switch (binding_kind) {
-    case BOUND:
-      break;
-
-    case BOUND_EVAL_SHADOWED:
-      // We either found a variable binding that might be shadowed by eval  or
-      // gave up on it (e.g. by encountering a local with the same in the outer
-      // scope which was not promoted to a context, this can happen if we use
-      // debugger to evaluate arbitrary expressions at a break point).
-      if (var->IsGlobalObjectProperty()) {
-        var = NonLocal(proxy->raw_name(), DYNAMIC_GLOBAL);
-      } else if (var->is_dynamic()) {
-        var = NonLocal(proxy->raw_name(), DYNAMIC);
-      } else {
-        Variable* invalidated = var;
-        var = NonLocal(proxy->raw_name(), DYNAMIC_LOCAL);
-        var->set_local_if_not_shadowed(invalidated);
-      }
-      break;
-
-    case UNBOUND:
-      // No binding has been found. Declare a variable on the global object.
-      var = info->script_scope()->DeclareDynamicGlobal(proxy->raw_name(),
-                                                       Variable::NORMAL);
-      break;
-
-    case UNBOUND_EVAL_SHADOWED:
-      // No binding has been found. But some scope makes a sloppy 'eval' call.
-      var = NonLocal(proxy->raw_name(), DYNAMIC_GLOBAL);
-      break;
-
-    case DYNAMIC_LOOKUP:
-      // The variable could not be resolved statically.
-      var = NonLocal(proxy->raw_name(), DYNAMIC);
-      break;
-  }
-
-  DCHECK(var != NULL);
+  DCHECK_NOT_NULL(var);
   if (proxy->is_assigned()) var->set_maybe_assigned();
-
   proxy->BindTo(var);
 }
 
-void Scope::ResolveVariablesRecursively(ParseInfo* info,
-                                        AstNodeFactory* factory) {
+void Scope::ResolveVariablesRecursively(ParseInfo* info) {
   DCHECK(info->script_scope()->is_script_scope());
 
   // Resolve unresolved variables for this scope.
   for (VariableProxy* proxy = unresolved_; proxy != nullptr;
        proxy = proxy->next_unresolved()) {
-    ResolveVariable(info, proxy, factory);
+    ResolveVariable(info, proxy);
   }
 
   // Resolve unresolved variables for inner scopes.
   for (Scope* scope = inner_scope_; scope != nullptr; scope = scope->sibling_) {
-    scope->ResolveVariablesRecursively(info, factory);
+    scope->ResolveVariablesRecursively(info);
   }
 }
 
 VariableProxy* Scope::FetchFreeVariables(DeclarationScope* max_outer_scope,
-                                         ParseInfo* info,
+                                         bool try_to_resolve, ParseInfo* info,
                                          VariableProxy* stack) {
   for (VariableProxy *proxy = unresolved_, *next = nullptr; proxy != nullptr;
        proxy = next) {
     next = proxy->next_unresolved();
-    if (proxy->is_resolved()) continue;
-    // Note that we pass nullptr as AstNodeFactory: this phase should not create
-    // any new AstNodes, since none of the Scopes involved are backed up by
-    // ScopeInfo.
-    BindingKind binding_kind = UNBOUND;
-    Variable* var = LookupRecursive(proxy, &binding_kind, nullptr,
-                                    max_outer_scope->outer_scope());
+    DCHECK(!proxy->is_resolved());
+    Variable* var = nullptr;
+    if (try_to_resolve) {
+      var = LookupRecursive(proxy, max_outer_scope->outer_scope());
+    }
     if (var == nullptr) {
       proxy->set_next_unresolved(stack);
       stack = proxy;
     } else if (info != nullptr) {
-      DCHECK_NE(UNBOUND, binding_kind);
-      DCHECK_NE(UNBOUND_EVAL_SHADOWED, binding_kind);
-      ResolveTo(info, binding_kind, proxy, var);
+      ResolveTo(info, proxy, var);
+    } else {
+      var->set_is_used();
     }
   }
 
@@ -1443,22 +1699,13 @@
   unresolved_ = nullptr;
 
   for (Scope* scope = inner_scope_; scope != nullptr; scope = scope->sibling_) {
-    stack = scope->FetchFreeVariables(max_outer_scope, info, stack);
+    stack =
+        scope->FetchFreeVariables(max_outer_scope, try_to_resolve, info, stack);
   }
 
   return stack;
 }
 
-void Scope::PropagateScopeInfo() {
-  for (Scope* inner = inner_scope_; inner != nullptr; inner = inner->sibling_) {
-    inner->PropagateScopeInfo();
-    if (IsAsmModule() && inner->is_function_scope()) {
-      inner->AsDeclarationScope()->set_asm_function();
-    }
-  }
-}
-
-
 bool Scope::MustAllocate(Variable* var) {
   DCHECK(var->location() != VariableLocation::MODULE);
   // Give var a read/write use if there is a chance it might be accessed
@@ -1511,8 +1758,8 @@
 
   bool uses_sloppy_arguments = false;
 
-  // Functions have 'arguments' declared implicitly in all non arrow functions.
   if (arguments_ != nullptr) {
+    DCHECK(!is_arrow_scope());
     // 'arguments' is used. Unless there is also a parameter called
     // 'arguments', we must be conservative and allocate all parameters to
     // the context assuming they will be captured by the arguments object.
@@ -1533,21 +1780,18 @@
       // allocate the arguments object by nulling out arguments_.
       arguments_ = nullptr;
     }
-
-  } else {
-    DCHECK(is_arrow_scope());
   }
 
   // The same parameter may occur multiple times in the parameters_ list.
   // If it does, and if it is not copied into the context object, it must
   // receive the highest parameter index for that parameter; thus iteration
   // order is relevant!
-  for (int i = params_.length() - 1; i >= 0; --i) {
-    if (i == rest_index_) continue;
+  for (int i = num_parameters() - 1; i >= 0; --i) {
     Variable* var = params_[i];
-
-    DCHECK(var->scope() == this);
+    DCHECK(!has_rest_ || var != rest_parameter());
+    DCHECK_EQ(this, var->scope());
     if (uses_sloppy_arguments) {
+      var->set_is_used();
       var->ForceContextAllocation();
     }
     AllocateParameter(var, i);
@@ -1567,8 +1811,6 @@
         var->AllocateTo(VariableLocation::PARAMETER, index);
       }
     }
-  } else {
-    DCHECK(!var->IsGlobalSlot());
   }
 }
 
@@ -1590,38 +1832,9 @@
   }
 }
 
-void Scope::AllocateDeclaredGlobal(Variable* var) {
-  DCHECK(var->scope() == this);
-  if (var->IsUnallocated()) {
-    if (var->IsStaticGlobalObjectProperty()) {
-      DCHECK_EQ(-1, var->index());
-      DCHECK(var->name()->IsString());
-      var->AllocateTo(VariableLocation::GLOBAL, num_heap_slots_++);
-      num_global_slots_++;
-    } else {
-      // There must be only DYNAMIC_GLOBAL in the script scope.
-      DCHECK(!is_script_scope() || DYNAMIC_GLOBAL == var->mode());
-    }
-  }
-}
-
 void Scope::AllocateNonParameterLocalsAndDeclaredGlobals() {
-  // All variables that have no rewrite yet are non-parameter locals.
-  if (is_declaration_scope()) {
-    ZoneList<Variable*>* temps = AsDeclarationScope()->temps();
-    for (int i = 0; i < temps->length(); i++) {
-      AllocateNonParameterLocal((*temps)[i]);
-    }
-  }
-
-  for (int i = 0; i < ordered_variables_.length(); i++) {
-    AllocateNonParameterLocal(ordered_variables_[i]);
-  }
-
-  if (FLAG_global_var_shortcuts) {
-    for (int i = 0; i < ordered_variables_.length(); i++) {
-      AllocateDeclaredGlobal(ordered_variables_[i]);
-    }
+  for (int i = 0; i < locals_.length(); i++) {
+    AllocateNonParameterLocal(locals_[i]);
   }
 
   if (is_declaration_scope()) {
@@ -1638,8 +1851,8 @@
     AllocateNonParameterLocal(function_);
   }
 
-  DCHECK(!has_rest_parameter() || !MustAllocate(params_[rest_index_]) ||
-         !params_[rest_index_]->IsUnallocated());
+  DCHECK(!has_rest_ || !MustAllocate(rest_parameter()) ||
+         !rest_parameter()->IsUnallocated());
 
   if (new_target_ != nullptr && !MustAllocate(new_target_)) {
     new_target_ = nullptr;
@@ -1651,23 +1864,23 @@
 }
 
 void ModuleScope::AllocateModuleVariables() {
-  for (auto it = module()->regular_imports().begin();
-       it != module()->regular_imports().end(); ++it) {
-    Variable* var = LookupLocal(it->second->local_name);
+  for (const auto& it : module()->regular_imports()) {
+    Variable* var = LookupLocal(it.first);
     // TODO(neis): Use a meaningful index.
     var->AllocateTo(VariableLocation::MODULE, 42);
   }
 
-  for (auto entry : module()->exports()) {
-    if (entry->local_name == nullptr) continue;
-    Variable* var = LookupLocal(entry->local_name);
-    var->AllocateTo(VariableLocation::MODULE, 42);
+  for (const auto& it : module()->regular_exports()) {
+    Variable* var = LookupLocal(it.first);
+    var->AllocateTo(VariableLocation::MODULE, 0);
   }
 }
 
 void Scope::AllocateVariablesRecursively() {
   DCHECK(!already_resolved_);
   DCHECK_EQ(0, num_stack_slots_);
+  // Don't allocate variables of preparsed scopes.
+  if (is_lazily_parsed_) return;
 
   // Allocate variables for inner scopes.
   for (Scope* scope = inner_scope_; scope != nullptr; scope = scope->sibling_) {
@@ -1708,6 +1921,23 @@
   DCHECK(num_heap_slots_ == 0 || num_heap_slots_ >= Context::MIN_CONTEXT_SLOTS);
 }
 
+void Scope::AllocateScopeInfosRecursively(Isolate* isolate, AnalyzeMode mode,
+                                          MaybeHandle<ScopeInfo> outer_scope) {
+  DCHECK(scope_info_.is_null());
+  if (mode == AnalyzeMode::kDebugger || NeedsScopeInfo()) {
+    scope_info_ = ScopeInfo::Create(isolate, zone(), this, outer_scope);
+  }
+
+  // The ScopeInfo chain should mirror the context chain, so we only link to
+  // the next outer scope that needs a context.
+  MaybeHandle<ScopeInfo> next_outer_scope = outer_scope;
+  if (NeedsContext()) next_outer_scope = scope_info_;
+
+  // Allocate ScopeInfos for inner scopes.
+  for (Scope* scope = inner_scope_; scope != nullptr; scope = scope->sibling_) {
+    scope->AllocateScopeInfosRecursively(isolate, mode, next_outer_scope);
+  }
+}
 
 int Scope::StackLocalCount() const {
   Variable* function =
@@ -1723,12 +1953,9 @@
       is_function_scope() ? AsDeclarationScope()->function_var() : nullptr;
   bool is_function_var_in_context =
       function != nullptr && function->IsContextSlot();
-  return num_heap_slots() - Context::MIN_CONTEXT_SLOTS - num_global_slots() -
+  return num_heap_slots() - Context::MIN_CONTEXT_SLOTS -
          (is_function_var_in_context ? 1 : 0);
 }
 
-
-int Scope::ContextGlobalCount() const { return num_global_slots(); }
-
 }  // namespace internal
 }  // namespace v8
diff --git a/src/ast/scopes.h b/src/ast/scopes.h
index 8c00927..0acff8a 100644
--- a/src/ast/scopes.h
+++ b/src/ast/scopes.h
@@ -5,15 +5,22 @@
 #ifndef V8_AST_SCOPES_H_
 #define V8_AST_SCOPES_H_
 
-#include "src/ast/ast.h"
 #include "src/base/hashmap.h"
 #include "src/globals.h"
-#include "src/zone.h"
+#include "src/objects.h"
+#include "src/zone/zone.h"
 
 namespace v8 {
 namespace internal {
 
+class AstNodeFactory;
+class AstValueFactory;
+class AstRawString;
+class Declaration;
 class ParseInfo;
+class SloppyBlockFunctionStatement;
+class StringSet;
+class VariableProxy;
 
 // A hash map to support fast variable declaration and lookup.
 class VariableMap: public ZoneHashMap {
@@ -21,34 +28,14 @@
   explicit VariableMap(Zone* zone);
 
   Variable* Declare(Zone* zone, Scope* scope, const AstRawString* name,
-                    VariableMode mode, Variable::Kind kind,
+                    VariableMode mode, VariableKind kind,
                     InitializationFlag initialization_flag,
                     MaybeAssignedFlag maybe_assigned_flag = kNotAssigned,
                     bool* added = nullptr);
 
   Variable* Lookup(const AstRawString* name);
-};
-
-
-// The dynamic scope part holds hash maps for the variables that will
-// be looked up dynamically from within eval and with scopes. The objects
-// are allocated on-demand from Scope::NonLocal to avoid wasting memory
-// and setup time for scopes that don't need them.
-class DynamicScopePart : public ZoneObject {
- public:
-  explicit DynamicScopePart(Zone* zone) {
-    for (int i = 0; i < 3; i++)
-      maps_[i] = new(zone->New(sizeof(VariableMap))) VariableMap(zone);
-  }
-
-  VariableMap* GetMap(VariableMode mode) {
-    int index = mode - DYNAMIC;
-    DCHECK(index >= 0 && index < 3);
-    return maps_[index];
-  }
-
- private:
-  VariableMap *maps_[3];
+  void Remove(Variable* var);
+  void Add(Zone* zone, Variable* var);
 };
 
 
@@ -60,6 +47,7 @@
                SloppyBlockFunctionStatement* statement);
 };
 
+enum class AnalyzeMode { kRegular, kDebugger };
 
 // Global invariants after AST construction: Each reference (i.e. identifier)
 // to a JavaScript variable (including global properties) is represented by a
@@ -86,6 +74,7 @@
   void SetScopeName(const AstRawString* scope_name) {
     scope_name_ = scope_name;
   }
+  void set_needs_migration() { needs_migration_ = true; }
 #endif
 
   // TODO(verwaest): Is this needed on Scope?
@@ -106,18 +95,14 @@
     Scope* outer_scope_;
     Scope* top_inner_scope_;
     VariableProxy* top_unresolved_;
-    int top_temp_;
+    int top_local_;
+    int top_decl_;
   };
 
-  // Compute top scope and allocate variables. For lazy compilation the top
-  // scope only contains the single lazily compiled function, so this
-  // doesn't re-allocate variables repeatedly.
-  static void Analyze(ParseInfo* info);
-
-  enum class DeserializationMode { kDeserializeOffHeap, kKeepScopeInfo };
+  enum class DeserializationMode { kIncludingVariables, kScopesOnly };
 
   static Scope* DeserializeScopeChain(Isolate* isolate, Zone* zone,
-                                      Context* context,
+                                      ScopeInfo* scope_info,
                                       DeclarationScope* script_scope,
                                       AstValueFactory* ast_value_factory,
                                       DeserializationMode deserialization_mode);
@@ -127,6 +112,11 @@
   // tree and its children are reparented.
   Scope* FinalizeBlockScope();
 
+  bool HasBeenRemoved() const;
+
+  // Find the first scope that hasn't been removed.
+  Scope* GetUnremovedScope();
+
   // Inserts outer_scope into this scope's scope chain (and removes this
   // from the current outer_scope_'s inner scope list).
   // Assumes outer_scope_ is non-null.
@@ -142,7 +132,13 @@
   // Declarations
 
   // Lookup a variable in this scope. Returns the variable or NULL if not found.
-  Variable* LookupLocal(const AstRawString* name);
+  Variable* LookupLocal(const AstRawString* name) {
+    Variable* result = variables_.Lookup(name);
+    if (result != nullptr || scope_info_.is_null()) return result;
+    return LookupInScopeInfo(name);
+  }
+
+  Variable* LookupInScopeInfo(const AstRawString* name);
 
   // Lookup a variable in this scope or outer scopes.
   // Returns the variable or NULL if not found.
@@ -151,36 +147,28 @@
   // Declare a local variable in this scope. If the variable has been
   // declared before, the previously declared variable is returned.
   Variable* DeclareLocal(const AstRawString* name, VariableMode mode,
-                         InitializationFlag init_flag, Variable::Kind kind,
+                         InitializationFlag init_flag, VariableKind kind,
                          MaybeAssignedFlag maybe_assigned_flag = kNotAssigned);
 
+  Variable* DeclareVariable(Declaration* declaration, VariableMode mode,
+                            InitializationFlag init,
+                            bool allow_harmony_restrictive_generators,
+                            bool* sloppy_mode_block_scope_function_redefinition,
+                            bool* ok);
+
   // Declarations list.
   ZoneList<Declaration*>* declarations() { return &decls_; }
 
+  ZoneList<Variable*>* locals() { return &locals_; }
+
   // Create a new unresolved variable.
   VariableProxy* NewUnresolved(AstNodeFactory* factory,
                                const AstRawString* name,
                                int start_position = kNoSourcePosition,
                                int end_position = kNoSourcePosition,
-                               Variable::Kind kind = Variable::NORMAL) {
-    // Note that we must not share the unresolved variables with
-    // the same name because they may be removed selectively via
-    // RemoveUnresolved().
-    DCHECK(!already_resolved_);
-    DCHECK_EQ(factory->zone(), zone());
-    VariableProxy* proxy =
-        factory->NewVariableProxy(name, kind, start_position, end_position);
-    proxy->set_next_unresolved(unresolved_);
-    unresolved_ = proxy;
-    return proxy;
-  }
+                               VariableKind kind = NORMAL_VARIABLE);
 
-  void AddUnresolved(VariableProxy* proxy) {
-    DCHECK(!already_resolved_);
-    DCHECK(!proxy->is_resolved());
-    proxy->set_next_unresolved(unresolved_);
-    unresolved_ = proxy;
-  }
+  void AddUnresolved(VariableProxy* proxy);
 
   // Remove a unresolved variable. During parsing, an unresolved variable
   // may have been added optimistically, but then only the variable name
@@ -189,6 +177,7 @@
   // allocated globally as a "ghost" variable. RemoveUnresolved removes
   // such a variable again if it was added; otherwise this is a no-op.
   bool RemoveUnresolved(VariableProxy* var);
+  bool RemoveUnresolved(const AstRawString* name);
 
   // Creates a new temporary variable in this scope's TemporaryScope.  The
   // name is only used for printing and cannot be used to find the variable.
@@ -198,11 +187,6 @@
   // TODO(verwaest): Move to DeclarationScope?
   Variable* NewTemporary(const AstRawString* name);
 
-  // Adds the specific declaration node to the list of declarations in
-  // this scope. The declarations are processed as part of entering
-  // the scope; see codegen.cc:ProcessDeclarations.
-  void AddDeclaration(Declaration* declaration);
-
   // ---------------------------------------------------------------------------
   // Illegal redeclaration support.
 
@@ -223,10 +207,15 @@
   // Scope-specific info.
 
   // Inform the scope and outer scopes that the corresponding code contains an
-  // eval call.
+  // eval call. We don't record eval calls from inner scopes in the outermost
+  // script scope, as we only see those when parsing eagerly. If we recorded the
+  // calls then, the outermost script scope would look different depending on
+  // whether we parsed eagerly or not, which is undesirable.
   void RecordEvalCall() {
     scope_calls_eval_ = true;
-    for (Scope* scope = this; scope != nullptr; scope = scope->outer_scope()) {
+    inner_scope_calls_eval_ = true;
+    for (Scope* scope = outer_scope(); scope && !scope->is_script_scope();
+         scope = scope->outer_scope()) {
       scope->inner_scope_calls_eval_ = true;
     }
   }
@@ -353,24 +342,16 @@
   // ---------------------------------------------------------------------------
   // Variable allocation.
 
-  // Collect stack and context allocated local variables in this scope. Note
-  // that the function variable - if present - is not collected and should be
-  // handled separately.
-  void CollectStackAndContextLocals(ZoneList<Variable*>* stack_locals,
-                                    ZoneList<Variable*>* context_locals,
-                                    ZoneList<Variable*>* context_globals);
-
   // Result of variable allocation.
   int num_stack_slots() const { return num_stack_slots_; }
   int num_heap_slots() const { return num_heap_slots_; }
-  int num_global_slots() const { return num_global_slots_; }
 
   int StackLocalCount() const;
   int ContextLocalCount() const;
-  int ContextGlobalCount() const;
 
-  // Determine if we can parse a function literal in this scope lazily.
-  bool AllowsLazyParsing() const;
+  // Determine if we can parse a function literal in this scope lazily without
+  // caring about the unresolved variables within.
+  bool AllowsLazyParsingWithoutUnresolvedVariables() const;
 
   // The number of contexts between this and scope; zero if this == scope.
   int ContextChainLength(Scope* scope) const;
@@ -398,10 +379,13 @@
   // 'this' is bound, and what determines the function kind.
   DeclarationScope* GetReceiverScope();
 
-  // Creates a scope info if it doesn't already exist.
-  Handle<ScopeInfo> GetScopeInfo(Isolate* isolate);
+  // Find the module scope, assuming there is one.
+  ModuleScope* GetModuleScope();
 
-  // GetScopeInfo() must have been called once to create the ScopeInfo.
+  // Find the innermost outer scope that needs a context.
+  Scope* GetOuterScopeWithContext();
+
+  // Analyze() must have been called once to create the ScopeInfo.
   Handle<ScopeInfo> scope_info() {
     DCHECK(!scope_info_.is_null());
     return scope_info_;
@@ -436,9 +420,11 @@
   // Retrieve `IsSimpleParameterList` of current or outer function.
   bool HasSimpleParameters();
   void set_is_debug_evaluate_scope() { is_debug_evaluate_scope_ = true; }
+  bool is_debug_evaluate_scope() const { return is_debug_evaluate_scope_; }
+
+  bool is_lazily_parsed() const { return is_lazily_parsed_; }
 
  protected:
-  // Creates a script scope.
   explicit Scope(Zone* zone);
 
   void set_language_mode(LanguageMode language_mode) {
@@ -447,16 +433,32 @@
 
  private:
   Variable* Declare(Zone* zone, Scope* scope, const AstRawString* name,
-                    VariableMode mode, Variable::Kind kind,
+                    VariableMode mode, VariableKind kind,
                     InitializationFlag initialization_flag,
                     MaybeAssignedFlag maybe_assigned_flag = kNotAssigned) {
     bool added;
     Variable* var =
         variables_.Declare(zone, scope, name, mode, kind, initialization_flag,
                            maybe_assigned_flag, &added);
-    if (added) ordered_variables_.Add(var, zone);
+    if (added) locals_.Add(var, zone);
     return var;
   }
+
+  // This method should only be invoked on scopes created during parsing (i.e.,
+  // not deserialized from a context). Also, since NeedsContext() is only
+  // returning a valid result after variables are resolved, NeedsScopeInfo()
+  // should also be invoked after resolution.
+  bool NeedsScopeInfo() const {
+    DCHECK(!already_resolved_);
+    // A lazily parsed scope doesn't contain enough information to create a
+    // ScopeInfo from it.
+    if (is_lazily_parsed_) return false;
+    // The debugger expects all functions to have scope infos.
+    // TODO(jochen|yangguo): Remove this requirement.
+    if (is_function_scope()) return true;
+    return NeedsContext();
+  }
+
   Zone* zone_;
 
   // Scope tree.
@@ -473,9 +475,7 @@
   // In case of non-scopeinfo-backed scopes, this contains the variables of the
   // map above in order of addition.
   // TODO(verwaest): Thread through Variable.
-  ZoneList<Variable*> ordered_variables_;
-  // Variables that must be looked up dynamically.
-  DynamicScopePart* dynamics_;
+  ZoneList<Variable*> locals_;
   // Unresolved variables referred to from this scope. The proxies themselves
   // form a linked list of all unresolved proxies.
   VariableProxy* unresolved_;
@@ -490,7 +490,10 @@
 
   // True if it doesn't need scope resolution (e.g., if the scope was
   // constructed based on a serialized scope info or a catch context).
-  bool already_resolved_ : 1;
+  bool already_resolved_;
+  // True if this scope may contain objects from a temp zone that needs to be
+  // fixed up.
+  bool needs_migration_;
 #endif
 
   // Source positions.
@@ -500,7 +503,6 @@
   // Computed via AllocateVariables.
   int num_stack_slots_;
   int num_heap_slots_;
-  int num_global_slots_;
 
   // The scope type.
   const ScopeType scope_type_;
@@ -525,79 +527,30 @@
   // True if it holds 'var' declarations.
   bool is_declaration_scope_ : 1;
 
+  bool is_lazily_parsed_ : 1;
+
   // Create a non-local variable with a given name.
   // These variables are looked up dynamically at runtime.
   Variable* NonLocal(const AstRawString* name, VariableMode mode);
 
   // Variable resolution.
-  // Possible results of a recursive variable lookup telling if and how a
-  // variable is bound. These are returned in the output parameter *binding_kind
-  // of the LookupRecursive function.
-  enum BindingKind {
-    // The variable reference could be statically resolved to a variable binding
-    // which is returned. There is no 'with' statement between the reference and
-    // the binding and no scope between the reference scope (inclusive) and
-    // binding scope (exclusive) makes a sloppy 'eval' call.
-    BOUND,
-
-    // The variable reference could be statically resolved to a variable binding
-    // which is returned. There is no 'with' statement between the reference and
-    // the binding, but some scope between the reference scope (inclusive) and
-    // binding scope (exclusive) makes a sloppy 'eval' call, that might
-    // possibly introduce variable bindings shadowing the found one. Thus the
-    // found variable binding is just a guess.
-    BOUND_EVAL_SHADOWED,
-
-    // The variable reference could not be statically resolved to any binding
-    // and thus should be considered referencing a global variable. NULL is
-    // returned. The variable reference is not inside any 'with' statement and
-    // no scope between the reference scope (inclusive) and script scope
-    // (exclusive) makes a sloppy 'eval' call.
-    UNBOUND,
-
-    // The variable reference could not be statically resolved to any binding
-    // NULL is returned. The variable reference is not inside any 'with'
-    // statement, but some scope between the reference scope (inclusive) and
-    // script scope (exclusive) makes a sloppy 'eval' call, that might
-    // possibly introduce a variable binding. Thus the reference should be
-    // considered referencing a global variable unless it is shadowed by an
-    // 'eval' introduced binding.
-    UNBOUND_EVAL_SHADOWED,
-
-    // The variable could not be statically resolved and needs to be looked up
-    // dynamically. NULL is returned. There are two possible reasons:
-    // * A 'with' statement has been encountered and there is no variable
-    //   binding for the name between the variable reference and the 'with'.
-    //   The variable potentially references a property of the 'with' object.
-    // * The code is being executed as part of a call to 'eval' and the calling
-    //   context chain contains either a variable binding for the name or it
-    //   contains a 'with' context.
-    DYNAMIC_LOOKUP
-  };
-
   // Lookup a variable reference given by name recursively starting with this
   // scope, and stopping when reaching the outer_scope_end scope. If the code is
   // executed because of a call to 'eval', the context parameter should be set
   // to the calling context of 'eval'.
-  Variable* LookupRecursive(VariableProxy* proxy, BindingKind* binding_kind,
-                            AstNodeFactory* factory,
-                            Scope* outer_scope_end = nullptr);
-  void ResolveTo(ParseInfo* info, BindingKind binding_kind,
-                 VariableProxy* proxy, Variable* var);
-  void ResolveVariable(ParseInfo* info, VariableProxy* proxy,
-                       AstNodeFactory* factory);
-  void ResolveVariablesRecursively(ParseInfo* info, AstNodeFactory* factory);
+  Variable* LookupRecursive(VariableProxy* proxy, Scope* outer_scope_end);
+  void ResolveTo(ParseInfo* info, VariableProxy* proxy, Variable* var);
+  void ResolveVariable(ParseInfo* info, VariableProxy* proxy);
+  void ResolveVariablesRecursively(ParseInfo* info);
 
   // Finds free variables of this scope. This mutates the unresolved variables
   // list along the way, so full resolution cannot be done afterwards.
   // If a ParseInfo* is passed, non-free variables will be resolved.
   VariableProxy* FetchFreeVariables(DeclarationScope* max_outer_scope,
+                                    bool try_to_resolve = true,
                                     ParseInfo* info = nullptr,
                                     VariableProxy* stack = nullptr);
 
-  // Scope analysis.
-  void PropagateScopeInfo();
-
   // Predicates.
   bool MustAllocate(Variable* var);
   bool MustAllocateInContext(Variable* var);
@@ -610,15 +563,18 @@
   void AllocateNonParameterLocalsAndDeclaredGlobals();
   void AllocateVariablesRecursively();
 
+  void AllocateScopeInfosRecursively(Isolate* isolate, AnalyzeMode mode,
+                                     MaybeHandle<ScopeInfo> outer_scope);
+
   // Construct a scope based on the scope info.
-  Scope(Zone* zone, Scope* inner_scope, ScopeType type,
-        Handle<ScopeInfo> scope_info);
+  Scope(Zone* zone, ScopeType type, Handle<ScopeInfo> scope_info);
 
   // Construct a catch scope with a binding for the name.
-  Scope(Zone* zone, Scope* inner_scope,
-        const AstRawString* catch_variable_name);
+  Scope(Zone* zone, const AstRawString* catch_variable_name,
+        Handle<ScopeInfo> scope_info);
 
   void AddInnerScope(Scope* inner_scope) {
+    DCHECK_EQ(!needs_migration_, inner_scope->zone() == zone());
     inner_scope->sibling_ = inner_scope_;
     inner_scope_ = inner_scope;
     inner_scope->outer_scope_ = this;
@@ -641,9 +597,6 @@
 
   void SetDefaults();
 
-  void DeserializeScopeInfo(Isolate* isolate,
-                            AstValueFactory* ast_value_factory);
-
   friend class DeclarationScope;
 };
 
@@ -651,10 +604,10 @@
  public:
   DeclarationScope(Zone* zone, Scope* outer_scope, ScopeType scope_type,
                    FunctionKind function_kind = kNormalFunction);
-  DeclarationScope(Zone* zone, Scope* inner_scope, ScopeType scope_type,
+  DeclarationScope(Zone* zone, ScopeType scope_type,
                    Handle<ScopeInfo> scope_info);
   // Creates a script scope.
-  explicit DeclarationScope(Zone* zone);
+  DeclarationScope(Zone* zone, AstValueFactory* ast_value_factory);
 
   bool IsDeclaredParameter(const AstRawString* name) {
     // If IsSimpleParameterList is false, duplicate parameters are not allowed,
@@ -681,23 +634,29 @@
                                         IsClassConstructor(function_kind())));
   }
 
+  void SetScriptScopeInfo(Handle<ScopeInfo> scope_info) {
+    DCHECK(is_script_scope());
+    DCHECK(scope_info_.is_null());
+    scope_info_ = scope_info;
+  }
+
   bool asm_module() const { return asm_module_; }
-  void set_asm_module() { asm_module_ = true; }
+  void set_asm_module();
   bool asm_function() const { return asm_function_; }
   void set_asm_function() { asm_module_ = true; }
 
   void DeclareThis(AstValueFactory* ast_value_factory);
+  void DeclareArguments(AstValueFactory* ast_value_factory);
   void DeclareDefaultFunctionVariables(AstValueFactory* ast_value_factory);
 
-  // This lookup corresponds to a lookup in the "intermediate" scope sitting
-  // between this scope and the outer scope. (ECMA-262, 3rd., requires that
-  // the name of named function literal is kept in an intermediate scope
-  // in between this scope and the next outer scope.)
-  Variable* LookupFunctionVar(const AstRawString* name);
-
   // Declare the function variable for a function literal. This variable
   // is in an intermediate scope between this function scope and the the
   // outer scope. Only possible for function scopes; at most one variable.
+  //
+  // This function needs to be called after all other variables have been
+  // declared in the scope. It will add a variable for {name} to {variables_};
+  // either the function variable itself, or a non-local in case the function
+  // calls sloppy eval.
   Variable* DeclareFunctionVar(const AstRawString* name);
 
   // Declare a parameter in this scope.  When there are duplicated
@@ -712,7 +671,7 @@
   // scope) by a reference to an unresolved variable with no intervening
   // with statements or eval calls.
   Variable* DeclareDynamicGlobal(const AstRawString* name,
-                                 Variable::Kind variable_kind);
+                                 VariableKind variable_kind);
 
   // The variable corresponding to the 'this' value.
   Variable* receiver() {
@@ -739,43 +698,36 @@
   }
 
   // Parameters. The left-most parameter has index 0.
-  // Only valid for function scopes.
+  // Only valid for function and module scopes.
   Variable* parameter(int index) const {
-    DCHECK(is_function_scope());
+    DCHECK(is_function_scope() || is_module_scope());
     return params_[index];
   }
 
   // Returns the default function arity excluding default or rest parameters.
-  int default_function_length() const { return arity_; }
+  // This will be used to set the length of the function, by default.
+  // Class field initializers use this property to indicate the number of
+  // fields being initialized.
+  int arity() const { return arity_; }
 
-  // Returns the number of formal parameters, up to but not including the
-  // rest parameter index (if the function has rest parameters), i.e. it
-  // says 2 for
-  //
-  //   function foo(a, b) { ... }
-  //
-  // and
-  //
-  //   function foo(a, b, ...c) { ... }
-  //
-  // but for
-  //
-  //   function foo(a, b, c = 1) { ... }
-  //
-  // we return 3 here.
+  // Normal code should not need to call this. Class field initializers use this
+  // property to indicate the number of fields being initialized.
+  void set_arity(int arity) { arity_ = arity; }
+
+  // Returns the number of formal parameters, excluding a possible rest
+  // parameter.  Examples:
+  //   function foo(a, b) {}         ==> 2
+  //   function foo(a, b, ...c) {}   ==> 2
+  //   function foo(a, b, c = 1) {}  ==> 3
   int num_parameters() const {
-    return has_rest_parameter() ? params_.length() - 1 : params_.length();
+    return has_rest_ ? params_.length() - 1 : params_.length();
   }
 
-  // A function can have at most one rest parameter. Returns Variable* or NULL.
-  Variable* rest_parameter(int* index) const {
-    *index = rest_index_;
-    if (rest_index_ < 0) return nullptr;
-    return params_[rest_index_];
+  // The function's rest parameter (nullptr if there is none).
+  Variable* rest_parameter() const {
+    return has_rest_ ? params_[params_.length() - 1] : nullptr;
   }
 
-  bool has_rest_parameter() const { return rest_index_ >= 0; }
-
   bool has_simple_parameters() const { return has_simple_parameters_; }
 
   // TODO(caitp): manage this state in a better way. PreParser must be able to
@@ -803,44 +755,40 @@
     return this_function_;
   }
 
-  // Adds a temporary variable in this scope's TemporaryScope. This is for
-  // adjusting the scope of temporaries used when desugaring parameter
+  // Adds a local variable in this scope's locals list. This is for adjusting
+  // the scope of temporaries and do-expression vars when desugaring parameter
   // initializers.
-  void AddTemporary(Variable* var) {
+  void AddLocal(Variable* var) {
     DCHECK(!already_resolved_);
     // Temporaries are only placed in ClosureScopes.
     DCHECK_EQ(GetClosureScope(), this);
-    temps_.Add(var, zone());
+    locals_.Add(var, zone());
   }
 
-  ZoneList<Variable*>* temps() { return &temps_; }
-
   void DeclareSloppyBlockFunction(const AstRawString* name,
                                   SloppyBlockFunctionStatement* statement) {
     sloppy_block_function_map_.Declare(zone(), name, statement);
   }
 
+  // Go through sloppy_block_function_map_ and hoist those (into this scope)
+  // which should be hoisted.
+  void HoistSloppyBlockFunctions(AstNodeFactory* factory);
+
   SloppyBlockFunctionMap* sloppy_block_function_map() {
     return &sloppy_block_function_map_;
   }
 
-  // Resolve and fill in the allocation information for all variables
-  // in this scopes. Must be called *after* all scopes have been
-  // processed (parsed) to ensure that unresolved variables can be
-  // resolved properly.
-  //
-  // In the case of code compiled and run using 'eval', the context
-  // parameter is the context in which eval was called.  In all other
-  // cases the context parameter is an empty handle.
-  void AllocateVariables(ParseInfo* info, AstNodeFactory* factory);
+  // Compute top scope and allocate variables. For lazy compilation the top
+  // scope only contains the single lazily compiled function, so this
+  // doesn't re-allocate variables repeatedly.
+  static void Analyze(ParseInfo* info, AnalyzeMode mode);
 
   // To be called during parsing. Do just enough scope analysis that we can
   // discard the Scope for lazily compiled functions. In particular, this
   // records variables which cannot be resolved inside the Scope (we don't yet
   // know what they will resolve to since the outer Scopes are incomplete) and
   // migrates them into migrate_to.
-  void AnalyzePartially(DeclarationScope* migrate_to,
-                        AstNodeFactory* ast_node_factory);
+  void AnalyzePartially(AstNodeFactory* ast_node_factory);
 
   Handle<StringSet> CollectNonLocals(ParseInfo* info,
                                      Handle<StringSet> non_locals);
@@ -868,9 +816,21 @@
   void AllocateParameterLocals();
   void AllocateReceiver();
 
+  void ResetAfterPreparsing(AstValueFactory* ast_value_factory, bool aborted);
+
  private:
   void AllocateParameter(Variable* var, int index);
 
+  // Resolve and fill in the allocation information for all variables
+  // in this scopes. Must be called *after* all scopes have been
+  // processed (parsed) to ensure that unresolved variables can be
+  // resolved properly.
+  //
+  // In the case of code compiled and run using 'eval', the context
+  // parameter is the context in which eval was called.  In all other
+  // cases the context parameter is an empty handle.
+  void AllocateVariables(ParseInfo* info, AnalyzeMode mode);
+
   void SetDefaults();
 
   // If the scope is a function scope, this is the function kind.
@@ -882,6 +842,8 @@
   // This scope's outer context is an asm module.
   bool asm_function_ : 1;
   bool force_eager_compilation_ : 1;
+  // This function scope has a rest parameter.
+  bool has_rest_ : 1;
   // This scope has a parameter called "arguments".
   bool has_arguments_parameter_ : 1;
   // This scope uses "super" property ('super.foo').
@@ -889,9 +851,6 @@
 
   // Info about the parameter list of a function.
   int arity_;
-  int rest_index_;
-  // Compiler-allocated (user-invisible) temporaries.
-  ZoneList<Variable*> temps_;
   // Parameter list in source order.
   ZoneList<Variable*> params_;
   // Map of function names to lists of functions defined in sloppy blocks
@@ -910,7 +869,14 @@
 
 class ModuleScope final : public DeclarationScope {
  public:
-  ModuleScope(Zone* zone, DeclarationScope* script_scope,
+  ModuleScope(DeclarationScope* script_scope,
+              AstValueFactory* ast_value_factory);
+
+  // Deserialization.
+  // The generated ModuleDescriptor does not preserve all information.  In
+  // particular, its module_requests map will be empty because we no longer need
+  // the map after parsing.
+  ModuleScope(Isolate* isolate, Handle<ScopeInfo> scope_info,
               AstValueFactory* ast_value_factory);
 
   ModuleDescriptor* module() const {
diff --git a/src/ast/variables.cc b/src/ast/variables.cc
index 0541f94..cc269cd 100644
--- a/src/ast/variables.cc
+++ b/src/ast/variables.cc
@@ -13,36 +13,20 @@
 // ----------------------------------------------------------------------------
 // Implementation Variable.
 
-const char* Variable::Mode2String(VariableMode mode) {
-  switch (mode) {
-    case VAR: return "VAR";
-    case CONST_LEGACY: return "CONST_LEGACY";
-    case LET: return "LET";
-    case CONST: return "CONST";
-    case DYNAMIC: return "DYNAMIC";
-    case DYNAMIC_GLOBAL: return "DYNAMIC_GLOBAL";
-    case DYNAMIC_LOCAL: return "DYNAMIC_LOCAL";
-    case TEMPORARY: return "TEMPORARY";
-  }
-  UNREACHABLE();
-  return NULL;
-}
-
 Variable::Variable(Scope* scope, const AstRawString* name, VariableMode mode,
-                   Kind kind, InitializationFlag initialization_flag,
+                   VariableKind kind, InitializationFlag initialization_flag,
                    MaybeAssignedFlag maybe_assigned_flag)
     : scope_(scope),
       name_(name),
-      mode_(mode),
-      kind_(kind),
-      location_(VariableLocation::UNALLOCATED),
+      local_if_not_shadowed_(nullptr),
       index_(-1),
       initializer_position_(kNoSourcePosition),
-      local_if_not_shadowed_(NULL),
-      force_context_allocation_(false),
-      is_used_(false),
-      initialization_flag_(initialization_flag),
-      maybe_assigned_(maybe_assigned_flag) {
+      bit_field_(MaybeAssignedFlagField::encode(maybe_assigned_flag) |
+                 InitializationFlagField::encode(initialization_flag) |
+                 VariableModeField::encode(mode) | IsUsedField::encode(false) |
+                 ForceContextAllocationField::encode(false) |
+                 LocationField::encode(VariableLocation::UNALLOCATED) |
+                 VariableKindField::encode(kind)) {
   // Var declared variables never need initialization.
   DCHECK(!(mode == VAR && initialization_flag == kNeedsInitialization));
 }
@@ -51,8 +35,8 @@
 bool Variable::IsGlobalObjectProperty() const {
   // Temporaries are never global, they must always be allocated in the
   // activation frame.
-  return (IsDynamicVariableMode(mode_) ||
-          (IsDeclaredVariableMode(mode_) && !IsLexicalVariableMode(mode_))) &&
+  return (IsDynamicVariableMode(mode()) ||
+          (IsDeclaredVariableMode(mode()) && !IsLexicalVariableMode(mode()))) &&
          scope_ != NULL && scope_->is_script_scope();
 }
 
@@ -60,17 +44,10 @@
 bool Variable::IsStaticGlobalObjectProperty() const {
   // Temporaries are never global, they must always be allocated in the
   // activation frame.
-  return (IsDeclaredVariableMode(mode_) && !IsLexicalVariableMode(mode_)) &&
+  return (IsDeclaredVariableMode(mode()) && !IsLexicalVariableMode(mode())) &&
          scope_ != NULL && scope_->is_script_scope();
 }
 
 
-int Variable::CompareIndex(Variable* const* v, Variable* const* w) {
-  int x = (*v)->index();
-  int y = (*w)->index();
-  // Consider sorting them according to type as well?
-  return x - y;
-}
-
 }  // namespace internal
 }  // namespace v8
diff --git a/src/ast/variables.h b/src/ast/variables.h
index f1f63b8..5bc7869 100644
--- a/src/ast/variables.h
+++ b/src/ast/variables.h
@@ -6,7 +6,8 @@
 #define V8_AST_VARIABLES_H_
 
 #include "src/ast/ast-value-factory.h"
-#include "src/zone.h"
+#include "src/globals.h"
+#include "src/zone/zone.h"
 
 namespace v8 {
 namespace internal {
@@ -17,15 +18,10 @@
 // after binding and variable allocation.
 class Variable final : public ZoneObject {
  public:
-  enum Kind { NORMAL, FUNCTION, THIS, ARGUMENTS };
-
-  Variable(Scope* scope, const AstRawString* name, VariableMode mode, Kind kind,
-           InitializationFlag initialization_flag,
+  Variable(Scope* scope, const AstRawString* name, VariableMode mode,
+           VariableKind kind, InitializationFlag initialization_flag,
            MaybeAssignedFlag maybe_assigned_flag = kNotAssigned);
 
-  // Printing support
-  static const char* Mode2String(VariableMode mode);
-
   // The source code for an eval() call may refer to a variable that is
   // in an outer scope about which we don't know anything (it may not
   // be the script scope). scope() is NULL in that case. Currently the
@@ -38,51 +34,56 @@
 
   Handle<String> name() const { return name_->string(); }
   const AstRawString* raw_name() const { return name_; }
-  VariableMode mode() const { return mode_; }
+  VariableMode mode() const { return VariableModeField::decode(bit_field_); }
   bool has_forced_context_allocation() const {
-    return force_context_allocation_;
+    return ForceContextAllocationField::decode(bit_field_);
   }
   void ForceContextAllocation() {
-    DCHECK(IsUnallocated() || IsContextSlot());
-    force_context_allocation_ = true;
+    DCHECK(IsUnallocated() || IsContextSlot() ||
+           location() == VariableLocation::MODULE);
+    bit_field_ = ForceContextAllocationField::update(bit_field_, true);
   }
-  bool is_used() { return is_used_; }
-  void set_is_used() { is_used_ = true; }
-  MaybeAssignedFlag maybe_assigned() const { return maybe_assigned_; }
-  void set_maybe_assigned() { maybe_assigned_ = kMaybeAssigned; }
+  bool is_used() { return IsUsedField::decode(bit_field_); }
+  void set_is_used() { bit_field_ = IsUsedField::update(bit_field_, true); }
+  MaybeAssignedFlag maybe_assigned() const {
+    return MaybeAssignedFlagField::decode(bit_field_);
+  }
+  void set_maybe_assigned() {
+    bit_field_ = MaybeAssignedFlagField::update(bit_field_, kMaybeAssigned);
+  }
 
   int initializer_position() { return initializer_position_; }
   void set_initializer_position(int pos) { initializer_position_ = pos; }
 
   bool IsUnallocated() const {
-    return location_ == VariableLocation::UNALLOCATED;
+    return location() == VariableLocation::UNALLOCATED;
   }
-  bool IsParameter() const { return location_ == VariableLocation::PARAMETER; }
-  bool IsStackLocal() const { return location_ == VariableLocation::LOCAL; }
+  bool IsParameter() const { return location() == VariableLocation::PARAMETER; }
+  bool IsStackLocal() const { return location() == VariableLocation::LOCAL; }
   bool IsStackAllocated() const { return IsParameter() || IsStackLocal(); }
-  bool IsContextSlot() const { return location_ == VariableLocation::CONTEXT; }
-  bool IsGlobalSlot() const { return location_ == VariableLocation::GLOBAL; }
-  bool IsUnallocatedOrGlobalSlot() const {
-    return IsUnallocated() || IsGlobalSlot();
-  }
-  bool IsLookupSlot() const { return location_ == VariableLocation::LOOKUP; }
+  bool IsContextSlot() const { return location() == VariableLocation::CONTEXT; }
+  bool IsLookupSlot() const { return location() == VariableLocation::LOOKUP; }
   bool IsGlobalObjectProperty() const;
   bool IsStaticGlobalObjectProperty() const;
 
-  bool is_dynamic() const { return IsDynamicVariableMode(mode_); }
-  bool is_const_mode() const { return IsImmutableVariableMode(mode_); }
+  bool is_dynamic() const { return IsDynamicVariableMode(mode()); }
   bool binding_needs_init() const {
-    DCHECK(initialization_flag_ != kNeedsInitialization ||
-           IsLexicalVariableMode(mode_));
-    return initialization_flag_ == kNeedsInitialization;
+    DCHECK(initialization_flag() != kNeedsInitialization ||
+           IsLexicalVariableMode(mode()));
+    return initialization_flag() == kNeedsInitialization;
+  }
+  bool throw_on_const_assignment(LanguageMode language_mode) const {
+    return kind() != SLOPPY_FUNCTION_NAME_VARIABLE || is_strict(language_mode);
   }
 
-  bool is_function() const { return kind_ == FUNCTION; }
-  bool is_this() const { return kind_ == THIS; }
-  bool is_arguments() const { return kind_ == ARGUMENTS; }
+  bool is_function() const { return kind() == FUNCTION_VARIABLE; }
+  bool is_this() const { return kind() == THIS_VARIABLE; }
+  bool is_sloppy_function_name() const {
+    return kind() == SLOPPY_FUNCTION_NAME_VARIABLE;
+  }
 
   Variable* local_if_not_shadowed() const {
-    DCHECK(mode_ == DYNAMIC_LOCAL && local_if_not_shadowed_ != NULL);
+    DCHECK(mode() == DYNAMIC_LOCAL && local_if_not_shadowed_ != NULL);
     return local_if_not_shadowed_;
   }
 
@@ -90,40 +91,61 @@
     local_if_not_shadowed_ = local;
   }
 
-  VariableLocation location() const { return location_; }
-  int index() const { return index_; }
+  VariableLocation location() const {
+    return LocationField::decode(bit_field_);
+  }
+  VariableKind kind() const { return VariableKindField::decode(bit_field_); }
   InitializationFlag initialization_flag() const {
-    return initialization_flag_;
+    return InitializationFlagField::decode(bit_field_);
+  }
+
+  int index() const { return index_; }
+
+  bool IsExport() const {
+    DCHECK(location() == VariableLocation::MODULE);
+    return index() == 0;
   }
 
   void AllocateTo(VariableLocation location, int index) {
-    DCHECK(IsUnallocated() || (location_ == location && index_ == index));
-    location_ = location;
+    DCHECK(IsUnallocated() ||
+           (this->location() == location && this->index() == index));
+    bit_field_ = LocationField::update(bit_field_, location);
+    DCHECK_EQ(location, this->location());
     index_ = index;
   }
 
-  static int CompareIndex(Variable* const* v, Variable* const* w);
+  static InitializationFlag DefaultInitializationFlag(VariableMode mode) {
+    DCHECK(IsDeclaredVariableMode(mode));
+    return mode == VAR ? kCreatedInitialized : kNeedsInitialization;
+  }
 
  private:
   Scope* scope_;
   const AstRawString* name_;
-  VariableMode mode_;
-  Kind kind_;
-  VariableLocation location_;
-  int index_;
-  int initializer_position_;
 
   // If this field is set, this variable references the stored locally bound
   // variable, but it might be shadowed by variable bindings introduced by
   // sloppy 'eval' calls between the reference scope (inclusive) and the
   // binding scope (exclusive).
   Variable* local_if_not_shadowed_;
+  int index_;
+  int initializer_position_;
+  uint16_t bit_field_;
 
-  // Usage info.
-  bool force_context_allocation_;  // set by variable resolver
-  bool is_used_;
-  InitializationFlag initialization_flag_;
-  MaybeAssignedFlag maybe_assigned_;
+  class VariableModeField : public BitField16<VariableMode, 0, 3> {};
+  class VariableKindField
+      : public BitField16<VariableKind, VariableModeField::kNext, 3> {};
+  class LocationField
+      : public BitField16<VariableLocation, VariableKindField::kNext, 3> {};
+  class ForceContextAllocationField
+      : public BitField16<bool, LocationField::kNext, 1> {};
+  class IsUsedField
+      : public BitField16<bool, ForceContextAllocationField::kNext, 1> {};
+  class InitializationFlagField
+      : public BitField16<InitializationFlag, IsUsedField::kNext, 2> {};
+  class MaybeAssignedFlagField
+      : public BitField16<MaybeAssignedFlag, InitializationFlagField::kNext,
+                          2> {};
 };
 }  // namespace internal
 }  // namespace v8
diff --git a/src/background-parsing-task.cc b/src/background-parsing-task.cc
index 5df46c8..83075c1 100644
--- a/src/background-parsing-task.cc
+++ b/src/background-parsing-task.cc
@@ -3,11 +3,19 @@
 // found in the LICENSE file.
 
 #include "src/background-parsing-task.h"
+
 #include "src/debug/debug.h"
+#include "src/parsing/parser.h"
 
 namespace v8 {
 namespace internal {
 
+void StreamedSource::Release() {
+  parser.reset();
+  info.reset();
+  zone.reset();
+}
+
 BackgroundParsingTask::BackgroundParsingTask(
     StreamedSource* source, ScriptCompiler::CompileOptions options,
     int stack_size, Isolate* isolate)
@@ -42,9 +50,8 @@
   // Parser needs to stay alive for finalizing the parsing on the main
   // thread.
   source_->parser.reset(new Parser(source_->info.get()));
-  source_->parser->DeserializeScopeChain(
-      source_->info.get(), Handle<Context>::null(),
-      Scope::DeserializationMode::kDeserializeOffHeap);
+  source_->parser->DeserializeScopeChain(source_->info.get(),
+                                         MaybeHandle<ScopeInfo>());
 }
 
 
@@ -55,8 +62,7 @@
 
   // Reset the stack limit of the parser to reflect correctly that we're on a
   // background thread.
-  uintptr_t stack_limit =
-      reinterpret_cast<uintptr_t>(&stack_limit) - stack_size_ * KB;
+  uintptr_t stack_limit = GetCurrentStackPosition() - stack_size_ * KB;
   source_->parser->set_stack_limit(stack_limit);
 
   // Nullify the Isolate temporarily so that the background parser doesn't
diff --git a/src/background-parsing-task.h b/src/background-parsing-task.h
index 1bf9d74..d7fe6ba 100644
--- a/src/background-parsing-task.h
+++ b/src/background-parsing-task.h
@@ -7,15 +7,16 @@
 
 #include <memory>
 
+#include "include/v8.h"
 #include "src/base/platform/platform.h"
 #include "src/base/platform/semaphore.h"
-#include "src/compiler.h"
 #include "src/parsing/parse-info.h"
-#include "src/parsing/parser.h"
+#include "src/unicode-cache.h"
 
 namespace v8 {
 namespace internal {
 
+class Parser;
 class ScriptData;
 
 // Internal representation of v8::ScriptCompiler::StreamedSource. Contains all
@@ -26,6 +27,8 @@
                  ScriptCompiler::StreamedSource::Encoding encoding)
       : source_stream(source_stream), encoding(encoding) {}
 
+  void Release();
+
   // Internal implementation of v8::ScriptCompiler::StreamedSource.
   std::unique_ptr<ScriptCompiler::ExternalSourceStream> source_stream;
   ScriptCompiler::StreamedSource::Encoding encoding;
@@ -39,10 +42,9 @@
   std::unique_ptr<ParseInfo> info;
   std::unique_ptr<Parser> parser;
 
- private:
-  // Prevent copying. Not implemented.
-  StreamedSource(const StreamedSource&);
-  StreamedSource& operator=(const StreamedSource&);
+  // Prevent copying.
+  StreamedSource(const StreamedSource&) = delete;
+  StreamedSource& operator=(const StreamedSource&) = delete;
 };
 
 
diff --git a/src/bailout-reason.h b/src/bailout-reason.h
index df47eb8..6b7da16 100644
--- a/src/bailout-reason.h
+++ b/src/bailout-reason.h
@@ -20,7 +20,6 @@
   V(kArgumentsObjectValueInATestContext,                                       \
     "Arguments object value in a test context")                                \
   V(kArrayIndexConstantValueTooBig, "Array index constant value too big")      \
-  V(kAssignmentToArguments, "Assignment to arguments")                         \
   V(kAssignmentToLetVariableBeforeInitialization,                              \
     "Assignment to let variable before initialization")                        \
   V(kAssignmentToLOOKUPVariable, "Assignment to LOOKUP variable")              \
@@ -64,6 +63,8 @@
   V(kEval, "eval")                                                             \
   V(kExpectedAllocationSite, "Expected allocation site")                       \
   V(kExpectedBooleanValue, "Expected boolean value")                           \
+  V(kExpectedFixedDoubleArrayMap,                                              \
+    "Expected a fixed double array map in fast shallow clone array literal")   \
   V(kExpectedFunctionObject, "Expected function object in register")           \
   V(kExpectedHeapNumber, "Expected HeapNumber")                                \
   V(kExpectedJSReceiver, "Expected object to have receiver type")              \
@@ -242,10 +243,6 @@
   V(kUnexpectedTypeForRegExpDataFixedArrayExpected,                            \
     "Unexpected type for RegExp data, FixedArray expected")                    \
   V(kUnexpectedValue, "Unexpected value")                                      \
-  V(kUnsupportedConstCompoundAssignment,                                       \
-    "Unsupported const compound assignment")                                   \
-  V(kUnsupportedCountOperationWithConst,                                       \
-    "Unsupported count operation with const")                                  \
   V(kUnsupportedDoubleImmediate, "Unsupported double immediate")               \
   V(kUnsupportedLetCompoundAssignment, "Unsupported let compound assignment")  \
   V(kUnsupportedLookupSlotInDeclaration,                                       \
@@ -268,9 +265,7 @@
   V(kWrongArgumentCountForInvokeIntrinsic,                                     \
     "Wrong number of arguments for intrinsic")                                 \
   V(kShouldNotDirectlyEnterOsrFunction,                                        \
-    "Should not directly enter OSR-compiled function")                         \
-  V(kConversionFromImpossibleValue,                                            \
-    "Reached conversion from value with empty type (i.e., impossible type)")
+    "Should not directly enter OSR-compiled function")
 
 #define ERROR_MESSAGES_CONSTANTS(C, T) C,
 enum BailoutReason {
diff --git a/src/base.isolate b/src/base.isolate
index a9cfc89..c457f00 100644
--- a/src/base.isolate
+++ b/src/base.isolate
@@ -4,7 +4,6 @@
 {
   'includes': [
     '../third_party/icu/icu.isolate',
-    '../gypfiles/config/win/msvs_dependencies.isolate',
   ],
   'conditions': [
     ['v8_use_snapshot=="true" and v8_use_external_startup_data==1', {
@@ -15,13 +14,6 @@
         ],
       },
     }],
-    ['OS=="mac" and asan==1', {
-      'variables': {
-        'files': [
-          '<(PRODUCT_DIR)/libclang_rt.asan_osx_dynamic.dylib',
-        ],
-      },
-    }],
     ['tsan==1', {
       'variables': {
         'files': [
diff --git a/src/base/accounting-allocator.cc b/src/base/accounting-allocator.cc
deleted file mode 100644
index c56f037..0000000
--- a/src/base/accounting-allocator.cc
+++ /dev/null
@@ -1,44 +0,0 @@
-// Copyright 2016 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/base/accounting-allocator.h"
-
-#include <cstdlib>
-
-#if V8_LIBC_BIONIC
-#include <malloc.h>  // NOLINT
-#endif
-
-namespace v8 {
-namespace base {
-
-void* AccountingAllocator::Allocate(size_t bytes) {
-  void* memory = malloc(bytes);
-  if (memory) {
-    AtomicWord current =
-        NoBarrier_AtomicIncrement(&current_memory_usage_, bytes);
-    AtomicWord max = NoBarrier_Load(&max_memory_usage_);
-    while (current > max) {
-      max = NoBarrier_CompareAndSwap(&max_memory_usage_, max, current);
-    }
-  }
-  return memory;
-}
-
-void AccountingAllocator::Free(void* memory, size_t bytes) {
-  free(memory);
-  NoBarrier_AtomicIncrement(&current_memory_usage_,
-                            -static_cast<AtomicWord>(bytes));
-}
-
-size_t AccountingAllocator::GetCurrentMemoryUsage() const {
-  return NoBarrier_Load(&current_memory_usage_);
-}
-
-size_t AccountingAllocator::GetMaxMemoryUsage() const {
-  return NoBarrier_Load(&max_memory_usage_);
-}
-
-}  // namespace base
-}  // namespace v8
diff --git a/src/base/accounting-allocator.h b/src/base/accounting-allocator.h
deleted file mode 100644
index 4e1baf1..0000000
--- a/src/base/accounting-allocator.h
+++ /dev/null
@@ -1,36 +0,0 @@
-// Copyright 2016 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_BASE_ACCOUNTING_ALLOCATOR_H_
-#define V8_BASE_ACCOUNTING_ALLOCATOR_H_
-
-#include "src/base/atomicops.h"
-#include "src/base/macros.h"
-
-namespace v8 {
-namespace base {
-
-class AccountingAllocator {
- public:
-  AccountingAllocator() = default;
-  virtual ~AccountingAllocator() = default;
-
-  // Returns nullptr on failed allocation.
-  virtual void* Allocate(size_t bytes);
-  virtual void Free(void* memory, size_t bytes);
-
-  size_t GetCurrentMemoryUsage() const;
-  size_t GetMaxMemoryUsage() const;
-
- private:
-  AtomicWord current_memory_usage_ = 0;
-  AtomicWord max_memory_usage_ = 0;
-
-  DISALLOW_COPY_AND_ASSIGN(AccountingAllocator);
-};
-
-}  // namespace base
-}  // namespace v8
-
-#endif  // V8_BASE_ACCOUNTING_ALLOCATOR_H_
diff --git a/src/base/atomic-utils.h b/src/base/atomic-utils.h
index e19385d..31db603 100644
--- a/src/base/atomic-utils.h
+++ b/src/base/atomic-utils.h
@@ -72,6 +72,22 @@
            cast_helper<T>::to_storage_type(old_value);
   }
 
+  V8_INLINE void SetBits(T bits, T mask) {
+    DCHECK_EQ(bits & ~mask, 0);
+    T old_value;
+    T new_value;
+    do {
+      old_value = Value();
+      new_value = (old_value & ~mask) | bits;
+    } while (!TrySetValue(old_value, new_value));
+  }
+
+  V8_INLINE void SetBit(int bit) {
+    SetBits(static_cast<T>(1) << bit, static_cast<T>(1) << bit);
+  }
+
+  V8_INLINE void ClearBit(int bit) { SetBits(0, 1 << bit); }
+
   V8_INLINE void SetValue(T new_value) {
     base::Release_Store(&value_, cast_helper<T>::to_storage_type(new_value));
   }
diff --git a/src/base/build_config.h b/src/base/build_config.h
index e033134..d113c2a 100644
--- a/src/base/build_config.h
+++ b/src/base/build_config.h
@@ -55,13 +55,21 @@
     defined(__ARM_ARCH_7R__) || \
     defined(__ARM_ARCH_7__)
 # define CAN_USE_ARMV7_INSTRUCTIONS 1
+#ifdef __ARM_ARCH_EXT_IDIV__
+#define CAN_USE_SUDIV 1
+#endif
 # ifndef CAN_USE_VFP3_INSTRUCTIONS
-#  define CAN_USE_VFP3_INSTRUCTIONS
+#define CAN_USE_VFP3_INSTRUCTIONS 1
 # endif
 #endif
 
 #if defined(__ARM_ARCH_8A__)
+#define CAN_USE_ARMV7_INSTRUCTIONS 1
+#define CAN_USE_SUDIV 1
 # define CAN_USE_ARMV8_INSTRUCTIONS 1
+#ifndef CAN_USE_VFP3_INSTRUCTIONS
+#define CAN_USE_VFP3_INSTRUCTIONS 1
+#endif
 #endif
 
 
@@ -196,11 +204,6 @@
 
 // Number of bits to represent the page size for paged spaces. The value of 20
 // gives 1Mb bytes per page.
-#if V8_HOST_ARCH_PPC && V8_TARGET_ARCH_PPC && V8_OS_LINUX
-// Bump up for Power Linux due to larger (64K) page size.
-const int kPageSizeBits = 22;
-#else
-const int kPageSizeBits = 20;
-#endif
+const int kPageSizeBits = 19;
 
 #endif  // V8_BASE_BUILD_CONFIG_H_
diff --git a/src/base/hashmap-entry.h b/src/base/hashmap-entry.h
new file mode 100644
index 0000000..629e734
--- /dev/null
+++ b/src/base/hashmap-entry.h
@@ -0,0 +1,54 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_BASE_HASHMAP_ENTRY_H_
+#define V8_BASE_HASHMAP_ENTRY_H_
+
+#include <cstdint>
+
+namespace v8 {
+namespace base {
+
+// HashMap entries are (key, value, hash) triplets, with a boolean indicating if
+// they are an empty entry. Some clients may not need to use the value slot
+// (e.g. implementers of sets, where the key is the value).
+template <typename Key, typename Value>
+struct TemplateHashMapEntry {
+  Key key;
+  Value value;
+  uint32_t hash;  // The full hash value for key
+
+  TemplateHashMapEntry(Key key, Value value, uint32_t hash)
+      : key(key), value(value), hash(hash), exists_(true) {}
+
+  bool exists() const { return exists_; }
+
+  void clear() { exists_ = false; }
+
+ private:
+  bool exists_;
+};
+
+// Specialization for pointer-valued keys
+template <typename Key, typename Value>
+struct TemplateHashMapEntry<Key*, Value> {
+  Key* key;
+  Value value;
+  uint32_t hash;  // The full hash value for key
+
+  TemplateHashMapEntry(Key* key, Value value, uint32_t hash)
+      : key(key), value(value), hash(hash) {}
+
+  bool exists() const { return key != nullptr; }
+
+  void clear() { key = nullptr; }
+};
+
+// TODO(leszeks): There could be a specialisation for void values (e.g. for
+// sets), which omits the value field
+
+}  // namespace base
+}  // namespace v8
+
+#endif  // V8_BASE_HASHMAP_ENTRY_H_
diff --git a/src/base/hashmap.h b/src/base/hashmap.h
index e3c47de..54038c5 100644
--- a/src/base/hashmap.h
+++ b/src/base/hashmap.h
@@ -12,6 +12,7 @@
 #include <stdlib.h>
 
 #include "src/base/bits.h"
+#include "src/base/hashmap-entry.h"
 #include "src/base/logging.h"
 
 namespace v8 {
@@ -23,10 +24,10 @@
   V8_INLINE static void Delete(void* p) { free(p); }
 };
 
-template <class AllocationPolicy>
+template <typename Key, typename Value, class MatchFun, class AllocationPolicy>
 class TemplateHashMapImpl {
  public:
-  typedef bool (*MatchFun)(void* key1, void* key2);
+  typedef TemplateHashMapEntry<Key, Value> Entry;
 
   // The default capacity.  This is used by the call sites which want
   // to pass in a non-default AllocationPolicy but want to use the
@@ -35,38 +36,36 @@
 
   // initial_capacity is the size of the initial hash map;
   // it must be a power of 2 (and thus must not be 0).
-  TemplateHashMapImpl(MatchFun match,
-                      uint32_t capacity = kDefaultHashMapCapacity,
+  TemplateHashMapImpl(uint32_t capacity = kDefaultHashMapCapacity,
+                      MatchFun match = MatchFun(),
                       AllocationPolicy allocator = AllocationPolicy());
 
   ~TemplateHashMapImpl();
 
-  // HashMap entries are (key, value, hash) triplets.
-  // Some clients may not need to use the value slot
-  // (e.g. implementers of sets, where the key is the value).
-  struct Entry {
-    void* key;
-    void* value;
-    uint32_t hash;  // The full hash value for key
-  };
-
   // If an entry with matching key is found, returns that entry.
-  // Otherwise, NULL is returned.
-  Entry* Lookup(void* key, uint32_t hash) const;
+  // Otherwise, nullptr is returned.
+  Entry* Lookup(const Key& key, uint32_t hash) const;
 
   // If an entry with matching key is found, returns that entry.
   // If no matching entry is found, a new entry is inserted with
-  // corresponding key, key hash, and NULL value.
-  Entry* LookupOrInsert(void* key, uint32_t hash,
+  // corresponding key, key hash, and default initialized value.
+  Entry* LookupOrInsert(const Key& key, uint32_t hash,
                         AllocationPolicy allocator = AllocationPolicy());
 
-  Entry* InsertNew(void* key, uint32_t hash,
+  // If an entry with matching key is found, returns that entry.
+  // If no matching entry is found, a new entry is inserted with
+  // corresponding key, key hash, and value created by func.
+  template <typename Func>
+  Entry* LookupOrInsert(const Key& key, uint32_t hash, const Func& value_func,
+                        AllocationPolicy allocator = AllocationPolicy());
+
+  Entry* InsertNew(const Key& key, uint32_t hash,
                    AllocationPolicy allocator = AllocationPolicy());
 
   // Removes the entry with matching key.
   // It returns the value of the deleted entry
   // or null if there is no value for such key.
-  void* Remove(void* key, uint32_t hash);
+  Value Remove(const Key& key, uint32_t hash);
 
   // Empties the hash map (occupancy() == 0).
   void Clear();
@@ -81,97 +80,101 @@
 
   // Iteration
   //
-  // for (Entry* p = map.Start(); p != NULL; p = map.Next(p)) {
+  // for (Entry* p = map.Start(); p != nullptr; p = map.Next(p)) {
   //   ...
   // }
   //
   // If entries are inserted during iteration, the effect of
   // calling Next() is undefined.
   Entry* Start() const;
-  Entry* Next(Entry* p) const;
-
-  // Some match functions defined for convenience.
-  static bool PointersMatch(void* key1, void* key2) { return key1 == key2; }
+  Entry* Next(Entry* entry) const;
 
  private:
-  MatchFun match_;
   Entry* map_;
   uint32_t capacity_;
   uint32_t occupancy_;
+  // TODO(leszeks): This takes up space even if it has no state, maybe replace
+  // with something that does the empty base optimisation e.g. std::tuple
+  MatchFun match_;
 
   Entry* map_end() const { return map_ + capacity_; }
-  Entry* Probe(void* key, uint32_t hash) const;
+  Entry* Probe(const Key& key, uint32_t hash) const;
+  Entry* FillEmptyEntry(Entry* entry, const Key& key, const Value& value,
+                        uint32_t hash,
+                        AllocationPolicy allocator = AllocationPolicy());
   void Initialize(uint32_t capacity, AllocationPolicy allocator);
   void Resize(AllocationPolicy allocator);
 };
-
-typedef TemplateHashMapImpl<DefaultAllocationPolicy> HashMap;
-
-template <class AllocationPolicy>
-TemplateHashMapImpl<AllocationPolicy>::TemplateHashMapImpl(
-    MatchFun match, uint32_t initial_capacity, AllocationPolicy allocator) {
-  match_ = match;
+template <typename Key, typename Value, typename MatchFun,
+          class AllocationPolicy>
+TemplateHashMapImpl<Key, Value, MatchFun, AllocationPolicy>::
+    TemplateHashMapImpl(uint32_t initial_capacity, MatchFun match,
+                        AllocationPolicy allocator)
+    : match_(match) {
   Initialize(initial_capacity, allocator);
 }
 
-template <class AllocationPolicy>
-TemplateHashMapImpl<AllocationPolicy>::~TemplateHashMapImpl() {
+template <typename Key, typename Value, typename MatchFun,
+          class AllocationPolicy>
+TemplateHashMapImpl<Key, Value, MatchFun,
+                    AllocationPolicy>::~TemplateHashMapImpl() {
   AllocationPolicy::Delete(map_);
 }
 
-template <class AllocationPolicy>
-typename TemplateHashMapImpl<AllocationPolicy>::Entry*
-TemplateHashMapImpl<AllocationPolicy>::Lookup(void* key, uint32_t hash) const {
-  Entry* p = Probe(key, hash);
-  return p->key != NULL ? p : NULL;
+template <typename Key, typename Value, typename MatchFun,
+          class AllocationPolicy>
+typename TemplateHashMapImpl<Key, Value, MatchFun, AllocationPolicy>::Entry*
+TemplateHashMapImpl<Key, Value, MatchFun, AllocationPolicy>::Lookup(
+    const Key& key, uint32_t hash) const {
+  Entry* entry = Probe(key, hash);
+  return entry->exists() ? entry : nullptr;
 }
 
-template <class AllocationPolicy>
-typename TemplateHashMapImpl<AllocationPolicy>::Entry*
-TemplateHashMapImpl<AllocationPolicy>::LookupOrInsert(
-    void* key, uint32_t hash, AllocationPolicy allocator) {
+template <typename Key, typename Value, typename MatchFun,
+          class AllocationPolicy>
+typename TemplateHashMapImpl<Key, Value, MatchFun, AllocationPolicy>::Entry*
+TemplateHashMapImpl<Key, Value, MatchFun, AllocationPolicy>::LookupOrInsert(
+    const Key& key, uint32_t hash, AllocationPolicy allocator) {
+  return LookupOrInsert(key, hash, []() { return Value(); }, allocator);
+}
+
+template <typename Key, typename Value, typename MatchFun,
+          class AllocationPolicy>
+template <typename Func>
+typename TemplateHashMapImpl<Key, Value, MatchFun, AllocationPolicy>::Entry*
+TemplateHashMapImpl<Key, Value, MatchFun, AllocationPolicy>::LookupOrInsert(
+    const Key& key, uint32_t hash, const Func& value_func,
+    AllocationPolicy allocator) {
   // Find a matching entry.
-  Entry* p = Probe(key, hash);
-  if (p->key != NULL) {
-    return p;
+  Entry* entry = Probe(key, hash);
+  if (entry->exists()) {
+    return entry;
   }
 
-  return InsertNew(key, hash, allocator);
+  return FillEmptyEntry(entry, key, value_func(), hash, allocator);
 }
 
-template <class AllocationPolicy>
-typename TemplateHashMapImpl<AllocationPolicy>::Entry*
-TemplateHashMapImpl<AllocationPolicy>::InsertNew(void* key, uint32_t hash,
-                                                 AllocationPolicy allocator) {
-  // Find a matching entry.
-  Entry* p = Probe(key, hash);
-  DCHECK(p->key == NULL);
-
-  // No entry found; insert one.
-  p->key = key;
-  p->value = NULL;
-  p->hash = hash;
-  occupancy_++;
-
-  // Grow the map if we reached >= 80% occupancy.
-  if (occupancy_ + occupancy_ / 4 >= capacity_) {
-    Resize(allocator);
-    p = Probe(key, hash);
-  }
-
-  return p;
+template <typename Key, typename Value, typename MatchFun,
+          class AllocationPolicy>
+typename TemplateHashMapImpl<Key, Value, MatchFun, AllocationPolicy>::Entry*
+TemplateHashMapImpl<Key, Value, MatchFun, AllocationPolicy>::InsertNew(
+    const Key& key, uint32_t hash, AllocationPolicy allocator) {
+  Entry* entry = Probe(key, hash);
+  return FillEmptyEntry(entry, key, Value(), hash, allocator);
 }
 
-template <class AllocationPolicy>
-void* TemplateHashMapImpl<AllocationPolicy>::Remove(void* key, uint32_t hash) {
+template <typename Key, typename Value, typename MatchFun,
+          class AllocationPolicy>
+Value TemplateHashMapImpl<Key, Value, MatchFun, AllocationPolicy>::Remove(
+    const Key& key, uint32_t hash) {
   // Lookup the entry for the key to remove.
   Entry* p = Probe(key, hash);
-  if (p->key == NULL) {
+  if (!p->exists()) {
     // Key not found nothing to remove.
-    return NULL;
+    return nullptr;
   }
 
-  void* value = p->value;
+  Value value = p->value;
   // To remove an entry we need to ensure that it does not create an empty
   // entry that will cause the search for another entry to stop too soon. If all
   // the entries between the entry to remove and the next empty slot have their
@@ -200,7 +203,7 @@
     // All entries between p and q have their initial position between p and q
     // and the entry p can be cleared without breaking the search for these
     // entries.
-    if (q->key == NULL) {
+    if (!q->exists()) {
       break;
     }
 
@@ -217,67 +220,92 @@
   }
 
   // Clear the entry which is allowed to en emptied.
-  p->key = NULL;
+  p->clear();
   occupancy_--;
   return value;
 }
 
-template <class AllocationPolicy>
-void TemplateHashMapImpl<AllocationPolicy>::Clear() {
+template <typename Key, typename Value, typename MatchFun,
+          class AllocationPolicy>
+void TemplateHashMapImpl<Key, Value, MatchFun, AllocationPolicy>::Clear() {
   // Mark all entries as empty.
   const Entry* end = map_end();
-  for (Entry* p = map_; p < end; p++) {
-    p->key = NULL;
+  for (Entry* entry = map_; entry < end; entry++) {
+    entry->clear();
   }
   occupancy_ = 0;
 }
 
-template <class AllocationPolicy>
-typename TemplateHashMapImpl<AllocationPolicy>::Entry*
-TemplateHashMapImpl<AllocationPolicy>::Start() const {
+template <typename Key, typename Value, typename MatchFun,
+          class AllocationPolicy>
+typename TemplateHashMapImpl<Key, Value, MatchFun, AllocationPolicy>::Entry*
+TemplateHashMapImpl<Key, Value, MatchFun, AllocationPolicy>::Start() const {
   return Next(map_ - 1);
 }
 
-template <class AllocationPolicy>
-typename TemplateHashMapImpl<AllocationPolicy>::Entry*
-TemplateHashMapImpl<AllocationPolicy>::Next(Entry* p) const {
+template <typename Key, typename Value, typename MatchFun,
+          class AllocationPolicy>
+typename TemplateHashMapImpl<Key, Value, MatchFun, AllocationPolicy>::Entry*
+TemplateHashMapImpl<Key, Value, MatchFun, AllocationPolicy>::Next(
+    Entry* entry) const {
   const Entry* end = map_end();
-  DCHECK(map_ - 1 <= p && p < end);
-  for (p++; p < end; p++) {
-    if (p->key != NULL) {
-      return p;
+  DCHECK(map_ - 1 <= entry && entry < end);
+  for (entry++; entry < end; entry++) {
+    if (entry->exists()) {
+      return entry;
     }
   }
-  return NULL;
+  return nullptr;
 }
 
-template <class AllocationPolicy>
-typename TemplateHashMapImpl<AllocationPolicy>::Entry*
-TemplateHashMapImpl<AllocationPolicy>::Probe(void* key, uint32_t hash) const {
-  DCHECK(key != NULL);
-
+template <typename Key, typename Value, typename MatchFun,
+          class AllocationPolicy>
+typename TemplateHashMapImpl<Key, Value, MatchFun, AllocationPolicy>::Entry*
+TemplateHashMapImpl<Key, Value, MatchFun, AllocationPolicy>::Probe(
+    const Key& key, uint32_t hash) const {
   DCHECK(base::bits::IsPowerOfTwo32(capacity_));
-  Entry* p = map_ + (hash & (capacity_ - 1));
+  Entry* entry = map_ + (hash & (capacity_ - 1));
   const Entry* end = map_end();
-  DCHECK(map_ <= p && p < end);
+  DCHECK(map_ <= entry && entry < end);
 
   DCHECK(occupancy_ < capacity_);  // Guarantees loop termination.
-  while (p->key != NULL && (hash != p->hash || !match_(key, p->key))) {
-    p++;
-    if (p >= end) {
-      p = map_;
+  while (entry->exists() && !match_(hash, entry->hash, key, entry->key)) {
+    entry++;
+    if (entry >= end) {
+      entry = map_;
     }
   }
 
-  return p;
+  return entry;
 }
 
-template <class AllocationPolicy>
-void TemplateHashMapImpl<AllocationPolicy>::Initialize(
+template <typename Key, typename Value, typename MatchFun,
+          class AllocationPolicy>
+typename TemplateHashMapImpl<Key, Value, MatchFun, AllocationPolicy>::Entry*
+TemplateHashMapImpl<Key, Value, MatchFun, AllocationPolicy>::FillEmptyEntry(
+    Entry* entry, const Key& key, const Value& value, uint32_t hash,
+    AllocationPolicy allocator) {
+  DCHECK(!entry->exists());
+
+  new (entry) Entry(key, value, hash);
+  occupancy_++;
+
+  // Grow the map if we reached >= 80% occupancy.
+  if (occupancy_ + occupancy_ / 4 >= capacity_) {
+    Resize(allocator);
+    entry = Probe(key, hash);
+  }
+
+  return entry;
+}
+
+template <typename Key, typename Value, typename MatchFun,
+          class AllocationPolicy>
+void TemplateHashMapImpl<Key, Value, MatchFun, AllocationPolicy>::Initialize(
     uint32_t capacity, AllocationPolicy allocator) {
   DCHECK(base::bits::IsPowerOfTwo32(capacity));
   map_ = reinterpret_cast<Entry*>(allocator.New(capacity * sizeof(Entry)));
-  if (map_ == NULL) {
+  if (map_ == nullptr) {
     FATAL("Out of memory: HashMap::Initialize");
     return;
   }
@@ -285,8 +313,10 @@
   Clear();
 }
 
-template <class AllocationPolicy>
-void TemplateHashMapImpl<AllocationPolicy>::Resize(AllocationPolicy allocator) {
+template <typename Key, typename Value, typename MatchFun,
+          class AllocationPolicy>
+void TemplateHashMapImpl<Key, Value, MatchFun, AllocationPolicy>::Resize(
+    AllocationPolicy allocator) {
   Entry* map = map_;
   uint32_t n = occupancy_;
 
@@ -294,10 +324,11 @@
   Initialize(capacity_ * 2, allocator);
 
   // Rehash all current entries.
-  for (Entry* p = map; n > 0; p++) {
-    if (p->key != NULL) {
-      Entry* entry = LookupOrInsert(p->key, p->hash, allocator);
-      entry->value = p->value;
+  for (Entry* entry = map; n > 0; entry++) {
+    if (entry->exists()) {
+      Entry* new_entry = Probe(entry->key, entry->hash);
+      new_entry = FillEmptyEntry(new_entry, entry->key, entry->value,
+                                 entry->hash, allocator);
       n--;
     }
   }
@@ -306,9 +337,83 @@
   AllocationPolicy::Delete(map);
 }
 
+// Match function which compares hashes before executing a (potentially
+// expensive) key comparison.
+template <typename Key, typename MatchFun>
+struct HashEqualityThenKeyMatcher {
+  explicit HashEqualityThenKeyMatcher(MatchFun match) : match_(match) {}
+
+  bool operator()(uint32_t hash1, uint32_t hash2, const Key& key1,
+                  const Key& key2) const {
+    return hash1 == hash2 && match_(key1, key2);
+  }
+
+ private:
+  MatchFun match_;
+};
+
+// Hashmap<void*, void*> which takes a custom key comparison function pointer.
+template <typename AllocationPolicy>
+class CustomMatcherTemplateHashMapImpl
+    : public TemplateHashMapImpl<
+          void*, void*,
+          HashEqualityThenKeyMatcher<void*, bool (*)(void*, void*)>,
+          AllocationPolicy> {
+  typedef TemplateHashMapImpl<
+      void*, void*, HashEqualityThenKeyMatcher<void*, bool (*)(void*, void*)>,
+      AllocationPolicy>
+      Base;
+
+ public:
+  typedef bool (*MatchFun)(void*, void*);
+
+  CustomMatcherTemplateHashMapImpl(
+      MatchFun match, uint32_t capacity = Base::kDefaultHashMapCapacity,
+      AllocationPolicy allocator = AllocationPolicy())
+      : Base(capacity, HashEqualityThenKeyMatcher<void*, MatchFun>(match),
+             allocator) {}
+};
+
+typedef CustomMatcherTemplateHashMapImpl<DefaultAllocationPolicy>
+    CustomMatcherHashMap;
+
+// Match function which compares keys directly by equality.
+template <typename Key>
+struct KeyEqualityMatcher {
+  bool operator()(uint32_t hash1, uint32_t hash2, const Key& key1,
+                  const Key& key2) const {
+    return key1 == key2;
+  }
+};
+
+// Hashmap<void*, void*> which compares the key pointers directly.
+template <typename AllocationPolicy>
+class PointerTemplateHashMapImpl
+    : public TemplateHashMapImpl<void*, void*, KeyEqualityMatcher<void*>,
+                                 AllocationPolicy> {
+  typedef TemplateHashMapImpl<void*, void*, KeyEqualityMatcher<void*>,
+                              AllocationPolicy>
+      Base;
+
+ public:
+  PointerTemplateHashMapImpl(uint32_t capacity = Base::kDefaultHashMapCapacity,
+                             AllocationPolicy allocator = AllocationPolicy())
+      : Base(capacity, KeyEqualityMatcher<void*>(), allocator) {}
+};
+
+typedef PointerTemplateHashMapImpl<DefaultAllocationPolicy> HashMap;
+
 // A hash map for pointer keys and values with an STL-like interface.
-template <class Key, class Value, class AllocationPolicy>
-class TemplateHashMap : private TemplateHashMapImpl<AllocationPolicy> {
+template <class Key, class Value, class MatchFun, class AllocationPolicy>
+class TemplateHashMap
+    : private TemplateHashMapImpl<void*, void*,
+                                  HashEqualityThenKeyMatcher<void*, MatchFun>,
+                                  AllocationPolicy> {
+  typedef TemplateHashMapImpl<void*, void*,
+                              HashEqualityThenKeyMatcher<void*, MatchFun>,
+                              AllocationPolicy>
+      Base;
+
  public:
   STATIC_ASSERT(sizeof(Key*) == sizeof(void*));    // NOLINT
   STATIC_ASSERT(sizeof(Value*) == sizeof(void*));  // NOLINT
@@ -328,26 +433,22 @@
     bool operator!=(const Iterator& other) { return entry_ != other.entry_; }
 
    private:
-    Iterator(const TemplateHashMapImpl<AllocationPolicy>* map,
-             typename TemplateHashMapImpl<AllocationPolicy>::Entry* entry)
+    Iterator(const Base* map, typename Base::Entry* entry)
         : map_(map), entry_(entry) {}
 
-    const TemplateHashMapImpl<AllocationPolicy>* map_;
-    typename TemplateHashMapImpl<AllocationPolicy>::Entry* entry_;
+    const Base* map_;
+    typename Base::Entry* entry_;
 
     friend class TemplateHashMap;
   };
 
-  TemplateHashMap(
-      typename TemplateHashMapImpl<AllocationPolicy>::MatchFun match,
-      AllocationPolicy allocator = AllocationPolicy())
-      : TemplateHashMapImpl<AllocationPolicy>(
-            match,
-            TemplateHashMapImpl<AllocationPolicy>::kDefaultHashMapCapacity,
-            allocator) {}
+  TemplateHashMap(MatchFun match,
+                  AllocationPolicy allocator = AllocationPolicy())
+      : Base(Base::kDefaultHashMapCapacity,
+             HashEqualityThenKeyMatcher<void*, MatchFun>(match), allocator) {}
 
   Iterator begin() const { return Iterator(this, this->Start()); }
-  Iterator end() const { return Iterator(this, NULL); }
+  Iterator end() const { return Iterator(this, nullptr); }
   Iterator find(Key* key, bool insert = false,
                 AllocationPolicy allocator = AllocationPolicy()) {
     if (insert) {
diff --git a/src/base/macros.h b/src/base/macros.h
index 822c887..e386617 100644
--- a/src/base/macros.h
+++ b/src/base/macros.h
@@ -21,12 +21,6 @@
 // The expression is a compile-time constant, and therefore can be
 // used in defining new arrays, for example.  If you use arraysize on
 // a pointer by mistake, you will get a compile-time error.
-//
-// One caveat is that arraysize() doesn't accept any array of an
-// anonymous type or a type defined inside a function.  In these rare
-// cases, you have to use the unsafe ARRAYSIZE_UNSAFE() macro below.  This is
-// due to a limitation in C++'s template system.  The limitation might
-// eventually be removed, but it hasn't happened yet.
 #define arraysize(array) (sizeof(ArraySizeHelper(array)))
 
 
diff --git a/src/base/platform/platform-macos.cc b/src/base/platform/platform-macos.cc
index b75bc47..69c1816 100644
--- a/src/base/platform/platform-macos.cc
+++ b/src/base/platform/platform-macos.cc
@@ -250,10 +250,7 @@
   return munmap(address, size) == 0;
 }
 
-
-bool VirtualMemory::HasLazyCommits() {
-  return false;
-}
+bool VirtualMemory::HasLazyCommits() { return true; }
 
 }  // namespace base
 }  // namespace v8
diff --git a/src/basic-block-profiler.h b/src/basic-block-profiler.h
index 2e7ac9c..c3c8b64 100644
--- a/src/basic-block-profiler.h
+++ b/src/basic-block-profiler.h
@@ -11,6 +11,7 @@
 #include <vector>
 
 #include "src/base/macros.h"
+#include "src/globals.h"
 
 namespace v8 {
 namespace internal {
@@ -58,15 +59,16 @@
   const DataList* data_list() { return &data_list_; }
 
  private:
-  friend std::ostream& operator<<(std::ostream& os,
-                                  const BasicBlockProfiler& s);
+  friend V8_EXPORT_PRIVATE std::ostream& operator<<(
+      std::ostream& os, const BasicBlockProfiler& s);
 
   DataList data_list_;
 
   DISALLOW_COPY_AND_ASSIGN(BasicBlockProfiler);
 };
 
-std::ostream& operator<<(std::ostream& os, const BasicBlockProfiler& s);
+V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream& os,
+                                           const BasicBlockProfiler& s);
 std::ostream& operator<<(std::ostream& os, const BasicBlockProfiler::Data& s);
 
 }  // namespace internal
diff --git a/src/bit-vector.h b/src/bit-vector.h
index 3703f28..13f9e97 100644
--- a/src/bit-vector.h
+++ b/src/bit-vector.h
@@ -6,7 +6,7 @@
 #define V8_DATAFLOW_H_
 
 #include "src/allocation.h"
-#include "src/zone.h"
+#include "src/zone/zone.h"
 
 namespace v8 {
 namespace internal {
diff --git a/src/bootstrapper.cc b/src/bootstrapper.cc
index 5142817..62cebfb 100644
--- a/src/bootstrapper.cc
+++ b/src/bootstrapper.cc
@@ -210,7 +210,6 @@
   HARMONY_INPROGRESS(DECLARE_FEATURE_INITIALIZATION)
   HARMONY_STAGED(DECLARE_FEATURE_INITIALIZATION)
   HARMONY_SHIPPING(DECLARE_FEATURE_INITIALIZATION)
-  DECLARE_FEATURE_INITIALIZATION(intl_extra, "")
 #undef DECLARE_FEATURE_INITIALIZATION
 
   Handle<JSFunction> InstallArrayBuffer(Handle<JSObject> target,
@@ -661,6 +660,16 @@
   // Create iterator-related meta-objects.
   Handle<JSObject> iterator_prototype =
       factory()->NewJSObject(isolate()->object_function(), TENURED);
+
+  Handle<JSFunction> iterator_prototype_iterator = SimpleCreateFunction(
+      isolate(), factory()->NewStringFromAsciiChecked("[Symbol.iterator]"),
+      Builtins::kIteratorPrototypeIterator, 0, true);
+  iterator_prototype_iterator->shared()->set_native(true);
+
+  JSObject::AddProperty(iterator_prototype, factory()->iterator_symbol(),
+                        iterator_prototype_iterator, DONT_ENUM);
+  native_context()->set_initial_iterator_prototype(*iterator_prototype);
+
   Handle<JSObject> generator_object_prototype =
       factory()->NewJSObject(isolate()->object_function(), TENURED);
   native_context()->set_initial_generator_prototype(
@@ -694,6 +703,12 @@
   SimpleInstallFunction(generator_object_prototype, "throw",
                         Builtins::kGeneratorPrototypeThrow, 1, true);
 
+  // Internal version of generator_prototype_next, flagged as non-native.
+  Handle<JSFunction> generator_next_internal =
+      SimpleCreateFunction(isolate(), factory()->next_string(),
+                           Builtins::kGeneratorPrototypeNext, 1, true);
+  native_context()->set_generator_next_internal(*generator_next_internal);
+
   // Create maps for generator functions and their prototypes.  Store those
   // maps in the native context. The "prototype" property descriptor is
   // writable, non-enumerable, and non-configurable (as per ES6 draft
@@ -991,13 +1006,10 @@
   error_fun->shared()->set_construct_stub(
       *isolate->builtins()->ErrorConstructor());
   error_fun->shared()->set_length(1);
-  error_fun->shared()->set_native(true);
 
   if (context_index == Context::ERROR_FUNCTION_INDEX) {
-    Handle<JSFunction> capture_stack_trace_fun =
-        SimpleInstallFunction(error_fun, "captureStackTrace",
-                              Builtins::kErrorCaptureStackTrace, 2, false);
-    capture_stack_trace_fun->shared()->set_native(true);
+    SimpleInstallFunction(error_fun, "captureStackTrace",
+                          Builtins::kErrorCaptureStackTrace, 2, false);
   }
 
   InstallWithIntrinsicDefaultProto(isolate, error_fun, context_index);
@@ -1016,7 +1028,6 @@
       Handle<JSFunction> to_string_fun =
           SimpleInstallFunction(prototype, factory->toString_string(),
                                 Builtins::kErrorPrototypeToString, 0, true);
-      to_string_fun->shared()->set_native(true);
       isolate->native_context()->set_error_to_string(*to_string_fun);
     } else {
       DCHECK(context_index != Context::ERROR_FUNCTION_INDEX);
@@ -1206,6 +1217,8 @@
         JSObject::kHeaderSize, MaybeHandle<JSObject>(),
         Builtins::kFunctionPrototypeHasInstance,
         static_cast<PropertyAttributes>(DONT_ENUM | DONT_DELETE | READ_ONLY));
+    has_instance->shared()->set_builtin_function_id(kFunctionHasInstance);
+    native_context()->set_function_has_instance(*has_instance);
 
     // Set the expected parameters for @@hasInstance to 1; required by builtin.
     has_instance->shared()->set_internal_formal_parameter_count(1);
@@ -1303,6 +1316,15 @@
     // Install i18n fallback functions.
     SimpleInstallFunction(prototype, "toLocaleString",
                           Builtins::kNumberPrototypeToLocaleString, 0, false);
+
+    // Install the Number functions.
+    SimpleInstallFunction(number_fun, "isFinite", Builtins::kNumberIsFinite, 1,
+                          true);
+    SimpleInstallFunction(number_fun, "isInteger", Builtins::kNumberIsInteger,
+                          1, true);
+    SimpleInstallFunction(number_fun, "isNaN", Builtins::kNumberIsNaN, 1, true);
+    SimpleInstallFunction(number_fun, "isSafeInteger",
+                          Builtins::kNumberIsSafeInteger, 1, true);
   }
 
   {  // --- B o o l e a n ---
@@ -1384,6 +1406,16 @@
                           1, true);
     SimpleInstallFunction(prototype, "charCodeAt",
                           Builtins::kStringPrototypeCharCodeAt, 1, true);
+    SimpleInstallFunction(prototype, "lastIndexOf",
+                          Builtins::kStringPrototypeLastIndexOf, 1, false);
+    SimpleInstallFunction(prototype, "localeCompare",
+                          Builtins::kStringPrototypeLocaleCompare, 1, true);
+    SimpleInstallFunction(prototype, "normalize",
+                          Builtins::kStringPrototypeNormalize, 0, false);
+    SimpleInstallFunction(prototype, "substr", Builtins::kStringPrototypeSubstr,
+                          2, true);
+    SimpleInstallFunction(prototype, "substring",
+                          Builtins::kStringPrototypeSubstring, 2, true);
     SimpleInstallFunction(prototype, "toString",
                           Builtins::kStringPrototypeToString, 0, true);
     SimpleInstallFunction(prototype, "trim", Builtins::kStringPrototypeTrim, 0,
@@ -1394,6 +1426,47 @@
                           Builtins::kStringPrototypeTrimRight, 0, false);
     SimpleInstallFunction(prototype, "valueOf",
                           Builtins::kStringPrototypeValueOf, 0, true);
+
+    Handle<JSFunction> iterator = SimpleCreateFunction(
+        isolate, factory->NewStringFromAsciiChecked("[Symbol.iterator]"),
+        Builtins::kStringPrototypeIterator, 0, true);
+    iterator->shared()->set_native(true);
+    JSObject::AddProperty(prototype, factory->iterator_symbol(), iterator,
+                          static_cast<PropertyAttributes>(DONT_ENUM));
+  }
+
+  {  // --- S t r i n g I t e r a t o r ---
+    Handle<JSObject> iterator_prototype(
+        native_context()->initial_iterator_prototype());
+
+    Handle<JSObject> string_iterator_prototype =
+        factory->NewJSObject(isolate->object_function(), TENURED);
+    JSObject::ForceSetPrototype(string_iterator_prototype, iterator_prototype);
+
+    JSObject::AddProperty(
+        string_iterator_prototype, factory->to_string_tag_symbol(),
+        factory->NewStringFromAsciiChecked("String Iterator"),
+        static_cast<PropertyAttributes>(DONT_ENUM | READ_ONLY));
+
+    Handle<JSFunction> next =
+        InstallFunction(string_iterator_prototype, "next", JS_OBJECT_TYPE,
+                        JSObject::kHeaderSize, MaybeHandle<JSObject>(),
+                        Builtins::kStringIteratorPrototypeNext);
+    next->shared()->set_builtin_function_id(kStringIteratorNext);
+
+    // Set the expected parameters for %StringIteratorPrototype%.next to 0 (not
+    // including the receiver), as required by the builtin.
+    next->shared()->set_internal_formal_parameter_count(0);
+
+    // Set the length for the function to satisfy ECMA-262.
+    next->shared()->set_length(0);
+
+    Handle<JSFunction> string_iterator_function = CreateFunction(
+        isolate, factory->NewStringFromAsciiChecked("StringIterator"),
+        JS_STRING_ITERATOR_TYPE, JSStringIterator::kSize,
+        string_iterator_prototype, Builtins::kIllegal);
+    native_context()->set_string_iterator_map(
+        string_iterator_function->initial_map());
   }
 
   {
@@ -1575,14 +1648,28 @@
 
   {  // -- R e g E x p
     // Builtin functions for RegExp.prototype.
+    Handle<JSObject> prototype =
+        factory->NewJSObject(isolate->object_function(), TENURED);
     Handle<JSFunction> regexp_fun =
         InstallFunction(global, "RegExp", JS_REGEXP_TYPE, JSRegExp::kSize,
-                        isolate->initial_object_prototype(),
-                        Builtins::kIllegal);
+                        prototype, Builtins::kRegExpConstructor);
     InstallWithIntrinsicDefaultProto(isolate, regexp_fun,
                                      Context::REGEXP_FUNCTION_INDEX);
-    regexp_fun->shared()->SetConstructStub(
-        *isolate->builtins()->JSBuiltinsConstructStub());
+
+    Handle<SharedFunctionInfo> shared(regexp_fun->shared(), isolate);
+    shared->SetConstructStub(*isolate->builtins()->RegExpConstructor());
+    shared->set_instance_class_name(isolate->heap()->RegExp_string());
+    shared->DontAdaptArguments();
+    shared->set_length(2);
+
+    // RegExp.prototype setup.
+
+    // Install the "constructor" property on the {prototype}.
+    JSObject::AddProperty(prototype, factory->constructor_string(), regexp_fun,
+                          DONT_ENUM);
+
+    SimpleInstallFunction(prototype, "exec", Builtins::kRegExpPrototypeExec, 1,
+                          true, DONT_ENUM);
 
     DCHECK(regexp_fun->has_initial_map());
     Handle<Map> initial_map(regexp_fun->initial_map());
@@ -1840,6 +1927,39 @@
     SimpleInstallGetter(prototype, factory->byte_offset_string(),
                         Builtins::kDataViewPrototypeGetByteOffset, false,
                         kDataViewByteOffset);
+
+    SimpleInstallFunction(prototype, "getInt8",
+                          Builtins::kDataViewPrototypeGetInt8, 1, false);
+    SimpleInstallFunction(prototype, "setInt8",
+                          Builtins::kDataViewPrototypeSetInt8, 2, false);
+    SimpleInstallFunction(prototype, "getUint8",
+                          Builtins::kDataViewPrototypeGetUint8, 1, false);
+    SimpleInstallFunction(prototype, "setUint8",
+                          Builtins::kDataViewPrototypeSetUint8, 2, false);
+    SimpleInstallFunction(prototype, "getInt16",
+                          Builtins::kDataViewPrototypeGetInt16, 1, false);
+    SimpleInstallFunction(prototype, "setInt16",
+                          Builtins::kDataViewPrototypeSetInt16, 2, false);
+    SimpleInstallFunction(prototype, "getUint16",
+                          Builtins::kDataViewPrototypeGetUint16, 1, false);
+    SimpleInstallFunction(prototype, "setUint16",
+                          Builtins::kDataViewPrototypeSetUint16, 2, false);
+    SimpleInstallFunction(prototype, "getInt32",
+                          Builtins::kDataViewPrototypeGetInt32, 1, false);
+    SimpleInstallFunction(prototype, "setInt32",
+                          Builtins::kDataViewPrototypeSetInt32, 2, false);
+    SimpleInstallFunction(prototype, "getUint32",
+                          Builtins::kDataViewPrototypeGetUint32, 1, false);
+    SimpleInstallFunction(prototype, "setUint32",
+                          Builtins::kDataViewPrototypeSetUint32, 2, false);
+    SimpleInstallFunction(prototype, "getFloat32",
+                          Builtins::kDataViewPrototypeGetFloat32, 1, false);
+    SimpleInstallFunction(prototype, "setFloat32",
+                          Builtins::kDataViewPrototypeSetFloat32, 2, false);
+    SimpleInstallFunction(prototype, "getFloat64",
+                          Builtins::kDataViewPrototypeGetFloat64, 1, false);
+    SimpleInstallFunction(prototype, "setFloat64",
+                          Builtins::kDataViewPrototypeSetFloat64, 2, false);
   }
 
   {  // -- M a p
@@ -2177,7 +2297,6 @@
   HARMONY_INPROGRESS(FEATURE_INITIALIZE_GLOBAL)
   HARMONY_STAGED(FEATURE_INITIALIZE_GLOBAL)
   HARMONY_SHIPPING(FEATURE_INITIALIZE_GLOBAL)
-  FEATURE_INITIALIZE_GLOBAL(intl_extra, "")
 #undef FEATURE_INITIALIZE_GLOBAL
 }
 
@@ -2423,17 +2542,12 @@
     native_context->set_object_to_string(*to_string);
   }
 
-  Handle<JSObject> iterator_prototype;
+  Handle<JSObject> iterator_prototype(
+      native_context->initial_iterator_prototype());
 
-  {
-    PrototypeIterator iter(native_context->generator_object_prototype_map());
-    iter.Advance();  // Advance to the prototype of generator_object_prototype.
-    iterator_prototype = Handle<JSObject>(iter.GetCurrent<JSObject>());
-
-    JSObject::AddProperty(container,
-                          factory->InternalizeUtf8String("IteratorPrototype"),
-                          iterator_prototype, NONE);
-  }
+  JSObject::AddProperty(container,
+                        factory->InternalizeUtf8String("IteratorPrototype"),
+                        iterator_prototype, NONE);
 
   {
     PrototypeIterator iter(native_context->sloppy_generator_function_map());
@@ -2686,8 +2800,6 @@
         container, "CallSite", JS_OBJECT_TYPE, JSObject::kHeaderSize,
         isolate->initial_object_prototype(), Builtins::kUnsupportedThrower);
     callsite_fun->shared()->DontAdaptArguments();
-    callsite_fun->shared()->set_native(true);
-
     isolate->native_context()->set_callsite_function(*callsite_fun);
 
     {
@@ -2725,8 +2837,7 @@
 
       Handle<JSFunction> fun;
       for (const FunctionInfo& info : infos) {
-        fun = SimpleInstallFunction(proto, info.name, info.id, 0, true, attrs);
-        fun->shared()->set_native(true);
+        SimpleInstallFunction(proto, info.name, info.id, 0, true, attrs);
       }
 
       Accessors::FunctionSetPrototype(callsite_fun, proto).Assert();
@@ -2739,6 +2850,7 @@
                                                  Handle<JSObject> container) {
   HandleScope scope(isolate);
 
+#ifdef V8_I18N_SUPPORT
 #define INITIALIZE_FLAG(FLAG)                                         \
   {                                                                   \
     Handle<String> name =                                             \
@@ -2747,9 +2859,8 @@
                           isolate->factory()->ToBoolean(FLAG), NONE); \
   }
 
-  INITIALIZE_FLAG(FLAG_intl_extra)
-
 #undef INITIALIZE_FLAG
+#endif
 }
 
 
@@ -2762,17 +2873,17 @@
 EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_regexp_named_captures)
 EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_regexp_property)
 EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_function_sent)
-EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(intl_extra)
-EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_explicit_tailcalls)
 EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_tailcalls)
 EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_restrictive_declarations)
 EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_string_padding)
 #ifdef V8_I18N_SUPPORT
+EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(datetime_format_to_parts)
 EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(icu_case_mapping)
 #endif
 EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_async_await)
 EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_restrictive_generators)
 EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_trailing_commas)
+EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_class_fields)
 
 void InstallPublicSymbol(Factory* factory, Handle<Context> native_context,
                          const char* name, Handle<Symbol> value) {
@@ -3106,6 +3217,14 @@
     native_context()->set_global_eval_fun(*eval);
   }
 
+  // Install Global.isFinite
+  SimpleInstallFunction(global_object, "isFinite", Builtins::kGlobalIsFinite, 1,
+                        true, kGlobalIsFinite);
+
+  // Install Global.isNaN
+  SimpleInstallFunction(global_object, "isNaN", Builtins::kGlobalIsNaN, 1, true,
+                        kGlobalIsNaN);
+
   // Install Array.prototype.concat
   {
     Handle<JSFunction> array_constructor(native_context()->array_function());
@@ -3336,7 +3455,6 @@
 
 
 bool Genesis::InstallExperimentalNatives() {
-  static const char* harmony_explicit_tailcalls_natives[] = {nullptr};
   static const char* harmony_tailcalls_natives[] = {nullptr};
   static const char* harmony_sharedarraybuffer_natives[] = {
       "native harmony-atomics.js", NULL};
@@ -3349,7 +3467,6 @@
   static const char* harmony_regexp_named_captures_natives[] = {nullptr};
   static const char* harmony_regexp_property_natives[] = {nullptr};
   static const char* harmony_function_sent_natives[] = {nullptr};
-  static const char* intl_extra_natives[] = {"native intl-extra.js", nullptr};
   static const char* harmony_object_values_entries_natives[] = {nullptr};
   static const char* harmony_object_own_property_descriptors_natives[] = {
       nullptr};
@@ -3359,11 +3476,13 @@
 #ifdef V8_I18N_SUPPORT
   static const char* icu_case_mapping_natives[] = {"native icu-case-mapping.js",
                                                    nullptr};
+  static const char* datetime_format_to_parts_natives[] = {
+      "native datetime-format-to-parts.js", nullptr};
 #endif
-  static const char* harmony_async_await_natives[] = {
-      "native harmony-async-await.js", nullptr};
+  static const char* harmony_async_await_natives[] = {nullptr};
   static const char* harmony_restrictive_generators_natives[] = {nullptr};
   static const char* harmony_trailing_commas_natives[] = {nullptr};
+  static const char* harmony_class_fields_natives[] = {nullptr};
 
   for (int i = ExperimentalNatives::GetDebuggerCount();
        i < ExperimentalNatives::GetBuiltinsCount(); i++) {
@@ -3382,7 +3501,6 @@
     HARMONY_INPROGRESS(INSTALL_EXPERIMENTAL_NATIVES);
     HARMONY_STAGED(INSTALL_EXPERIMENTAL_NATIVES);
     HARMONY_SHIPPING(INSTALL_EXPERIMENTAL_NATIVES);
-    INSTALL_EXPERIMENTAL_NATIVES(intl_extra, "");
 #undef INSTALL_EXPERIMENTAL_NATIVES
   }
 
@@ -3547,8 +3665,7 @@
   return v8::internal::ComputePointerHash(extension);
 }
 
-Genesis::ExtensionStates::ExtensionStates()
-    : map_(base::HashMap::PointersMatch, 8) {}
+Genesis::ExtensionStates::ExtensionStates() : map_(8) {}
 
 Genesis::ExtensionTraversalState Genesis::ExtensionStates::get_state(
     RegisteredExtension* extension) {
@@ -4003,9 +4120,7 @@
 
   // Check that the script context table is empty except for the 'this' binding.
   // We do not need script contexts for native scripts.
-  if (!FLAG_global_var_shortcuts) {
-    DCHECK_EQ(1, native_context()->script_context_table()->used());
-  }
+  DCHECK_EQ(1, native_context()->script_context_table()->used());
 
   result_ = native_context();
 }
diff --git a/src/builtins/arm/builtins-arm.cc b/src/builtins/arm/builtins-arm.cc
index 1b643d4..2c0bef2 100644
--- a/src/builtins/arm/builtins-arm.cc
+++ b/src/builtins/arm/builtins-arm.cc
@@ -387,10 +387,9 @@
   __ bind(&to_string);
   {
     FrameScope scope(masm, StackFrame::MANUAL);
-    ToStringStub stub(masm->isolate());
     __ SmiTag(r2);
     __ EnterBuiltinFrame(cp, r1, r2);
-    __ CallStub(&stub);
+    __ Call(masm->isolate()->builtins()->ToString(), RelocInfo::CODE_TARGET);
     __ LeaveBuiltinFrame(cp, r1, r2);
     __ SmiUntag(r2);
   }
@@ -449,12 +448,11 @@
     __ bind(&convert);
     {
       FrameScope scope(masm, StackFrame::MANUAL);
-      ToStringStub stub(masm->isolate());
       __ SmiTag(r6);
       __ EnterBuiltinFrame(cp, r1, r6);
       __ Push(r3);
       __ Move(r0, r2);
-      __ CallStub(&stub);
+      __ Call(masm->isolate()->builtins()->ToString(), RelocInfo::CODE_TARGET);
       __ Move(r2, r0);
       __ Pop(r3);
       __ LeaveBuiltinFrame(cp, r1, r6);
@@ -1060,6 +1058,17 @@
   __ cmp(r0, Operand(masm->CodeObject()));  // Self-reference to this code.
   __ b(ne, &switch_to_different_code_kind);
 
+  // Increment invocation count for the function.
+  __ ldr(r2, FieldMemOperand(r1, JSFunction::kLiteralsOffset));
+  __ ldr(r2, FieldMemOperand(r2, LiteralsArray::kFeedbackVectorOffset));
+  __ ldr(r9, FieldMemOperand(
+                 r2, TypeFeedbackVector::kInvocationCountIndex * kPointerSize +
+                         TypeFeedbackVector::kHeaderSize));
+  __ add(r9, r9, Operand(Smi::FromInt(1)));
+  __ str(r9, FieldMemOperand(
+                 r2, TypeFeedbackVector::kInvocationCountIndex * kPointerSize +
+                         TypeFeedbackVector::kHeaderSize));
+
   // Check function data field is actually a BytecodeArray object.
   if (FLAG_debug_code) {
     __ SmiTst(kInterpreterBytecodeArrayRegister);
@@ -1162,8 +1171,33 @@
   __ Jump(lr);
 }
 
-static void Generate_InterpreterPushArgs(MacroAssembler* masm, Register index,
-                                         Register limit, Register scratch) {
+static void Generate_StackOverflowCheck(MacroAssembler* masm, Register num_args,
+                                        Register scratch,
+                                        Label* stack_overflow) {
+  // Check the stack for overflow. We are not trying to catch
+  // interruptions (e.g. debug break and preemption) here, so the "real stack
+  // limit" is checked.
+  __ LoadRoot(scratch, Heap::kRealStackLimitRootIndex);
+  // Make scratch the space we have left. The stack might already be overflowed
+  // here which will cause scratch to become negative.
+  __ sub(scratch, sp, scratch);
+  // Check if the arguments will overflow the stack.
+  __ cmp(scratch, Operand(num_args, LSL, kPointerSizeLog2));
+  __ b(le, stack_overflow);  // Signed comparison.
+}
+
+static void Generate_InterpreterPushArgs(MacroAssembler* masm,
+                                         Register num_args, Register index,
+                                         Register limit, Register scratch,
+                                         Label* stack_overflow) {
+  // Add a stack check before pushing arguments.
+  Generate_StackOverflowCheck(masm, num_args, scratch, stack_overflow);
+
+  // Find the address of the last argument.
+  __ mov(limit, num_args);
+  __ mov(limit, Operand(limit, LSL, kPointerSizeLog2));
+  __ sub(limit, index, limit);
+
   Label loop_header, loop_check;
   __ b(al, &loop_check);
   __ bind(&loop_header);
@@ -1185,14 +1219,12 @@
   //          they are to be pushed onto the stack.
   //  -- r1 : the target to call (can be any Object).
   // -----------------------------------
+  Label stack_overflow;
 
-  // Find the address of the last argument.
   __ add(r3, r0, Operand(1));  // Add one for receiver.
-  __ mov(r3, Operand(r3, LSL, kPointerSizeLog2));
-  __ sub(r3, r2, r3);
 
-  // Push the arguments.
-  Generate_InterpreterPushArgs(masm, r2, r3, r4);
+  // Push the arguments. r2, r4, r5 will be modified.
+  Generate_InterpreterPushArgs(masm, r3, r2, r4, r5, &stack_overflow);
 
   // Call the target.
   if (function_type == CallableType::kJSFunction) {
@@ -1205,30 +1237,88 @@
                                               tail_call_mode),
             RelocInfo::CODE_TARGET);
   }
+
+  __ bind(&stack_overflow);
+  {
+    __ TailCallRuntime(Runtime::kThrowStackOverflow);
+    // Unreachable code.
+    __ bkpt(0);
+  }
 }
 
 // static
-void Builtins::Generate_InterpreterPushArgsAndConstruct(MacroAssembler* masm) {
+void Builtins::Generate_InterpreterPushArgsAndConstructImpl(
+    MacroAssembler* masm, CallableType construct_type) {
   // ----------- S t a t e -------------
   // -- r0 : argument count (not including receiver)
   // -- r3 : new target
   // -- r1 : constructor to call
-  // -- r2 : address of the first argument
+  // -- r2 : allocation site feedback if available, undefined otherwise.
+  // -- r4 : address of the first argument
   // -----------------------------------
-
-  // Find the address of the last argument.
-  __ mov(r4, Operand(r0, LSL, kPointerSizeLog2));
-  __ sub(r4, r2, r4);
+  Label stack_overflow;
 
   // Push a slot for the receiver to be constructed.
   __ mov(ip, Operand::Zero());
   __ push(ip);
 
-  // Push the arguments.
-  Generate_InterpreterPushArgs(masm, r2, r4, r5);
+  // Push the arguments. r5, r4, r6 will be modified.
+  Generate_InterpreterPushArgs(masm, r0, r4, r5, r6, &stack_overflow);
 
-  // Call the constructor with r0, r1, and r3 unmodified.
-  __ Jump(masm->isolate()->builtins()->Construct(), RelocInfo::CODE_TARGET);
+  __ AssertUndefinedOrAllocationSite(r2, r5);
+  if (construct_type == CallableType::kJSFunction) {
+    __ AssertFunction(r1);
+
+    // Tail call to the function-specific construct stub (still in the caller
+    // context at this point).
+    __ ldr(r4, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
+    __ ldr(r4, FieldMemOperand(r4, SharedFunctionInfo::kConstructStubOffset));
+    // Jump to the construct function.
+    __ add(pc, r4, Operand(Code::kHeaderSize - kHeapObjectTag));
+
+  } else {
+    DCHECK_EQ(construct_type, CallableType::kAny);
+    // Call the constructor with r0, r1, and r3 unmodified.
+    __ Jump(masm->isolate()->builtins()->Construct(), RelocInfo::CODE_TARGET);
+  }
+
+  __ bind(&stack_overflow);
+  {
+    __ TailCallRuntime(Runtime::kThrowStackOverflow);
+    // Unreachable code.
+    __ bkpt(0);
+  }
+}
+
+// static
+void Builtins::Generate_InterpreterPushArgsAndConstructArray(
+    MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  // -- r0 : argument count (not including receiver)
+  // -- r1 : target to call verified to be Array function
+  // -- r2 : allocation site feedback if available, undefined otherwise.
+  // -- r3 : address of the first argument
+  // -----------------------------------
+  Label stack_overflow;
+
+  __ add(r4, r0, Operand(1));  // Add one for receiver.
+
+  // Note: Generate_InterpreterPushArgs performs the stack check itself.
+  // Push the arguments. r3, r5, r6 will be modified.
+  Generate_InterpreterPushArgs(masm, r4, r3, r5, r6, &stack_overflow);
+
+  // Array constructor expects the constructor in r3; it is the same as r1 here.
+  __ mov(r3, r1);
+
+  ArrayConstructorStub stub(masm->isolate());
+  __ TailCallStub(&stub);
+
+  __ bind(&stack_overflow);
+  {
+    __ TailCallRuntime(Runtime::kThrowStackOverflow);
+    // Unreachable code.
+    __ bkpt(0);
+  }
 }
 
 void Builtins::Generate_InterpreterEnterBytecodeDispatch(MacroAssembler* masm) {
@@ -1816,61 +1906,6 @@
 }
 
 // static
-void Builtins::Generate_DatePrototype_GetField(MacroAssembler* masm,
-                                               int field_index) {
-  // ----------- S t a t e -------------
-  //  -- r0    : number of arguments
-  //  -- r1    : function
-  //  -- cp    : context
-  //  -- lr    : return address
-  //  -- sp[0] : receiver
-  // -----------------------------------
-
-  // 1. Pop receiver into r0 and check that it's actually a JSDate object.
-  Label receiver_not_date;
-  {
-    __ Pop(r0);
-    __ JumpIfSmi(r0, &receiver_not_date);
-    __ CompareObjectType(r0, r2, r3, JS_DATE_TYPE);
-    __ b(ne, &receiver_not_date);
-  }
-
-  // 2. Load the specified date field, falling back to the runtime as necessary.
-  if (field_index == JSDate::kDateValue) {
-    __ ldr(r0, FieldMemOperand(r0, JSDate::kValueOffset));
-  } else {
-    if (field_index < JSDate::kFirstUncachedField) {
-      Label stamp_mismatch;
-      __ mov(r1, Operand(ExternalReference::date_cache_stamp(masm->isolate())));
-      __ ldr(r1, MemOperand(r1));
-      __ ldr(ip, FieldMemOperand(r0, JSDate::kCacheStampOffset));
-      __ cmp(r1, ip);
-      __ b(ne, &stamp_mismatch);
-      __ ldr(r0, FieldMemOperand(
-                     r0, JSDate::kValueOffset + field_index * kPointerSize));
-      __ Ret();
-      __ bind(&stamp_mismatch);
-    }
-    FrameScope scope(masm, StackFrame::INTERNAL);
-    __ PrepareCallCFunction(2, r1);
-    __ mov(r1, Operand(Smi::FromInt(field_index)));
-    __ CallCFunction(
-        ExternalReference::get_date_field_function(masm->isolate()), 2);
-  }
-  __ Ret();
-
-  // 3. Raise a TypeError if the receiver is not a date.
-  __ bind(&receiver_not_date);
-  {
-    FrameScope scope(masm, StackFrame::MANUAL);
-    __ Push(r0);
-    __ Move(r0, Smi::FromInt(0));
-    __ EnterBuiltinFrame(cp, r1, r0);
-    __ CallRuntime(Runtime::kThrowNotDateError);
-  }
-}
-
-// static
 void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
   // ----------- S t a t e -------------
   //  -- r0    : argc
@@ -2101,26 +2136,6 @@
   }
 }
 
-static void ArgumentAdaptorStackCheck(MacroAssembler* masm,
-                                      Label* stack_overflow) {
-  // ----------- S t a t e -------------
-  //  -- r0 : actual number of arguments
-  //  -- r1 : function (passed through to callee)
-  //  -- r2 : expected number of arguments
-  //  -- r3 : new target (passed through to callee)
-  // -----------------------------------
-  // Check the stack for overflow. We are not trying to catch
-  // interruptions (e.g. debug break and preemption) here, so the "real stack
-  // limit" is checked.
-  __ LoadRoot(r5, Heap::kRealStackLimitRootIndex);
-  // Make r5 the space we have left. The stack might already be overflowed
-  // here which will cause r5 to become negative.
-  __ sub(r5, sp, r5);
-  // Check if the arguments will overflow the stack.
-  __ cmp(r5, Operand(r2, LSL, kPointerSizeLog2));
-  __ b(le, stack_overflow);  // Signed comparison.
-}
-
 static void EnterArgumentsAdaptorFrame(MacroAssembler* masm) {
   __ SmiTag(r0);
   __ mov(r4, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
@@ -2786,21 +2801,6 @@
   __ TailCallRuntime(Runtime::kAbort);
 }
 
-void Builtins::Generate_ToNumber(MacroAssembler* masm) {
-  // The ToNumber stub takes one argument in r0.
-  STATIC_ASSERT(kSmiTag == 0);
-  __ tst(r0, Operand(kSmiTagMask));
-  __ Ret(eq);
-
-  __ CompareObjectType(r0, r1, r1, HEAP_NUMBER_TYPE);
-  // r0: receiver
-  // r1: receiver instance type
-  __ Ret(eq);
-
-  __ Jump(masm->isolate()->builtins()->NonNumberToNumber(),
-          RelocInfo::CODE_TARGET);
-}
-
 void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
   // ----------- S t a t e -------------
   //  -- r0 : actual number of arguments
@@ -2820,7 +2820,7 @@
   {  // Enough parameters: actual >= expected
     __ bind(&enough);
     EnterArgumentsAdaptorFrame(masm);
-    ArgumentAdaptorStackCheck(masm, &stack_overflow);
+    Generate_StackOverflowCheck(masm, r2, r5, &stack_overflow);
 
     // Calculate copy start address into r0 and copy end address into r4.
     // r0: actual number of arguments as a smi
@@ -2853,7 +2853,7 @@
   {  // Too few parameters: Actual < expected
     __ bind(&too_few);
     EnterArgumentsAdaptorFrame(masm);
-    ArgumentAdaptorStackCheck(masm, &stack_overflow);
+    Generate_StackOverflowCheck(masm, r2, r5, &stack_overflow);
 
     // Calculate copy start address into r0 and copy end address is fp.
     // r0: actual number of arguments as a smi
diff --git a/src/builtins/arm64/builtins-arm64.cc b/src/builtins/arm64/builtins-arm64.cc
index 57395d8..48551de 100644
--- a/src/builtins/arm64/builtins-arm64.cc
+++ b/src/builtins/arm64/builtins-arm64.cc
@@ -379,10 +379,9 @@
   __ Bind(&to_string);
   {
     FrameScope scope(masm, StackFrame::MANUAL);
-    ToStringStub stub(masm->isolate());
     __ SmiTag(x2);
     __ EnterBuiltinFrame(cp, x1, x2);
-    __ CallStub(&stub);
+    __ Call(masm->isolate()->builtins()->ToString(), RelocInfo::CODE_TARGET);
     __ LeaveBuiltinFrame(cp, x1, x2);
     __ SmiUntag(x2);
   }
@@ -442,12 +441,11 @@
     __ Bind(&convert);
     {
       FrameScope scope(masm, StackFrame::MANUAL);
-      ToStringStub stub(masm->isolate());
       __ SmiTag(x6);
       __ EnterBuiltinFrame(cp, x1, x6);
       __ Push(x3);
       __ Move(x0, x2);
-      __ CallStub(&stub);
+      __ Call(masm->isolate()->builtins()->ToString(), RelocInfo::CODE_TARGET);
       __ Move(x2, x0);
       __ Pop(x3);
       __ LeaveBuiltinFrame(cp, x1, x6);
@@ -1065,6 +1063,17 @@
   __ Cmp(x0, Operand(masm->CodeObject()));  // Self-reference to this code.
   __ B(ne, &switch_to_different_code_kind);
 
+  // Increment invocation count for the function.
+  __ Ldr(x11, FieldMemOperand(x1, JSFunction::kLiteralsOffset));
+  __ Ldr(x11, FieldMemOperand(x11, LiteralsArray::kFeedbackVectorOffset));
+  __ Ldr(x10, FieldMemOperand(x11, TypeFeedbackVector::kInvocationCountIndex *
+                                           kPointerSize +
+                                       TypeFeedbackVector::kHeaderSize));
+  __ Add(x10, x10, Operand(Smi::FromInt(1)));
+  __ Str(x10, FieldMemOperand(x11, TypeFeedbackVector::kInvocationCountIndex *
+                                           kPointerSize +
+                                       TypeFeedbackVector::kHeaderSize));
+
   // Check function data field is actually a BytecodeArray object.
   if (FLAG_debug_code) {
     __ AssertNotSmi(kInterpreterBytecodeArrayRegister,
@@ -1171,6 +1180,50 @@
   __ Ret();
 }
 
+static void Generate_StackOverflowCheck(MacroAssembler* masm, Register num_args,
+                                        Register scratch,
+                                        Label* stack_overflow) {
+  // Check the stack for overflow.
+  // We are not trying to catch interruptions (e.g. debug break and
+  // preemption) here, so the "real stack limit" is checked.
+  Label enough_stack_space;
+  __ LoadRoot(scratch, Heap::kRealStackLimitRootIndex);
+  // Make scratch the space we have left. The stack might already be overflowed
+  // here which will cause scratch to become negative.
+  __ Sub(scratch, jssp, scratch);
+  // Check if the arguments will overflow the stack.
+  __ Cmp(scratch, Operand(num_args, LSL, kPointerSizeLog2));
+  __ B(le, stack_overflow);
+}
+
+static void Generate_InterpreterPushArgs(MacroAssembler* masm,
+                                         Register num_args, Register index,
+                                         Register last_arg, Register stack_addr,
+                                         Register scratch,
+                                         Label* stack_overflow) {
+  // Add a stack check before pushing arguments.
+  Generate_StackOverflowCheck(masm, num_args, scratch, stack_overflow);
+
+  __ Mov(scratch, num_args);
+  __ lsl(scratch, scratch, kPointerSizeLog2);
+  __ sub(last_arg, index, scratch);
+
+  // Set stack pointer and where to stop.
+  __ Mov(stack_addr, jssp);
+  __ Claim(scratch, 1);
+
+  // Push the arguments.
+  Label loop_header, loop_check;
+  __ B(&loop_check);
+  __ Bind(&loop_header);
+  // TODO(rmcilroy): Push two at a time once we ensure we keep stack aligned.
+  __ Ldr(scratch, MemOperand(index, -kPointerSize, PostIndex));
+  __ Str(scratch, MemOperand(stack_addr, -kPointerSize, PreIndex));
+  __ Bind(&loop_check);
+  __ Cmp(index, last_arg);
+  __ B(gt, &loop_header);
+}
+
 // static
 void Builtins::Generate_InterpreterPushArgsAndCallImpl(
     MacroAssembler* masm, TailCallMode tail_call_mode,
@@ -1182,24 +1235,13 @@
   //          they are to be pushed onto the stack.
   //  -- x1 : the target to call (can be any Object).
   // -----------------------------------
+  Label stack_overflow;
 
-  // Find the address of the last argument.
-  __ add(x3, x0, Operand(1));  // Add one for receiver.
-  __ lsl(x3, x3, kPointerSizeLog2);
-  __ sub(x4, x2, x3);
+  // Add one for the receiver.
+  __ add(x3, x0, Operand(1));
 
-  // Push the arguments.
-  Label loop_header, loop_check;
-  __ Mov(x5, jssp);
-  __ Claim(x3, 1);
-  __ B(&loop_check);
-  __ Bind(&loop_header);
-  // TODO(rmcilroy): Push two at a time once we ensure we keep stack aligned.
-  __ Ldr(x3, MemOperand(x2, -kPointerSize, PostIndex));
-  __ Str(x3, MemOperand(x5, -kPointerSize, PreIndex));
-  __ Bind(&loop_check);
-  __ Cmp(x2, x4);
-  __ B(gt, &loop_header);
+  // Push the arguments. x2, x4, x5, x6 will be modified.
+  Generate_InterpreterPushArgs(masm, x3, x2, x4, x5, x6, &stack_overflow);
 
   // Call the target.
   if (function_type == CallableType::kJSFunction) {
@@ -1212,42 +1254,82 @@
                                               tail_call_mode),
             RelocInfo::CODE_TARGET);
   }
+
+  __ bind(&stack_overflow);
+  {
+    __ TailCallRuntime(Runtime::kThrowStackOverflow);
+    __ Unreachable();
+  }
 }
 
 // static
-void Builtins::Generate_InterpreterPushArgsAndConstruct(MacroAssembler* masm) {
+void Builtins::Generate_InterpreterPushArgsAndConstructImpl(
+    MacroAssembler* masm, CallableType construct_type) {
   // ----------- S t a t e -------------
   // -- x0 : argument count (not including receiver)
   // -- x3 : new target
   // -- x1 : constructor to call
-  // -- x2 : address of the first argument
+  // -- x2 : allocation site feedback if available, undefined otherwise
+  // -- x4 : address of the first argument
   // -----------------------------------
-
-  // Find the address of the last argument.
-  __ add(x5, x0, Operand(1));  // Add one for receiver (to be constructed).
-  __ lsl(x5, x5, kPointerSizeLog2);
-
-  // Set stack pointer and where to stop.
-  __ Mov(x6, jssp);
-  __ Claim(x5, 1);
-  __ sub(x4, x6, x5);
+  Label stack_overflow;
 
   // Push a slot for the receiver.
-  __ Str(xzr, MemOperand(x6, -kPointerSize, PreIndex));
+  __ Push(xzr);
 
-  Label loop_header, loop_check;
-  // Push the arguments.
-  __ B(&loop_check);
-  __ Bind(&loop_header);
-  // TODO(rmcilroy): Push two at a time once we ensure we keep stack aligned.
-  __ Ldr(x5, MemOperand(x2, -kPointerSize, PostIndex));
-  __ Str(x5, MemOperand(x6, -kPointerSize, PreIndex));
-  __ Bind(&loop_check);
-  __ Cmp(x6, x4);
-  __ B(gt, &loop_header);
+  // Push the arguments. x5, x4, x6, x7 will be modified.
+  Generate_InterpreterPushArgs(masm, x0, x4, x5, x6, x7, &stack_overflow);
 
-  // Call the constructor with x0, x1, and x3 unmodified.
-  __ Jump(masm->isolate()->builtins()->Construct(), RelocInfo::CODE_TARGET);
+  __ AssertUndefinedOrAllocationSite(x2, x6);
+  if (construct_type == CallableType::kJSFunction) {
+    __ AssertFunction(x1);
+
+    // Tail call to the function-specific construct stub (still in the caller
+    // context at this point).
+    __ Ldr(x4, FieldMemOperand(x1, JSFunction::kSharedFunctionInfoOffset));
+    __ Ldr(x4, FieldMemOperand(x4, SharedFunctionInfo::kConstructStubOffset));
+    __ Add(x4, x4, Code::kHeaderSize - kHeapObjectTag);
+    __ Br(x4);
+  } else {
+    DCHECK_EQ(construct_type, CallableType::kAny);
+    // Call the constructor with x0, x1, and x3 unmodified.
+    __ Jump(masm->isolate()->builtins()->Construct(), RelocInfo::CODE_TARGET);
+  }
+
+  __ bind(&stack_overflow);
+  {
+    __ TailCallRuntime(Runtime::kThrowStackOverflow);
+    __ Unreachable();
+  }
+}
+
+// static
+void Builtins::Generate_InterpreterPushArgsAndConstructArray(
+    MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  // -- x0 : argument count (not including receiver)
+  // -- x1 : target to call verified to be Array function
+  // -- x2 : allocation site feedback if available, undefined otherwise.
+  // -- x3 : address of the first argument
+  // -----------------------------------
+  Label stack_overflow;
+
+  __ add(x4, x0, Operand(1));  // Add one for the receiver.
+
+  // Push the arguments. x3, x5, x6, x7 will be modified.
+  Generate_InterpreterPushArgs(masm, x4, x3, x5, x6, x7, &stack_overflow);
+
+  // Array constructor expects the constructor in x3; it is the same as the call target.
+  __ mov(x3, x1);
+
+  ArrayConstructorStub stub(masm->isolate());
+  __ TailCallStub(&stub);
+
+  __ bind(&stack_overflow);
+  {
+    __ TailCallRuntime(Runtime::kThrowStackOverflow);
+    __ Unreachable();
+  }
 }
 
 void Builtins::Generate_InterpreterEnterBytecodeDispatch(MacroAssembler* masm) {
@@ -1820,60 +1902,6 @@
 }
 
 // static
-void Builtins::Generate_DatePrototype_GetField(MacroAssembler* masm,
-                                               int field_index) {
-  // ----------- S t a t e -------------
-  //  -- x0      : number of arguments
-  //  -- x1      : function
-  //  -- cp      : context
-  //  -- lr      : return address
-  //  -- jssp[0] : receiver
-  // -----------------------------------
-  ASM_LOCATION("Builtins::Generate_DatePrototype_GetField");
-
-  // 1. Pop receiver into x0 and check that it's actually a JSDate object.
-  Label receiver_not_date;
-  {
-    __ Pop(x0);
-    __ JumpIfSmi(x0, &receiver_not_date);
-    __ JumpIfNotObjectType(x0, x2, x3, JS_DATE_TYPE, &receiver_not_date);
-  }
-
-  // 2. Load the specified date field, falling back to the runtime as necessary.
-  if (field_index == JSDate::kDateValue) {
-    __ Ldr(x0, FieldMemOperand(x0, JSDate::kValueOffset));
-  } else {
-    if (field_index < JSDate::kFirstUncachedField) {
-      Label stamp_mismatch;
-      __ Mov(x1, ExternalReference::date_cache_stamp(masm->isolate()));
-      __ Ldr(x1, MemOperand(x1));
-      __ Ldr(x2, FieldMemOperand(x0, JSDate::kCacheStampOffset));
-      __ Cmp(x1, x2);
-      __ B(ne, &stamp_mismatch);
-      __ Ldr(x0, FieldMemOperand(
-                     x0, JSDate::kValueOffset + field_index * kPointerSize));
-      __ Ret();
-      __ Bind(&stamp_mismatch);
-    }
-    FrameScope scope(masm, StackFrame::INTERNAL);
-    __ Mov(x1, Smi::FromInt(field_index));
-    __ CallCFunction(
-        ExternalReference::get_date_field_function(masm->isolate()), 2);
-  }
-  __ Ret();
-
-  // 3. Raise a TypeError if the receiver is not a date.
-  __ Bind(&receiver_not_date);
-  {
-    FrameScope scope(masm, StackFrame::MANUAL);
-    __ Push(x0);
-    __ Mov(x0, Smi::FromInt(0));
-    __ EnterBuiltinFrame(cp, x1, x0);
-    __ CallRuntime(Runtime::kThrowNotDateError);
-  }
-}
-
-// static
 void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
   // ----------- S t a t e -------------
   //  -- x0       : argc
@@ -2162,27 +2190,6 @@
   }
 }
 
-static void ArgumentAdaptorStackCheck(MacroAssembler* masm,
-                                      Label* stack_overflow) {
-  // ----------- S t a t e -------------
-  //  -- x0 : actual number of arguments
-  //  -- x1 : function (passed through to callee)
-  //  -- x2 : expected number of arguments
-  //  -- x3 : new target (passed through to callee)
-  // -----------------------------------
-  // Check the stack for overflow.
-  // We are not trying to catch interruptions (e.g. debug break and
-  // preemption) here, so the "real stack limit" is checked.
-  Label enough_stack_space;
-  __ LoadRoot(x10, Heap::kRealStackLimitRootIndex);
-  // Make x10 the space we have left. The stack might already be overflowed
-  // here which will cause x10 to become negative.
-  __ Sub(x10, jssp, x10);
-  // Check if the arguments will overflow the stack.
-  __ Cmp(x10, Operand(x2, LSL, kPointerSizeLog2));
-  __ B(le, stack_overflow);
-}
-
 static void EnterArgumentsAdaptorFrame(MacroAssembler* masm) {
   __ SmiTag(x10, x0);
   __ Mov(x11, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
@@ -2451,11 +2458,9 @@
   Label class_constructor;
   __ Ldr(x2, FieldMemOperand(x1, JSFunction::kSharedFunctionInfoOffset));
   __ Ldr(w3, FieldMemOperand(x2, SharedFunctionInfo::kCompilerHintsOffset));
-  __ TestAndBranchIfAnySet(
-      w3, (1 << SharedFunctionInfo::kIsDefaultConstructor) |
-              (1 << SharedFunctionInfo::kIsSubclassConstructor) |
-              (1 << SharedFunctionInfo::kIsBaseConstructor),
-      &class_constructor);
+  __ TestAndBranchIfAnySet(w3, FunctionKind::kClassConstructor
+                                   << SharedFunctionInfo::kFunctionKindShift,
+                           &class_constructor);
 
   // Enter the context of the function; ToObject has to run in the function
   // context, and we also need to take the global proxy from the function
@@ -2873,26 +2878,6 @@
   __ TailCallRuntime(Runtime::kAbort);
 }
 
-// static
-void Builtins::Generate_ToNumber(MacroAssembler* masm) {
-  // The ToNumber stub takes one argument in x0.
-  Label not_smi;
-  __ JumpIfNotSmi(x0, &not_smi);
-  __ Ret();
-  __ Bind(&not_smi);
-
-  Label not_heap_number;
-  __ CompareObjectType(x0, x1, x1, HEAP_NUMBER_TYPE);
-  // x0: receiver
-  // x1: receiver instance type
-  __ B(ne, &not_heap_number);
-  __ Ret();
-  __ Bind(&not_heap_number);
-
-  __ Jump(masm->isolate()->builtins()->NonNumberToNumber(),
-          RelocInfo::CODE_TARGET);
-}
-
 void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
   ASM_LOCATION("Builtins::Generate_ArgumentsAdaptorTrampoline");
   // ----------- S t a t e -------------
@@ -2917,7 +2902,7 @@
 
   {  // Enough parameters: actual >= expected
     EnterArgumentsAdaptorFrame(masm);
-    ArgumentAdaptorStackCheck(masm, &stack_overflow);
+    Generate_StackOverflowCheck(masm, x2, x10, &stack_overflow);
 
     Register copy_start = x10;
     Register copy_end = x11;
@@ -2964,7 +2949,7 @@
     Register scratch1 = x13, scratch2 = x14;
 
     EnterArgumentsAdaptorFrame(masm);
-    ArgumentAdaptorStackCheck(masm, &stack_overflow);
+    Generate_StackOverflowCheck(masm, x2, x10, &stack_overflow);
 
     __ Lsl(scratch2, argc_expected, kPointerSizeLog2);
     __ Lsl(argc_actual, argc_actual, kPointerSizeLog2);
diff --git a/src/builtins/builtins-array.cc b/src/builtins/builtins-array.cc
index 09ee4cc..b4969f1 100644
--- a/src/builtins/builtins-array.cc
+++ b/src/builtins/builtins-array.cc
@@ -1269,24 +1269,24 @@
   Node* start_from = assembler->Parameter(2);
   Node* context = assembler->Parameter(3 + 2);
 
-  Node* int32_zero = assembler->Int32Constant(0);
-  Node* int32_one = assembler->Int32Constant(1);
+  Node* intptr_zero = assembler->IntPtrConstant(0);
+  Node* intptr_one = assembler->IntPtrConstant(1);
 
   Node* the_hole = assembler->TheHoleConstant();
   Node* undefined = assembler->UndefinedConstant();
   Node* heap_number_map = assembler->HeapNumberMapConstant();
 
-  Variable len_var(assembler, MachineRepresentation::kWord32),
-      index_var(assembler, MachineRepresentation::kWord32),
-      start_from_var(assembler, MachineRepresentation::kWord32);
+  Variable len_var(assembler, MachineType::PointerRepresentation()),
+      index_var(assembler, MachineType::PointerRepresentation()),
+      start_from_var(assembler, MachineType::PointerRepresentation());
 
   Label init_k(assembler), return_true(assembler), return_false(assembler),
       call_runtime(assembler);
 
   Label init_len(assembler);
 
-  index_var.Bind(int32_zero);
-  len_var.Bind(int32_zero);
+  index_var.Bind(intptr_zero);
+  len_var.Bind(intptr_zero);
 
   // Take slow path if not a JSArray, if retrieving elements requires
   // traversing prototype, or if access checks are required.
@@ -1299,7 +1299,7 @@
     assembler->GotoUnless(assembler->WordIsSmi(len), &call_runtime);
 
     len_var.Bind(assembler->SmiToWord(len));
-    assembler->Branch(assembler->Word32Equal(len_var.value(), int32_zero),
+    assembler->Branch(assembler->WordEqual(len_var.value(), intptr_zero),
                       &return_false, &init_k);
   }
 
@@ -1307,31 +1307,32 @@
   {
     Label done(assembler), init_k_smi(assembler), init_k_heap_num(assembler),
         init_k_zero(assembler), init_k_n(assembler);
-    Callable call_to_integer = CodeFactory::ToInteger(assembler->isolate());
-    Node* tagged_n = assembler->CallStub(call_to_integer, context, start_from);
+    Node* tagged_n = assembler->ToInteger(context, start_from);
 
     assembler->Branch(assembler->WordIsSmi(tagged_n), &init_k_smi,
                       &init_k_heap_num);
 
     assembler->Bind(&init_k_smi);
     {
-      start_from_var.Bind(assembler->SmiToWord32(tagged_n));
+      start_from_var.Bind(assembler->SmiUntag(tagged_n));
       assembler->Goto(&init_k_n);
     }
 
     assembler->Bind(&init_k_heap_num);
     {
       Label do_return_false(assembler);
-      Node* fp_len = assembler->ChangeInt32ToFloat64(len_var.value());
+      // This round is lossless for all valid lengths.
+      Node* fp_len = assembler->RoundIntPtrToFloat64(len_var.value());
       Node* fp_n = assembler->LoadHeapNumberValue(tagged_n);
       assembler->GotoIf(assembler->Float64GreaterThanOrEqual(fp_n, fp_len),
                         &do_return_false);
-      start_from_var.Bind(assembler->TruncateFloat64ToWord32(fp_n));
+      start_from_var.Bind(assembler->ChangeInt32ToIntPtr(
+          assembler->TruncateFloat64ToWord32(fp_n)));
       assembler->Goto(&init_k_n);
 
       assembler->Bind(&do_return_false);
       {
-        index_var.Bind(int32_zero);
+        index_var.Bind(intptr_zero);
         assembler->Goto(&return_false);
       }
     }
@@ -1340,7 +1341,7 @@
     {
       Label if_positive(assembler), if_negative(assembler), done(assembler);
       assembler->Branch(
-          assembler->Int32LessThan(start_from_var.value(), int32_zero),
+          assembler->IntPtrLessThan(start_from_var.value(), intptr_zero),
           &if_negative, &if_positive);
 
       assembler->Bind(&if_positive);
@@ -1352,15 +1353,15 @@
       assembler->Bind(&if_negative);
       {
         index_var.Bind(
-            assembler->Int32Add(len_var.value(), start_from_var.value()));
+            assembler->IntPtrAdd(len_var.value(), start_from_var.value()));
         assembler->Branch(
-            assembler->Int32LessThan(index_var.value(), int32_zero),
+            assembler->IntPtrLessThan(index_var.value(), intptr_zero),
             &init_k_zero, &done);
       }
 
       assembler->Bind(&init_k_zero);
       {
-        index_var.Bind(int32_zero);
+        index_var.Bind(intptr_zero);
         assembler->Goto(&done);
       }
 
@@ -1380,9 +1381,7 @@
                                     &if_packed_doubles, &if_holey_doubles};
 
   Node* map = assembler->LoadMap(array);
-  Node* bit_field2 = assembler->LoadMapBitField2(map);
-  Node* elements_kind =
-      assembler->BitFieldDecode<Map::ElementsKindBits>(bit_field2);
+  Node* elements_kind = assembler->LoadMapElementsKind(map);
   Node* elements = assembler->LoadElements(array);
   assembler->Switch(elements_kind, &return_false, kElementsKind,
                     element_kind_handlers, arraysize(kElementsKind));
@@ -1411,43 +1410,41 @@
 
     assembler->Bind(&not_heap_num);
     Node* search_type = assembler->LoadMapInstanceType(map);
+    assembler->GotoIf(assembler->IsStringInstanceType(search_type),
+                      &string_loop);
     assembler->GotoIf(
-        assembler->Int32LessThan(
-            search_type, assembler->Int32Constant(FIRST_NONSTRING_TYPE)),
-        &string_loop);
-    assembler->GotoIf(
-        assembler->WordEqual(search_type,
-                             assembler->Int32Constant(SIMD128_VALUE_TYPE)),
+        assembler->Word32Equal(search_type,
+                               assembler->Int32Constant(SIMD128_VALUE_TYPE)),
         &simd_loop);
     assembler->Goto(&ident_loop);
 
     assembler->Bind(&ident_loop);
     {
       assembler->GotoUnless(
-          assembler->Int32LessThan(index_var.value(), len_var.value()),
+          assembler->UintPtrLessThan(index_var.value(), len_var.value()),
           &return_false);
-      Node* element_k =
-          assembler->LoadFixedArrayElement(elements, index_var.value());
+      Node* element_k = assembler->LoadFixedArrayElement(
+          elements, index_var.value(), 0, CodeStubAssembler::INTPTR_PARAMETERS);
       assembler->GotoIf(assembler->WordEqual(element_k, search_element),
                         &return_true);
 
-      index_var.Bind(assembler->Int32Add(index_var.value(), int32_one));
+      index_var.Bind(assembler->IntPtrAdd(index_var.value(), intptr_one));
       assembler->Goto(&ident_loop);
     }
 
     assembler->Bind(&undef_loop);
     {
       assembler->GotoUnless(
-          assembler->Int32LessThan(index_var.value(), len_var.value()),
+          assembler->UintPtrLessThan(index_var.value(), len_var.value()),
           &return_false);
-      Node* element_k =
-          assembler->LoadFixedArrayElement(elements, index_var.value());
+      Node* element_k = assembler->LoadFixedArrayElement(
+          elements, index_var.value(), 0, CodeStubAssembler::INTPTR_PARAMETERS);
       assembler->GotoIf(assembler->WordEqual(element_k, undefined),
                         &return_true);
       assembler->GotoIf(assembler->WordEqual(element_k, the_hole),
                         &return_true);
 
-      index_var.Bind(assembler->Int32Add(index_var.value(), int32_one));
+      index_var.Bind(assembler->IntPtrAdd(index_var.value(), intptr_one));
       assembler->Goto(&undef_loop);
     }
 
@@ -1462,10 +1459,11 @@
       {
         Label continue_loop(assembler), not_smi(assembler);
         assembler->GotoUnless(
-            assembler->Int32LessThan(index_var.value(), len_var.value()),
+            assembler->UintPtrLessThan(index_var.value(), len_var.value()),
             &return_false);
-        Node* element_k =
-            assembler->LoadFixedArrayElement(elements, index_var.value());
+        Node* element_k = assembler->LoadFixedArrayElement(
+            elements, index_var.value(), 0,
+            CodeStubAssembler::INTPTR_PARAMETERS);
         assembler->GotoUnless(assembler->WordIsSmi(element_k), &not_smi);
         assembler->Branch(
             assembler->Float64Equal(search_num.value(),
@@ -1481,7 +1479,7 @@
             &return_true, &continue_loop);
 
         assembler->Bind(&continue_loop);
-        index_var.Bind(assembler->Int32Add(index_var.value(), int32_one));
+        index_var.Bind(assembler->IntPtrAdd(index_var.value(), intptr_one));
         assembler->Goto(&not_nan_loop);
       }
 
@@ -1489,10 +1487,11 @@
       {
         Label continue_loop(assembler);
         assembler->GotoUnless(
-            assembler->Int32LessThan(index_var.value(), len_var.value()),
+            assembler->UintPtrLessThan(index_var.value(), len_var.value()),
             &return_false);
-        Node* element_k =
-            assembler->LoadFixedArrayElement(elements, index_var.value());
+        Node* element_k = assembler->LoadFixedArrayElement(
+            elements, index_var.value(), 0,
+            CodeStubAssembler::INTPTR_PARAMETERS);
         assembler->GotoIf(assembler->WordIsSmi(element_k), &continue_loop);
         assembler->GotoIf(assembler->WordNotEqual(assembler->LoadMap(element_k),
                                                   heap_number_map),
@@ -1502,7 +1501,7 @@
             &continue_loop);
 
         assembler->Bind(&continue_loop);
-        index_var.Bind(assembler->Int32Add(index_var.value(), int32_one));
+        index_var.Bind(assembler->IntPtrAdd(index_var.value(), intptr_one));
         assembler->Goto(&nan_loop);
       }
     }
@@ -1511,14 +1510,13 @@
     {
       Label continue_loop(assembler);
       assembler->GotoUnless(
-          assembler->Int32LessThan(index_var.value(), len_var.value()),
+          assembler->UintPtrLessThan(index_var.value(), len_var.value()),
           &return_false);
-      Node* element_k =
-          assembler->LoadFixedArrayElement(elements, index_var.value());
+      Node* element_k = assembler->LoadFixedArrayElement(
+          elements, index_var.value(), 0, CodeStubAssembler::INTPTR_PARAMETERS);
       assembler->GotoIf(assembler->WordIsSmi(element_k), &continue_loop);
-      assembler->GotoUnless(assembler->Int32LessThan(
-                                assembler->LoadInstanceType(element_k),
-                                assembler->Int32Constant(FIRST_NONSTRING_TYPE)),
+      assembler->GotoUnless(assembler->IsStringInstanceType(
+                                assembler->LoadInstanceType(element_k)),
                             &continue_loop);
 
       // TODO(bmeurer): Consider inlining the StringEqual logic here.
@@ -1530,7 +1528,7 @@
           &return_true, &continue_loop);
 
       assembler->Bind(&continue_loop);
-      index_var.Bind(assembler->Int32Add(index_var.value(), int32_one));
+      index_var.Bind(assembler->IntPtrAdd(index_var.value(), intptr_one));
       assembler->Goto(&string_loop);
     }
 
@@ -1543,11 +1541,11 @@
       assembler->Goto(&loop_body);
       assembler->Bind(&loop_body);
       assembler->GotoUnless(
-          assembler->Int32LessThan(index_var.value(), len_var.value()),
+          assembler->UintPtrLessThan(index_var.value(), len_var.value()),
           &return_false);
 
-      Node* element_k =
-          assembler->LoadFixedArrayElement(elements, index_var.value());
+      Node* element_k = assembler->LoadFixedArrayElement(
+          elements, index_var.value(), 0, CodeStubAssembler::INTPTR_PARAMETERS);
       assembler->GotoIf(assembler->WordIsSmi(element_k), &continue_loop);
 
       Node* map_k = assembler->LoadMap(element_k);
@@ -1555,7 +1553,7 @@
                                       &return_true, &continue_loop);
 
       assembler->Bind(&continue_loop);
-      index_var.Bind(assembler->Int32Add(index_var.value(), int32_one));
+      index_var.Bind(assembler->IntPtrAdd(index_var.value(), intptr_one));
       assembler->Goto(&loop_body);
     }
   }
@@ -1585,14 +1583,15 @@
     {
       Label continue_loop(assembler);
       assembler->GotoUnless(
-          assembler->Int32LessThan(index_var.value(), len_var.value()),
+          assembler->UintPtrLessThan(index_var.value(), len_var.value()),
           &return_false);
       Node* element_k = assembler->LoadFixedDoubleArrayElement(
-          elements, index_var.value(), MachineType::Float64());
+          elements, index_var.value(), MachineType::Float64(), 0,
+          CodeStubAssembler::INTPTR_PARAMETERS);
       assembler->BranchIfFloat64Equal(element_k, search_num.value(),
                                       &return_true, &continue_loop);
       assembler->Bind(&continue_loop);
-      index_var.Bind(assembler->Int32Add(index_var.value(), int32_one));
+      index_var.Bind(assembler->IntPtrAdd(index_var.value(), intptr_one));
       assembler->Goto(&not_nan_loop);
     }
 
@@ -1601,13 +1600,14 @@
     {
       Label continue_loop(assembler);
       assembler->GotoUnless(
-          assembler->Int32LessThan(index_var.value(), len_var.value()),
+          assembler->UintPtrLessThan(index_var.value(), len_var.value()),
           &return_false);
       Node* element_k = assembler->LoadFixedDoubleArrayElement(
-          elements, index_var.value(), MachineType::Float64());
+          elements, index_var.value(), MachineType::Float64(), 0,
+          CodeStubAssembler::INTPTR_PARAMETERS);
       assembler->BranchIfFloat64IsNaN(element_k, &return_true, &continue_loop);
       assembler->Bind(&continue_loop);
-      index_var.Bind(assembler->Int32Add(index_var.value(), int32_one));
+      index_var.Bind(assembler->IntPtrAdd(index_var.value(), intptr_one));
       assembler->Goto(&nan_loop);
     }
   }
@@ -1639,31 +1639,18 @@
     {
       Label continue_loop(assembler);
       assembler->GotoUnless(
-          assembler->Int32LessThan(index_var.value(), len_var.value()),
+          assembler->UintPtrLessThan(index_var.value(), len_var.value()),
           &return_false);
 
-      if (kPointerSize == kDoubleSize) {
-        Node* element = assembler->LoadFixedDoubleArrayElement(
-            elements, index_var.value(), MachineType::Uint64());
-        Node* the_hole = assembler->Int64Constant(kHoleNanInt64);
-        assembler->GotoIf(assembler->Word64Equal(element, the_hole),
-                          &continue_loop);
-      } else {
-        Node* element_upper = assembler->LoadFixedDoubleArrayElement(
-            elements, index_var.value(), MachineType::Uint32(),
-            kIeeeDoubleExponentWordOffset);
-        assembler->GotoIf(
-            assembler->Word32Equal(element_upper,
-                                   assembler->Int32Constant(kHoleNanUpper32)),
-            &continue_loop);
-      }
-
+      // Load double value or continue if it contains a double hole.
       Node* element_k = assembler->LoadFixedDoubleArrayElement(
-          elements, index_var.value(), MachineType::Float64());
+          elements, index_var.value(), MachineType::Float64(), 0,
+          CodeStubAssembler::INTPTR_PARAMETERS, &continue_loop);
+
       assembler->BranchIfFloat64Equal(element_k, search_num.value(),
                                       &return_true, &continue_loop);
       assembler->Bind(&continue_loop);
-      index_var.Bind(assembler->Int32Add(index_var.value(), int32_one));
+      index_var.Bind(assembler->IntPtrAdd(index_var.value(), intptr_one));
       assembler->Goto(&not_nan_loop);
     }
 
@@ -1672,30 +1659,17 @@
     {
       Label continue_loop(assembler);
       assembler->GotoUnless(
-          assembler->Int32LessThan(index_var.value(), len_var.value()),
+          assembler->UintPtrLessThan(index_var.value(), len_var.value()),
           &return_false);
 
-      if (kPointerSize == kDoubleSize) {
-        Node* element = assembler->LoadFixedDoubleArrayElement(
-            elements, index_var.value(), MachineType::Uint64());
-        Node* the_hole = assembler->Int64Constant(kHoleNanInt64);
-        assembler->GotoIf(assembler->Word64Equal(element, the_hole),
-                          &continue_loop);
-      } else {
-        Node* element_upper = assembler->LoadFixedDoubleArrayElement(
-            elements, index_var.value(), MachineType::Uint32(),
-            kIeeeDoubleExponentWordOffset);
-        assembler->GotoIf(
-            assembler->Word32Equal(element_upper,
-                                   assembler->Int32Constant(kHoleNanUpper32)),
-            &continue_loop);
-      }
-
+      // Load double value or continue if it contains a double hole.
       Node* element_k = assembler->LoadFixedDoubleArrayElement(
-          elements, index_var.value(), MachineType::Float64());
+          elements, index_var.value(), MachineType::Float64(), 0,
+          CodeStubAssembler::INTPTR_PARAMETERS, &continue_loop);
+
       assembler->BranchIfFloat64IsNaN(element_k, &return_true, &continue_loop);
       assembler->Bind(&continue_loop);
-      index_var.Bind(assembler->Int32Add(index_var.value(), int32_one));
+      index_var.Bind(assembler->IntPtrAdd(index_var.value(), intptr_one));
       assembler->Goto(&nan_loop);
     }
 
@@ -1703,26 +1677,15 @@
     assembler->Bind(&hole_loop);
     {
       assembler->GotoUnless(
-          assembler->Int32LessThan(index_var.value(), len_var.value()),
+          assembler->UintPtrLessThan(index_var.value(), len_var.value()),
           &return_false);
 
-      if (kPointerSize == kDoubleSize) {
-        Node* element = assembler->LoadFixedDoubleArrayElement(
-            elements, index_var.value(), MachineType::Uint64());
-        Node* the_hole = assembler->Int64Constant(kHoleNanInt64);
-        assembler->GotoIf(assembler->Word64Equal(element, the_hole),
-                          &return_true);
-      } else {
-        Node* element_upper = assembler->LoadFixedDoubleArrayElement(
-            elements, index_var.value(), MachineType::Uint32(),
-            kIeeeDoubleExponentWordOffset);
-        assembler->GotoIf(
-            assembler->Word32Equal(element_upper,
-                                   assembler->Int32Constant(kHoleNanUpper32)),
-            &return_true);
-      }
+      // Check if the element is a double hole, but don't load it.
+      assembler->LoadFixedDoubleArrayElement(
+          elements, index_var.value(), MachineType::None(), 0,
+          CodeStubAssembler::INTPTR_PARAMETERS, &return_true);
 
-      index_var.Bind(assembler->Int32Add(index_var.value(), int32_one));
+      index_var.Bind(assembler->IntPtrAdd(index_var.value(), intptr_one));
       assembler->Goto(&hole_loop);
     }
   }
@@ -1749,23 +1712,23 @@
   Node* start_from = assembler->Parameter(2);
   Node* context = assembler->Parameter(3 + 2);
 
-  Node* int32_zero = assembler->Int32Constant(0);
-  Node* int32_one = assembler->Int32Constant(1);
+  Node* intptr_zero = assembler->IntPtrConstant(0);
+  Node* intptr_one = assembler->IntPtrConstant(1);
 
   Node* undefined = assembler->UndefinedConstant();
   Node* heap_number_map = assembler->HeapNumberMapConstant();
 
-  Variable len_var(assembler, MachineRepresentation::kWord32),
-      index_var(assembler, MachineRepresentation::kWord32),
-      start_from_var(assembler, MachineRepresentation::kWord32);
+  Variable len_var(assembler, MachineType::PointerRepresentation()),
+      index_var(assembler, MachineType::PointerRepresentation()),
+      start_from_var(assembler, MachineType::PointerRepresentation());
 
   Label init_k(assembler), return_found(assembler), return_not_found(assembler),
       call_runtime(assembler);
 
   Label init_len(assembler);
 
-  index_var.Bind(int32_zero);
-  len_var.Bind(int32_zero);
+  index_var.Bind(intptr_zero);
+  len_var.Bind(intptr_zero);
 
   // Take slow path if not a JSArray, if retrieving elements requires
   // traversing prototype, or if access checks are required.
@@ -1778,7 +1741,7 @@
     assembler->GotoUnless(assembler->WordIsSmi(len), &call_runtime);
 
     len_var.Bind(assembler->SmiToWord(len));
-    assembler->Branch(assembler->Word32Equal(len_var.value(), int32_zero),
+    assembler->Branch(assembler->WordEqual(len_var.value(), intptr_zero),
                       &return_not_found, &init_k);
   }
 
@@ -1786,31 +1749,32 @@
   {
     Label done(assembler), init_k_smi(assembler), init_k_heap_num(assembler),
         init_k_zero(assembler), init_k_n(assembler);
-    Callable call_to_integer = CodeFactory::ToInteger(assembler->isolate());
-    Node* tagged_n = assembler->CallStub(call_to_integer, context, start_from);
+    Node* tagged_n = assembler->ToInteger(context, start_from);
 
     assembler->Branch(assembler->WordIsSmi(tagged_n), &init_k_smi,
                       &init_k_heap_num);
 
     assembler->Bind(&init_k_smi);
     {
-      start_from_var.Bind(assembler->SmiToWord32(tagged_n));
+      start_from_var.Bind(assembler->SmiUntag(tagged_n));
       assembler->Goto(&init_k_n);
     }
 
     assembler->Bind(&init_k_heap_num);
     {
       Label do_return_not_found(assembler);
-      Node* fp_len = assembler->ChangeInt32ToFloat64(len_var.value());
+      // This round is lossless for all valid lengths.
+      Node* fp_len = assembler->RoundIntPtrToFloat64(len_var.value());
       Node* fp_n = assembler->LoadHeapNumberValue(tagged_n);
       assembler->GotoIf(assembler->Float64GreaterThanOrEqual(fp_n, fp_len),
                         &do_return_not_found);
-      start_from_var.Bind(assembler->TruncateFloat64ToWord32(fp_n));
+      start_from_var.Bind(assembler->ChangeInt32ToIntPtr(
+          assembler->TruncateFloat64ToWord32(fp_n)));
       assembler->Goto(&init_k_n);
 
       assembler->Bind(&do_return_not_found);
       {
-        index_var.Bind(int32_zero);
+        index_var.Bind(intptr_zero);
         assembler->Goto(&return_not_found);
       }
     }
@@ -1819,7 +1783,7 @@
     {
       Label if_positive(assembler), if_negative(assembler), done(assembler);
       assembler->Branch(
-          assembler->Int32LessThan(start_from_var.value(), int32_zero),
+          assembler->IntPtrLessThan(start_from_var.value(), intptr_zero),
           &if_negative, &if_positive);
 
       assembler->Bind(&if_positive);
@@ -1831,15 +1795,15 @@
       assembler->Bind(&if_negative);
       {
         index_var.Bind(
-            assembler->Int32Add(len_var.value(), start_from_var.value()));
+            assembler->IntPtrAdd(len_var.value(), start_from_var.value()));
         assembler->Branch(
-            assembler->Int32LessThan(index_var.value(), int32_zero),
+            assembler->IntPtrLessThan(index_var.value(), intptr_zero),
             &init_k_zero, &done);
       }
 
       assembler->Bind(&init_k_zero);
       {
-        index_var.Bind(int32_zero);
+        index_var.Bind(intptr_zero);
         assembler->Goto(&done);
       }
 
@@ -1859,9 +1823,7 @@
                                     &if_packed_doubles, &if_holey_doubles};
 
   Node* map = assembler->LoadMap(array);
-  Node* bit_field2 = assembler->LoadMapBitField2(map);
-  Node* elements_kind =
-      assembler->BitFieldDecode<Map::ElementsKindBits>(bit_field2);
+  Node* elements_kind = assembler->LoadMapElementsKind(map);
   Node* elements = assembler->LoadElements(array);
   assembler->Switch(elements_kind, &return_not_found, kElementsKind,
                     element_kind_handlers, arraysize(kElementsKind));
@@ -1890,41 +1852,39 @@
 
     assembler->Bind(&not_heap_num);
     Node* search_type = assembler->LoadMapInstanceType(map);
+    assembler->GotoIf(assembler->IsStringInstanceType(search_type),
+                      &string_loop);
     assembler->GotoIf(
-        assembler->Int32LessThan(
-            search_type, assembler->Int32Constant(FIRST_NONSTRING_TYPE)),
-        &string_loop);
-    assembler->GotoIf(
-        assembler->WordEqual(search_type,
-                             assembler->Int32Constant(SIMD128_VALUE_TYPE)),
+        assembler->Word32Equal(search_type,
+                               assembler->Int32Constant(SIMD128_VALUE_TYPE)),
         &simd_loop);
     assembler->Goto(&ident_loop);
 
     assembler->Bind(&ident_loop);
     {
       assembler->GotoUnless(
-          assembler->Int32LessThan(index_var.value(), len_var.value()),
+          assembler->UintPtrLessThan(index_var.value(), len_var.value()),
           &return_not_found);
-      Node* element_k =
-          assembler->LoadFixedArrayElement(elements, index_var.value());
+      Node* element_k = assembler->LoadFixedArrayElement(
+          elements, index_var.value(), 0, CodeStubAssembler::INTPTR_PARAMETERS);
       assembler->GotoIf(assembler->WordEqual(element_k, search_element),
                         &return_found);
 
-      index_var.Bind(assembler->Int32Add(index_var.value(), int32_one));
+      index_var.Bind(assembler->IntPtrAdd(index_var.value(), intptr_one));
       assembler->Goto(&ident_loop);
     }
 
     assembler->Bind(&undef_loop);
     {
       assembler->GotoUnless(
-          assembler->Int32LessThan(index_var.value(), len_var.value()),
+          assembler->UintPtrLessThan(index_var.value(), len_var.value()),
           &return_not_found);
-      Node* element_k =
-          assembler->LoadFixedArrayElement(elements, index_var.value());
+      Node* element_k = assembler->LoadFixedArrayElement(
+          elements, index_var.value(), 0, CodeStubAssembler::INTPTR_PARAMETERS);
       assembler->GotoIf(assembler->WordEqual(element_k, undefined),
                         &return_found);
 
-      index_var.Bind(assembler->Int32Add(index_var.value(), int32_one));
+      index_var.Bind(assembler->IntPtrAdd(index_var.value(), intptr_one));
       assembler->Goto(&undef_loop);
     }
 
@@ -1938,10 +1898,11 @@
       {
         Label continue_loop(assembler), not_smi(assembler);
         assembler->GotoUnless(
-            assembler->Int32LessThan(index_var.value(), len_var.value()),
+            assembler->UintPtrLessThan(index_var.value(), len_var.value()),
             &return_not_found);
-        Node* element_k =
-            assembler->LoadFixedArrayElement(elements, index_var.value());
+        Node* element_k = assembler->LoadFixedArrayElement(
+            elements, index_var.value(), 0,
+            CodeStubAssembler::INTPTR_PARAMETERS);
         assembler->GotoUnless(assembler->WordIsSmi(element_k), &not_smi);
         assembler->Branch(
             assembler->Float64Equal(search_num.value(),
@@ -1957,7 +1918,7 @@
             &return_found, &continue_loop);
 
         assembler->Bind(&continue_loop);
-        index_var.Bind(assembler->Int32Add(index_var.value(), int32_one));
+        index_var.Bind(assembler->IntPtrAdd(index_var.value(), intptr_one));
         assembler->Goto(&not_nan_loop);
       }
     }
@@ -1966,14 +1927,13 @@
     {
       Label continue_loop(assembler);
       assembler->GotoUnless(
-          assembler->Int32LessThan(index_var.value(), len_var.value()),
+          assembler->UintPtrLessThan(index_var.value(), len_var.value()),
           &return_not_found);
-      Node* element_k =
-          assembler->LoadFixedArrayElement(elements, index_var.value());
+      Node* element_k = assembler->LoadFixedArrayElement(
+          elements, index_var.value(), 0, CodeStubAssembler::INTPTR_PARAMETERS);
       assembler->GotoIf(assembler->WordIsSmi(element_k), &continue_loop);
-      assembler->GotoUnless(assembler->Int32LessThan(
-                                assembler->LoadInstanceType(element_k),
-                                assembler->Int32Constant(FIRST_NONSTRING_TYPE)),
+      assembler->GotoUnless(assembler->IsStringInstanceType(
+                                assembler->LoadInstanceType(element_k)),
                             &continue_loop);
 
       // TODO(bmeurer): Consider inlining the StringEqual logic here.
@@ -1985,7 +1945,7 @@
           &return_found, &continue_loop);
 
       assembler->Bind(&continue_loop);
-      index_var.Bind(assembler->Int32Add(index_var.value(), int32_one));
+      index_var.Bind(assembler->IntPtrAdd(index_var.value(), intptr_one));
       assembler->Goto(&string_loop);
     }
 
@@ -1998,11 +1958,11 @@
       assembler->Goto(&loop_body);
       assembler->Bind(&loop_body);
       assembler->GotoUnless(
-          assembler->Int32LessThan(index_var.value(), len_var.value()),
+          assembler->UintPtrLessThan(index_var.value(), len_var.value()),
           &return_not_found);
 
-      Node* element_k =
-          assembler->LoadFixedArrayElement(elements, index_var.value());
+      Node* element_k = assembler->LoadFixedArrayElement(
+          elements, index_var.value(), 0, CodeStubAssembler::INTPTR_PARAMETERS);
       assembler->GotoIf(assembler->WordIsSmi(element_k), &continue_loop);
 
       Node* map_k = assembler->LoadMap(element_k);
@@ -2010,7 +1970,7 @@
                                       &return_found, &continue_loop);
 
       assembler->Bind(&continue_loop);
-      index_var.Bind(assembler->Int32Add(index_var.value(), int32_one));
+      index_var.Bind(assembler->IntPtrAdd(index_var.value(), intptr_one));
       assembler->Goto(&loop_body);
     }
   }
@@ -2039,14 +1999,15 @@
     {
       Label continue_loop(assembler);
       assembler->GotoUnless(
-          assembler->Int32LessThan(index_var.value(), len_var.value()),
+          assembler->UintPtrLessThan(index_var.value(), len_var.value()),
           &return_not_found);
       Node* element_k = assembler->LoadFixedDoubleArrayElement(
-          elements, index_var.value(), MachineType::Float64());
+          elements, index_var.value(), MachineType::Float64(), 0,
+          CodeStubAssembler::INTPTR_PARAMETERS);
       assembler->BranchIfFloat64Equal(element_k, search_num.value(),
                                       &return_found, &continue_loop);
       assembler->Bind(&continue_loop);
-      index_var.Bind(assembler->Int32Add(index_var.value(), int32_one));
+      index_var.Bind(assembler->IntPtrAdd(index_var.value(), intptr_one));
       assembler->Goto(&not_nan_loop);
     }
   }
@@ -2075,31 +2036,18 @@
     {
       Label continue_loop(assembler);
       assembler->GotoUnless(
-          assembler->Int32LessThan(index_var.value(), len_var.value()),
+          assembler->UintPtrLessThan(index_var.value(), len_var.value()),
           &return_not_found);
 
-      if (kPointerSize == kDoubleSize) {
-        Node* element = assembler->LoadFixedDoubleArrayElement(
-            elements, index_var.value(), MachineType::Uint64());
-        Node* the_hole = assembler->Int64Constant(kHoleNanInt64);
-        assembler->GotoIf(assembler->Word64Equal(element, the_hole),
-                          &continue_loop);
-      } else {
-        Node* element_upper = assembler->LoadFixedDoubleArrayElement(
-            elements, index_var.value(), MachineType::Uint32(),
-            kIeeeDoubleExponentWordOffset);
-        assembler->GotoIf(
-            assembler->Word32Equal(element_upper,
-                                   assembler->Int32Constant(kHoleNanUpper32)),
-            &continue_loop);
-      }
-
+      // Load double value or continue if it contains a double hole.
       Node* element_k = assembler->LoadFixedDoubleArrayElement(
-          elements, index_var.value(), MachineType::Float64());
+          elements, index_var.value(), MachineType::Float64(), 0,
+          CodeStubAssembler::INTPTR_PARAMETERS, &continue_loop);
+
       assembler->BranchIfFloat64Equal(element_k, search_num.value(),
                                       &return_found, &continue_loop);
       assembler->Bind(&continue_loop);
-      index_var.Bind(assembler->Int32Add(index_var.value(), int32_one));
+      index_var.Bind(assembler->IntPtrAdd(index_var.value(), intptr_one));
       assembler->Goto(&not_nan_loop);
     }
   }
diff --git a/src/builtins/builtins-callsite.cc b/src/builtins/builtins-callsite.cc
index 7fc2f98..ae9c76d 100644
--- a/src/builtins/builtins-callsite.cc
+++ b/src/builtins/builtins-callsite.cc
@@ -14,7 +14,7 @@
 #define CHECK_CALLSITE(recv, method)                                          \
   CHECK_RECEIVER(JSObject, recv, method);                                     \
   if (!JSReceiver::HasOwnProperty(                                            \
-           recv, isolate->factory()->call_site_position_symbol())             \
+           recv, isolate->factory()->call_site_frame_array_symbol())          \
            .FromMaybe(false)) {                                               \
     THROW_NEW_ERROR_RETURN_FAILURE(                                           \
         isolate,                                                              \
@@ -29,172 +29,152 @@
   return isolate->heap()->null_value();
 }
 
+Handle<FrameArray> GetFrameArray(Isolate* isolate, Handle<JSObject> object) {
+  Handle<Object> frame_array_obj = JSObject::GetDataProperty(
+      object, isolate->factory()->call_site_frame_array_symbol());
+  return Handle<FrameArray>::cast(frame_array_obj);
+}
+
+int GetFrameIndex(Isolate* isolate, Handle<JSObject> object) {
+  Handle<Object> frame_index_obj = JSObject::GetDataProperty(
+      object, isolate->factory()->call_site_frame_index_symbol());
+  return Smi::cast(*frame_index_obj)->value();
+}
+
 }  // namespace
 
 BUILTIN(CallSitePrototypeGetColumnNumber) {
   HandleScope scope(isolate);
   CHECK_CALLSITE(recv, "getColumnNumber");
-
-  CallSite call_site(isolate, recv);
-  CHECK(call_site.IsJavaScript() || call_site.IsWasm());
-  return PositiveNumberOrNull(call_site.GetColumnNumber(), isolate);
+  FrameArrayIterator it(isolate, GetFrameArray(isolate, recv),
+                        GetFrameIndex(isolate, recv));
+  return PositiveNumberOrNull(it.Frame()->GetColumnNumber(), isolate);
 }
 
 BUILTIN(CallSitePrototypeGetEvalOrigin) {
   HandleScope scope(isolate);
   CHECK_CALLSITE(recv, "getEvalOrigin");
-
-  CallSite call_site(isolate, recv);
-  CHECK(call_site.IsJavaScript() || call_site.IsWasm());
-  return *call_site.GetEvalOrigin();
+  FrameArrayIterator it(isolate, GetFrameArray(isolate, recv),
+                        GetFrameIndex(isolate, recv));
+  return *it.Frame()->GetEvalOrigin();
 }
 
 BUILTIN(CallSitePrototypeGetFileName) {
   HandleScope scope(isolate);
   CHECK_CALLSITE(recv, "getFileName");
-
-  CallSite call_site(isolate, recv);
-  CHECK(call_site.IsJavaScript() || call_site.IsWasm());
-  return *call_site.GetFileName();
+  FrameArrayIterator it(isolate, GetFrameArray(isolate, recv),
+                        GetFrameIndex(isolate, recv));
+  return *it.Frame()->GetFileName();
 }
 
-namespace {
-
-bool CallSiteIsStrict(Isolate* isolate, Handle<JSObject> receiver) {
-  Handle<Object> strict;
-  Handle<Symbol> symbol = isolate->factory()->call_site_strict_symbol();
-  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, strict,
-                                     JSObject::GetProperty(receiver, symbol));
-  return strict->BooleanValue();
-}
-
-}  // namespace
-
 BUILTIN(CallSitePrototypeGetFunction) {
   HandleScope scope(isolate);
   CHECK_CALLSITE(recv, "getFunction");
+  FrameArrayIterator it(isolate, GetFrameArray(isolate, recv),
+                        GetFrameIndex(isolate, recv));
 
-  if (CallSiteIsStrict(isolate, recv))
-    return *isolate->factory()->undefined_value();
-
-  Handle<Symbol> symbol = isolate->factory()->call_site_function_symbol();
-  RETURN_RESULT_OR_FAILURE(isolate, JSObject::GetProperty(recv, symbol));
+  StackFrameBase* frame = it.Frame();
+  if (frame->IsStrict()) return isolate->heap()->undefined_value();
+  return *frame->GetFunction();
 }
 
 BUILTIN(CallSitePrototypeGetFunctionName) {
   HandleScope scope(isolate);
   CHECK_CALLSITE(recv, "getFunctionName");
-
-  CallSite call_site(isolate, recv);
-  CHECK(call_site.IsJavaScript() || call_site.IsWasm());
-  return *call_site.GetFunctionName();
+  FrameArrayIterator it(isolate, GetFrameArray(isolate, recv),
+                        GetFrameIndex(isolate, recv));
+  return *it.Frame()->GetFunctionName();
 }
 
 BUILTIN(CallSitePrototypeGetLineNumber) {
   HandleScope scope(isolate);
   CHECK_CALLSITE(recv, "getLineNumber");
-
-  CallSite call_site(isolate, recv);
-  CHECK(call_site.IsJavaScript() || call_site.IsWasm());
-
-  int line_number = call_site.IsWasm() ? call_site.wasm_func_index()
-                                       : call_site.GetLineNumber();
-  return PositiveNumberOrNull(line_number, isolate);
+  FrameArrayIterator it(isolate, GetFrameArray(isolate, recv),
+                        GetFrameIndex(isolate, recv));
+  return PositiveNumberOrNull(it.Frame()->GetLineNumber(), isolate);
 }
 
 BUILTIN(CallSitePrototypeGetMethodName) {
   HandleScope scope(isolate);
   CHECK_CALLSITE(recv, "getMethodName");
-
-  CallSite call_site(isolate, recv);
-  CHECK(call_site.IsJavaScript() || call_site.IsWasm());
-  return *call_site.GetMethodName();
+  FrameArrayIterator it(isolate, GetFrameArray(isolate, recv),
+                        GetFrameIndex(isolate, recv));
+  return *it.Frame()->GetMethodName();
 }
 
 BUILTIN(CallSitePrototypeGetPosition) {
   HandleScope scope(isolate);
   CHECK_CALLSITE(recv, "getPosition");
-
-  Handle<Symbol> symbol = isolate->factory()->call_site_position_symbol();
-  RETURN_RESULT_OR_FAILURE(isolate, JSObject::GetProperty(recv, symbol));
+  FrameArrayIterator it(isolate, GetFrameArray(isolate, recv),
+                        GetFrameIndex(isolate, recv));
+  return Smi::FromInt(it.Frame()->GetPosition());
 }
 
 BUILTIN(CallSitePrototypeGetScriptNameOrSourceURL) {
   HandleScope scope(isolate);
   CHECK_CALLSITE(recv, "getScriptNameOrSourceUrl");
-
-  CallSite call_site(isolate, recv);
-  CHECK(call_site.IsJavaScript() || call_site.IsWasm());
-  return *call_site.GetScriptNameOrSourceUrl();
+  FrameArrayIterator it(isolate, GetFrameArray(isolate, recv),
+                        GetFrameIndex(isolate, recv));
+  return *it.Frame()->GetScriptNameOrSourceUrl();
 }
 
 BUILTIN(CallSitePrototypeGetThis) {
   HandleScope scope(isolate);
   CHECK_CALLSITE(recv, "getThis");
+  FrameArrayIterator it(isolate, GetFrameArray(isolate, recv),
+                        GetFrameIndex(isolate, recv));
 
-  if (CallSiteIsStrict(isolate, recv))
-    return *isolate->factory()->undefined_value();
-
-  Handle<Object> receiver;
-  Handle<Symbol> symbol = isolate->factory()->call_site_receiver_symbol();
-  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, receiver,
-                                     JSObject::GetProperty(recv, symbol));
-
-  if (*receiver == isolate->heap()->call_site_constructor_symbol())
-    return *isolate->factory()->undefined_value();
-
-  return *receiver;
+  StackFrameBase* frame = it.Frame();
+  if (frame->IsStrict()) return isolate->heap()->undefined_value();
+  return *frame->GetReceiver();
 }
 
 BUILTIN(CallSitePrototypeGetTypeName) {
   HandleScope scope(isolate);
   CHECK_CALLSITE(recv, "getTypeName");
-
-  CallSite call_site(isolate, recv);
-  CHECK(call_site.IsJavaScript() || call_site.IsWasm());
-  return *call_site.GetTypeName();
+  FrameArrayIterator it(isolate, GetFrameArray(isolate, recv),
+                        GetFrameIndex(isolate, recv));
+  return *it.Frame()->GetTypeName();
 }
 
 BUILTIN(CallSitePrototypeIsConstructor) {
   HandleScope scope(isolate);
   CHECK_CALLSITE(recv, "isConstructor");
-
-  CallSite call_site(isolate, recv);
-  CHECK(call_site.IsJavaScript() || call_site.IsWasm());
-  return isolate->heap()->ToBoolean(call_site.IsConstructor());
+  FrameArrayIterator it(isolate, GetFrameArray(isolate, recv),
+                        GetFrameIndex(isolate, recv));
+  return isolate->heap()->ToBoolean(it.Frame()->IsConstructor());
 }
 
 BUILTIN(CallSitePrototypeIsEval) {
   HandleScope scope(isolate);
   CHECK_CALLSITE(recv, "isEval");
-
-  CallSite call_site(isolate, recv);
-  CHECK(call_site.IsJavaScript() || call_site.IsWasm());
-  return isolate->heap()->ToBoolean(call_site.IsEval());
+  FrameArrayIterator it(isolate, GetFrameArray(isolate, recv),
+                        GetFrameIndex(isolate, recv));
+  return isolate->heap()->ToBoolean(it.Frame()->IsEval());
 }
 
 BUILTIN(CallSitePrototypeIsNative) {
   HandleScope scope(isolate);
   CHECK_CALLSITE(recv, "isNative");
-
-  CallSite call_site(isolate, recv);
-  CHECK(call_site.IsJavaScript() || call_site.IsWasm());
-  return isolate->heap()->ToBoolean(call_site.IsNative());
+  FrameArrayIterator it(isolate, GetFrameArray(isolate, recv),
+                        GetFrameIndex(isolate, recv));
+  return isolate->heap()->ToBoolean(it.Frame()->IsNative());
 }
 
 BUILTIN(CallSitePrototypeIsToplevel) {
   HandleScope scope(isolate);
   CHECK_CALLSITE(recv, "isToplevel");
-
-  CallSite call_site(isolate, recv);
-  CHECK(call_site.IsJavaScript() || call_site.IsWasm());
-  return isolate->heap()->ToBoolean(call_site.IsToplevel());
+  FrameArrayIterator it(isolate, GetFrameArray(isolate, recv),
+                        GetFrameIndex(isolate, recv));
+  return isolate->heap()->ToBoolean(it.Frame()->IsToplevel());
 }
 
 BUILTIN(CallSitePrototypeToString) {
   HandleScope scope(isolate);
   CHECK_CALLSITE(recv, "toString");
-  RETURN_RESULT_OR_FAILURE(isolate, CallSiteUtils::ToString(isolate, recv));
+  FrameArrayIterator it(isolate, GetFrameArray(isolate, recv),
+                        GetFrameIndex(isolate, recv));
+  RETURN_RESULT_OR_FAILURE(isolate, it.Frame()->ToString());
 }
 
 #undef CHECK_CALLSITE
diff --git a/src/builtins/builtins-conversion.cc b/src/builtins/builtins-conversion.cc
index 0d04a02..7fbe4f8 100644
--- a/src/builtins/builtins-conversion.cc
+++ b/src/builtins/builtins-conversion.cc
@@ -110,133 +110,99 @@
 }
 
 void Builtins::Generate_StringToNumber(CodeStubAssembler* assembler) {
-  typedef CodeStubAssembler::Label Label;
   typedef compiler::Node Node;
   typedef TypeConversionDescriptor Descriptor;
 
   Node* input = assembler->Parameter(Descriptor::kArgument);
   Node* context = assembler->Parameter(Descriptor::kContext);
 
-  Label runtime(assembler);
+  assembler->Return(assembler->StringToNumber(context, input));
+}
 
-  // Check if string has a cached array index.
-  Node* hash = assembler->LoadNameHashField(input);
-  Node* bit = assembler->Word32And(
-      hash, assembler->Int32Constant(String::kContainsCachedArrayIndexMask));
-  assembler->GotoIf(assembler->Word32NotEqual(bit, assembler->Int32Constant(0)),
-                    &runtime);
+void Builtins::Generate_ToName(CodeStubAssembler* assembler) {
+  typedef compiler::Node Node;
+  typedef TypeConversionDescriptor Descriptor;
 
-  assembler->Return(assembler->SmiTag(
-      assembler->BitFieldDecode<String::ArrayIndexValueBits>(hash)));
+  Node* input = assembler->Parameter(Descriptor::kArgument);
+  Node* context = assembler->Parameter(Descriptor::kContext);
 
-  assembler->Bind(&runtime);
-  {
-    // Note: We cannot tail call to the runtime here, as js-to-wasm
-    // trampolines also use this code currently, and they declare all
-    // outgoing parameters as untagged, while we would push a tagged
-    // object here.
-    Node* result =
-        assembler->CallRuntime(Runtime::kStringToNumber, context, input);
-    assembler->Return(result);
-  }
+  assembler->Return(assembler->ToName(context, input));
+}
+
+// static
+void Builtins::Generate_NonNumberToNumber(CodeStubAssembler* assembler) {
+  typedef compiler::Node Node;
+  typedef TypeConversionDescriptor Descriptor;
+
+  Node* input = assembler->Parameter(Descriptor::kArgument);
+  Node* context = assembler->Parameter(Descriptor::kContext);
+
+  assembler->Return(assembler->NonNumberToNumber(context, input));
 }
 
 // ES6 section 7.1.3 ToNumber ( argument )
-void Builtins::Generate_NonNumberToNumber(CodeStubAssembler* assembler) {
-  typedef CodeStubAssembler::Label Label;
+void Builtins::Generate_ToNumber(CodeStubAssembler* assembler) {
   typedef compiler::Node Node;
-  typedef CodeStubAssembler::Variable Variable;
   typedef TypeConversionDescriptor Descriptor;
 
   Node* input = assembler->Parameter(Descriptor::kArgument);
   Node* context = assembler->Parameter(Descriptor::kContext);
 
-  // We might need to loop once here due to ToPrimitive conversions.
-  Variable var_input(assembler, MachineRepresentation::kTagged);
-  Label loop(assembler, &var_input);
-  var_input.Bind(input);
-  assembler->Goto(&loop);
-  assembler->Bind(&loop);
+  assembler->Return(assembler->ToNumber(context, input));
+}
+
+void Builtins::Generate_ToString(CodeStubAssembler* assembler) {
+  typedef CodeStubAssembler::Label Label;
+  typedef compiler::Node Node;
+  typedef TypeConversionDescriptor Descriptor;
+
+  Node* input = assembler->Parameter(Descriptor::kArgument);
+  Node* context = assembler->Parameter(Descriptor::kContext);
+
+  Label is_number(assembler);
+  Label runtime(assembler);
+
+  assembler->GotoIf(assembler->WordIsSmi(input), &is_number);
+
+  Node* input_map = assembler->LoadMap(input);
+  Node* input_instance_type = assembler->LoadMapInstanceType(input_map);
+
+  Label not_string(assembler);
+  assembler->GotoUnless(assembler->IsStringInstanceType(input_instance_type),
+                        &not_string);
+  assembler->Return(input);
+
+  Label not_heap_number(assembler);
+
+  assembler->Bind(&not_string);
   {
-    // Load the current {input} value (known to be a HeapObject).
-    Node* input = var_input.value();
+    assembler->GotoUnless(
+        assembler->WordEqual(input_map, assembler->HeapNumberMapConstant()),
+        &not_heap_number);
+    assembler->Goto(&is_number);
+  }
 
-    // Dispatch on the {input} instance type.
-    Node* input_instance_type = assembler->LoadInstanceType(input);
-    Label if_inputisstring(assembler), if_inputisoddball(assembler),
-        if_inputisreceiver(assembler, Label::kDeferred),
-        if_inputisother(assembler, Label::kDeferred);
-    assembler->GotoIf(assembler->Int32LessThan(
-                          input_instance_type,
-                          assembler->Int32Constant(FIRST_NONSTRING_TYPE)),
-                      &if_inputisstring);
+  assembler->Bind(&is_number);
+  {
+    // TODO(tebbi): inline as soon as NumberToString is in the CodeStubAssembler
+    Callable callable = CodeFactory::NumberToString(assembler->isolate());
+    assembler->Return(assembler->CallStub(callable, context, input));
+  }
+
+  assembler->Bind(&not_heap_number);
+  {
     assembler->GotoIf(
-        assembler->Word32Equal(input_instance_type,
-                               assembler->Int32Constant(ODDBALL_TYPE)),
-        &if_inputisoddball);
-    STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
-    assembler->Branch(assembler->Int32GreaterThanOrEqual(
-                          input_instance_type,
-                          assembler->Int32Constant(FIRST_JS_RECEIVER_TYPE)),
-                      &if_inputisreceiver, &if_inputisother);
+        assembler->Word32NotEqual(input_instance_type,
+                                  assembler->Int32Constant(ODDBALL_TYPE)),
+        &runtime);
+    assembler->Return(
+        assembler->LoadObjectField(input, Oddball::kToStringOffset));
+  }
 
-    assembler->Bind(&if_inputisstring);
-    {
-      // The {input} is a String, use the fast stub to convert it to a Number.
-      // TODO(bmeurer): Consider inlining the StringToNumber logic here.
-      Callable callable = CodeFactory::StringToNumber(assembler->isolate());
-      assembler->TailCallStub(callable, context, input);
-    }
-
-    assembler->Bind(&if_inputisoddball);
-    {
-      // The {input} is an Oddball, we just need to the Number value of it.
-      Node* result =
-          assembler->LoadObjectField(input, Oddball::kToNumberOffset);
-      assembler->Return(result);
-    }
-
-    assembler->Bind(&if_inputisreceiver);
-    {
-      // The {input} is a JSReceiver, we need to convert it to a Primitive first
-      // using the ToPrimitive type conversion, preferably yielding a Number.
-      Callable callable = CodeFactory::NonPrimitiveToPrimitive(
-          assembler->isolate(), ToPrimitiveHint::kNumber);
-      Node* result = assembler->CallStub(callable, context, input);
-
-      // Check if the {result} is already a Number.
-      Label if_resultisnumber(assembler), if_resultisnotnumber(assembler);
-      assembler->GotoIf(assembler->WordIsSmi(result), &if_resultisnumber);
-      Node* result_map = assembler->LoadMap(result);
-      assembler->Branch(
-          assembler->WordEqual(result_map, assembler->HeapNumberMapConstant()),
-          &if_resultisnumber, &if_resultisnotnumber);
-
-      assembler->Bind(&if_resultisnumber);
-      {
-        // The ToPrimitive conversion already gave us a Number, so we're done.
-        assembler->Return(result);
-      }
-
-      assembler->Bind(&if_resultisnotnumber);
-      {
-        // We now have a Primitive {result}, but it's not yet a Number.
-        var_input.Bind(result);
-        assembler->Goto(&loop);
-      }
-    }
-
-    assembler->Bind(&if_inputisother);
-    {
-      // The {input} is something else (i.e. Symbol or Simd128Value), let the
-      // runtime figure out the correct exception.
-      // Note: We cannot tail call to the runtime here, as js-to-wasm
-      // trampolines also use this code currently, and they declare all
-      // outgoing parameters as untagged, while we would push a tagged
-      // object here.
-      Node* result = assembler->CallRuntime(Runtime::kToNumber, context, input);
-      assembler->Return(result);
-    }
+  assembler->Bind(&runtime);
+  {
+    assembler->Return(
+        assembler->CallRuntime(Runtime::kToString, context, input));
   }
 }
 
diff --git a/src/builtins/builtins-dataview.cc b/src/builtins/builtins-dataview.cc
index 32c5a83..3d14e31 100644
--- a/src/builtins/builtins-dataview.cc
+++ b/src/builtins/builtins-dataview.cc
@@ -129,5 +129,209 @@
   return data_view->byte_offset();
 }
 
+namespace {
+
+bool NeedToFlipBytes(bool is_little_endian) {
+#ifdef V8_TARGET_LITTLE_ENDIAN
+  return !is_little_endian;
+#else
+  return is_little_endian;
+#endif
+}
+
+template <size_t n>
+void CopyBytes(uint8_t* target, uint8_t const* source) {
+  for (size_t i = 0; i < n; i++) {
+    *(target++) = *(source++);
+  }
+}
+
+template <size_t n>
+void FlipBytes(uint8_t* target, uint8_t const* source) {
+  source = source + (n - 1);
+  for (size_t i = 0; i < n; i++) {
+    *(target++) = *(source--);
+  }
+}
+
+// ES6 section 24.2.1.1 GetViewValue (view, requestIndex, isLittleEndian, type)
+template <typename T>
+MaybeHandle<Object> GetViewValue(Isolate* isolate, Handle<JSDataView> data_view,
+                                 Handle<Object> request_index,
+                                 bool is_little_endian) {
+  ASSIGN_RETURN_ON_EXCEPTION(
+      isolate, request_index,
+      Object::ToIndex(isolate, request_index,
+                      MessageTemplate::kInvalidDataViewAccessorOffset),
+      Object);
+  size_t get_index = 0;
+  if (!TryNumberToSize(*request_index, &get_index)) {
+    THROW_NEW_ERROR(
+        isolate, NewRangeError(MessageTemplate::kInvalidDataViewAccessorOffset),
+        Object);
+  }
+  Handle<JSArrayBuffer> buffer(JSArrayBuffer::cast(data_view->buffer()),
+                               isolate);
+  size_t const data_view_byte_offset = NumberToSize(data_view->byte_offset());
+  size_t const data_view_byte_length = NumberToSize(data_view->byte_length());
+  if (get_index + sizeof(T) > data_view_byte_length ||
+      get_index + sizeof(T) < get_index) {  // overflow
+    THROW_NEW_ERROR(
+        isolate, NewRangeError(MessageTemplate::kInvalidDataViewAccessorOffset),
+        Object);
+  }
+  union {
+    T data;
+    uint8_t bytes[sizeof(T)];
+  } v;
+  size_t const buffer_offset = data_view_byte_offset + get_index;
+  DCHECK_GE(NumberToSize(buffer->byte_length()), buffer_offset + sizeof(T));
+  uint8_t const* const source =
+      static_cast<uint8_t*>(buffer->backing_store()) + buffer_offset;
+  if (NeedToFlipBytes(is_little_endian)) {
+    FlipBytes<sizeof(T)>(v.bytes, source);
+  } else {
+    CopyBytes<sizeof(T)>(v.bytes, source);
+  }
+  return isolate->factory()->NewNumber(v.data);
+}
+
+template <typename T>
+T DataViewConvertValue(double value);
+
+template <>
+int8_t DataViewConvertValue<int8_t>(double value) {
+  return static_cast<int8_t>(DoubleToInt32(value));
+}
+
+template <>
+int16_t DataViewConvertValue<int16_t>(double value) {
+  return static_cast<int16_t>(DoubleToInt32(value));
+}
+
+template <>
+int32_t DataViewConvertValue<int32_t>(double value) {
+  return DoubleToInt32(value);
+}
+
+template <>
+uint8_t DataViewConvertValue<uint8_t>(double value) {
+  return static_cast<uint8_t>(DoubleToUint32(value));
+}
+
+template <>
+uint16_t DataViewConvertValue<uint16_t>(double value) {
+  return static_cast<uint16_t>(DoubleToUint32(value));
+}
+
+template <>
+uint32_t DataViewConvertValue<uint32_t>(double value) {
+  return DoubleToUint32(value);
+}
+
+template <>
+float DataViewConvertValue<float>(double value) {
+  return static_cast<float>(value);
+}
+
+template <>
+double DataViewConvertValue<double>(double value) {
+  return value;
+}
+
+// ES6 section 24.2.1.2 SetViewValue (view, requestIndex, isLittleEndian, type,
+//                                    value)
+template <typename T>
+MaybeHandle<Object> SetViewValue(Isolate* isolate, Handle<JSDataView> data_view,
+                                 Handle<Object> request_index,
+                                 bool is_little_endian, Handle<Object> value) {
+  ASSIGN_RETURN_ON_EXCEPTION(
+      isolate, request_index,
+      Object::ToIndex(isolate, request_index,
+                      MessageTemplate::kInvalidDataViewAccessorOffset),
+      Object);
+  ASSIGN_RETURN_ON_EXCEPTION(isolate, value, Object::ToNumber(value), Object);
+  size_t get_index = 0;
+  if (!TryNumberToSize(*request_index, &get_index)) {
+    THROW_NEW_ERROR(
+        isolate, NewRangeError(MessageTemplate::kInvalidDataViewAccessorOffset),
+        Object);
+  }
+  Handle<JSArrayBuffer> buffer(JSArrayBuffer::cast(data_view->buffer()),
+                               isolate);
+  size_t const data_view_byte_offset = NumberToSize(data_view->byte_offset());
+  size_t const data_view_byte_length = NumberToSize(data_view->byte_length());
+  if (get_index + sizeof(T) > data_view_byte_length ||
+      get_index + sizeof(T) < get_index) {  // overflow
+    THROW_NEW_ERROR(
+        isolate, NewRangeError(MessageTemplate::kInvalidDataViewAccessorOffset),
+        Object);
+  }
+  union {
+    T data;
+    uint8_t bytes[sizeof(T)];
+  } v;
+  v.data = DataViewConvertValue<T>(value->Number());
+  size_t const buffer_offset = data_view_byte_offset + get_index;
+  DCHECK(NumberToSize(buffer->byte_length()) >= buffer_offset + sizeof(T));
+  uint8_t* const target =
+      static_cast<uint8_t*>(buffer->backing_store()) + buffer_offset;
+  if (NeedToFlipBytes(is_little_endian)) {
+    FlipBytes<sizeof(T)>(target, v.bytes);
+  } else {
+    CopyBytes<sizeof(T)>(target, v.bytes);
+  }
+  return isolate->factory()->undefined_value();
+}
+
+}  // namespace
+
+#define DATA_VIEW_PROTOTYPE_GET(Type, type)                                \
+  BUILTIN(DataViewPrototypeGet##Type) {                                    \
+    HandleScope scope(isolate);                                            \
+    CHECK_RECEIVER(JSDataView, data_view, "DataView.prototype.get" #Type); \
+    Handle<Object> byte_offset = args.atOrUndefined(isolate, 1);           \
+    Handle<Object> is_little_endian = args.atOrUndefined(isolate, 2);      \
+    Handle<Object> result;                                                 \
+    ASSIGN_RETURN_FAILURE_ON_EXCEPTION(                                    \
+        isolate, result,                                                   \
+        GetViewValue<type>(isolate, data_view, byte_offset,                \
+                           is_little_endian->BooleanValue()));             \
+    return *result;                                                        \
+  }
+DATA_VIEW_PROTOTYPE_GET(Int8, int8_t)
+DATA_VIEW_PROTOTYPE_GET(Uint8, uint8_t)
+DATA_VIEW_PROTOTYPE_GET(Int16, int16_t)
+DATA_VIEW_PROTOTYPE_GET(Uint16, uint16_t)
+DATA_VIEW_PROTOTYPE_GET(Int32, int32_t)
+DATA_VIEW_PROTOTYPE_GET(Uint32, uint32_t)
+DATA_VIEW_PROTOTYPE_GET(Float32, float)
+DATA_VIEW_PROTOTYPE_GET(Float64, double)
+#undef DATA_VIEW_PROTOTYPE_GET
+
+#define DATA_VIEW_PROTOTYPE_SET(Type, type)                                \
+  BUILTIN(DataViewPrototypeSet##Type) {                                    \
+    HandleScope scope(isolate);                                            \
+    CHECK_RECEIVER(JSDataView, data_view, "DataView.prototype.set" #Type); \
+    Handle<Object> byte_offset = args.atOrUndefined(isolate, 1);           \
+    Handle<Object> value = args.atOrUndefined(isolate, 2);                 \
+    Handle<Object> is_little_endian = args.atOrUndefined(isolate, 3);      \
+    Handle<Object> result;                                                 \
+    ASSIGN_RETURN_FAILURE_ON_EXCEPTION(                                    \
+        isolate, result,                                                   \
+        SetViewValue<type>(isolate, data_view, byte_offset,                \
+                           is_little_endian->BooleanValue(), value));      \
+    return *result;                                                        \
+  }
+DATA_VIEW_PROTOTYPE_SET(Int8, int8_t)
+DATA_VIEW_PROTOTYPE_SET(Uint8, uint8_t)
+DATA_VIEW_PROTOTYPE_SET(Int16, int16_t)
+DATA_VIEW_PROTOTYPE_SET(Uint16, uint16_t)
+DATA_VIEW_PROTOTYPE_SET(Int32, int32_t)
+DATA_VIEW_PROTOTYPE_SET(Uint32, uint32_t)
+DATA_VIEW_PROTOTYPE_SET(Float32, float)
+DATA_VIEW_PROTOTYPE_SET(Float64, double)
+#undef DATA_VIEW_PROTOTYPE_SET
+
 }  // namespace internal
 }  // namespace v8
diff --git a/src/builtins/builtins-date.cc b/src/builtins/builtins-date.cc
index d5c3476..205c8c9 100644
--- a/src/builtins/builtins-date.cc
+++ b/src/builtins/builtins-date.cc
@@ -909,93 +909,156 @@
 }
 
 // static
-void Builtins::Generate_DatePrototypeGetDate(MacroAssembler* masm) {
-  Generate_DatePrototype_GetField(masm, JSDate::kDay);
+void Builtins::Generate_DatePrototype_GetField(CodeStubAssembler* assembler,
+                                               int field_index) {
+  typedef CodeStubAssembler::Label Label;
+  typedef compiler::Node Node;
+
+  Node* receiver = assembler->Parameter(0);
+  Node* context = assembler->Parameter(3);
+
+  Label receiver_not_date(assembler, Label::kDeferred);
+
+  assembler->GotoIf(assembler->WordIsSmi(receiver), &receiver_not_date);
+  Node* receiver_instance_type = assembler->LoadInstanceType(receiver);
+  assembler->GotoIf(
+      assembler->Word32NotEqual(receiver_instance_type,
+                                assembler->Int32Constant(JS_DATE_TYPE)),
+      &receiver_not_date);
+
+  // Load the specified date field, falling back to the runtime as necessary.
+  if (field_index == JSDate::kDateValue) {
+    assembler->Return(
+        assembler->LoadObjectField(receiver, JSDate::kValueOffset));
+  } else {
+    if (field_index < JSDate::kFirstUncachedField) {
+      Label stamp_mismatch(assembler, Label::kDeferred);
+      Node* date_cache_stamp = assembler->Load(
+          MachineType::AnyTagged(),
+          assembler->ExternalConstant(
+              ExternalReference::date_cache_stamp(assembler->isolate())));
+
+      Node* cache_stamp =
+          assembler->LoadObjectField(receiver, JSDate::kCacheStampOffset);
+      assembler->GotoIf(assembler->WordNotEqual(date_cache_stamp, cache_stamp),
+                        &stamp_mismatch);
+      assembler->Return(assembler->LoadObjectField(
+          receiver, JSDate::kValueOffset + field_index * kPointerSize));
+
+      assembler->Bind(&stamp_mismatch);
+    }
+
+    Node* field_index_smi = assembler->SmiConstant(Smi::FromInt(field_index));
+    Node* function = assembler->ExternalConstant(
+        ExternalReference::get_date_field_function(assembler->isolate()));
+    Node* result = assembler->CallCFunction2(
+        MachineType::AnyTagged(), MachineType::Pointer(),
+        MachineType::AnyTagged(), function, receiver, field_index_smi);
+    assembler->Return(result);
+  }
+
+  // Raise a TypeError if the receiver is not a date.
+  assembler->Bind(&receiver_not_date);
+  {
+    Node* result = assembler->CallRuntime(Runtime::kThrowNotDateError, context);
+    assembler->Return(result);
+  }
 }
 
 // static
-void Builtins::Generate_DatePrototypeGetDay(MacroAssembler* masm) {
-  Generate_DatePrototype_GetField(masm, JSDate::kWeekday);
+void Builtins::Generate_DatePrototypeGetDate(CodeStubAssembler* assembler) {
+  Generate_DatePrototype_GetField(assembler, JSDate::kDay);
 }
 
 // static
-void Builtins::Generate_DatePrototypeGetFullYear(MacroAssembler* masm) {
-  Generate_DatePrototype_GetField(masm, JSDate::kYear);
+void Builtins::Generate_DatePrototypeGetDay(CodeStubAssembler* assembler) {
+  Generate_DatePrototype_GetField(assembler, JSDate::kWeekday);
 }
 
 // static
-void Builtins::Generate_DatePrototypeGetHours(MacroAssembler* masm) {
-  Generate_DatePrototype_GetField(masm, JSDate::kHour);
+void Builtins::Generate_DatePrototypeGetFullYear(CodeStubAssembler* assembler) {
+  Generate_DatePrototype_GetField(assembler, JSDate::kYear);
 }
 
 // static
-void Builtins::Generate_DatePrototypeGetMilliseconds(MacroAssembler* masm) {
-  Generate_DatePrototype_GetField(masm, JSDate::kMillisecond);
+void Builtins::Generate_DatePrototypeGetHours(CodeStubAssembler* assembler) {
+  Generate_DatePrototype_GetField(assembler, JSDate::kHour);
 }
 
 // static
-void Builtins::Generate_DatePrototypeGetMinutes(MacroAssembler* masm) {
-  Generate_DatePrototype_GetField(masm, JSDate::kMinute);
+void Builtins::Generate_DatePrototypeGetMilliseconds(
+    CodeStubAssembler* assembler) {
+  Generate_DatePrototype_GetField(assembler, JSDate::kMillisecond);
 }
 
 // static
-void Builtins::Generate_DatePrototypeGetMonth(MacroAssembler* masm) {
-  Generate_DatePrototype_GetField(masm, JSDate::kMonth);
+void Builtins::Generate_DatePrototypeGetMinutes(CodeStubAssembler* assembler) {
+  Generate_DatePrototype_GetField(assembler, JSDate::kMinute);
 }
 
 // static
-void Builtins::Generate_DatePrototypeGetSeconds(MacroAssembler* masm) {
-  Generate_DatePrototype_GetField(masm, JSDate::kSecond);
+void Builtins::Generate_DatePrototypeGetMonth(CodeStubAssembler* assembler) {
+  Generate_DatePrototype_GetField(assembler, JSDate::kMonth);
 }
 
 // static
-void Builtins::Generate_DatePrototypeGetTime(MacroAssembler* masm) {
-  Generate_DatePrototype_GetField(masm, JSDate::kDateValue);
+void Builtins::Generate_DatePrototypeGetSeconds(CodeStubAssembler* assembler) {
+  Generate_DatePrototype_GetField(assembler, JSDate::kSecond);
 }
 
 // static
-void Builtins::Generate_DatePrototypeGetTimezoneOffset(MacroAssembler* masm) {
-  Generate_DatePrototype_GetField(masm, JSDate::kTimezoneOffset);
+void Builtins::Generate_DatePrototypeGetTime(CodeStubAssembler* assembler) {
+  Generate_DatePrototype_GetField(assembler, JSDate::kDateValue);
 }
 
 // static
-void Builtins::Generate_DatePrototypeGetUTCDate(MacroAssembler* masm) {
-  Generate_DatePrototype_GetField(masm, JSDate::kDayUTC);
+void Builtins::Generate_DatePrototypeGetTimezoneOffset(
+    CodeStubAssembler* assembler) {
+  Generate_DatePrototype_GetField(assembler, JSDate::kTimezoneOffset);
 }
 
 // static
-void Builtins::Generate_DatePrototypeGetUTCDay(MacroAssembler* masm) {
-  Generate_DatePrototype_GetField(masm, JSDate::kWeekdayUTC);
+void Builtins::Generate_DatePrototypeGetUTCDate(CodeStubAssembler* assembler) {
+  Generate_DatePrototype_GetField(assembler, JSDate::kDayUTC);
 }
 
 // static
-void Builtins::Generate_DatePrototypeGetUTCFullYear(MacroAssembler* masm) {
-  Generate_DatePrototype_GetField(masm, JSDate::kYearUTC);
+void Builtins::Generate_DatePrototypeGetUTCDay(CodeStubAssembler* assembler) {
+  Generate_DatePrototype_GetField(assembler, JSDate::kWeekdayUTC);
 }
 
 // static
-void Builtins::Generate_DatePrototypeGetUTCHours(MacroAssembler* masm) {
-  Generate_DatePrototype_GetField(masm, JSDate::kHourUTC);
+void Builtins::Generate_DatePrototypeGetUTCFullYear(
+    CodeStubAssembler* assembler) {
+  Generate_DatePrototype_GetField(assembler, JSDate::kYearUTC);
 }
 
 // static
-void Builtins::Generate_DatePrototypeGetUTCMilliseconds(MacroAssembler* masm) {
-  Generate_DatePrototype_GetField(masm, JSDate::kMillisecondUTC);
+void Builtins::Generate_DatePrototypeGetUTCHours(CodeStubAssembler* assembler) {
+  Generate_DatePrototype_GetField(assembler, JSDate::kHourUTC);
 }
 
 // static
-void Builtins::Generate_DatePrototypeGetUTCMinutes(MacroAssembler* masm) {
-  Generate_DatePrototype_GetField(masm, JSDate::kMinuteUTC);
+void Builtins::Generate_DatePrototypeGetUTCMilliseconds(
+    CodeStubAssembler* assembler) {
+  Generate_DatePrototype_GetField(assembler, JSDate::kMillisecondUTC);
 }
 
 // static
-void Builtins::Generate_DatePrototypeGetUTCMonth(MacroAssembler* masm) {
-  Generate_DatePrototype_GetField(masm, JSDate::kMonthUTC);
+void Builtins::Generate_DatePrototypeGetUTCMinutes(
+    CodeStubAssembler* assembler) {
+  Generate_DatePrototype_GetField(assembler, JSDate::kMinuteUTC);
 }
 
 // static
-void Builtins::Generate_DatePrototypeGetUTCSeconds(MacroAssembler* masm) {
-  Generate_DatePrototype_GetField(masm, JSDate::kSecondUTC);
+void Builtins::Generate_DatePrototypeGetUTCMonth(CodeStubAssembler* assembler) {
+  Generate_DatePrototype_GetField(assembler, JSDate::kMonthUTC);
+}
+
+// static
+void Builtins::Generate_DatePrototypeGetUTCSeconds(
+    CodeStubAssembler* assembler) {
+  Generate_DatePrototype_GetField(assembler, JSDate::kSecondUTC);
 }
 
 }  // namespace internal
diff --git a/src/builtins/builtins-global.cc b/src/builtins/builtins-global.cc
index d99a553..2205788 100644
--- a/src/builtins/builtins-global.cc
+++ b/src/builtins/builtins-global.cc
@@ -5,6 +5,7 @@
 #include "src/builtins/builtins.h"
 #include "src/builtins/builtins-utils.h"
 
+#include "src/code-factory.h"
 #include "src/compiler.h"
 #include "src/uri.h"
 
@@ -99,5 +100,113 @@
       Execution::Call(isolate, function, target_global_proxy, 0, nullptr));
 }
 
+// ES6 section 18.2.2 isFinite ( number )
+void Builtins::Generate_GlobalIsFinite(CodeStubAssembler* assembler) {
+  typedef CodeStubAssembler::Label Label;
+  typedef compiler::Node Node;
+  typedef CodeStubAssembler::Variable Variable;
+
+  Node* context = assembler->Parameter(4);
+
+  Label return_true(assembler), return_false(assembler);
+
+  // We might need to loop once for ToNumber conversion.
+  Variable var_num(assembler, MachineRepresentation::kTagged);
+  Label loop(assembler, &var_num);
+  var_num.Bind(assembler->Parameter(1));
+  assembler->Goto(&loop);
+  assembler->Bind(&loop);
+  {
+    // Load the current {num} value.
+    Node* num = var_num.value();
+
+    // Check if {num} is a Smi or a HeapObject.
+    assembler->GotoIf(assembler->WordIsSmi(num), &return_true);
+
+    // Check if {num} is a HeapNumber.
+    Label if_numisheapnumber(assembler),
+        if_numisnotheapnumber(assembler, Label::kDeferred);
+    assembler->Branch(assembler->WordEqual(assembler->LoadMap(num),
+                                           assembler->HeapNumberMapConstant()),
+                      &if_numisheapnumber, &if_numisnotheapnumber);
+
+    assembler->Bind(&if_numisheapnumber);
+    {
+      // Check if {num} contains a finite, non-NaN value.
+      Node* num_value = assembler->LoadHeapNumberValue(num);
+      assembler->BranchIfFloat64IsNaN(
+          assembler->Float64Sub(num_value, num_value), &return_false,
+          &return_true);
+    }
+
+    assembler->Bind(&if_numisnotheapnumber);
+    {
+      // Need to convert {num} to a Number first.
+      Callable callable = CodeFactory::NonNumberToNumber(assembler->isolate());
+      var_num.Bind(assembler->CallStub(callable, context, num));
+      assembler->Goto(&loop);
+    }
+  }
+
+  assembler->Bind(&return_true);
+  assembler->Return(assembler->BooleanConstant(true));
+
+  assembler->Bind(&return_false);
+  assembler->Return(assembler->BooleanConstant(false));
+}
+
+// ES6 section 18.2.3 isNaN ( number )
+void Builtins::Generate_GlobalIsNaN(CodeStubAssembler* assembler) {
+  typedef CodeStubAssembler::Label Label;
+  typedef compiler::Node Node;
+  typedef CodeStubAssembler::Variable Variable;
+
+  Node* context = assembler->Parameter(4);
+
+  Label return_true(assembler), return_false(assembler);
+
+  // We might need to loop once for ToNumber conversion.
+  Variable var_num(assembler, MachineRepresentation::kTagged);
+  Label loop(assembler, &var_num);
+  var_num.Bind(assembler->Parameter(1));
+  assembler->Goto(&loop);
+  assembler->Bind(&loop);
+  {
+    // Load the current {num} value.
+    Node* num = var_num.value();
+
+    // Check if {num} is a Smi or a HeapObject.
+    assembler->GotoIf(assembler->WordIsSmi(num), &return_false);
+
+    // Check if {num} is a HeapNumber.
+    Label if_numisheapnumber(assembler),
+        if_numisnotheapnumber(assembler, Label::kDeferred);
+    assembler->Branch(assembler->WordEqual(assembler->LoadMap(num),
+                                           assembler->HeapNumberMapConstant()),
+                      &if_numisheapnumber, &if_numisnotheapnumber);
+
+    assembler->Bind(&if_numisheapnumber);
+    {
+      // Check if {num} contains a NaN.
+      Node* num_value = assembler->LoadHeapNumberValue(num);
+      assembler->BranchIfFloat64IsNaN(num_value, &return_true, &return_false);
+    }
+
+    assembler->Bind(&if_numisnotheapnumber);
+    {
+      // Need to convert {num} to a Number first.
+      Callable callable = CodeFactory::NonNumberToNumber(assembler->isolate());
+      var_num.Bind(assembler->CallStub(callable, context, num));
+      assembler->Goto(&loop);
+    }
+  }
+
+  assembler->Bind(&return_true);
+  assembler->Return(assembler->BooleanConstant(true));
+
+  assembler->Bind(&return_false);
+  assembler->Return(assembler->BooleanConstant(false));
+}
+
 }  // namespace internal
 }  // namespace v8
diff --git a/src/builtins/builtins-handler.cc b/src/builtins/builtins-handler.cc
index 8b3df79..ebbc978 100644
--- a/src/builtins/builtins-handler.cc
+++ b/src/builtins/builtins-handler.cc
@@ -14,6 +14,21 @@
   KeyedLoadIC::GenerateMegamorphic(masm);
 }
 
+void Builtins::Generate_KeyedLoadIC_Megamorphic_TF(
+    CodeStubAssembler* assembler) {
+  typedef compiler::Node Node;
+  typedef LoadWithVectorDescriptor Descriptor;
+
+  Node* receiver = assembler->Parameter(Descriptor::kReceiver);
+  Node* name = assembler->Parameter(Descriptor::kName);
+  Node* slot = assembler->Parameter(Descriptor::kSlot);
+  Node* vector = assembler->Parameter(Descriptor::kVector);
+  Node* context = assembler->Parameter(Descriptor::kContext);
+
+  CodeStubAssembler::LoadICParameters p(context, receiver, name, slot, vector);
+  assembler->KeyedLoadICGeneric(&p);
+}
+
 void Builtins::Generate_KeyedLoadIC_Miss(MacroAssembler* masm) {
   KeyedLoadIC::GenerateMiss(masm);
 }
@@ -34,7 +49,7 @@
 }
 
 void Builtins::Generate_KeyedStoreIC_Slow(MacroAssembler* masm) {
-  ElementHandlerCompiler::GenerateStoreSlow(masm);
+  KeyedStoreIC::GenerateSlow(masm);
 }
 
 void Builtins::Generate_LoadGlobalIC_Miss(CodeStubAssembler* assembler) {
@@ -105,8 +120,8 @@
   Node* vector = assembler->Parameter(Descriptor::kVector);
   Node* context = assembler->Parameter(Descriptor::kContext);
 
-  assembler->TailCallRuntime(Runtime::kStoreIC_Miss, context, receiver, name,
-                             value, slot, vector);
+  assembler->TailCallRuntime(Runtime::kStoreIC_Miss, context, value, slot,
+                             vector, receiver, name);
 }
 
 void Builtins::Generate_StoreIC_Normal(MacroAssembler* masm) {
diff --git a/src/builtins/builtins-internal.cc b/src/builtins/builtins-internal.cc
index 87c5dd5..bec6ff3 100644
--- a/src/builtins/builtins-internal.cc
+++ b/src/builtins/builtins-internal.cc
@@ -64,12 +64,9 @@
   // Load the {object}s elements.
   Node* source = assembler->LoadObjectField(object, JSObject::kElementsOffset);
 
-  CodeStubAssembler::ParameterMode mode =
-      assembler->Is64() ? CodeStubAssembler::INTEGER_PARAMETERS
-                        : CodeStubAssembler::SMI_PARAMETERS;
-  Node* length = (mode == CodeStubAssembler::INTEGER_PARAMETERS)
-                     ? assembler->LoadAndUntagFixedArrayBaseLength(source)
-                     : assembler->LoadFixedArrayBaseLength(source);
+  CodeStubAssembler::ParameterMode mode = assembler->OptimalParameterMode();
+  Node* length = assembler->UntagParameter(
+      assembler->LoadFixedArrayBaseLength(source), mode);
 
   // Check if we can allocate in new space.
   ElementsKind kind = FAST_ELEMENTS;
@@ -111,9 +108,8 @@
 
   Label runtime(assembler, CodeStubAssembler::Label::kDeferred);
   Node* elements = assembler->LoadElements(object);
-  elements = assembler->CheckAndGrowElementsCapacity(
-      context, elements, FAST_DOUBLE_ELEMENTS, key, &runtime);
-  assembler->StoreObjectField(object, JSObject::kElementsOffset, elements);
+  elements = assembler->TryGrowElementsCapacity(
+      object, elements, FAST_DOUBLE_ELEMENTS, key, &runtime);
   assembler->Return(elements);
 
   assembler->Bind(&runtime);
@@ -132,9 +128,8 @@
 
   Label runtime(assembler, CodeStubAssembler::Label::kDeferred);
   Node* elements = assembler->LoadElements(object);
-  elements = assembler->CheckAndGrowElementsCapacity(
-      context, elements, FAST_ELEMENTS, key, &runtime);
-  assembler->StoreObjectField(object, JSObject::kElementsOffset, elements);
+  elements = assembler->TryGrowElementsCapacity(object, elements, FAST_ELEMENTS,
+                                                key, &runtime);
   assembler->Return(elements);
 
   assembler->Bind(&runtime);
diff --git a/src/builtins/builtins-interpreter.cc b/src/builtins/builtins-interpreter.cc
index 900172f..1609184 100644
--- a/src/builtins/builtins-interpreter.cc
+++ b/src/builtins/builtins-interpreter.cc
@@ -50,5 +50,27 @@
                                                  CallableType::kJSFunction);
 }
 
+Handle<Code> Builtins::InterpreterPushArgsAndConstruct(
+    CallableType function_type) {
+  switch (function_type) {
+    case CallableType::kJSFunction:
+      return InterpreterPushArgsAndConstructFunction();
+    case CallableType::kAny:
+      return InterpreterPushArgsAndConstruct();
+  }
+  UNREACHABLE();
+  return Handle<Code>::null();
+}
+
+void Builtins::Generate_InterpreterPushArgsAndConstruct(MacroAssembler* masm) {
+  return Generate_InterpreterPushArgsAndConstructImpl(masm, CallableType::kAny);
+}
+
+void Builtins::Generate_InterpreterPushArgsAndConstructFunction(
+    MacroAssembler* masm) {
+  return Generate_InterpreterPushArgsAndConstructImpl(
+      masm, CallableType::kJSFunction);
+}
+
 }  // namespace internal
 }  // namespace v8
diff --git a/src/builtins/builtins-iterator.cc b/src/builtins/builtins-iterator.cc
new file mode 100644
index 0000000..7b91e36
--- /dev/null
+++ b/src/builtins/builtins-iterator.cc
@@ -0,0 +1,17 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/builtins/builtins.h"
+#include "src/builtins/builtins-utils.h"
+
+namespace v8 {
+namespace internal {
+
+void Builtins::Generate_IteratorPrototypeIterator(
+    CodeStubAssembler* assembler) {
+  assembler->Return(assembler->Parameter(0));
+}
+
+}  // namespace internal
+}  // namespace v8
diff --git a/src/builtins/builtins-number.cc b/src/builtins/builtins-number.cc
index c2af0fd..1762844 100644
--- a/src/builtins/builtins-number.cc
+++ b/src/builtins/builtins-number.cc
@@ -11,6 +11,144 @@
 // -----------------------------------------------------------------------------
 // ES6 section 20.1 Number Objects
 
+// ES6 section 20.1.2.2 Number.isFinite ( number )
+void Builtins::Generate_NumberIsFinite(CodeStubAssembler* assembler) {
+  typedef CodeStubAssembler::Label Label;
+  typedef compiler::Node Node;
+
+  Node* number = assembler->Parameter(1);
+
+  Label return_true(assembler), return_false(assembler);
+
+  // Check if {number} is a Smi.
+  assembler->GotoIf(assembler->WordIsSmi(number), &return_true);
+
+  // Check if {number} is a HeapNumber.
+  assembler->GotoUnless(
+      assembler->WordEqual(assembler->LoadMap(number),
+                           assembler->HeapNumberMapConstant()),
+      &return_false);
+
+  // Check if {number} contains a finite, non-NaN value.
+  Node* number_value = assembler->LoadHeapNumberValue(number);
+  assembler->BranchIfFloat64IsNaN(
+      assembler->Float64Sub(number_value, number_value), &return_false,
+      &return_true);
+
+  assembler->Bind(&return_true);
+  assembler->Return(assembler->BooleanConstant(true));
+
+  assembler->Bind(&return_false);
+  assembler->Return(assembler->BooleanConstant(false));
+}
+
+// ES6 section 20.1.2.3 Number.isInteger ( number )
+void Builtins::Generate_NumberIsInteger(CodeStubAssembler* assembler) {
+  typedef CodeStubAssembler::Label Label;
+  typedef compiler::Node Node;
+
+  Node* number = assembler->Parameter(1);
+
+  Label return_true(assembler), return_false(assembler);
+
+  // Check if {number} is a Smi.
+  assembler->GotoIf(assembler->WordIsSmi(number), &return_true);
+
+  // Check if {number} is a HeapNumber.
+  assembler->GotoUnless(
+      assembler->WordEqual(assembler->LoadMap(number),
+                           assembler->HeapNumberMapConstant()),
+      &return_false);
+
+  // Load the actual value of {number}.
+  Node* number_value = assembler->LoadHeapNumberValue(number);
+
+  // Truncate the value of {number} to an integer (or an infinity).
+  Node* integer = assembler->Float64Trunc(number_value);
+
+  // Check if {number}s value matches the integer (ruling out the infinities).
+  assembler->BranchIfFloat64Equal(assembler->Float64Sub(number_value, integer),
+                                  assembler->Float64Constant(0.0), &return_true,
+                                  &return_false);
+
+  assembler->Bind(&return_true);
+  assembler->Return(assembler->BooleanConstant(true));
+
+  assembler->Bind(&return_false);
+  assembler->Return(assembler->BooleanConstant(false));
+}
+
+// ES6 section 20.1.2.4 Number.isNaN ( number )
+void Builtins::Generate_NumberIsNaN(CodeStubAssembler* assembler) {
+  typedef CodeStubAssembler::Label Label;
+  typedef compiler::Node Node;
+
+  Node* number = assembler->Parameter(1);
+
+  Label return_true(assembler), return_false(assembler);
+
+  // Check if {number} is a Smi.
+  assembler->GotoIf(assembler->WordIsSmi(number), &return_false);
+
+  // Check if {number} is a HeapNumber.
+  assembler->GotoUnless(
+      assembler->WordEqual(assembler->LoadMap(number),
+                           assembler->HeapNumberMapConstant()),
+      &return_false);
+
+  // Check if {number} contains a NaN value.
+  Node* number_value = assembler->LoadHeapNumberValue(number);
+  assembler->BranchIfFloat64IsNaN(number_value, &return_true, &return_false);
+
+  assembler->Bind(&return_true);
+  assembler->Return(assembler->BooleanConstant(true));
+
+  assembler->Bind(&return_false);
+  assembler->Return(assembler->BooleanConstant(false));
+}
+
+// ES6 section 20.1.2.5 Number.isSafeInteger ( number )
+void Builtins::Generate_NumberIsSafeInteger(CodeStubAssembler* assembler) {
+  typedef CodeStubAssembler::Label Label;
+  typedef compiler::Node Node;
+
+  Node* number = assembler->Parameter(1);
+
+  Label return_true(assembler), return_false(assembler);
+
+  // Check if {number} is a Smi.
+  assembler->GotoIf(assembler->WordIsSmi(number), &return_true);
+
+  // Check if {number} is a HeapNumber.
+  assembler->GotoUnless(
+      assembler->WordEqual(assembler->LoadMap(number),
+                           assembler->HeapNumberMapConstant()),
+      &return_false);
+
+  // Load the actual value of {number}.
+  Node* number_value = assembler->LoadHeapNumberValue(number);
+
+  // Truncate the value of {number} to an integer (or an infinity).
+  Node* integer = assembler->Float64Trunc(number_value);
+
+  // Check if {number}s value matches the integer (ruling out the infinities).
+  assembler->GotoUnless(
+      assembler->Float64Equal(assembler->Float64Sub(number_value, integer),
+                              assembler->Float64Constant(0.0)),
+      &return_false);
+
+  // Check if the {integer} value is in safe integer range.
+  assembler->BranchIfFloat64LessThanOrEqual(
+      assembler->Float64Abs(integer),
+      assembler->Float64Constant(kMaxSafeInteger), &return_true, &return_false);
+
+  assembler->Bind(&return_true);
+  assembler->Return(assembler->BooleanConstant(true));
+
+  assembler->Bind(&return_false);
+  assembler->Return(assembler->BooleanConstant(false));
+}
+
 // ES6 section 20.1.3.2 Number.prototype.toExponential ( fractionDigits )
 BUILTIN(NumberPrototypeToExponential) {
   HandleScope scope(isolate);
diff --git a/src/builtins/builtins-object.cc b/src/builtins/builtins-object.cc
index c422145..78df2d6 100644
--- a/src/builtins/builtins-object.cc
+++ b/src/builtins/builtins-object.cc
@@ -35,7 +35,7 @@
   Node* map = assembler->LoadMap(object);
   Node* instance_type = assembler->LoadMapInstanceType(map);
 
-  Variable var_index(assembler, MachineRepresentation::kWord32);
+  Variable var_index(assembler, MachineType::PointerRepresentation());
 
   Label keyisindex(assembler), if_iskeyunique(assembler);
   assembler->TryToName(key, &keyisindex, &var_index, &if_iskeyunique,
@@ -230,10 +230,8 @@
   {
     Node* instance_type = assembler->LoadInstanceType(object);
 
-    assembler->Branch(
-        assembler->Int32LessThan(
-            instance_type, assembler->Int32Constant(FIRST_NONSTRING_TYPE)),
-        if_string, if_notstring);
+    assembler->Branch(assembler->IsStringInstanceType(instance_type), if_string,
+                      if_notstring);
   }
 }
 
@@ -259,10 +257,8 @@
                        CodeStubAssembler::Label* return_string,
                        CodeStubAssembler::Label* return_boolean,
                        CodeStubAssembler::Label* return_number) {
-  assembler->GotoIf(
-      assembler->Int32LessThan(instance_type,
-                               assembler->Int32Constant(FIRST_NONSTRING_TYPE)),
-      return_string);
+  assembler->GotoIf(assembler->IsStringInstanceType(instance_type),
+                    return_string);
 
   assembler->GotoIf(assembler->Word32Equal(
                         instance_type, assembler->Int32Constant(ODDBALL_TYPE)),
@@ -910,5 +906,18 @@
   return *object;
 }
 
+// ES6 section 7.3.19 OrdinaryHasInstance ( C, O )
+void Builtins::Generate_OrdinaryHasInstance(CodeStubAssembler* assembler) {
+  typedef compiler::Node Node;
+  typedef CompareDescriptor Descriptor;
+
+  Node* constructor = assembler->Parameter(Descriptor::kLeft);
+  Node* object = assembler->Parameter(Descriptor::kRight);
+  Node* context = assembler->Parameter(Descriptor::kContext);
+
+  assembler->Return(
+      assembler->OrdinaryHasInstance(context, constructor, object));
+}
+
 }  // namespace internal
 }  // namespace v8
diff --git a/src/builtins/builtins-regexp.cc b/src/builtins/builtins-regexp.cc
new file mode 100644
index 0000000..371221f
--- /dev/null
+++ b/src/builtins/builtins-regexp.cc
@@ -0,0 +1,441 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/builtins/builtins-utils.h"
+#include "src/builtins/builtins.h"
+
+#include "src/code-factory.h"
+#include "src/regexp/jsregexp.h"
+
+namespace v8 {
+namespace internal {
+
+// -----------------------------------------------------------------------------
+// ES6 section 21.2 RegExp Objects
+
+namespace {
+
+// ES#sec-isregexp IsRegExp ( argument )
+Maybe<bool> IsRegExp(Isolate* isolate, Handle<Object> object) {
+  if (!object->IsJSReceiver()) return Just(false);
+
+  Handle<JSReceiver> receiver = Handle<JSReceiver>::cast(object);
+
+  if (isolate->regexp_function()->initial_map() == receiver->map()) {
+    // Fast-path for unmodified JSRegExp instances.
+    return Just(true);
+  }
+
+  Handle<Object> match;
+  ASSIGN_RETURN_ON_EXCEPTION_VALUE(
+      isolate, match,
+      JSObject::GetProperty(receiver, isolate->factory()->match_symbol()),
+      Nothing<bool>());
+
+  if (!match->IsUndefined(isolate)) return Just(match->BooleanValue());
+  return Just(object->IsJSRegExp());
+}
+
+Handle<String> PatternFlags(Isolate* isolate, Handle<JSRegExp> regexp) {
+  static const int kMaxFlagsLength = 5 + 1;  // 5 flags and '\0';
+  char flags_string[kMaxFlagsLength];
+  int i = 0;
+
+  const JSRegExp::Flags flags = regexp->GetFlags();
+
+  if ((flags & JSRegExp::kGlobal) != 0) flags_string[i++] = 'g';
+  if ((flags & JSRegExp::kIgnoreCase) != 0) flags_string[i++] = 'i';
+  if ((flags & JSRegExp::kMultiline) != 0) flags_string[i++] = 'm';
+  if ((flags & JSRegExp::kUnicode) != 0) flags_string[i++] = 'u';
+  if ((flags & JSRegExp::kSticky) != 0) flags_string[i++] = 'y';
+
+  DCHECK_LT(i, kMaxFlagsLength);
+  memset(&flags_string[i], '\0', kMaxFlagsLength - i);
+
+  return isolate->factory()->NewStringFromAsciiChecked(flags_string);
+}
+
+// ES#sec-regexpinitialize
+// Runtime Semantics: RegExpInitialize ( obj, pattern, flags )
+MaybeHandle<JSRegExp> RegExpInitialize(Isolate* isolate,
+                                       Handle<JSRegExp> regexp,
+                                       Handle<Object> pattern,
+                                       Handle<Object> flags) {
+  Handle<String> pattern_string;
+  if (pattern->IsUndefined(isolate)) {
+    pattern_string = isolate->factory()->empty_string();
+  } else {
+    ASSIGN_RETURN_ON_EXCEPTION(isolate, pattern_string,
+                               Object::ToString(isolate, pattern), JSRegExp);
+  }
+
+  Handle<String> flags_string;
+  if (flags->IsUndefined(isolate)) {
+    flags_string = isolate->factory()->empty_string();
+  } else {
+    ASSIGN_RETURN_ON_EXCEPTION(isolate, flags_string,
+                               Object::ToString(isolate, flags), JSRegExp);
+  }
+
+  // TODO(jgruber): We could avoid the flags back and forth conversions.
+  RETURN_RESULT(isolate,
+                JSRegExp::Initialize(regexp, pattern_string, flags_string),
+                JSRegExp);
+}
+
+}  // namespace
+
+// ES#sec-regexp-pattern-flags
+// RegExp ( pattern, flags )
+BUILTIN(RegExpConstructor) {
+  HandleScope scope(isolate);
+
+  Handle<HeapObject> new_target = args.new_target();
+  Handle<Object> pattern = args.atOrUndefined(isolate, 1);
+  Handle<Object> flags = args.atOrUndefined(isolate, 2);
+
+  Handle<JSFunction> target = isolate->regexp_function();
+
+  bool pattern_is_regexp;
+  {
+    Maybe<bool> maybe_pattern_is_regexp = IsRegExp(isolate, pattern);
+    if (maybe_pattern_is_regexp.IsNothing()) {
+      DCHECK(isolate->has_pending_exception());
+      return isolate->heap()->exception();
+    }
+    pattern_is_regexp = maybe_pattern_is_regexp.FromJust();
+  }
+
+  if (new_target->IsUndefined(isolate)) {
+    new_target = target;
+
+    // ES6 section 21.2.3.1 step 3.b
+    if (pattern_is_regexp && flags->IsUndefined(isolate)) {
+      Handle<Object> pattern_constructor;
+      ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+          isolate, pattern_constructor,
+          Object::GetProperty(pattern,
+                              isolate->factory()->constructor_string()));
+
+      if (pattern_constructor.is_identical_to(new_target)) {
+        return *pattern;
+      }
+    }
+  }
+
+  if (pattern->IsJSRegExp()) {
+    Handle<JSRegExp> regexp_pattern = Handle<JSRegExp>::cast(pattern);
+
+    if (flags->IsUndefined(isolate)) {
+      flags = PatternFlags(isolate, regexp_pattern);
+    }
+    pattern = handle(regexp_pattern->source(), isolate);
+  } else if (pattern_is_regexp) {
+    Handle<Object> pattern_source;
+    ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+        isolate, pattern_source,
+        Object::GetProperty(pattern, isolate->factory()->source_string()));
+
+    if (flags->IsUndefined(isolate)) {
+      ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+          isolate, flags,
+          Object::GetProperty(pattern, isolate->factory()->flags_string()));
+    }
+    pattern = pattern_source;
+  }
+
+  Handle<JSReceiver> new_target_receiver = Handle<JSReceiver>::cast(new_target);
+
+  // TODO(jgruber): Fast-path for target == new_target == unmodified JSRegExp.
+
+  Handle<JSObject> object;
+  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+      isolate, object, JSObject::New(target, new_target_receiver));
+  Handle<JSRegExp> regexp = Handle<JSRegExp>::cast(object);
+
+  RETURN_RESULT_OR_FAILURE(isolate,
+                           RegExpInitialize(isolate, regexp, pattern, flags));
+}
+
+namespace {
+
+compiler::Node* LoadLastIndex(CodeStubAssembler* a, compiler::Node* context,
+                              compiler::Node* has_initialmap,
+                              compiler::Node* regexp) {
+  typedef CodeStubAssembler::Variable Variable;
+  typedef CodeStubAssembler::Label Label;
+  typedef compiler::Node Node;
+
+  Variable var_value(a, MachineRepresentation::kTagged);
+
+  Label out(a), if_unmodified(a), if_modified(a, Label::kDeferred);
+  a->Branch(has_initialmap, &if_unmodified, &if_modified);
+
+  a->Bind(&if_unmodified);
+  {
+    // Load the in-object field.
+    static const int field_offset =
+        JSRegExp::kSize + JSRegExp::kLastIndexFieldIndex * kPointerSize;
+    var_value.Bind(a->LoadObjectField(regexp, field_offset));
+    a->Goto(&out);
+  }
+
+  a->Bind(&if_modified);
+  {
+    // Load through the GetProperty stub.
+    Node* const name =
+        a->HeapConstant(a->isolate()->factory()->last_index_string());
+    Callable getproperty_callable = CodeFactory::GetProperty(a->isolate());
+    var_value.Bind(a->CallStub(getproperty_callable, context, regexp, name));
+    a->Goto(&out);
+  }
+
+  a->Bind(&out);
+  return var_value.value();
+}
+
+void StoreLastIndex(CodeStubAssembler* a, compiler::Node* context,
+                    compiler::Node* has_initialmap, compiler::Node* regexp,
+                    compiler::Node* value) {
+  typedef CodeStubAssembler::Label Label;
+  typedef compiler::Node Node;
+
+  Label out(a), if_unmodified(a), if_modified(a, Label::kDeferred);
+  a->Branch(has_initialmap, &if_unmodified, &if_modified);
+
+  a->Bind(&if_unmodified);
+  {
+    // Store the in-object field.
+    static const int field_offset =
+        JSRegExp::kSize + JSRegExp::kLastIndexFieldIndex * kPointerSize;
+    a->StoreObjectField(regexp, field_offset, value);
+    a->Goto(&out);
+  }
+
+  a->Bind(&if_modified);
+  {
+    // Store through runtime.
+    // TODO(ishell): Use SetPropertyStub here once available.
+    Node* const name =
+        a->HeapConstant(a->isolate()->factory()->last_index_string());
+    Node* const language_mode = a->SmiConstant(Smi::FromInt(STRICT));
+    a->CallRuntime(Runtime::kSetProperty, context, regexp, name, value,
+                   language_mode);
+    a->Goto(&out);
+  }
+
+  a->Bind(&out);
+}
+
+compiler::Node* ConstructNewResultFromMatchInfo(Isolate* isolate,
+                                                CodeStubAssembler* a,
+                                                compiler::Node* context,
+                                                compiler::Node* match_elements,
+                                                compiler::Node* string) {
+  typedef CodeStubAssembler::Variable Variable;
+  typedef CodeStubAssembler::Label Label;
+  typedef compiler::Node Node;
+
+  Label out(a);
+
+  CodeStubAssembler::ParameterMode mode = CodeStubAssembler::INTPTR_PARAMETERS;
+  Node* const num_indices = a->SmiUntag(a->LoadFixedArrayElement(
+      match_elements, a->IntPtrConstant(RegExpImpl::kLastCaptureCount), 0,
+      mode));
+  Node* const num_results = a->SmiTag(a->WordShr(num_indices, 1));
+  Node* const start = a->LoadFixedArrayElement(
+      match_elements, a->IntPtrConstant(RegExpImpl::kFirstCapture), 0, mode);
+  Node* const end = a->LoadFixedArrayElement(
+      match_elements, a->IntPtrConstant(RegExpImpl::kFirstCapture + 1), 0,
+      mode);
+
+  // Calculate the substring of the first match before creating the result array
+  // to avoid an unnecessary write barrier storing the first result.
+  Node* const first = a->SubString(context, string, start, end);
+
+  Node* const result =
+      a->AllocateRegExpResult(context, num_results, start, string);
+  Node* const result_elements = a->LoadElements(result);
+
+  a->StoreFixedArrayElement(result_elements, a->IntPtrConstant(0), first,
+                            SKIP_WRITE_BARRIER);
+
+  a->GotoIf(a->SmiEqual(num_results, a->SmiConstant(Smi::FromInt(1))), &out);
+
+  // Store all remaining captures.
+  Node* const limit =
+      a->IntPtrAdd(a->IntPtrConstant(RegExpImpl::kFirstCapture), num_indices);
+
+  Variable var_from_cursor(a, MachineType::PointerRepresentation());
+  Variable var_to_cursor(a, MachineType::PointerRepresentation());
+
+  var_from_cursor.Bind(a->IntPtrConstant(RegExpImpl::kFirstCapture + 2));
+  var_to_cursor.Bind(a->IntPtrConstant(1));
+
+  Variable* vars[] = {&var_from_cursor, &var_to_cursor};
+  Label loop(a, 2, vars);
+
+  a->Goto(&loop);
+  a->Bind(&loop);
+  {
+    Node* const from_cursor = var_from_cursor.value();
+    Node* const to_cursor = var_to_cursor.value();
+    Node* const start = a->LoadFixedArrayElement(match_elements, from_cursor);
+
+    Label next_iter(a);
+    a->GotoIf(a->SmiEqual(start, a->SmiConstant(Smi::FromInt(-1))), &next_iter);
+
+    Node* const from_cursor_plus1 =
+        a->IntPtrAdd(from_cursor, a->IntPtrConstant(1));
+    Node* const end =
+        a->LoadFixedArrayElement(match_elements, from_cursor_plus1);
+
+    Node* const capture = a->SubString(context, string, start, end);
+    a->StoreFixedArrayElement(result_elements, to_cursor, capture);
+    a->Goto(&next_iter);
+
+    a->Bind(&next_iter);
+    var_from_cursor.Bind(a->IntPtrAdd(from_cursor, a->IntPtrConstant(2)));
+    var_to_cursor.Bind(a->IntPtrAdd(to_cursor, a->IntPtrConstant(1)));
+    a->Branch(a->UintPtrLessThan(var_from_cursor.value(), limit), &loop, &out);
+  }
+
+  a->Bind(&out);
+  return result;
+}
+
+}  // namespace
+
+// ES#sec-regexp.prototype.exec
+// RegExp.prototype.exec ( string )
+void Builtins::Generate_RegExpPrototypeExec(CodeStubAssembler* a) {
+  typedef CodeStubAssembler::Variable Variable;
+  typedef CodeStubAssembler::Label Label;
+  typedef compiler::Node Node;
+
+  Isolate* const isolate = a->isolate();
+
+  Node* const receiver = a->Parameter(0);
+  Node* const maybe_string = a->Parameter(1);
+  Node* const context = a->Parameter(4);
+
+  Node* const null = a->NullConstant();
+  Node* const int_zero = a->IntPtrConstant(0);
+  Node* const smi_zero = a->SmiConstant(Smi::FromInt(0));
+
+  // Ensure {receiver} is a JSRegExp.
+  Node* const regexp_map = a->ThrowIfNotInstanceType(
+      context, receiver, JS_REGEXP_TYPE, "RegExp.prototype.exec");
+  Node* const regexp = receiver;
+
+  // Check whether the regexp instance is unmodified.
+  Node* const native_context = a->LoadNativeContext(context);
+  Node* const regexp_fun =
+      a->LoadContextElement(native_context, Context::REGEXP_FUNCTION_INDEX);
+  Node* const initial_map =
+      a->LoadObjectField(regexp_fun, JSFunction::kPrototypeOrInitialMapOffset);
+  Node* const has_initialmap = a->WordEqual(regexp_map, initial_map);
+
+  // Convert {maybe_string} to a string.
+  Callable tostring_callable = CodeFactory::ToString(isolate);
+  Node* const string = a->CallStub(tostring_callable, context, maybe_string);
+  Node* const string_length = a->LoadStringLength(string);
+
+  // Check whether the regexp is global or sticky, which determines whether we
+  // update last index later on.
+  Node* const flags = a->LoadObjectField(regexp, JSRegExp::kFlagsOffset);
+  Node* const is_global_or_sticky =
+      a->WordAnd(a->SmiUntag(flags),
+                 a->IntPtrConstant(JSRegExp::kGlobal | JSRegExp::kSticky));
+  Node* const should_update_last_index =
+      a->WordNotEqual(is_global_or_sticky, int_zero);
+
+  // Grab and possibly update last index.
+  Label run_exec(a);
+  Variable var_lastindex(a, MachineRepresentation::kTagged);
+  {
+    Label if_doupdate(a), if_dontupdate(a);
+    a->Branch(should_update_last_index, &if_doupdate, &if_dontupdate);
+
+    a->Bind(&if_doupdate);
+    {
+      Node* const regexp_lastindex =
+          LoadLastIndex(a, context, has_initialmap, regexp);
+
+      Callable tolength_callable = CodeFactory::ToLength(isolate);
+      Node* const lastindex =
+          a->CallStub(tolength_callable, context, regexp_lastindex);
+      var_lastindex.Bind(lastindex);
+
+      Label if_isoob(a, Label::kDeferred);
+      a->GotoUnless(a->WordIsSmi(lastindex), &if_isoob);
+      a->GotoUnless(a->SmiLessThanOrEqual(lastindex, string_length), &if_isoob);
+      a->Goto(&run_exec);
+
+      a->Bind(&if_isoob);
+      {
+        StoreLastIndex(a, context, has_initialmap, regexp, smi_zero);
+        a->Return(null);
+      }
+    }
+
+    a->Bind(&if_dontupdate);
+    {
+      var_lastindex.Bind(smi_zero);
+      a->Goto(&run_exec);
+    }
+  }
+
+  Node* match_indices;
+  Label successful_match(a);
+  a->Bind(&run_exec);
+  {
+    // Get last match info from the context.
+    Node* const last_match_info = a->LoadContextElement(
+        native_context, Context::REGEXP_LAST_MATCH_INFO_INDEX);
+
+    // Call the exec stub.
+    Callable exec_callable = CodeFactory::RegExpExec(isolate);
+    match_indices = a->CallStub(exec_callable, context, regexp, string,
+                                var_lastindex.value(), last_match_info);
+
+    // {match_indices} is either null or the RegExpLastMatchInfo array.
+    // Return early if exec failed, possibly updating last index.
+    a->GotoUnless(a->WordEqual(match_indices, null), &successful_match);
+
+    Label return_null(a);
+    a->GotoUnless(should_update_last_index, &return_null);
+
+    StoreLastIndex(a, context, has_initialmap, regexp, smi_zero);
+    a->Goto(&return_null);
+
+    a->Bind(&return_null);
+    a->Return(null);
+  }
+
+  Label construct_result(a);
+  a->Bind(&successful_match);
+  {
+    Node* const match_elements = a->LoadElements(match_indices);
+
+    a->GotoUnless(should_update_last_index, &construct_result);
+
+    // Update the new last index from {match_indices}.
+    Node* const new_lastindex = a->LoadFixedArrayElement(
+        match_elements, a->IntPtrConstant(RegExpImpl::kFirstCapture + 1));
+
+    StoreLastIndex(a, context, has_initialmap, regexp, new_lastindex);
+    a->Goto(&construct_result);
+
+    a->Bind(&construct_result);
+    {
+      Node* result = ConstructNewResultFromMatchInfo(isolate, a, context,
+                                                     match_elements, string);
+      a->Return(result);
+    }
+  }
+}
+
+}  // namespace internal
+}  // namespace v8
diff --git a/src/builtins/builtins-sharedarraybuffer.cc b/src/builtins/builtins-sharedarraybuffer.cc
index 23d4f43..6aad4da 100644
--- a/src/builtins/builtins-sharedarraybuffer.cc
+++ b/src/builtins/builtins-sharedarraybuffer.cc
@@ -141,6 +141,7 @@
   using namespace compiler;
   // Check if the index is in bounds. If not, throw RangeError.
   CodeStubAssembler::Label if_inbounds(a), if_notinbounds(a);
+  // TODO(jkummerow): Use unsigned comparison instead of "i<0 || i>length".
   a->Branch(
       a->WordOr(a->Int32LessThan(index_word, a->Int32Constant(0)),
                 a->Int32GreaterThanOrEqual(index_word, array_length_word)),
@@ -227,8 +228,7 @@
   ValidateAtomicIndex(a, index_word32, array_length_word32, context);
   Node* index_word = a->ChangeUint32ToWord(index_word32);
 
-  Callable to_integer = CodeFactory::ToInteger(a->isolate());
-  Node* value_integer = a->CallStub(to_integer, context, value);
+  Node* value_integer = a->ToInteger(context, value);
   Node* value_word32 = a->TruncateTaggedToWord32(context, value_integer);
 
   CodeStubAssembler::Label u8(a), u16(a), u32(a), other(a);
@@ -248,8 +248,8 @@
   a->Return(value_integer);
 
   a->Bind(&u16);
-  a->SmiTag(a->AtomicStore(MachineRepresentation::kWord16, backing_store,
-                           a->WordShl(index_word, 1), value_word32));
+  a->AtomicStore(MachineRepresentation::kWord16, backing_store,
+                 a->WordShl(index_word, 1), value_word32);
   a->Return(value_integer);
 
   a->Bind(&u32);
diff --git a/src/builtins/builtins-string.cc b/src/builtins/builtins-string.cc
index d38f6b0..68d2bd0 100644
--- a/src/builtins/builtins-string.cc
+++ b/src/builtins/builtins-string.cc
@@ -10,6 +10,408 @@
 namespace v8 {
 namespace internal {
 
+namespace {
+
+enum ResultMode { kDontNegateResult, kNegateResult };
+
+void GenerateStringEqual(CodeStubAssembler* assembler, ResultMode mode) {
+  // Here's pseudo-code for the algorithm below in case of kDontNegateResult
+  // mode; for kNegateResult mode we properly negate the result.
+  //
+  // if (lhs == rhs) return true;
+  // if (lhs->length() != rhs->length()) return false;
+  // if (lhs->IsInternalizedString() && rhs->IsInternalizedString()) {
+  //   return false;
+  // }
+  // if (lhs->IsSeqOneByteString() && rhs->IsSeqOneByteString()) {
+  //   for (i = 0; i != lhs->length(); ++i) {
+  //     if (lhs[i] != rhs[i]) return false;
+  //   }
+  //   return true;
+  // }
+  // return %StringEqual(lhs, rhs);
+
+  typedef CodeStubAssembler::Label Label;
+  typedef compiler::Node Node;
+  typedef CodeStubAssembler::Variable Variable;
+
+  Node* lhs = assembler->Parameter(0);
+  Node* rhs = assembler->Parameter(1);
+  Node* context = assembler->Parameter(2);
+
+  Label if_equal(assembler), if_notequal(assembler);
+
+  // Fast check to see if {lhs} and {rhs} refer to the same String object.
+  Label if_same(assembler), if_notsame(assembler);
+  assembler->Branch(assembler->WordEqual(lhs, rhs), &if_same, &if_notsame);
+
+  assembler->Bind(&if_same);
+  assembler->Goto(&if_equal);
+
+  assembler->Bind(&if_notsame);
+  {
+    // The {lhs} and {rhs} don't refer to the exact same String object.
+
+    // Load the length of {lhs} and {rhs}.
+    Node* lhs_length = assembler->LoadStringLength(lhs);
+    Node* rhs_length = assembler->LoadStringLength(rhs);
+
+    // Check if the lengths of {lhs} and {rhs} are equal.
+    Label if_lengthisequal(assembler), if_lengthisnotequal(assembler);
+    assembler->Branch(assembler->WordEqual(lhs_length, rhs_length),
+                      &if_lengthisequal, &if_lengthisnotequal);
+
+    assembler->Bind(&if_lengthisequal);
+    {
+      // Load instance types of {lhs} and {rhs}.
+      Node* lhs_instance_type = assembler->LoadInstanceType(lhs);
+      Node* rhs_instance_type = assembler->LoadInstanceType(rhs);
+
+      // Combine the instance types into a single 16-bit value, so we can check
+      // both of them at once.
+      Node* both_instance_types = assembler->Word32Or(
+          lhs_instance_type,
+          assembler->Word32Shl(rhs_instance_type, assembler->Int32Constant(8)));
+
+      // Check if both {lhs} and {rhs} are internalized.
+      int const kBothInternalizedMask =
+          kIsNotInternalizedMask | (kIsNotInternalizedMask << 8);
+      int const kBothInternalizedTag =
+          kInternalizedTag | (kInternalizedTag << 8);
+      Label if_bothinternalized(assembler), if_notbothinternalized(assembler);
+      assembler->Branch(assembler->Word32Equal(
+                            assembler->Word32And(both_instance_types,
+                                                 assembler->Int32Constant(
+                                                     kBothInternalizedMask)),
+                            assembler->Int32Constant(kBothInternalizedTag)),
+                        &if_bothinternalized, &if_notbothinternalized);
+
+      assembler->Bind(&if_bothinternalized);
+      {
+        // Fast negative check for internalized-to-internalized equality.
+        assembler->Goto(&if_notequal);
+      }
+
+      assembler->Bind(&if_notbothinternalized);
+      {
+        // Check that both {lhs} and {rhs} are flat one-byte strings.
+        int const kBothSeqOneByteStringMask =
+            kStringEncodingMask | kStringRepresentationMask |
+            ((kStringEncodingMask | kStringRepresentationMask) << 8);
+        int const kBothSeqOneByteStringTag =
+            kOneByteStringTag | kSeqStringTag |
+            ((kOneByteStringTag | kSeqStringTag) << 8);
+        Label if_bothonebyteseqstrings(assembler),
+            if_notbothonebyteseqstrings(assembler);
+        assembler->Branch(
+            assembler->Word32Equal(
+                assembler->Word32And(
+                    both_instance_types,
+                    assembler->Int32Constant(kBothSeqOneByteStringMask)),
+                assembler->Int32Constant(kBothSeqOneByteStringTag)),
+            &if_bothonebyteseqstrings, &if_notbothonebyteseqstrings);
+
+        assembler->Bind(&if_bothonebyteseqstrings);
+        {
+          // Compute the effective offset of the first character.
+          Node* begin = assembler->IntPtrConstant(
+              SeqOneByteString::kHeaderSize - kHeapObjectTag);
+
+          // Compute the first offset after the string from the length.
+          Node* end =
+              assembler->IntPtrAdd(begin, assembler->SmiUntag(lhs_length));
+
+          // Loop over the {lhs} and {rhs} strings to see if they are equal.
+          Variable var_offset(assembler, MachineType::PointerRepresentation());
+          Label loop(assembler, &var_offset);
+          var_offset.Bind(begin);
+          assembler->Goto(&loop);
+          assembler->Bind(&loop);
+          {
+            // Check if {offset} equals {end}.
+            Node* offset = var_offset.value();
+            Label if_done(assembler), if_notdone(assembler);
+            assembler->Branch(assembler->WordEqual(offset, end), &if_done,
+                              &if_notdone);
+
+            assembler->Bind(&if_notdone);
+            {
+              // Load the next characters from {lhs} and {rhs}.
+              Node* lhs_value =
+                  assembler->Load(MachineType::Uint8(), lhs, offset);
+              Node* rhs_value =
+                  assembler->Load(MachineType::Uint8(), rhs, offset);
+
+              // Check if the characters match.
+              Label if_valueissame(assembler), if_valueisnotsame(assembler);
+              assembler->Branch(assembler->Word32Equal(lhs_value, rhs_value),
+                                &if_valueissame, &if_valueisnotsame);
+
+              assembler->Bind(&if_valueissame);
+              {
+                // Advance to next character.
+                var_offset.Bind(
+                    assembler->IntPtrAdd(offset, assembler->IntPtrConstant(1)));
+              }
+              assembler->Goto(&loop);
+
+              assembler->Bind(&if_valueisnotsame);
+              assembler->Goto(&if_notequal);
+            }
+
+            assembler->Bind(&if_done);
+            assembler->Goto(&if_equal);
+          }
+        }
+
+        assembler->Bind(&if_notbothonebyteseqstrings);
+        {
+          // TODO(bmeurer): Add fast case support for flattened cons strings;
+          // also add support for two byte string equality checks.
+          Runtime::FunctionId function_id = (mode == kDontNegateResult)
+                                                ? Runtime::kStringEqual
+                                                : Runtime::kStringNotEqual;
+          assembler->TailCallRuntime(function_id, context, lhs, rhs);
+        }
+      }
+    }
+
+    assembler->Bind(&if_lengthisnotequal);
+    {
+      // Mismatch in length of {lhs} and {rhs}, cannot be equal.
+      assembler->Goto(&if_notequal);
+    }
+  }
+
+  assembler->Bind(&if_equal);
+  assembler->Return(assembler->BooleanConstant(mode == kDontNegateResult));
+
+  assembler->Bind(&if_notequal);
+  assembler->Return(assembler->BooleanConstant(mode == kNegateResult));
+}
+
+enum RelationalComparisonMode {
+  kLessThan,
+  kLessThanOrEqual,
+  kGreaterThan,
+  kGreaterThanOrEqual
+};
+
+void GenerateStringRelationalComparison(CodeStubAssembler* assembler,
+                                        RelationalComparisonMode mode) {
+  typedef CodeStubAssembler::Label Label;
+  typedef compiler::Node Node;
+  typedef CodeStubAssembler::Variable Variable;
+
+  Node* lhs = assembler->Parameter(0);
+  Node* rhs = assembler->Parameter(1);
+  Node* context = assembler->Parameter(2);
+
+  Label if_less(assembler), if_equal(assembler), if_greater(assembler);
+
+  // Fast check to see if {lhs} and {rhs} refer to the same String object.
+  Label if_same(assembler), if_notsame(assembler);
+  assembler->Branch(assembler->WordEqual(lhs, rhs), &if_same, &if_notsame);
+
+  assembler->Bind(&if_same);
+  assembler->Goto(&if_equal);
+
+  assembler->Bind(&if_notsame);
+  {
+    // Load instance types of {lhs} and {rhs}.
+    Node* lhs_instance_type = assembler->LoadInstanceType(lhs);
+    Node* rhs_instance_type = assembler->LoadInstanceType(rhs);
+
+    // Combine the instance types into a single 16-bit value, so we can check
+    // both of them at once.
+    Node* both_instance_types = assembler->Word32Or(
+        lhs_instance_type,
+        assembler->Word32Shl(rhs_instance_type, assembler->Int32Constant(8)));
+
+    // Check that both {lhs} and {rhs} are flat one-byte strings.
+    int const kBothSeqOneByteStringMask =
+        kStringEncodingMask | kStringRepresentationMask |
+        ((kStringEncodingMask | kStringRepresentationMask) << 8);
+    int const kBothSeqOneByteStringTag =
+        kOneByteStringTag | kSeqStringTag |
+        ((kOneByteStringTag | kSeqStringTag) << 8);
+    Label if_bothonebyteseqstrings(assembler),
+        if_notbothonebyteseqstrings(assembler);
+    assembler->Branch(assembler->Word32Equal(
+                          assembler->Word32And(both_instance_types,
+                                               assembler->Int32Constant(
+                                                   kBothSeqOneByteStringMask)),
+                          assembler->Int32Constant(kBothSeqOneByteStringTag)),
+                      &if_bothonebyteseqstrings, &if_notbothonebyteseqstrings);
+
+    assembler->Bind(&if_bothonebyteseqstrings);
+    {
+      // Load the length of {lhs} and {rhs}.
+      Node* lhs_length = assembler->LoadStringLength(lhs);
+      Node* rhs_length = assembler->LoadStringLength(rhs);
+
+      // Determine the minimum length.
+      Node* length = assembler->SmiMin(lhs_length, rhs_length);
+
+      // Compute the effective offset of the first character.
+      Node* begin = assembler->IntPtrConstant(SeqOneByteString::kHeaderSize -
+                                              kHeapObjectTag);
+
+      // Compute the first offset after the string from the length.
+      Node* end = assembler->IntPtrAdd(begin, assembler->SmiUntag(length));
+
+      // Loop over the {lhs} and {rhs} strings to see if they are equal.
+      Variable var_offset(assembler, MachineType::PointerRepresentation());
+      Label loop(assembler, &var_offset);
+      var_offset.Bind(begin);
+      assembler->Goto(&loop);
+      assembler->Bind(&loop);
+      {
+        // Check if {offset} equals {end}.
+        Node* offset = var_offset.value();
+        Label if_done(assembler), if_notdone(assembler);
+        assembler->Branch(assembler->WordEqual(offset, end), &if_done,
+                          &if_notdone);
+
+        assembler->Bind(&if_notdone);
+        {
+          // Load the next characters from {lhs} and {rhs}.
+          Node* lhs_value = assembler->Load(MachineType::Uint8(), lhs, offset);
+          Node* rhs_value = assembler->Load(MachineType::Uint8(), rhs, offset);
+
+          // Check if the characters match.
+          Label if_valueissame(assembler), if_valueisnotsame(assembler);
+          assembler->Branch(assembler->Word32Equal(lhs_value, rhs_value),
+                            &if_valueissame, &if_valueisnotsame);
+
+          assembler->Bind(&if_valueissame);
+          {
+            // Advance to next character.
+            var_offset.Bind(
+                assembler->IntPtrAdd(offset, assembler->IntPtrConstant(1)));
+          }
+          assembler->Goto(&loop);
+
+          assembler->Bind(&if_valueisnotsame);
+          assembler->BranchIf(assembler->Uint32LessThan(lhs_value, rhs_value),
+                              &if_less, &if_greater);
+        }
+
+        assembler->Bind(&if_done);
+        {
+          // All characters up to the min length are equal, decide based on
+          // string length.
+          Label if_lengthisequal(assembler), if_lengthisnotequal(assembler);
+          assembler->Branch(assembler->SmiEqual(lhs_length, rhs_length),
+                            &if_lengthisequal, &if_lengthisnotequal);
+
+          assembler->Bind(&if_lengthisequal);
+          assembler->Goto(&if_equal);
+
+          assembler->Bind(&if_lengthisnotequal);
+          assembler->BranchIfSmiLessThan(lhs_length, rhs_length, &if_less,
+                                         &if_greater);
+        }
+      }
+    }
+
+    assembler->Bind(&if_notbothonebyteseqstrings);
+    {
+      // TODO(bmeurer): Add fast case support for flattened cons strings;
+      // also add support for two byte string relational comparisons.
+      switch (mode) {
+        case kLessThan:
+          assembler->TailCallRuntime(Runtime::kStringLessThan, context, lhs,
+                                     rhs);
+          break;
+        case kLessThanOrEqual:
+          assembler->TailCallRuntime(Runtime::kStringLessThanOrEqual, context,
+                                     lhs, rhs);
+          break;
+        case kGreaterThan:
+          assembler->TailCallRuntime(Runtime::kStringGreaterThan, context, lhs,
+                                     rhs);
+          break;
+        case kGreaterThanOrEqual:
+          assembler->TailCallRuntime(Runtime::kStringGreaterThanOrEqual,
+                                     context, lhs, rhs);
+          break;
+      }
+    }
+  }
+
+  assembler->Bind(&if_less);
+  switch (mode) {
+    case kLessThan:
+    case kLessThanOrEqual:
+      assembler->Return(assembler->BooleanConstant(true));
+      break;
+
+    case kGreaterThan:
+    case kGreaterThanOrEqual:
+      assembler->Return(assembler->BooleanConstant(false));
+      break;
+  }
+
+  assembler->Bind(&if_equal);
+  switch (mode) {
+    case kLessThan:
+    case kGreaterThan:
+      assembler->Return(assembler->BooleanConstant(false));
+      break;
+
+    case kLessThanOrEqual:
+    case kGreaterThanOrEqual:
+      assembler->Return(assembler->BooleanConstant(true));
+      break;
+  }
+
+  assembler->Bind(&if_greater);
+  switch (mode) {
+    case kLessThan:
+    case kLessThanOrEqual:
+      assembler->Return(assembler->BooleanConstant(false));
+      break;
+
+    case kGreaterThan:
+    case kGreaterThanOrEqual:
+      assembler->Return(assembler->BooleanConstant(true));
+      break;
+  }
+}
+
+}  // namespace
+
+// static
+void Builtins::Generate_StringEqual(CodeStubAssembler* assembler) {
+  GenerateStringEqual(assembler, kDontNegateResult);
+}
+
+// static
+void Builtins::Generate_StringNotEqual(CodeStubAssembler* assembler) {
+  GenerateStringEqual(assembler, kNegateResult);
+}
+
+// static
+void Builtins::Generate_StringLessThan(CodeStubAssembler* assembler) {
+  GenerateStringRelationalComparison(assembler, kLessThan);
+}
+
+// static
+void Builtins::Generate_StringLessThanOrEqual(CodeStubAssembler* assembler) {
+  GenerateStringRelationalComparison(assembler, kLessThanOrEqual);
+}
+
+// static
+void Builtins::Generate_StringGreaterThan(CodeStubAssembler* assembler) {
+  GenerateStringRelationalComparison(assembler, kGreaterThan);
+}
+
+// static
+void Builtins::Generate_StringGreaterThanOrEqual(CodeStubAssembler* assembler) {
+  GenerateStringRelationalComparison(assembler, kGreaterThanOrEqual);
+}
+
 // -----------------------------------------------------------------------------
 // ES6 section 21.1 String Objects
 
@@ -294,7 +696,6 @@
 void Builtins::Generate_StringPrototypeCharAt(CodeStubAssembler* assembler) {
   typedef CodeStubAssembler::Label Label;
   typedef compiler::Node Node;
-  typedef CodeStubAssembler::Variable Variable;
 
   Node* receiver = assembler->Parameter(0);
   Node* position = assembler->Parameter(1);
@@ -306,73 +707,24 @@
 
   // Convert the {position} to a Smi and check that it's in bounds of the
   // {receiver}.
-  // TODO(bmeurer): Find an abstraction for this!
   {
-    // Check if the {position} is already a Smi.
-    Variable var_position(assembler, MachineRepresentation::kTagged);
-    var_position.Bind(position);
-    Label if_positionissmi(assembler),
-        if_positionisnotsmi(assembler, Label::kDeferred);
-    assembler->Branch(assembler->WordIsSmi(position), &if_positionissmi,
-                      &if_positionisnotsmi);
-    assembler->Bind(&if_positionisnotsmi);
-    {
-      // Convert the {position} to an Integer via the ToIntegerStub.
-      Callable callable = CodeFactory::ToInteger(assembler->isolate());
-      Node* index = assembler->CallStub(callable, context, position);
-
-      // Check if the resulting {index} is now a Smi.
-      Label if_indexissmi(assembler, Label::kDeferred),
-          if_indexisnotsmi(assembler, Label::kDeferred);
-      assembler->Branch(assembler->WordIsSmi(index), &if_indexissmi,
-                        &if_indexisnotsmi);
-
-      assembler->Bind(&if_indexissmi);
-      {
-        var_position.Bind(index);
-        assembler->Goto(&if_positionissmi);
-      }
-
-      assembler->Bind(&if_indexisnotsmi);
-      {
-        // The ToIntegerStub canonicalizes everything in Smi range to Smi
-        // representation, so any HeapNumber returned is not in Smi range.
-        // The only exception here is -0.0, which we treat as 0.
-        Node* index_value = assembler->LoadHeapNumberValue(index);
-        Label if_indexiszero(assembler, Label::kDeferred),
-            if_indexisnotzero(assembler, Label::kDeferred);
-        assembler->Branch(assembler->Float64Equal(
-                              index_value, assembler->Float64Constant(0.0)),
-                          &if_indexiszero, &if_indexisnotzero);
-
-        assembler->Bind(&if_indexiszero);
-        {
-          var_position.Bind(assembler->SmiConstant(Smi::FromInt(0)));
-          assembler->Goto(&if_positionissmi);
-        }
-
-        assembler->Bind(&if_indexisnotzero);
-        {
-          // The {index} is some other integral Number, that is definitely
-          // neither -0.0 nor in Smi range.
-          assembler->Return(assembler->EmptyStringConstant());
-        }
-      }
-    }
-    assembler->Bind(&if_positionissmi);
-    position = var_position.value();
+    Label return_emptystring(assembler, Label::kDeferred);
+    position = assembler->ToInteger(context, position,
+                                    CodeStubAssembler::kTruncateMinusZero);
+    assembler->GotoUnless(assembler->WordIsSmi(position), &return_emptystring);
 
     // Determine the actual length of the {receiver} String.
     Node* receiver_length =
         assembler->LoadObjectField(receiver, String::kLengthOffset);
 
     // Return "" if the Smi {position} is outside the bounds of the {receiver}.
-    Label if_positioninbounds(assembler),
-        if_positionnotinbounds(assembler, Label::kDeferred);
+    Label if_positioninbounds(assembler);
     assembler->Branch(assembler->SmiAboveOrEqual(position, receiver_length),
-                      &if_positionnotinbounds, &if_positioninbounds);
-    assembler->Bind(&if_positionnotinbounds);
+                      &return_emptystring, &if_positioninbounds);
+
+    assembler->Bind(&return_emptystring);
     assembler->Return(assembler->EmptyStringConstant());
+
     assembler->Bind(&if_positioninbounds);
   }
 
@@ -389,7 +741,6 @@
     CodeStubAssembler* assembler) {
   typedef CodeStubAssembler::Label Label;
   typedef compiler::Node Node;
-  typedef CodeStubAssembler::Variable Variable;
 
   Node* receiver = assembler->Parameter(0);
   Node* position = assembler->Parameter(1);
@@ -401,73 +752,24 @@
 
   // Convert the {position} to a Smi and check that it's in bounds of the
   // {receiver}.
-  // TODO(bmeurer): Find an abstraction for this!
   {
-    // Check if the {position} is already a Smi.
-    Variable var_position(assembler, MachineRepresentation::kTagged);
-    var_position.Bind(position);
-    Label if_positionissmi(assembler),
-        if_positionisnotsmi(assembler, Label::kDeferred);
-    assembler->Branch(assembler->WordIsSmi(position), &if_positionissmi,
-                      &if_positionisnotsmi);
-    assembler->Bind(&if_positionisnotsmi);
-    {
-      // Convert the {position} to an Integer via the ToIntegerStub.
-      Callable callable = CodeFactory::ToInteger(assembler->isolate());
-      Node* index = assembler->CallStub(callable, context, position);
-
-      // Check if the resulting {index} is now a Smi.
-      Label if_indexissmi(assembler, Label::kDeferred),
-          if_indexisnotsmi(assembler, Label::kDeferred);
-      assembler->Branch(assembler->WordIsSmi(index), &if_indexissmi,
-                        &if_indexisnotsmi);
-
-      assembler->Bind(&if_indexissmi);
-      {
-        var_position.Bind(index);
-        assembler->Goto(&if_positionissmi);
-      }
-
-      assembler->Bind(&if_indexisnotsmi);
-      {
-        // The ToIntegerStub canonicalizes everything in Smi range to Smi
-        // representation, so any HeapNumber returned is not in Smi range.
-        // The only exception here is -0.0, which we treat as 0.
-        Node* index_value = assembler->LoadHeapNumberValue(index);
-        Label if_indexiszero(assembler, Label::kDeferred),
-            if_indexisnotzero(assembler, Label::kDeferred);
-        assembler->Branch(assembler->Float64Equal(
-                              index_value, assembler->Float64Constant(0.0)),
-                          &if_indexiszero, &if_indexisnotzero);
-
-        assembler->Bind(&if_indexiszero);
-        {
-          var_position.Bind(assembler->SmiConstant(Smi::FromInt(0)));
-          assembler->Goto(&if_positionissmi);
-        }
-
-        assembler->Bind(&if_indexisnotzero);
-        {
-          // The {index} is some other integral Number, that is definitely
-          // neither -0.0 nor in Smi range.
-          assembler->Return(assembler->NaNConstant());
-        }
-      }
-    }
-    assembler->Bind(&if_positionissmi);
-    position = var_position.value();
+    Label return_nan(assembler, Label::kDeferred);
+    position = assembler->ToInteger(context, position,
+                                    CodeStubAssembler::kTruncateMinusZero);
+    assembler->GotoUnless(assembler->WordIsSmi(position), &return_nan);
 
     // Determine the actual length of the {receiver} String.
     Node* receiver_length =
         assembler->LoadObjectField(receiver, String::kLengthOffset);
 
     // Return NaN if the Smi {position} is outside the bounds of the {receiver}.
-    Label if_positioninbounds(assembler),
-        if_positionnotinbounds(assembler, Label::kDeferred);
+    Label if_positioninbounds(assembler);
     assembler->Branch(assembler->SmiAboveOrEqual(position, receiver_length),
-                      &if_positionnotinbounds, &if_positioninbounds);
-    assembler->Bind(&if_positionnotinbounds);
+                      &return_nan, &if_positioninbounds);
+
+    assembler->Bind(&return_nan);
     assembler->Return(assembler->NaNConstant());
+
     assembler->Bind(&if_positioninbounds);
   }
 
@@ -477,6 +779,333 @@
   assembler->Return(result);
 }
 
+// ES6 section 21.1.3.9
+// String.prototype.lastIndexOf ( searchString [ , position ] )
+BUILTIN(StringPrototypeLastIndexOf) {
+  HandleScope handle_scope(isolate);
+  return String::LastIndexOf(isolate, args.receiver(),
+                             args.atOrUndefined(isolate, 1),
+                             args.atOrUndefined(isolate, 2));
+}
+
+// ES6 section 21.1.3.10 String.prototype.localeCompare ( that )
+//
+// This function is implementation specific.  For now, we do not
+// do anything locale specific.
+// If internationalization is enabled, then i18n.js will override this function
+// and provide the proper functionality, so this is just a fallback.
+BUILTIN(StringPrototypeLocaleCompare) {
+  HandleScope handle_scope(isolate);
+  DCHECK_EQ(2, args.length());
+
+  TO_THIS_STRING(str1, "String.prototype.localeCompare");
+  Handle<String> str2;
+  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+      isolate, str2, Object::ToString(isolate, args.at<Object>(1)));
+
+  if (str1.is_identical_to(str2)) return Smi::FromInt(0);  // Equal.
+  int str1_length = str1->length();
+  int str2_length = str2->length();
+
+  // Decide trivial cases without flattening.
+  if (str1_length == 0) {
+    if (str2_length == 0) return Smi::FromInt(0);  // Equal.
+    return Smi::FromInt(-str2_length);
+  } else {
+    if (str2_length == 0) return Smi::FromInt(str1_length);
+  }
+
+  int end = str1_length < str2_length ? str1_length : str2_length;
+
+  // No need to flatten if we are going to find the answer on the first
+  // character. At this point we know there is at least one character
+  // in each string, due to the trivial case handling above.
+  int d = str1->Get(0) - str2->Get(0);
+  if (d != 0) return Smi::FromInt(d);
+
+  str1 = String::Flatten(str1);
+  str2 = String::Flatten(str2);
+
+  DisallowHeapAllocation no_gc;
+  String::FlatContent flat1 = str1->GetFlatContent();
+  String::FlatContent flat2 = str2->GetFlatContent();
+
+  for (int i = 0; i < end; i++) {
+    if (flat1.Get(i) != flat2.Get(i)) {
+      return Smi::FromInt(flat1.Get(i) - flat2.Get(i));
+    }
+  }
+
+  return Smi::FromInt(str1_length - str2_length);
+}
+
+// ES6 section 21.1.3.12 String.prototype.normalize ( [form] )
+//
+// Simply checks the argument is valid and returns the string itself.
+// If internationalization is enabled, then i18n.js will override this function
+// and provide the proper functionality, so this is just a fallback.
+BUILTIN(StringPrototypeNormalize) {
+  HandleScope handle_scope(isolate);
+  TO_THIS_STRING(string, "String.prototype.normalize");
+
+  Handle<Object> form_input = args.atOrUndefined(isolate, 1);
+  if (form_input->IsUndefined(isolate)) return *string;
+
+  Handle<String> form;
+  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, form,
+                                     Object::ToString(isolate, form_input));
+
+  if (!(String::Equals(form,
+                       isolate->factory()->NewStringFromStaticChars("NFC")) ||
+        String::Equals(form,
+                       isolate->factory()->NewStringFromStaticChars("NFD")) ||
+        String::Equals(form,
+                       isolate->factory()->NewStringFromStaticChars("NFKC")) ||
+        String::Equals(form,
+                       isolate->factory()->NewStringFromStaticChars("NFKD")))) {
+    Handle<String> valid_forms =
+        isolate->factory()->NewStringFromStaticChars("NFC, NFD, NFKC, NFKD");
+    THROW_NEW_ERROR_RETURN_FAILURE(
+        isolate,
+        NewRangeError(MessageTemplate::kNormalizationForm, valid_forms));
+  }
+
+  return *string;
+}
+
+// ES6 section B.2.3.1 String.prototype.substr ( start, length )
+void Builtins::Generate_StringPrototypeSubstr(CodeStubAssembler* a) {
+  typedef CodeStubAssembler::Label Label;
+  typedef compiler::Node Node;
+  typedef CodeStubAssembler::Variable Variable;
+
+  Label out(a), handle_length(a);
+
+  Variable var_start(a, MachineRepresentation::kTagged);
+  Variable var_length(a, MachineRepresentation::kTagged);
+
+  Node* const receiver = a->Parameter(0);
+  Node* const start = a->Parameter(1);
+  Node* const length = a->Parameter(2);
+  Node* const context = a->Parameter(5);
+
+  Node* const zero = a->SmiConstant(Smi::FromInt(0));
+
+  // Check that {receiver} is coercible to Object and convert it to a String.
+  Node* const string =
+      a->ToThisString(context, receiver, "String.prototype.substr");
+
+  Node* const string_length = a->LoadStringLength(string);
+
+  // Conversions and bounds-checks for {start}.
+  {
+    Node* const start_int =
+        a->ToInteger(context, start, CodeStubAssembler::kTruncateMinusZero);
+
+    Label if_issmi(a), if_isheapnumber(a, Label::kDeferred);
+    a->Branch(a->WordIsSmi(start_int), &if_issmi, &if_isheapnumber);
+
+    a->Bind(&if_issmi);
+    {
+      Node* const length_plus_start = a->SmiAdd(string_length, start_int);
+      var_start.Bind(a->Select(a->SmiLessThan(start_int, zero),
+                               a->SmiMax(length_plus_start, zero), start_int));
+      a->Goto(&handle_length);
+    }
+
+    a->Bind(&if_isheapnumber);
+    {
+      // If {start} is a heap number, it is definitely out of bounds. If it is
+      // negative, {start} = max({string_length} + {start}, 0) = 0. If it is
+      // positive, set {start} to {string_length} which ultimately results in
+      // returning an empty string.
+      Node* const float_zero = a->Float64Constant(0.);
+      Node* const start_float = a->LoadHeapNumberValue(start_int);
+      var_start.Bind(a->Select(a->Float64LessThan(start_float, float_zero),
+                               zero, string_length));
+      a->Goto(&handle_length);
+    }
+  }
+
+  // Conversions and bounds-checks for {length}.
+  a->Bind(&handle_length);
+  {
+    Label if_issmi(a), if_isheapnumber(a, Label::kDeferred);
+
+    // Default to {string_length} if {length} is undefined.
+    {
+      Label if_isundefined(a, Label::kDeferred), if_isnotundefined(a);
+      a->Branch(a->WordEqual(length, a->UndefinedConstant()), &if_isundefined,
+                &if_isnotundefined);
+
+      a->Bind(&if_isundefined);
+      var_length.Bind(string_length);
+      a->Goto(&if_issmi);
+
+      a->Bind(&if_isnotundefined);
+      var_length.Bind(
+          a->ToInteger(context, length, CodeStubAssembler::kTruncateMinusZero));
+    }
+
+    a->Branch(a->WordIsSmi(var_length.value()), &if_issmi, &if_isheapnumber);
+
+    // Set {length} to min(max({length}, 0), {string_length} - {start})
+    a->Bind(&if_issmi);
+    {
+      Node* const positive_length = a->SmiMax(var_length.value(), zero);
+
+      Node* const minimal_length = a->SmiSub(string_length, var_start.value());
+      var_length.Bind(a->SmiMin(positive_length, minimal_length));
+
+      a->GotoUnless(a->SmiLessThanOrEqual(var_length.value(), zero), &out);
+      a->Return(a->EmptyStringConstant());
+    }
+
+    a->Bind(&if_isheapnumber);
+    {
+      // If {length} is a heap number, it is definitely out of bounds. There are
+      // two cases according to the spec: if it is negative, "" is returned; if
+      // it is positive, then length is set to {string_length} - {start}.
+
+      a->Assert(a->WordEqual(a->LoadMap(var_length.value()),
+                             a->HeapNumberMapConstant()));
+
+      Label if_isnegative(a), if_ispositive(a);
+      Node* const float_zero = a->Float64Constant(0.);
+      Node* const length_float = a->LoadHeapNumberValue(var_length.value());
+      a->Branch(a->Float64LessThan(length_float, float_zero), &if_isnegative,
+                &if_ispositive);
+
+      a->Bind(&if_isnegative);
+      a->Return(a->EmptyStringConstant());
+
+      a->Bind(&if_ispositive);
+      {
+        var_length.Bind(a->SmiSub(string_length, var_start.value()));
+        a->GotoUnless(a->SmiLessThanOrEqual(var_length.value(), zero), &out);
+        a->Return(a->EmptyStringConstant());
+      }
+    }
+  }
+
+  a->Bind(&out);
+  {
+    Node* const end = a->SmiAdd(var_start.value(), var_length.value());
+    Node* const result = a->SubString(context, string, var_start.value(), end);
+    a->Return(result);
+  }
+}
+
+namespace {
+
+compiler::Node* ToSmiBetweenZeroAnd(CodeStubAssembler* a,
+                                    compiler::Node* context,
+                                    compiler::Node* value,
+                                    compiler::Node* limit) {
+  typedef CodeStubAssembler::Label Label;
+  typedef compiler::Node Node;
+  typedef CodeStubAssembler::Variable Variable;
+
+  Label out(a);
+  Variable var_result(a, MachineRepresentation::kTagged);
+
+  Node* const value_int =
+      a->ToInteger(context, value, CodeStubAssembler::kTruncateMinusZero);
+
+  Label if_issmi(a), if_isnotsmi(a, Label::kDeferred);
+  a->Branch(a->WordIsSmi(value_int), &if_issmi, &if_isnotsmi);
+
+  a->Bind(&if_issmi);
+  {
+    Label if_isinbounds(a), if_isoutofbounds(a, Label::kDeferred);
+    a->Branch(a->SmiAbove(value_int, limit), &if_isoutofbounds, &if_isinbounds);
+
+    a->Bind(&if_isinbounds);
+    {
+      var_result.Bind(value_int);
+      a->Goto(&out);
+    }
+
+    a->Bind(&if_isoutofbounds);
+    {
+      Node* const zero = a->SmiConstant(Smi::FromInt(0));
+      var_result.Bind(a->Select(a->SmiLessThan(value_int, zero), zero, limit));
+      a->Goto(&out);
+    }
+  }
+
+  a->Bind(&if_isnotsmi);
+  {
+    // {value} is a heap number - in this case, it is definitely out of bounds.
+    a->Assert(a->WordEqual(a->LoadMap(value_int), a->HeapNumberMapConstant()));
+
+    Node* const float_zero = a->Float64Constant(0.);
+    Node* const smi_zero = a->SmiConstant(Smi::FromInt(0));
+    Node* const value_float = a->LoadHeapNumberValue(value_int);
+    var_result.Bind(a->Select(a->Float64LessThan(value_float, float_zero),
+                              smi_zero, limit));
+    a->Goto(&out);
+  }
+
+  a->Bind(&out);
+  return var_result.value();
+}
+
+}  // namespace
+
+// ES6 section 21.1.3.19 String.prototype.substring ( start, end )
+void Builtins::Generate_StringPrototypeSubstring(CodeStubAssembler* a) {
+  typedef CodeStubAssembler::Label Label;
+  typedef compiler::Node Node;
+  typedef CodeStubAssembler::Variable Variable;
+
+  Label out(a);
+
+  Variable var_start(a, MachineRepresentation::kTagged);
+  Variable var_end(a, MachineRepresentation::kTagged);
+
+  Node* const receiver = a->Parameter(0);
+  Node* const start = a->Parameter(1);
+  Node* const end = a->Parameter(2);
+  Node* const context = a->Parameter(5);
+
+  // Check that {receiver} is coercible to Object and convert it to a String.
+  Node* const string =
+      a->ToThisString(context, receiver, "String.prototype.substring");
+
+  Node* const length = a->LoadStringLength(string);
+
+  // Conversion and bounds-checks for {start}.
+  var_start.Bind(ToSmiBetweenZeroAnd(a, context, start, length));
+
+  // Conversion and bounds-checks for {end}.
+  {
+    var_end.Bind(length);
+    a->GotoIf(a->WordEqual(end, a->UndefinedConstant()), &out);
+
+    var_end.Bind(ToSmiBetweenZeroAnd(a, context, end, length));
+
+    Label if_endislessthanstart(a);
+    a->Branch(a->SmiLessThan(var_end.value(), var_start.value()),
+              &if_endislessthanstart, &out);
+
+    a->Bind(&if_endislessthanstart);
+    {
+      Node* const tmp = var_end.value();
+      var_end.Bind(var_start.value());
+      var_start.Bind(tmp);
+      a->Goto(&out);
+    }
+  }
+
+  a->Bind(&out);
+  {
+    Node* result =
+        a->SubString(context, string, var_start.value(), var_end.value());
+    a->Return(result);
+  }
+}
+
 // ES6 section 21.1.3.25 String.prototype.toString ()
 void Builtins::Generate_StringPrototypeToString(CodeStubAssembler* assembler) {
   typedef compiler::Node Node;
@@ -522,5 +1151,203 @@
   assembler->Return(result);
 }
 
+void Builtins::Generate_StringPrototypeIterator(CodeStubAssembler* assembler) {
+  typedef compiler::Node Node;
+
+  Node* receiver = assembler->Parameter(0);
+  Node* context = assembler->Parameter(3);
+
+  Node* string = assembler->ToThisString(context, receiver,
+                                         "String.prototype[Symbol.iterator]");
+
+  Node* native_context = assembler->LoadNativeContext(context);
+  Node* map = assembler->LoadFixedArrayElement(
+      native_context,
+      assembler->IntPtrConstant(Context::STRING_ITERATOR_MAP_INDEX), 0,
+      CodeStubAssembler::INTPTR_PARAMETERS);
+  Node* iterator = assembler->Allocate(JSStringIterator::kSize);
+  assembler->StoreMapNoWriteBarrier(iterator, map);
+  assembler->StoreObjectFieldRoot(iterator, JSValue::kPropertiesOffset,
+                                  Heap::kEmptyFixedArrayRootIndex);
+  assembler->StoreObjectFieldRoot(iterator, JSObject::kElementsOffset,
+                                  Heap::kEmptyFixedArrayRootIndex);
+  assembler->StoreObjectFieldNoWriteBarrier(
+      iterator, JSStringIterator::kStringOffset, string);
+  Node* index = assembler->SmiConstant(Smi::FromInt(0));
+  assembler->StoreObjectFieldNoWriteBarrier(
+      iterator, JSStringIterator::kNextIndexOffset, index);
+  assembler->Return(iterator);
+}
+
+namespace {
+
+// Return the |word32| codepoint at {index}. Supports SeqStrings and
+// ExternalStrings.
+compiler::Node* LoadSurrogatePairInternal(CodeStubAssembler* assembler,
+                                          compiler::Node* string,
+                                          compiler::Node* length,
+                                          compiler::Node* index,
+                                          UnicodeEncoding encoding) {
+  typedef CodeStubAssembler::Label Label;
+  typedef compiler::Node Node;
+  typedef CodeStubAssembler::Variable Variable;
+  Label handle_surrogate_pair(assembler), return_result(assembler);
+  Variable var_result(assembler, MachineRepresentation::kWord32);
+  Variable var_trail(assembler, MachineRepresentation::kWord16);
+  var_result.Bind(assembler->StringCharCodeAt(string, index));
+  var_trail.Bind(assembler->Int32Constant(0));
+
+  assembler->GotoIf(assembler->Word32NotEqual(
+                        assembler->Word32And(var_result.value(),
+                                             assembler->Int32Constant(0xFC00)),
+                        assembler->Int32Constant(0xD800)),
+                    &return_result);
+  Node* next_index =
+      assembler->SmiAdd(index, assembler->SmiConstant(Smi::FromInt(1)));
+
+  assembler->GotoUnless(assembler->SmiLessThan(next_index, length),
+                        &return_result);
+  var_trail.Bind(assembler->StringCharCodeAt(string, next_index));
+  assembler->Branch(assembler->Word32Equal(
+                        assembler->Word32And(var_trail.value(),
+                                             assembler->Int32Constant(0xFC00)),
+                        assembler->Int32Constant(0xDC00)),
+                    &handle_surrogate_pair, &return_result);
+
+  assembler->Bind(&handle_surrogate_pair);
+  {
+    Node* lead = var_result.value();
+    Node* trail = var_trail.value();
+#ifdef ENABLE_SLOW_DCHECKS
+    // Check that this path is only taken if a surrogate pair is found
+    assembler->Assert(assembler->Uint32GreaterThanOrEqual(
+        lead, assembler->Int32Constant(0xD800)));
+    assembler->Assert(
+        assembler->Uint32LessThan(lead, assembler->Int32Constant(0xDC00)));
+    assembler->Assert(assembler->Uint32GreaterThanOrEqual(
+        trail, assembler->Int32Constant(0xDC00)));
+    assembler->Assert(
+        assembler->Uint32LessThan(trail, assembler->Int32Constant(0xE000)));
+#endif
+
+    switch (encoding) {
+      case UnicodeEncoding::UTF16:
+        var_result.Bind(assembler->WordOr(
+// Need to swap the order for big-endian platforms
+#if V8_TARGET_BIG_ENDIAN
+            assembler->WordShl(lead, assembler->Int32Constant(16)), trail));
+#else
+            assembler->WordShl(trail, assembler->Int32Constant(16)), lead));
+#endif
+        break;
+
+      case UnicodeEncoding::UTF32: {
+        // Convert UTF16 surrogate pair into |word32| code point, encoded as
+        // UTF32.
+        Node* surrogate_offset =
+            assembler->Int32Constant(0x10000 - (0xD800 << 10) - 0xDC00);
+
+        // (lead << 10) + trail + SURROGATE_OFFSET
+        var_result.Bind(assembler->Int32Add(
+            assembler->WordShl(lead, assembler->Int32Constant(10)),
+            assembler->Int32Add(trail, surrogate_offset)));
+        break;
+      }
+    }
+    assembler->Goto(&return_result);
+  }
+
+  assembler->Bind(&return_result);
+  return var_result.value();
+}
+
+compiler::Node* LoadSurrogatePairAt(CodeStubAssembler* assembler,
+                                    compiler::Node* string,
+                                    compiler::Node* length,
+                                    compiler::Node* index) {
+  return LoadSurrogatePairInternal(assembler, string, length, index,
+                                   UnicodeEncoding::UTF16);
+}
+
+}  // namespace
+
+void Builtins::Generate_StringIteratorPrototypeNext(
+    CodeStubAssembler* assembler) {
+  typedef CodeStubAssembler::Label Label;
+  typedef compiler::Node Node;
+  typedef CodeStubAssembler::Variable Variable;
+
+  Variable var_value(assembler, MachineRepresentation::kTagged);
+  Variable var_done(assembler, MachineRepresentation::kTagged);
+
+  var_value.Bind(assembler->UndefinedConstant());
+  var_done.Bind(assembler->BooleanConstant(true));
+
+  Label throw_bad_receiver(assembler), next_codepoint(assembler),
+      return_result(assembler);
+
+  Node* iterator = assembler->Parameter(0);
+  Node* context = assembler->Parameter(3);
+
+  assembler->GotoIf(assembler->WordIsSmi(iterator), &throw_bad_receiver);
+  assembler->GotoUnless(
+      assembler->WordEqual(assembler->LoadInstanceType(iterator),
+                           assembler->Int32Constant(JS_STRING_ITERATOR_TYPE)),
+      &throw_bad_receiver);
+
+  Node* string =
+      assembler->LoadObjectField(iterator, JSStringIterator::kStringOffset);
+  Node* position =
+      assembler->LoadObjectField(iterator, JSStringIterator::kNextIndexOffset);
+  Node* length = assembler->LoadObjectField(string, String::kLengthOffset);
+
+  assembler->Branch(assembler->SmiLessThan(position, length), &next_codepoint,
+                    &return_result);
+
+  assembler->Bind(&next_codepoint);
+  {
+    Node* ch = LoadSurrogatePairAt(assembler, string, length, position);
+    Node* value = assembler->StringFromCodePoint(ch, UnicodeEncoding::UTF16);
+    var_value.Bind(value);
+    Node* length = assembler->LoadObjectField(value, String::kLengthOffset);
+    assembler->StoreObjectFieldNoWriteBarrier(
+        iterator, JSStringIterator::kNextIndexOffset,
+        assembler->SmiAdd(position, length));
+    var_done.Bind(assembler->BooleanConstant(false));
+    assembler->Goto(&return_result);
+  }
+
+  assembler->Bind(&return_result);
+  {
+    Node* native_context = assembler->LoadNativeContext(context);
+    Node* map = assembler->LoadFixedArrayElement(
+        native_context,
+        assembler->IntPtrConstant(Context::ITERATOR_RESULT_MAP_INDEX), 0,
+        CodeStubAssembler::INTPTR_PARAMETERS);
+    Node* result = assembler->Allocate(JSIteratorResult::kSize);
+    assembler->StoreMapNoWriteBarrier(result, map);
+    assembler->StoreObjectFieldRoot(result, JSIteratorResult::kPropertiesOffset,
+                                    Heap::kEmptyFixedArrayRootIndex);
+    assembler->StoreObjectFieldRoot(result, JSIteratorResult::kElementsOffset,
+                                    Heap::kEmptyFixedArrayRootIndex);
+    assembler->StoreObjectFieldNoWriteBarrier(
+        result, JSIteratorResult::kValueOffset, var_value.value());
+    assembler->StoreObjectFieldNoWriteBarrier(
+        result, JSIteratorResult::kDoneOffset, var_done.value());
+    assembler->Return(result);
+  }
+
+  assembler->Bind(&throw_bad_receiver);
+  {
+    // The {receiver} is not a valid JSStringIterator.
+    Node* result = assembler->CallRuntime(
+        Runtime::kThrowIncompatibleMethodReceiver, context,
+        assembler->HeapConstant(assembler->factory()->NewStringFromAsciiChecked(
+            "String Iterator.prototype.next", TENURED)),
+        iterator);
+    assembler->Return(result);  // Never reached.
+  }
+}
+
 }  // namespace internal
 }  // namespace v8
diff --git a/src/builtins/builtins-utils.h b/src/builtins/builtins-utils.h
index 90b58c7..ca1786c 100644
--- a/src/builtins/builtins-utils.h
+++ b/src/builtins/builtins-utils.h
@@ -76,32 +76,31 @@
 // through the BuiltinArguments object args.
 // TODO(cbruni): add global flag to check whether any tracing events have been
 // enabled.
-// TODO(cbruni): Convert the IsContext CHECK back to a DCHECK.
-#define BUILTIN(name)                                                        \
-  MUST_USE_RESULT static Object* Builtin_Impl_##name(BuiltinArguments args,  \
-                                                     Isolate* isolate);      \
-                                                                             \
-  V8_NOINLINE static Object* Builtin_Impl_Stats_##name(                      \
-      int args_length, Object** args_object, Isolate* isolate) {             \
-    BuiltinArguments args(args_length, args_object);                         \
-    RuntimeCallTimerScope timer(isolate, &RuntimeCallStats::Builtin_##name); \
-    TRACE_EVENT_RUNTIME_CALL_STATS_TRACING_SCOPED(                           \
-        isolate, &tracing::TraceEventStatsTable::Builtin_##name);            \
-    return Builtin_Impl_##name(args, isolate);                               \
-  }                                                                          \
-                                                                             \
-  MUST_USE_RESULT Object* Builtin_##name(                                    \
-      int args_length, Object** args_object, Isolate* isolate) {             \
-    CHECK(isolate->context() == nullptr || isolate->context()->IsContext()); \
-    if (V8_UNLIKELY(TRACE_EVENT_RUNTIME_CALL_STATS_TRACING_ENABLED() ||      \
-                    FLAG_runtime_call_stats)) {                              \
-      return Builtin_Impl_Stats_##name(args_length, args_object, isolate);   \
-    }                                                                        \
-    BuiltinArguments args(args_length, args_object);                         \
-    return Builtin_Impl_##name(args, isolate);                               \
-  }                                                                          \
-                                                                             \
-  MUST_USE_RESULT static Object* Builtin_Impl_##name(BuiltinArguments args,  \
+#define BUILTIN(name)                                                         \
+  MUST_USE_RESULT static Object* Builtin_Impl_##name(BuiltinArguments args,   \
+                                                     Isolate* isolate);       \
+                                                                              \
+  V8_NOINLINE static Object* Builtin_Impl_Stats_##name(                       \
+      int args_length, Object** args_object, Isolate* isolate) {              \
+    BuiltinArguments args(args_length, args_object);                          \
+    RuntimeCallTimerScope timer(isolate, &RuntimeCallStats::Builtin_##name);  \
+    TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.runtime"),                     \
+                 "V8.Builtin_" #name);                                        \
+    return Builtin_Impl_##name(args, isolate);                                \
+  }                                                                           \
+                                                                              \
+  MUST_USE_RESULT Object* Builtin_##name(                                     \
+      int args_length, Object** args_object, Isolate* isolate) {              \
+    DCHECK(isolate->context() == nullptr || isolate->context()->IsContext()); \
+    if (V8_UNLIKELY(TRACE_EVENT_RUNTIME_CALL_STATS_TRACING_ENABLED() ||       \
+                    FLAG_runtime_call_stats)) {                               \
+      return Builtin_Impl_Stats_##name(args_length, args_object, isolate);    \
+    }                                                                         \
+    BuiltinArguments args(args_length, args_object);                          \
+    return Builtin_Impl_##name(args, isolate);                                \
+  }                                                                           \
+                                                                              \
+  MUST_USE_RESULT static Object* Builtin_Impl_##name(BuiltinArguments args,   \
                                                      Isolate* isolate)
 
 // ----------------------------------------------------------------------------
diff --git a/src/builtins/builtins.h b/src/builtins/builtins.h
index f8ce2e6..3579f3c 100644
--- a/src/builtins/builtins.h
+++ b/src/builtins/builtins.h
@@ -49,27 +49,6 @@
 //      Args: name
 #define BUILTIN_LIST(CPP, API, TFJ, TFS, ASM, ASH, DBG)                       \
   ASM(Abort)                                                                  \
-  /* Handlers */                                                              \
-  ASH(KeyedLoadIC_Megamorphic, KEYED_LOAD_IC, kNoExtraICState)                \
-  ASM(KeyedLoadIC_Miss)                                                       \
-  ASH(KeyedLoadIC_Slow, HANDLER, Code::KEYED_LOAD_IC)                         \
-  ASH(KeyedStoreIC_Megamorphic, KEYED_STORE_IC, kNoExtraICState)              \
-  ASH(KeyedStoreIC_Megamorphic_Strict, KEYED_STORE_IC,                        \
-      StoreICState::kStrictModeState)                                         \
-  ASM(KeyedStoreIC_Miss)                                                      \
-  ASH(KeyedStoreIC_Slow, HANDLER, Code::KEYED_STORE_IC)                       \
-  TFS(LoadGlobalIC_Miss, BUILTIN, kNoExtraICState, LoadGlobalWithVector)      \
-  TFS(LoadGlobalIC_Slow, HANDLER, Code::LOAD_GLOBAL_IC, LoadGlobalWithVector) \
-  ASH(LoadIC_Getter_ForDeopt, LOAD_IC, kNoExtraICState)                       \
-  TFS(LoadIC_Miss, BUILTIN, kNoExtraICState, LoadWithVector)                  \
-  ASH(LoadIC_Normal, HANDLER, Code::LOAD_IC)                                  \
-  TFS(LoadIC_Slow, HANDLER, Code::LOAD_IC, LoadWithVector)                    \
-  TFS(StoreIC_Miss, BUILTIN, kNoExtraICState, StoreWithVector)                \
-  ASH(StoreIC_Normal, HANDLER, Code::STORE_IC)                                \
-  ASH(StoreIC_Setter_ForDeopt, STORE_IC, StoreICState::kStrictModeState)      \
-  TFS(StoreIC_SlowSloppy, HANDLER, Code::STORE_IC, StoreWithVector)           \
-  TFS(StoreIC_SlowStrict, HANDLER, Code::STORE_IC, StoreWithVector)           \
-                                                                              \
   /* Code aging */                                                            \
   CODE_AGE_LIST_WITH_ARG(DECLARE_CODE_AGE_BUILTIN, ASM)                       \
                                                                               \
@@ -118,14 +97,24 @@
   ASM(InterruptCheck)                                                         \
   ASM(StackCheck)                                                             \
                                                                               \
+  /* String helpers */                                                        \
+  TFS(StringEqual, BUILTIN, kNoExtraICState, Compare)                         \
+  TFS(StringNotEqual, BUILTIN, kNoExtraICState, Compare)                      \
+  TFS(StringLessThan, BUILTIN, kNoExtraICState, Compare)                      \
+  TFS(StringLessThanOrEqual, BUILTIN, kNoExtraICState, Compare)               \
+  TFS(StringGreaterThan, BUILTIN, kNoExtraICState, Compare)                   \
+  TFS(StringGreaterThanOrEqual, BUILTIN, kNoExtraICState, Compare)            \
+                                                                              \
   /* Interpreter */                                                           \
   ASM(InterpreterEntryTrampoline)                                             \
   ASM(InterpreterMarkBaselineOnReturn)                                        \
   ASM(InterpreterPushArgsAndCall)                                             \
   ASM(InterpreterPushArgsAndCallFunction)                                     \
-  ASM(InterpreterPushArgsAndConstruct)                                        \
   ASM(InterpreterPushArgsAndTailCall)                                         \
   ASM(InterpreterPushArgsAndTailCallFunction)                                 \
+  ASM(InterpreterPushArgsAndConstruct)                                        \
+  ASM(InterpreterPushArgsAndConstructFunction)                                \
+  ASM(InterpreterPushArgsAndConstructArray)                                   \
   ASM(InterpreterEnterBytecodeDispatch)                                       \
   ASM(InterpreterOnStackReplacement)                                          \
                                                                               \
@@ -162,6 +151,7 @@
   TFS(GrowFastDoubleElements, BUILTIN, kNoExtraICState, GrowArrayElements)    \
   TFS(GrowFastSmiOrObjectElements, BUILTIN, kNoExtraICState,                  \
       GrowArrayElements)                                                      \
+  TFS(OrdinaryHasInstance, BUILTIN, kNoExtraICState, Compare)                 \
                                                                               \
   /* Debugger */                                                              \
   DBG(FrameDropper_LiveEdit)                                                  \
@@ -179,8 +169,33 @@
   TFS(NonPrimitiveToPrimitive_String, BUILTIN, kNoExtraICState,               \
       TypeConversion)                                                         \
   TFS(StringToNumber, BUILTIN, kNoExtraICState, TypeConversion)               \
+  TFS(ToName, BUILTIN, kNoExtraICState, TypeConversion)                       \
   TFS(NonNumberToNumber, BUILTIN, kNoExtraICState, TypeConversion)            \
-  ASM(ToNumber)                                                               \
+  TFS(ToNumber, BUILTIN, kNoExtraICState, TypeConversion)                     \
+  TFS(ToString, BUILTIN, kNoExtraICState, TypeConversion)                     \
+                                                                              \
+  /* Handlers */                                                              \
+  ASH(KeyedLoadIC_Megamorphic, KEYED_LOAD_IC, kNoExtraICState)                \
+  TFS(KeyedLoadIC_Megamorphic_TF, KEYED_LOAD_IC, kNoExtraICState,             \
+      LoadWithVector)                                                         \
+  ASM(KeyedLoadIC_Miss)                                                       \
+  ASH(KeyedLoadIC_Slow, HANDLER, Code::KEYED_LOAD_IC)                         \
+  ASH(KeyedStoreIC_Megamorphic, KEYED_STORE_IC, kNoExtraICState)              \
+  ASH(KeyedStoreIC_Megamorphic_Strict, KEYED_STORE_IC,                        \
+      StoreICState::kStrictModeState)                                         \
+  ASM(KeyedStoreIC_Miss)                                                      \
+  ASH(KeyedStoreIC_Slow, HANDLER, Code::KEYED_STORE_IC)                       \
+  TFS(LoadGlobalIC_Miss, BUILTIN, kNoExtraICState, LoadGlobalWithVector)      \
+  TFS(LoadGlobalIC_Slow, HANDLER, Code::LOAD_GLOBAL_IC, LoadGlobalWithVector) \
+  ASH(LoadIC_Getter_ForDeopt, LOAD_IC, kNoExtraICState)                       \
+  TFS(LoadIC_Miss, BUILTIN, kNoExtraICState, LoadWithVector)                  \
+  ASH(LoadIC_Normal, HANDLER, Code::LOAD_IC)                                  \
+  TFS(LoadIC_Slow, HANDLER, Code::LOAD_IC, LoadWithVector)                    \
+  TFS(StoreIC_Miss, BUILTIN, kNoExtraICState, StoreWithVector)                \
+  ASH(StoreIC_Normal, HANDLER, Code::STORE_IC)                                \
+  ASH(StoreIC_Setter_ForDeopt, STORE_IC, StoreICState::kStrictModeState)      \
+  TFS(StoreIC_SlowSloppy, HANDLER, Code::STORE_IC, StoreWithVector)           \
+  TFS(StoreIC_SlowStrict, HANDLER, Code::STORE_IC, StoreWithVector)           \
                                                                               \
   /* Built-in functions for Javascript */                                     \
   /* Special internal builtins */                                             \
@@ -244,46 +259,62 @@
   CPP(DataViewPrototypeGetBuffer)                                             \
   CPP(DataViewPrototypeGetByteLength)                                         \
   CPP(DataViewPrototypeGetByteOffset)                                         \
+  CPP(DataViewPrototypeGetInt8)                                               \
+  CPP(DataViewPrototypeSetInt8)                                               \
+  CPP(DataViewPrototypeGetUint8)                                              \
+  CPP(DataViewPrototypeSetUint8)                                              \
+  CPP(DataViewPrototypeGetInt16)                                              \
+  CPP(DataViewPrototypeSetInt16)                                              \
+  CPP(DataViewPrototypeGetUint16)                                             \
+  CPP(DataViewPrototypeSetUint16)                                             \
+  CPP(DataViewPrototypeGetInt32)                                              \
+  CPP(DataViewPrototypeSetInt32)                                              \
+  CPP(DataViewPrototypeGetUint32)                                             \
+  CPP(DataViewPrototypeSetUint32)                                             \
+  CPP(DataViewPrototypeGetFloat32)                                            \
+  CPP(DataViewPrototypeSetFloat32)                                            \
+  CPP(DataViewPrototypeGetFloat64)                                            \
+  CPP(DataViewPrototypeSetFloat64)                                            \
                                                                               \
   /* Date */                                                                  \
   CPP(DateConstructor)                                                        \
   CPP(DateConstructor_ConstructStub)                                          \
   /* ES6 section 20.3.4.2 Date.prototype.getDate ( ) */                       \
-  ASM(DatePrototypeGetDate)                                                   \
+  TFJ(DatePrototypeGetDate, 1)                                                \
   /* ES6 section 20.3.4.3 Date.prototype.getDay ( ) */                        \
-  ASM(DatePrototypeGetDay)                                                    \
+  TFJ(DatePrototypeGetDay, 1)                                                 \
   /* ES6 section 20.3.4.4 Date.prototype.getFullYear ( ) */                   \
-  ASM(DatePrototypeGetFullYear)                                               \
+  TFJ(DatePrototypeGetFullYear, 1)                                            \
   /* ES6 section 20.3.4.5 Date.prototype.getHours ( ) */                      \
-  ASM(DatePrototypeGetHours)                                                  \
+  TFJ(DatePrototypeGetHours, 1)                                               \
   /* ES6 section 20.3.4.6 Date.prototype.getMilliseconds ( ) */               \
-  ASM(DatePrototypeGetMilliseconds)                                           \
+  TFJ(DatePrototypeGetMilliseconds, 1)                                        \
   /* ES6 section 20.3.4.7 Date.prototype.getMinutes ( ) */                    \
-  ASM(DatePrototypeGetMinutes)                                                \
+  TFJ(DatePrototypeGetMinutes, 1)                                             \
   /* ES6 section 20.3.4.8 Date.prototype.getMonth */                          \
-  ASM(DatePrototypeGetMonth)                                                  \
+  TFJ(DatePrototypeGetMonth, 1)                                               \
   /* ES6 section 20.3.4.9 Date.prototype.getSeconds ( ) */                    \
-  ASM(DatePrototypeGetSeconds)                                                \
+  TFJ(DatePrototypeGetSeconds, 1)                                             \
   /* ES6 section 20.3.4.10 Date.prototype.getTime ( ) */                      \
-  ASM(DatePrototypeGetTime)                                                   \
+  TFJ(DatePrototypeGetTime, 1)                                                \
   /* ES6 section 20.3.4.11 Date.prototype.getTimezoneOffset ( ) */            \
-  ASM(DatePrototypeGetTimezoneOffset)                                         \
+  TFJ(DatePrototypeGetTimezoneOffset, 1)                                      \
   /* ES6 section 20.3.4.12 Date.prototype.getUTCDate ( ) */                   \
-  ASM(DatePrototypeGetUTCDate)                                                \
+  TFJ(DatePrototypeGetUTCDate, 1)                                             \
   /* ES6 section 20.3.4.13 Date.prototype.getUTCDay ( ) */                    \
-  ASM(DatePrototypeGetUTCDay)                                                 \
+  TFJ(DatePrototypeGetUTCDay, 1)                                              \
   /* ES6 section 20.3.4.14 Date.prototype.getUTCFullYear ( ) */               \
-  ASM(DatePrototypeGetUTCFullYear)                                            \
+  TFJ(DatePrototypeGetUTCFullYear, 1)                                         \
   /* ES6 section 20.3.4.15 Date.prototype.getUTCHours ( ) */                  \
-  ASM(DatePrototypeGetUTCHours)                                               \
+  TFJ(DatePrototypeGetUTCHours, 1)                                            \
   /* ES6 section 20.3.4.16 Date.prototype.getUTCMilliseconds ( ) */           \
-  ASM(DatePrototypeGetUTCMilliseconds)                                        \
+  TFJ(DatePrototypeGetUTCMilliseconds, 1)                                     \
   /* ES6 section 20.3.4.17 Date.prototype.getUTCMinutes ( ) */                \
-  ASM(DatePrototypeGetUTCMinutes)                                             \
+  TFJ(DatePrototypeGetUTCMinutes, 1)                                          \
   /* ES6 section 20.3.4.18 Date.prototype.getUTCMonth ( ) */                  \
-  ASM(DatePrototypeGetUTCMonth)                                               \
+  TFJ(DatePrototypeGetUTCMonth, 1)                                            \
   /* ES6 section 20.3.4.19 Date.prototype.getUTCSeconds ( ) */                \
-  ASM(DatePrototypeGetUTCSeconds)                                             \
+  TFJ(DatePrototypeGetUTCSeconds, 1)                                          \
   CPP(DatePrototypeGetYear)                                                   \
   CPP(DatePrototypeSetYear)                                                   \
   CPP(DateNow)                                                                \
@@ -342,16 +373,21 @@
   TFJ(GeneratorPrototypeThrow, 2)                                             \
   CPP(AsyncFunctionConstructor)                                               \
                                                                               \
-  /* Encode and decode */                                                     \
+  /* Global object */                                                         \
   CPP(GlobalDecodeURI)                                                        \
   CPP(GlobalDecodeURIComponent)                                               \
   CPP(GlobalEncodeURI)                                                        \
   CPP(GlobalEncodeURIComponent)                                               \
   CPP(GlobalEscape)                                                           \
   CPP(GlobalUnescape)                                                         \
-                                                                              \
-  /* Eval */                                                                  \
   CPP(GlobalEval)                                                             \
+  /* ES6 section 18.2.2 isFinite ( number ) */                                \
+  TFJ(GlobalIsFinite, 2)                                                      \
+  /* ES6 section 18.2.3 isNaN ( number ) */                                   \
+  TFJ(GlobalIsNaN, 2)                                                         \
+                                                                              \
+  /* ES6 #sec-%iteratorprototype%-@@iterator */                               \
+  TFJ(IteratorPrototypeIterator, 1)                                           \
                                                                               \
   /* JSON */                                                                  \
   CPP(JsonParse)                                                              \
@@ -432,6 +468,14 @@
   ASM(NumberConstructor)                                                      \
   /* ES6 section 20.1.1.1 Number ( [ value ] ) for the [[Construct]] case */  \
   ASM(NumberConstructor_ConstructStub)                                        \
+  /* ES6 section 20.1.2.2 Number.isFinite ( number ) */                       \
+  TFJ(NumberIsFinite, 2)                                                      \
+  /* ES6 section 20.1.2.3 Number.isInteger ( number ) */                      \
+  TFJ(NumberIsInteger, 2)                                                     \
+  /* ES6 section 20.1.2.4 Number.isNaN ( number ) */                          \
+  TFJ(NumberIsNaN, 2)                                                         \
+  /* ES6 section 20.1.2.5 Number.isSafeInteger ( number ) */                  \
+  TFJ(NumberIsSafeInteger, 2)                                                 \
   CPP(NumberPrototypeToExponential)                                           \
   CPP(NumberPrototypeToFixed)                                                 \
   CPP(NumberPrototypeToLocaleString)                                          \
@@ -489,6 +533,10 @@
   CPP(ReflectSet)                                                             \
   CPP(ReflectSetPrototypeOf)                                                  \
                                                                               \
+  /* RegExp */                                                                \
+  CPP(RegExpConstructor)                                                      \
+  TFJ(RegExpPrototypeExec, 2)                                                 \
+                                                                              \
   /* SharedArrayBuffer */                                                     \
   CPP(SharedArrayBufferPrototypeGetByteLength)                                \
   TFJ(AtomicsLoad, 3)                                                         \
@@ -504,6 +552,17 @@
   TFJ(StringPrototypeCharAt, 2)                                               \
   /* ES6 section 21.1.3.2 String.prototype.charCodeAt ( pos ) */              \
   TFJ(StringPrototypeCharCodeAt, 2)                                           \
+  /* ES6 section 21.1.3.9 */                                                  \
+  /* String.prototype.lastIndexOf ( searchString [ , position ] ) */          \
+  CPP(StringPrototypeLastIndexOf)                                             \
+  /* ES6 section 21.1.3.10 String.prototype.localeCompare ( that ) */         \
+  CPP(StringPrototypeLocaleCompare)                                           \
+  /* ES6 section 21.1.3.12 String.prototype.normalize ( [form] ) */           \
+  CPP(StringPrototypeNormalize)                                               \
+  /* ES6 section B.2.3.1 String.prototype.substr ( start, length ) */         \
+  TFJ(StringPrototypeSubstr, 3)                                               \
+  /* ES6 section 21.1.3.19 String.prototype.substring ( start, end ) */       \
+  TFJ(StringPrototypeSubstring, 3)                                            \
   /* ES6 section 21.1.3.25 String.prototype.toString () */                    \
   TFJ(StringPrototypeToString, 1)                                             \
   CPP(StringPrototypeTrim)                                                    \
@@ -511,6 +570,11 @@
   CPP(StringPrototypeTrimRight)                                               \
   /* ES6 section 21.1.3.28 String.prototype.valueOf () */                     \
   TFJ(StringPrototypeValueOf, 1)                                              \
+  /* ES6 #sec-string.prototype-@@iterator */                                  \
+  TFJ(StringPrototypeIterator, 1)                                             \
+                                                                              \
+  /* StringIterator */                                                        \
+  TFJ(StringIteratorPrototypeNext, 1)                                         \
                                                                               \
   /* Symbol */                                                                \
   CPP(SymbolConstructor)                                                      \
@@ -590,6 +654,7 @@
   Handle<Code> InterpreterPushArgsAndCall(
       TailCallMode tail_call_mode,
       CallableType function_type = CallableType::kAny);
+  Handle<Code> InterpreterPushArgsAndConstruct(CallableType function_type);
 
   Code* builtin(Name name) {
     // Code::cast cannot be used here since we access builtins
@@ -643,7 +708,10 @@
       MacroAssembler* masm, TailCallMode tail_call_mode,
       CallableType function_type);
 
-  static void Generate_DatePrototype_GetField(MacroAssembler* masm,
+  static void Generate_InterpreterPushArgsAndConstructImpl(
+      MacroAssembler* masm, CallableType function_type);
+
+  static void Generate_DatePrototype_GetField(CodeStubAssembler* masm,
                                               int field_index);
 
   enum class MathMaxMinKind { kMax, kMin };
diff --git a/src/builtins/ia32/builtins-ia32.cc b/src/builtins/ia32/builtins-ia32.cc
index f31ba6f..9dd621f 100644
--- a/src/builtins/ia32/builtins-ia32.cc
+++ b/src/builtins/ia32/builtins-ia32.cc
@@ -590,6 +590,13 @@
   __ cmp(ecx, FieldOperand(eax, SharedFunctionInfo::kCodeOffset));
   __ j(not_equal, &switch_to_different_code_kind);
 
+  // Increment invocation count for the function.
+  __ EmitLoadTypeFeedbackVector(ecx);
+  __ add(FieldOperand(ecx,
+                      TypeFeedbackVector::kInvocationCountIndex * kPointerSize +
+                          TypeFeedbackVector::kHeaderSize),
+         Immediate(Smi::FromInt(1)));
+
   // Check function data field is actually a BytecodeArray object.
   if (FLAG_debug_code) {
     __ AssertNotSmi(kInterpreterBytecodeArrayRegister);
@@ -703,20 +710,47 @@
   __ ret(0);
 }
 
+static void Generate_StackOverflowCheck(MacroAssembler* masm, Register num_args,
+                                        Register scratch1, Register scratch2,
+                                        Label* stack_overflow,
+                                        bool include_receiver = false) {
+  // Check the stack for overflow. We are not trying to catch
+  // interruptions (e.g. debug break and preemption) here, so the "real stack
+  // limit" is checked.
+  ExternalReference real_stack_limit =
+      ExternalReference::address_of_real_stack_limit(masm->isolate());
+  __ mov(scratch1, Operand::StaticVariable(real_stack_limit));
+  // Make scratch2 the space we have left. The stack might already be overflowed
+  // here which will cause scratch2 to become negative.
+  __ mov(scratch2, esp);
+  __ sub(scratch2, scratch1);
+  // Make scratch1 the space we need for the array when it is unrolled onto the
+  // stack.
+  __ mov(scratch1, num_args);
+  if (include_receiver) {
+    __ add(scratch1, Immediate(1));
+  }
+  __ shl(scratch1, kPointerSizeLog2);
+  // Check if the arguments will overflow the stack.
+  __ cmp(scratch2, scratch1);
+  __ j(less_equal, stack_overflow);  // Signed comparison.
+}
+
 static void Generate_InterpreterPushArgs(MacroAssembler* masm,
-                                         Register array_limit) {
+                                         Register array_limit,
+                                         Register start_address) {
   // ----------- S t a t e -------------
-  //  -- ebx : Pointer to the last argument in the args array.
+  //  -- start_address : Pointer to the last argument in the args array.
   //  -- array_limit : Pointer to one before the first argument in the
   //                   args array.
   // -----------------------------------
   Label loop_header, loop_check;
   __ jmp(&loop_check);
   __ bind(&loop_header);
-  __ Push(Operand(ebx, 0));
-  __ sub(ebx, Immediate(kPointerSize));
+  __ Push(Operand(start_address, 0));
+  __ sub(start_address, Immediate(kPointerSize));
   __ bind(&loop_check);
-  __ cmp(ebx, array_limit);
+  __ cmp(start_address, array_limit);
   __ j(greater, &loop_header, Label::kNear);
 }
 
@@ -731,18 +765,26 @@
   //           they are to be pushed onto the stack.
   //  -- edi : the target to call (can be any Object).
   // -----------------------------------
+  Label stack_overflow;
+  // Compute the expected number of arguments.
+  __ mov(ecx, eax);
+  __ add(ecx, Immediate(1));  // Add one for receiver.
+
+  // Add a stack check before pushing the arguments. We need an extra register
+  // to perform a stack check. So push it onto the stack temporarily. This
+  // might cause stack overflow, but it will be detected by the check.
+  __ Push(edi);
+  Generate_StackOverflowCheck(masm, ecx, edx, edi, &stack_overflow);
+  __ Pop(edi);
 
   // Pop return address to allow tail-call after pushing arguments.
   __ Pop(edx);
 
   // Find the address of the last argument.
-  __ mov(ecx, eax);
-  __ add(ecx, Immediate(1));  // Add one for receiver.
   __ shl(ecx, kPointerSizeLog2);
   __ neg(ecx);
   __ add(ecx, ebx);
-
-  Generate_InterpreterPushArgs(masm, ecx);
+  Generate_InterpreterPushArgs(masm, ecx, ebx);
 
   // Call the target.
   __ Push(edx);  // Re-push return address.
@@ -757,43 +799,210 @@
                                               tail_call_mode),
             RelocInfo::CODE_TARGET);
   }
+
+  __ bind(&stack_overflow);
+  {
+    // Pop the temporary registers, so that return address is on top of stack.
+    __ Pop(edi);
+
+    __ TailCallRuntime(Runtime::kThrowStackOverflow);
+
+    // This should be unreachable.
+    __ int3();
+  }
 }
 
+namespace {
+
+// This function modified start_addr, and only reads the contents of num_args
+// register. scratch1 and scratch2 are used as temporary registers. Their
+// original values are restored after the use.
+void Generate_InterpreterPushArgsAndReturnAddress(
+    MacroAssembler* masm, Register num_args, Register start_addr,
+    Register scratch1, Register scratch2, bool receiver_in_args,
+    int num_slots_above_ret_addr, Label* stack_overflow) {
+  // We have to move return address and the temporary registers above it
+  // before we can copy arguments onto the stack. To achieve this:
+  // Step 1: Increment the stack pointer by num_args + 1 (for receiver).
+  // Step 2: Move the return address and values above it to the top of stack.
+  // Step 3: Copy the arguments into the correct locations.
+  //  current stack    =====>    required stack layout
+  // |             |            | scratch1      | (2) <-- esp(1)
+  // |             |            | ....          | (2)
+  // |             |            | scratch-n     | (2)
+  // |             |            | return addr   | (2)
+  // |             |            | arg N         | (3)
+  // | scratch1    | <-- esp    | ....          |
+  // | ....        |            | arg 0         |
+  // | scratch-n   |            | arg 0         |
+  // | return addr |            | receiver slot |
+
+  // Check for stack overflow before we increment the stack pointer.
+  Generate_StackOverflowCheck(masm, num_args, scratch1, scratch2,
+                              stack_overflow, true);
+
+// Step 1 - Update the stack pointer. scratch1 already contains the required
+// increment to the stack. i.e. num_args + 1 stack slots. This is computed in
+// the Generate_StackOverflowCheck.
+
+#ifdef _MSC_VER
+  // TODO(mythria): Move it to macro assembler.
+  // In windows, we cannot increment the stack size by more than one page
+  // (mimimum page size is 4KB) without accessing at least one byte on the
+  // page. Check this:
+  // https://msdn.microsoft.com/en-us/library/aa227153(v=vs.60).aspx.
+  const int page_size = 4 * 1024;
+  Label check_offset, update_stack_pointer;
+  __ bind(&check_offset);
+  __ cmp(scratch1, page_size);
+  __ j(less, &update_stack_pointer);
+  __ sub(esp, Immediate(page_size));
+  // Just to touch the page, before we increment further.
+  __ mov(Operand(esp, 0), Immediate(0));
+  __ sub(scratch1, Immediate(page_size));
+  __ jmp(&check_offset);
+  __ bind(&update_stack_pointer);
+#endif
+
+  __ sub(esp, scratch1);
+
+  // Step 2 move return_address and slots above it to the correct locations.
+  // Move from top to bottom, otherwise we may overwrite when num_args = 0 or 1,
+  // basically when the source and destination overlap. We at least need one
+  // extra slot for receiver, so no extra checks are required to avoid copy.
+  for (int i = 0; i < num_slots_above_ret_addr + 1; i++) {
+    __ mov(scratch1,
+           Operand(esp, num_args, times_pointer_size, (i + 1) * kPointerSize));
+    __ mov(Operand(esp, i * kPointerSize), scratch1);
+  }
+
+  // Step 3 copy arguments to correct locations.
+  if (receiver_in_args) {
+    __ mov(scratch1, num_args);
+    __ add(scratch1, Immediate(1));
+  } else {
+    // Slot meant for receiver contains return address. Reset it so that
+    // we will not incorrectly interpret return address as an object.
+    __ mov(Operand(esp, num_args, times_pointer_size,
+                   (num_slots_above_ret_addr + 1) * kPointerSize),
+           Immediate(0));
+    __ mov(scratch1, num_args);
+  }
+
+  Label loop_header, loop_check;
+  __ jmp(&loop_check);
+  __ bind(&loop_header);
+  __ mov(scratch2, Operand(start_addr, 0));
+  __ mov(Operand(esp, scratch1, times_pointer_size,
+                 num_slots_above_ret_addr * kPointerSize),
+         scratch2);
+  __ sub(start_addr, Immediate(kPointerSize));
+  __ sub(scratch1, Immediate(1));
+  __ bind(&loop_check);
+  __ cmp(scratch1, Immediate(0));
+  __ j(greater, &loop_header, Label::kNear);
+}
+
+}  // end anonymous namespace
+
 // static
-void Builtins::Generate_InterpreterPushArgsAndConstruct(MacroAssembler* masm) {
+void Builtins::Generate_InterpreterPushArgsAndConstructImpl(
+    MacroAssembler* masm, CallableType construct_type) {
   // ----------- S t a t e -------------
   //  -- eax : the number of arguments (not including the receiver)
   //  -- edx : the new target
   //  -- edi : the constructor
-  //  -- ebx : the address of the first argument to be pushed. Subsequent
+  //  -- ebx : allocation site feedback (if available or undefined)
+  //  -- ecx : the address of the first argument to be pushed. Subsequent
   //           arguments should be consecutive above this, in the same order as
   //           they are to be pushed onto the stack.
   // -----------------------------------
-
-  // Pop return address to allow tail-call after pushing arguments.
-  __ Pop(ecx);
-
-  // Push edi in the slot meant for receiver. We need an extra register
-  // so store edi temporarily on stack.
+  Label stack_overflow;
+  // We need two scratch registers. Push edi and edx onto stack.
   __ Push(edi);
+  __ Push(edx);
 
-  // Find the address of the last argument.
-  __ mov(edi, eax);
-  __ neg(edi);
-  __ shl(edi, kPointerSizeLog2);
-  __ add(edi, ebx);
+  // Push arguments and move return address to the top of stack.
+  // The eax register is readonly. The ecx register will be modified. The edx
+  // and edi registers will be modified but restored to their original values.
+  Generate_InterpreterPushArgsAndReturnAddress(masm, eax, ecx, edx, edi, false,
+                                               2, &stack_overflow);
 
-  Generate_InterpreterPushArgs(masm, edi);
+  // Restore edi and edx
+  __ Pop(edx);
+  __ Pop(edi);
 
-  // Restore the constructor from slot on stack. It was pushed at the slot
-  // meant for receiver.
-  __ mov(edi, Operand(esp, eax, times_pointer_size, 0));
+  __ AssertUndefinedOrAllocationSite(ebx);
+  if (construct_type == CallableType::kJSFunction) {
+    // Tail call to the function-specific construct stub (still in the caller
+    // context at this point).
+    __ AssertFunction(edi);
 
-  // Re-push return address.
-  __ Push(ecx);
+    __ mov(ecx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
+    __ mov(ecx, FieldOperand(ecx, SharedFunctionInfo::kConstructStubOffset));
+    __ lea(ecx, FieldOperand(ecx, Code::kHeaderSize));
+    __ jmp(ecx);
+  } else {
+    DCHECK_EQ(construct_type, CallableType::kAny);
 
-  // Call the constructor with unmodified eax, edi, ebi values.
-  __ Jump(masm->isolate()->builtins()->Construct(), RelocInfo::CODE_TARGET);
+    // Call the constructor with unmodified eax, edi, edx values.
+    __ Jump(masm->isolate()->builtins()->Construct(), RelocInfo::CODE_TARGET);
+  }
+
+  __ bind(&stack_overflow);
+  {
+    // Pop the temporary registers, so that return address is on top of stack.
+    __ Pop(edx);
+    __ Pop(edi);
+
+    __ TailCallRuntime(Runtime::kThrowStackOverflow);
+
+    // This should be unreachable.
+    __ int3();
+  }
+}
+
+// static
+void Builtins::Generate_InterpreterPushArgsAndConstructArray(
+    MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- eax : the number of arguments (not including the receiver)
+  //  -- edx : the target to call checked to be Array function.
+  //  -- ebx : the allocation site feedback
+  //  -- ecx : the address of the first argument to be pushed. Subsequent
+  //           arguments should be consecutive above this, in the same order as
+  //           they are to be pushed onto the stack.
+  // -----------------------------------
+  Label stack_overflow;
+  // We need two scratch registers. Register edi is available, push edx onto
+  // stack.
+  __ Push(edx);
+
+  // Push arguments and move return address to the top of stack.
+  // The eax register is readonly. The ecx register will be modified. The edx
+  // and edi registers will be modified but restored to their original values.
+  Generate_InterpreterPushArgsAndReturnAddress(masm, eax, ecx, edx, edi, true,
+                                               1, &stack_overflow);
+
+  // Restore edx.
+  __ Pop(edx);
+
+  // Array constructor expects constructor in edi. It is same as edx here.
+  __ Move(edi, edx);
+
+  ArrayConstructorStub stub(masm->isolate());
+  __ TailCallStub(&stub);
+
+  __ bind(&stack_overflow);
+  {
+    // Pop the temporary registers, so that return address is on top of stack.
+    __ Pop(edx);
+
+    __ TailCallRuntime(Runtime::kThrowStackOverflow);
+
+    // This should be unreachable.
+    __ int3();
+  }
 }
 
 void Builtins::Generate_InterpreterEnterBytecodeDispatch(MacroAssembler* masm) {
@@ -1222,61 +1431,6 @@
 }
 
 // static
-void Builtins::Generate_DatePrototype_GetField(MacroAssembler* masm,
-                                               int field_index) {
-  // ----------- S t a t e -------------
-  //  -- eax    : number of arguments
-  //  -- edi    : function
-  //  -- esi    : context
-  //  -- esp[0] : return address
-  //  -- esp[4] : receiver
-  // -----------------------------------
-
-  // 1. Load receiver into eax and check that it's actually a JSDate object.
-  Label receiver_not_date;
-  {
-    __ mov(eax, Operand(esp, kPointerSize));
-    __ JumpIfSmi(eax, &receiver_not_date);
-    __ CmpObjectType(eax, JS_DATE_TYPE, ebx);
-    __ j(not_equal, &receiver_not_date);
-  }
-
-  // 2. Load the specified date field, falling back to the runtime as necessary.
-  if (field_index == JSDate::kDateValue) {
-    __ mov(eax, FieldOperand(eax, JSDate::kValueOffset));
-  } else {
-    if (field_index < JSDate::kFirstUncachedField) {
-      Label stamp_mismatch;
-      __ mov(edx, Operand::StaticVariable(
-                      ExternalReference::date_cache_stamp(masm->isolate())));
-      __ cmp(edx, FieldOperand(eax, JSDate::kCacheStampOffset));
-      __ j(not_equal, &stamp_mismatch, Label::kNear);
-      __ mov(eax, FieldOperand(
-                      eax, JSDate::kValueOffset + field_index * kPointerSize));
-      __ ret(1 * kPointerSize);
-      __ bind(&stamp_mismatch);
-    }
-    FrameScope scope(masm, StackFrame::INTERNAL);
-    __ PrepareCallCFunction(2, ebx);
-    __ mov(Operand(esp, 0), eax);
-    __ mov(Operand(esp, 1 * kPointerSize),
-           Immediate(Smi::FromInt(field_index)));
-    __ CallCFunction(
-        ExternalReference::get_date_field_function(masm->isolate()), 2);
-  }
-  __ ret(1 * kPointerSize);
-
-  // 3. Raise a TypeError if the receiver is not a date.
-  __ bind(&receiver_not_date);
-  {
-    FrameScope scope(masm, StackFrame::MANUAL);
-    __ Move(ebx, Immediate(0));
-    __ EnterBuiltinFrame(esi, edi, ebx);
-    __ CallRuntime(Runtime::kThrowNotDateError);
-  }
-}
-
-// static
 void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
   // ----------- S t a t e -------------
   //  -- eax     : argc
@@ -1887,10 +2041,9 @@
   __ bind(&to_string);
   {
     FrameScope scope(masm, StackFrame::MANUAL);
-    ToStringStub stub(masm->isolate());
     __ SmiTag(ebx);
     __ EnterBuiltinFrame(esi, edi, ebx);
-    __ CallStub(&stub);
+    __ Call(masm->isolate()->builtins()->ToString(), RelocInfo::CODE_TARGET);
     __ LeaveBuiltinFrame(esi, edi, ebx);
     __ SmiUntag(ebx);
   }
@@ -1954,11 +2107,10 @@
     __ bind(&convert);
     {
       FrameScope scope(masm, StackFrame::MANUAL);
-      ToStringStub stub(masm->isolate());
       __ SmiTag(ebx);
       __ EnterBuiltinFrame(esi, edi, ebx);
       __ Push(edx);
-      __ CallStub(&stub);
+      __ Call(masm->isolate()->builtins()->ToString(), RelocInfo::CODE_TARGET);
       __ Pop(edx);
       __ LeaveBuiltinFrame(esi, edi, ebx);
       __ SmiUntag(ebx);
@@ -2009,32 +2161,6 @@
   }
 }
 
-static void ArgumentsAdaptorStackCheck(MacroAssembler* masm,
-                                       Label* stack_overflow) {
-  // ----------- S t a t e -------------
-  //  -- eax : actual number of arguments
-  //  -- ebx : expected number of arguments
-  //  -- edx : new target (passed through to callee)
-  // -----------------------------------
-  // Check the stack for overflow. We are not trying to catch
-  // interruptions (e.g. debug break and preemption) here, so the "real stack
-  // limit" is checked.
-  ExternalReference real_stack_limit =
-      ExternalReference::address_of_real_stack_limit(masm->isolate());
-  __ mov(edi, Operand::StaticVariable(real_stack_limit));
-  // Make ecx the space we have left. The stack might already be overflowed
-  // here which will cause ecx to become negative.
-  __ mov(ecx, esp);
-  __ sub(ecx, edi);
-  // Make edi the space we need for the array when it is unrolled onto the
-  // stack.
-  __ mov(edi, ebx);
-  __ shl(edi, kPointerSizeLog2);
-  // Check if the arguments will overflow the stack.
-  __ cmp(ecx, edi);
-  __ j(less_equal, stack_overflow);  // Signed comparison.
-}
-
 static void EnterArgumentsAdaptorFrame(MacroAssembler* masm) {
   __ push(ebp);
   __ mov(ebp, esp);
@@ -2743,24 +2869,6 @@
   __ TailCallRuntime(Runtime::kAbort);
 }
 
-// static
-void Builtins::Generate_ToNumber(MacroAssembler* masm) {
-  // The ToNumber stub takes one argument in eax.
-  Label not_smi;
-  __ JumpIfNotSmi(eax, &not_smi, Label::kNear);
-  __ Ret();
-  __ bind(&not_smi);
-
-  Label not_heap_number;
-  __ CompareMap(eax, masm->isolate()->factory()->heap_number_map());
-  __ j(not_equal, &not_heap_number, Label::kNear);
-  __ Ret();
-  __ bind(&not_heap_number);
-
-  __ Jump(masm->isolate()->builtins()->NonNumberToNumber(),
-          RelocInfo::CODE_TARGET);
-}
-
 void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
   // ----------- S t a t e -------------
   //  -- eax : actual number of arguments
@@ -2781,7 +2889,9 @@
   {  // Enough parameters: Actual >= expected.
     __ bind(&enough);
     EnterArgumentsAdaptorFrame(masm);
-    ArgumentsAdaptorStackCheck(masm, &stack_overflow);
+    // edi is used as a scratch register. It should be restored from the frame
+    // when needed.
+    Generate_StackOverflowCheck(masm, ebx, ecx, edi, &stack_overflow);
 
     // Copy receiver and all expected arguments.
     const int offset = StandardFrameConstants::kCallerSPOffset;
@@ -2802,7 +2912,9 @@
   {  // Too few parameters: Actual < expected.
     __ bind(&too_few);
     EnterArgumentsAdaptorFrame(masm);
-    ArgumentsAdaptorStackCheck(masm, &stack_overflow);
+    // edi is used as a scratch register. It should be restored from the frame
+    // when needed.
+    Generate_StackOverflowCheck(masm, ebx, ecx, edi, &stack_overflow);
 
     // Remember expected arguments in ecx.
     __ mov(ecx, ebx);
diff --git a/src/builtins/mips/builtins-mips.cc b/src/builtins/mips/builtins-mips.cc
index 003eeb2..a2b6bea 100644
--- a/src/builtins/mips/builtins-mips.cc
+++ b/src/builtins/mips/builtins-mips.cc
@@ -395,10 +395,9 @@
   __ bind(&to_string);
   {
     FrameScope scope(masm, StackFrame::MANUAL);
-    ToStringStub stub(masm->isolate());
     __ SmiTag(t0);
     __ EnterBuiltinFrame(cp, a1, t0);
-    __ CallStub(&stub);
+    __ Call(masm->isolate()->builtins()->ToString(), RelocInfo::CODE_TARGET);
     __ LeaveBuiltinFrame(cp, a1, t0);
     __ SmiUntag(t0);
   }
@@ -459,11 +458,10 @@
     __ bind(&convert);
     {
       FrameScope scope(masm, StackFrame::MANUAL);
-      ToStringStub stub(masm->isolate());
       __ SmiTag(t0);
       __ EnterBuiltinFrame(cp, a1, t0);
       __ Push(a3);
-      __ CallStub(&stub);
+      __ Call(masm->isolate()->builtins()->ToString(), RelocInfo::CODE_TARGET);
       __ Move(a0, v0);
       __ Pop(a3);
       __ LeaveBuiltinFrame(cp, a1, t0);
@@ -1051,6 +1049,17 @@
   __ Branch(&switch_to_different_code_kind, ne, a0,
             Operand(masm->CodeObject()));  // Self-reference to this code.
 
+  // Increment invocation count for the function.
+  __ lw(a0, FieldMemOperand(a1, JSFunction::kLiteralsOffset));
+  __ lw(a0, FieldMemOperand(a0, LiteralsArray::kFeedbackVectorOffset));
+  __ lw(t0, FieldMemOperand(
+                a0, TypeFeedbackVector::kInvocationCountIndex * kPointerSize +
+                        TypeFeedbackVector::kHeaderSize));
+  __ Addu(t0, t0, Operand(Smi::FromInt(1)));
+  __ sw(t0, FieldMemOperand(
+                a0, TypeFeedbackVector::kInvocationCountIndex * kPointerSize +
+                        TypeFeedbackVector::kHeaderSize));
+
   // Check function data field is actually a BytecodeArray object.
   if (FLAG_debug_code) {
     __ SmiTst(kInterpreterBytecodeArrayRegister, t0);
@@ -1160,6 +1169,45 @@
   __ Jump(ra);
 }
 
+static void Generate_StackOverflowCheck(MacroAssembler* masm, Register num_args,
+                                        Register scratch1, Register scratch2,
+                                        Label* stack_overflow) {
+  // Check the stack for overflow. We are not trying to catch
+  // interruptions (e.g. debug break and preemption) here, so the "real stack
+  // limit" is checked.
+  __ LoadRoot(scratch1, Heap::kRealStackLimitRootIndex);
+  // Make scratch1 the space we have left. The stack might already be overflowed
+  // here which will cause scratch1 to become negative.
+  __ subu(scratch1, sp, scratch1);
+  // Check if the arguments will overflow the stack.
+  __ sll(scratch2, num_args, kPointerSizeLog2);
+  // Signed comparison.
+  __ Branch(stack_overflow, le, scratch1, Operand(scratch2));
+}
+
+static void Generate_InterpreterPushArgs(MacroAssembler* masm,
+                                         Register num_args, Register index,
+                                         Register scratch, Register scratch2,
+                                         Label* stack_overflow) {
+  Generate_StackOverflowCheck(masm, num_args, scratch, scratch2,
+                              stack_overflow);
+
+  // Find the address of the last argument.
+  __ mov(scratch2, num_args);
+  __ sll(scratch2, scratch2, kPointerSizeLog2);
+  __ Subu(scratch2, index, Operand(scratch2));
+
+  // Push the arguments.
+  Label loop_header, loop_check;
+  __ Branch(&loop_check);
+  __ bind(&loop_header);
+  __ lw(scratch, MemOperand(index));
+  __ Addu(index, index, Operand(-kPointerSize));
+  __ push(scratch);
+  __ bind(&loop_check);
+  __ Branch(&loop_header, gt, index, Operand(scratch2));
+}
+
 // static
 void Builtins::Generate_InterpreterPushArgsAndCallImpl(
     MacroAssembler* masm, TailCallMode tail_call_mode,
@@ -1171,21 +1219,12 @@
   //          they are to be pushed onto the stack.
   //  -- a1 : the target to call (can be any Object).
   // -----------------------------------
+  Label stack_overflow;
 
-  // Find the address of the last argument.
-  __ Addu(a3, a0, Operand(1));  // Add one for receiver.
-  __ sll(a3, a3, kPointerSizeLog2);
-  __ Subu(a3, a2, Operand(a3));
+  __ Addu(t0, a0, Operand(1));  // Add one for receiver.
 
-  // Push the arguments.
-  Label loop_header, loop_check;
-  __ Branch(&loop_check);
-  __ bind(&loop_header);
-  __ lw(t0, MemOperand(a2));
-  __ Addu(a2, a2, Operand(-kPointerSize));
-  __ push(t0);
-  __ bind(&loop_check);
-  __ Branch(&loop_header, gt, a2, Operand(a3));
+  // This function modifies a2, t4 and t1.
+  Generate_InterpreterPushArgs(masm, t0, a2, t4, t1, &stack_overflow);
 
   // Call the target.
   if (function_type == CallableType::kJSFunction) {
@@ -1198,36 +1237,87 @@
                                               tail_call_mode),
             RelocInfo::CODE_TARGET);
   }
+
+  __ bind(&stack_overflow);
+  {
+    __ TailCallRuntime(Runtime::kThrowStackOverflow);
+    // Unreachable code.
+    __ break_(0xCC);
+  }
 }
 
 // static
-void Builtins::Generate_InterpreterPushArgsAndConstruct(MacroAssembler* masm) {
+void Builtins::Generate_InterpreterPushArgsAndConstructImpl(
+    MacroAssembler* masm, CallableType construct_type) {
   // ----------- S t a t e -------------
   // -- a0 : argument count (not including receiver)
   // -- a3 : new target
   // -- a1 : constructor to call
-  // -- a2 : address of the first argument
+  // -- a2 : allocation site feedback if available, undefined otherwise.
+  // -- t4 : address of the first argument
   // -----------------------------------
-
-  // Find the address of the last argument.
-  __ sll(t0, a0, kPointerSizeLog2);
-  __ Subu(t0, a2, Operand(t0));
+  Label stack_overflow;
 
   // Push a slot for the receiver.
   __ push(zero_reg);
 
-  // Push the arguments.
-  Label loop_header, loop_check;
-  __ Branch(&loop_check);
-  __ bind(&loop_header);
-  __ lw(t1, MemOperand(a2));
-  __ Addu(a2, a2, Operand(-kPointerSize));
-  __ push(t1);
-  __ bind(&loop_check);
-  __ Branch(&loop_header, gt, a2, Operand(t0));
+  // This function modifies t4, t1 and t0.
+  Generate_InterpreterPushArgs(masm, a0, t4, t1, t0, &stack_overflow);
 
-  // Call the constructor with a0, a1, and a3 unmodified.
-  __ Jump(masm->isolate()->builtins()->Construct(), RelocInfo::CODE_TARGET);
+  __ AssertUndefinedOrAllocationSite(a2, t0);
+  if (construct_type == CallableType::kJSFunction) {
+    __ AssertFunction(a1);
+
+    // Tail call to the function-specific construct stub (still in the caller
+    // context at this point).
+    __ lw(t0, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
+    __ lw(t0, FieldMemOperand(t0, SharedFunctionInfo::kConstructStubOffset));
+    __ Addu(at, t0, Operand(Code::kHeaderSize - kHeapObjectTag));
+    __ Jump(at);
+  } else {
+    DCHECK_EQ(construct_type, CallableType::kAny);
+    // Call the constructor with a0, a1, and a3 unmodified.
+    __ Jump(masm->isolate()->builtins()->Construct(), RelocInfo::CODE_TARGET);
+  }
+
+  __ bind(&stack_overflow);
+  {
+    __ TailCallRuntime(Runtime::kThrowStackOverflow);
+    // Unreachable code.
+    __ break_(0xCC);
+  }
+}
+
+// static
+void Builtins::Generate_InterpreterPushArgsAndConstructArray(
+    MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- a0 : the number of arguments (not including the receiver)
+  //  -- a1 : the target to call checked to be Array function.
+  //  -- a2 : allocation site feedback.
+  //  -- a3 : the address of the first argument to be pushed. Subsequent
+  //          arguments should be consecutive above this, in the same order as
+  //          they are to be pushed onto the stack.
+  // -----------------------------------
+  Label stack_overflow;
+
+  __ Addu(t0, a0, Operand(1));  // Add one for receiver.
+
+  // This function modifies a3, t4, and t1.
+  Generate_InterpreterPushArgs(masm, t0, a3, t1, t4, &stack_overflow);
+
+  // ArrayConstructor stub expects constructor in a3. Set it here.
+  __ mov(a3, a1);
+
+  ArrayConstructorStub stub(masm->isolate());
+  __ TailCallStub(&stub);
+
+  __ bind(&stack_overflow);
+  {
+    __ TailCallRuntime(Runtime::kThrowStackOverflow);
+    // Unreachable code.
+    __ break_(0xCC);
+  }
 }
 
 void Builtins::Generate_InterpreterEnterBytecodeDispatch(MacroAssembler* masm) {
@@ -1805,61 +1895,6 @@
 }
 
 // static
-void Builtins::Generate_DatePrototype_GetField(MacroAssembler* masm,
-                                               int field_index) {
-  // ----------- S t a t e -------------
-  //  -- a0    : number of arguments
-  //  -- a1    : function
-  //  -- cp    : context
-  //  -- sp[0] : receiver
-  // -----------------------------------
-
-  // 1. Pop receiver into a0 and check that it's actually a JSDate object.
-  Label receiver_not_date;
-  {
-    __ Pop(a0);
-    __ JumpIfSmi(a0, &receiver_not_date);
-    __ GetObjectType(a0, t0, t0);
-    __ Branch(&receiver_not_date, ne, t0, Operand(JS_DATE_TYPE));
-  }
-
-  // 2. Load the specified date field, falling back to the runtime as necessary.
-  if (field_index == JSDate::kDateValue) {
-    __ Ret(USE_DELAY_SLOT);
-    __ lw(v0, FieldMemOperand(a0, JSDate::kValueOffset));  // In delay slot.
-  } else {
-    if (field_index < JSDate::kFirstUncachedField) {
-      Label stamp_mismatch;
-      __ li(a1, Operand(ExternalReference::date_cache_stamp(masm->isolate())));
-      __ lw(a1, MemOperand(a1));
-      __ lw(t0, FieldMemOperand(a0, JSDate::kCacheStampOffset));
-      __ Branch(&stamp_mismatch, ne, t0, Operand(a1));
-      __ Ret(USE_DELAY_SLOT);
-      __ lw(v0, FieldMemOperand(
-                    a0, JSDate::kValueOffset +
-                            field_index * kPointerSize));  // In delay slot.
-      __ bind(&stamp_mismatch);
-    }
-    FrameScope scope(masm, StackFrame::INTERNAL);
-    __ PrepareCallCFunction(2, t0);
-    __ li(a1, Operand(Smi::FromInt(field_index)));
-    __ CallCFunction(
-        ExternalReference::get_date_field_function(masm->isolate()), 2);
-  }
-  __ Ret();
-
-  // 3. Raise a TypeError if the receiver is not a date.
-  __ bind(&receiver_not_date);
-  {
-    FrameScope scope(masm, StackFrame::MANUAL);
-    __ Push(a0);
-    __ Move(a0, Smi::FromInt(0));
-    __ EnterBuiltinFrame(cp, a1, a0);
-    __ CallRuntime(Runtime::kThrowNotDateError);
-  }
-}
-
-// static
 void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
   // ----------- S t a t e -------------
   //  -- a0    : argc
@@ -2115,27 +2150,6 @@
   }
 }
 
-static void ArgumentAdaptorStackCheck(MacroAssembler* masm,
-                                      Label* stack_overflow) {
-  // ----------- S t a t e -------------
-  //  -- a0 : actual number of arguments
-  //  -- a1 : function (passed through to callee)
-  //  -- a2 : expected number of arguments
-  //  -- a3 : new target (passed through to callee)
-  // -----------------------------------
-  // Check the stack for overflow. We are not trying to catch
-  // interruptions (e.g. debug break and preemption) here, so the "real stack
-  // limit" is checked.
-  __ LoadRoot(t1, Heap::kRealStackLimitRootIndex);
-  // Make t1 the space we have left. The stack might already be overflowed
-  // here which will cause t1 to become negative.
-  __ subu(t1, sp, t1);
-  // Check if the arguments will overflow the stack.
-  __ sll(at, a2, kPointerSizeLog2);
-  // Signed comparison.
-  __ Branch(stack_overflow, le, t1, Operand(at));
-}
-
 static void EnterArgumentsAdaptorFrame(MacroAssembler* masm) {
   __ sll(a0, a0, kSmiTagSize);
   __ li(t0, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
@@ -2854,28 +2868,6 @@
   __ TailCallRuntime(Runtime::kAbort);
 }
 
-// static
-void Builtins::Generate_ToNumber(MacroAssembler* masm) {
-  // The ToNumber stub takes one argument in a0.
-  Label not_smi;
-  __ JumpIfNotSmi(a0, &not_smi);
-  __ Ret(USE_DELAY_SLOT);
-  __ mov(v0, a0);
-  __ bind(&not_smi);
-
-  Label not_heap_number;
-  __ GetObjectType(a0, a1, a1);
-  // a0: receiver
-  // a1: receiver instance type
-  __ Branch(&not_heap_number, ne, a1, Operand(HEAP_NUMBER_TYPE));
-  __ Ret(USE_DELAY_SLOT);
-  __ mov(v0, a0);
-  __ bind(&not_heap_number);
-
-  __ Jump(masm->isolate()->builtins()->NonNumberToNumber(),
-          RelocInfo::CODE_TARGET);
-}
-
 void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
   // State setup as expected by MacroAssembler::InvokePrologue.
   // ----------- S t a t e -------------
@@ -2900,7 +2892,7 @@
     // a3: new target (passed through to callee)
     __ bind(&enough);
     EnterArgumentsAdaptorFrame(masm);
-    ArgumentAdaptorStackCheck(masm, &stack_overflow);
+    Generate_StackOverflowCheck(masm, a2, t1, at, &stack_overflow);
 
     // Calculate copy start address into a0 and copy end address into t1.
     __ Lsa(a0, fp, a0, kPointerSizeLog2 - kSmiTagSize);
@@ -2930,7 +2922,7 @@
   {  // Too few parameters: Actual < expected.
     __ bind(&too_few);
     EnterArgumentsAdaptorFrame(masm);
-    ArgumentAdaptorStackCheck(masm, &stack_overflow);
+    Generate_StackOverflowCheck(masm, a2, t1, at, &stack_overflow);
 
     // Calculate copy start address into a0 and copy end address into t3.
     // a0: actual number of arguments as a smi
diff --git a/src/builtins/mips64/builtins-mips64.cc b/src/builtins/mips64/builtins-mips64.cc
index cbdb5c3..f7225f0 100644
--- a/src/builtins/mips64/builtins-mips64.cc
+++ b/src/builtins/mips64/builtins-mips64.cc
@@ -394,10 +394,9 @@
   __ bind(&to_string);
   {
     FrameScope scope(masm, StackFrame::MANUAL);
-    ToStringStub stub(masm->isolate());
     __ SmiTag(t0);
     __ EnterBuiltinFrame(cp, a1, t0);
-    __ CallStub(&stub);
+    __ Call(masm->isolate()->builtins()->ToString(), RelocInfo::CODE_TARGET);
     __ LeaveBuiltinFrame(cp, a1, t0);
     __ SmiUntag(t0);
   }
@@ -458,11 +457,10 @@
     __ bind(&convert);
     {
       FrameScope scope(masm, StackFrame::MANUAL);
-      ToStringStub stub(masm->isolate());
       __ SmiTag(t0);
       __ EnterBuiltinFrame(cp, a1, t0);
       __ Push(a3);
-      __ CallStub(&stub);
+      __ Call(masm->isolate()->builtins()->ToString(), RelocInfo::CODE_TARGET);
       __ Move(a0, v0);
       __ Pop(a3);
       __ LeaveBuiltinFrame(cp, a1, t0);
@@ -1043,6 +1041,17 @@
   __ Branch(&switch_to_different_code_kind, ne, a0,
             Operand(masm->CodeObject()));  // Self-reference to this code.
 
+  // Increment invocation count for the function.
+  __ ld(a0, FieldMemOperand(a1, JSFunction::kLiteralsOffset));
+  __ ld(a0, FieldMemOperand(a0, LiteralsArray::kFeedbackVectorOffset));
+  __ ld(a4, FieldMemOperand(
+                a0, TypeFeedbackVector::kInvocationCountIndex * kPointerSize +
+                        TypeFeedbackVector::kHeaderSize));
+  __ Daddu(a4, a4, Operand(Smi::FromInt(1)));
+  __ sd(a4, FieldMemOperand(
+                a0, TypeFeedbackVector::kInvocationCountIndex * kPointerSize +
+                        TypeFeedbackVector::kHeaderSize));
+
   // Check function data field is actually a BytecodeArray object.
   if (FLAG_debug_code) {
     __ SmiTst(kInterpreterBytecodeArrayRegister, a4);
@@ -1152,6 +1161,45 @@
   __ Jump(ra);
 }
 
+static void Generate_StackOverflowCheck(MacroAssembler* masm, Register num_args,
+                                        Register scratch1, Register scratch2,
+                                        Label* stack_overflow) {
+  // Check the stack for overflow. We are not trying to catch
+  // interruptions (e.g. debug break and preemption) here, so the "real stack
+  // limit" is checked.
+  __ LoadRoot(scratch1, Heap::kRealStackLimitRootIndex);
+  // Make scratch1 the space we have left. The stack might already be overflowed
+  // here which will cause scratch1 to become negative.
+  __ dsubu(scratch1, sp, scratch1);
+  // Check if the arguments will overflow the stack.
+  __ dsll(scratch2, num_args, kPointerSizeLog2);
+  // Signed comparison.
+  __ Branch(stack_overflow, le, scratch1, Operand(scratch2));
+}
+
+static void Generate_InterpreterPushArgs(MacroAssembler* masm,
+                                         Register num_args, Register index,
+                                         Register scratch, Register scratch2,
+                                         Label* stack_overflow) {
+  Generate_StackOverflowCheck(masm, num_args, scratch, scratch2,
+                              stack_overflow);
+
+  // Find the address of the last argument.
+  __ mov(scratch2, num_args);
+  __ dsll(scratch2, scratch2, kPointerSizeLog2);
+  __ Dsubu(scratch2, index, Operand(scratch2));
+
+  // Push the arguments.
+  Label loop_header, loop_check;
+  __ Branch(&loop_check);
+  __ bind(&loop_header);
+  __ ld(scratch, MemOperand(index));
+  __ Daddu(index, index, Operand(-kPointerSize));
+  __ push(scratch);
+  __ bind(&loop_check);
+  __ Branch(&loop_header, gt, index, Operand(scratch2));
+}
+
 // static
 void Builtins::Generate_InterpreterPushArgsAndCallImpl(
     MacroAssembler* masm, TailCallMode tail_call_mode,
@@ -1163,21 +1211,12 @@
   //          they are to be pushed onto the stack.
   //  -- a1 : the target to call (can be any Object).
   // -----------------------------------
+  Label stack_overflow;
 
-  // Find the address of the last argument.
   __ Daddu(a3, a0, Operand(1));  // Add one for receiver.
-  __ dsll(a3, a3, kPointerSizeLog2);
-  __ Dsubu(a3, a2, Operand(a3));
 
-  // Push the arguments.
-  Label loop_header, loop_check;
-  __ Branch(&loop_check);
-  __ bind(&loop_header);
-  __ ld(t0, MemOperand(a2));
-  __ Daddu(a2, a2, Operand(-kPointerSize));
-  __ push(t0);
-  __ bind(&loop_check);
-  __ Branch(&loop_header, gt, a2, Operand(a3));
+  // This function modifies a2, t0 and a4.
+  Generate_InterpreterPushArgs(masm, a3, a2, a4, t0, &stack_overflow);
 
   // Call the target.
   if (function_type == CallableType::kJSFunction) {
@@ -1190,36 +1229,87 @@
                                               tail_call_mode),
             RelocInfo::CODE_TARGET);
   }
+
+  __ bind(&stack_overflow);
+  {
+    __ TailCallRuntime(Runtime::kThrowStackOverflow);
+    // Unreachable code.
+    __ break_(0xCC);
+  }
 }
 
 // static
-void Builtins::Generate_InterpreterPushArgsAndConstruct(MacroAssembler* masm) {
+void Builtins::Generate_InterpreterPushArgsAndConstructImpl(
+    MacroAssembler* masm, CallableType construct_type) {
   // ----------- S t a t e -------------
   // -- a0 : argument count (not including receiver)
   // -- a3 : new target
   // -- a1 : constructor to call
-  // -- a2 : address of the first argument
+  // -- a2 : allocation site feedback if available, undefined otherwise.
+  // -- a4 : address of the first argument
   // -----------------------------------
-
-  // Find the address of the last argument.
-  __ dsll(t0, a0, kPointerSizeLog2);
-  __ Dsubu(t0, a2, Operand(t0));
+  Label stack_overflow;
 
   // Push a slot for the receiver.
   __ push(zero_reg);
 
-  // Push the arguments.
-  Label loop_header, loop_check;
-  __ Branch(&loop_check);
-  __ bind(&loop_header);
-  __ ld(t1, MemOperand(a2));
-  __ Daddu(a2, a2, Operand(-kPointerSize));
-  __ push(t1);
-  __ bind(&loop_check);
-  __ Branch(&loop_header, gt, a2, Operand(t0));
+  // This function modifies t0, a4 and a5.
+  Generate_InterpreterPushArgs(masm, a0, a4, a5, t0, &stack_overflow);
 
-  // Call the constructor with a0, a1, and a3 unmodified.
-  __ Jump(masm->isolate()->builtins()->Construct(), RelocInfo::CODE_TARGET);
+  __ AssertUndefinedOrAllocationSite(a2, t0);
+  if (construct_type == CallableType::kJSFunction) {
+    __ AssertFunction(a1);
+
+    // Tail call to the function-specific construct stub (still in the caller
+    // context at this point).
+    __ ld(a4, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
+    __ ld(a4, FieldMemOperand(a4, SharedFunctionInfo::kConstructStubOffset));
+    __ Daddu(at, a4, Operand(Code::kHeaderSize - kHeapObjectTag));
+    __ Jump(at);
+  } else {
+    DCHECK_EQ(construct_type, CallableType::kAny);
+    // Call the constructor with a0, a1, and a3 unmodified.
+    __ Jump(masm->isolate()->builtins()->Construct(), RelocInfo::CODE_TARGET);
+  }
+
+  __ bind(&stack_overflow);
+  {
+    __ TailCallRuntime(Runtime::kThrowStackOverflow);
+    // Unreachable code.
+    __ break_(0xCC);
+  }
+}
+
+// static
+void Builtins::Generate_InterpreterPushArgsAndConstructArray(
+    MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- a0 : the number of arguments (not including the receiver)
+  //  -- a1 : the target to call checked to be Array function.
+  //  -- a2 : allocation site feedback.
+  //  -- a3 : the address of the first argument to be pushed. Subsequent
+  //          arguments should be consecutive above this, in the same order as
+  //          they are to be pushed onto the stack.
+  // -----------------------------------
+  Label stack_overflow;
+
+  __ Daddu(a4, a0, Operand(1));  // Add one for receiver.
+
+  // This function modifies a3, a5 and a6.
+  Generate_InterpreterPushArgs(masm, a4, a3, a5, a6, &stack_overflow);
+
+  // ArrayConstructor stub expects constructor in a3. Set it here.
+  __ mov(a3, a1);
+
+  ArrayConstructorStub stub(masm->isolate());
+  __ TailCallStub(&stub);
+
+  __ bind(&stack_overflow);
+  {
+    __ TailCallRuntime(Runtime::kThrowStackOverflow);
+    // Unreachable code.
+    __ break_(0xCC);
+  }
 }
 
 void Builtins::Generate_InterpreterEnterBytecodeDispatch(MacroAssembler* masm) {
@@ -1799,61 +1889,6 @@
 }
 
 // static
-void Builtins::Generate_DatePrototype_GetField(MacroAssembler* masm,
-                                               int field_index) {
-  // ----------- S t a t e -------------
-  //  -- a0                 : number of arguments
-  //  -- a1                 : function
-  //  -- cp                 : context
-  //  -- sp[0] : receiver
-  // -----------------------------------
-
-  // 1. Pop receiver into a0 and check that it's actually a JSDate object.
-  Label receiver_not_date;
-  {
-    __ Pop(a0);
-    __ JumpIfSmi(a0, &receiver_not_date);
-    __ GetObjectType(a0, t0, t0);
-    __ Branch(&receiver_not_date, ne, t0, Operand(JS_DATE_TYPE));
-  }
-
-  // 2. Load the specified date field, falling back to the runtime as necessary.
-  if (field_index == JSDate::kDateValue) {
-    __ Ret(USE_DELAY_SLOT);
-    __ ld(v0, FieldMemOperand(a0, JSDate::kValueOffset));  // In delay slot.
-  } else {
-    if (field_index < JSDate::kFirstUncachedField) {
-      Label stamp_mismatch;
-      __ li(a1, Operand(ExternalReference::date_cache_stamp(masm->isolate())));
-      __ ld(a1, MemOperand(a1));
-      __ ld(t0, FieldMemOperand(a0, JSDate::kCacheStampOffset));
-      __ Branch(&stamp_mismatch, ne, t0, Operand(a1));
-      __ Ret(USE_DELAY_SLOT);
-      __ ld(v0, FieldMemOperand(
-                    a0, JSDate::kValueOffset +
-                            field_index * kPointerSize));  // In delay slot.
-      __ bind(&stamp_mismatch);
-    }
-    FrameScope scope(masm, StackFrame::INTERNAL);
-    __ PrepareCallCFunction(2, t0);
-    __ li(a1, Operand(Smi::FromInt(field_index)));
-    __ CallCFunction(
-        ExternalReference::get_date_field_function(masm->isolate()), 2);
-  }
-  __ Ret();
-
-  // 3. Raise a TypeError if the receiver is not a date.
-  __ bind(&receiver_not_date);
-  {
-    FrameScope scope(masm, StackFrame::MANUAL);
-    __ Push(a0);
-    __ Move(a0, Smi::FromInt(0));
-    __ EnterBuiltinFrame(cp, a1, a0);
-    __ CallRuntime(Runtime::kThrowNotDateError);
-  }
-}
-
-// static
 void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
   // ----------- S t a t e -------------
   //  -- a0    : argc
@@ -2109,27 +2144,6 @@
   }
 }
 
-static void ArgumentAdaptorStackCheck(MacroAssembler* masm,
-                                      Label* stack_overflow) {
-  // ----------- S t a t e -------------
-  //  -- a0 : actual number of arguments
-  //  -- a1 : function (passed through to callee)
-  //  -- a2 : expected number of arguments
-  //  -- a3 : new target (passed through to callee)
-  // -----------------------------------
-  // Check the stack for overflow. We are not trying to catch
-  // interruptions (e.g. debug break and preemption) here, so the "real stack
-  // limit" is checked.
-  __ LoadRoot(a5, Heap::kRealStackLimitRootIndex);
-  // Make a5 the space we have left. The stack might already be overflowed
-  // here which will cause a5 to become negative.
-  __ dsubu(a5, sp, a5);
-  // Check if the arguments will overflow the stack.
-  __ dsll(at, a2, kPointerSizeLog2);
-  // Signed comparison.
-  __ Branch(stack_overflow, le, a5, Operand(at));
-}
-
 static void EnterArgumentsAdaptorFrame(MacroAssembler* masm) {
   // __ sll(a0, a0, kSmiTagSize);
   __ dsll32(a0, a0, 0);
@@ -2847,28 +2861,6 @@
   __ TailCallRuntime(Runtime::kAbort);
 }
 
-// static
-void Builtins::Generate_ToNumber(MacroAssembler* masm) {
-  // The ToNumber stub takes one argument in a0.
-  Label not_smi;
-  __ JumpIfNotSmi(a0, &not_smi);
-  __ Ret(USE_DELAY_SLOT);
-  __ mov(v0, a0);
-  __ bind(&not_smi);
-
-  Label not_heap_number;
-  __ GetObjectType(a0, a1, a1);
-  // a0: receiver
-  // a1: receiver instance type
-  __ Branch(&not_heap_number, ne, a1, Operand(HEAP_NUMBER_TYPE));
-  __ Ret(USE_DELAY_SLOT);
-  __ mov(v0, a0);
-  __ bind(&not_heap_number);
-
-  __ Jump(masm->isolate()->builtins()->NonNumberToNumber(),
-          RelocInfo::CODE_TARGET);
-}
-
 void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
   // State setup as expected by MacroAssembler::InvokePrologue.
   // ----------- S t a t e -------------
@@ -2893,7 +2885,7 @@
     // a3: new target (passed through to callee)
     __ bind(&enough);
     EnterArgumentsAdaptorFrame(masm);
-    ArgumentAdaptorStackCheck(masm, &stack_overflow);
+    Generate_StackOverflowCheck(masm, a2, a5, at, &stack_overflow);
 
     // Calculate copy start address into a0 and copy end address into a4.
     __ SmiScale(a0, a0, kPointerSizeLog2);
@@ -2924,7 +2916,7 @@
   {  // Too few parameters: Actual < expected.
     __ bind(&too_few);
     EnterArgumentsAdaptorFrame(masm);
-    ArgumentAdaptorStackCheck(masm, &stack_overflow);
+    Generate_StackOverflowCheck(masm, a2, a5, at, &stack_overflow);
 
     // Calculate copy start address into a0 and copy end address into a7.
     // a0: actual number of arguments as a smi
diff --git a/src/builtins/ppc/builtins-ppc.cc b/src/builtins/ppc/builtins-ppc.cc
index dfea83f..7e2b82c 100644
--- a/src/builtins/ppc/builtins-ppc.cc
+++ b/src/builtins/ppc/builtins-ppc.cc
@@ -398,10 +398,9 @@
   __ bind(&to_string);
   {
     FrameScope scope(masm, StackFrame::MANUAL);
-    ToStringStub stub(masm->isolate());
     __ SmiTag(r5);
     __ EnterBuiltinFrame(cp, r4, r5);
-    __ CallStub(&stub);
+    __ Call(masm->isolate()->builtins()->ToString(), RelocInfo::CODE_TARGET);
     __ LeaveBuiltinFrame(cp, r4, r5);
     __ SmiUntag(r5);
   }
@@ -462,12 +461,11 @@
     __ bind(&convert);
     {
       FrameScope scope(masm, StackFrame::MANUAL);
-      ToStringStub stub(masm->isolate());
       __ SmiTag(r9);
       __ EnterBuiltinFrame(cp, r4, r9);
       __ Push(r6);
       __ mr(r3, r5);
-      __ CallStub(&stub);
+      __ Call(masm->isolate()->builtins()->ToString(), RelocInfo::CODE_TARGET);
       __ mr(r5, r3);
       __ Pop(r6);
       __ LeaveBuiltinFrame(cp, r4, r9);
@@ -1084,6 +1082,18 @@
   __ cmp(r3, ip);
   __ bne(&switch_to_different_code_kind);
 
+  // Increment invocation count for the function.
+  __ LoadP(r7, FieldMemOperand(r4, JSFunction::kLiteralsOffset));
+  __ LoadP(r7, FieldMemOperand(r7, LiteralsArray::kFeedbackVectorOffset));
+  __ LoadP(r8, FieldMemOperand(r7, TypeFeedbackVector::kInvocationCountIndex *
+                                           kPointerSize +
+                                       TypeFeedbackVector::kHeaderSize));
+  __ AddSmiLiteral(r8, r8, Smi::FromInt(1), r0);
+  __ StoreP(r8, FieldMemOperand(r7, TypeFeedbackVector::kInvocationCountIndex *
+                                            kPointerSize +
+                                        TypeFeedbackVector::kHeaderSize),
+            r0);
+
   // Check function data field is actually a BytecodeArray object.
 
   if (FLAG_debug_code) {
@@ -1187,8 +1197,29 @@
   __ blr();
 }
 
-static void Generate_InterpreterPushArgs(MacroAssembler* masm, Register index,
-                                         Register count, Register scratch) {
+static void Generate_StackOverflowCheck(MacroAssembler* masm, Register num_args,
+                                        Register scratch,
+                                        Label* stack_overflow) {
+  // Check the stack for overflow. We are not trying to catch
+  // interruptions (e.g. debug break and preemption) here, so the "real stack
+  // limit" is checked.
+  __ LoadRoot(scratch, Heap::kRealStackLimitRootIndex);
+  // Make scratch the space we have left. The stack might already be overflowed
+  // here which will cause scratch to become negative.
+  __ sub(scratch, sp, scratch);
+  // Check if the arguments will overflow the stack.
+  __ ShiftLeftImm(r0, num_args, Operand(kPointerSizeLog2));
+  __ cmp(scratch, r0);
+  __ ble(stack_overflow);  // Signed comparison.
+}
+
+static void Generate_InterpreterPushArgs(MacroAssembler* masm,
+                                         Register num_args, Register index,
+                                         Register count, Register scratch,
+                                         Label* stack_overflow) {
+  // A stack check before pushing arguments.
+  Generate_StackOverflowCheck(masm, num_args, scratch, stack_overflow);
+
   Label loop;
   __ addi(index, index, Operand(kPointerSize));  // Bias up for LoadPU
   __ mtctr(count);
@@ -1209,12 +1240,13 @@
   //          they are to be pushed onto the stack.
   //  -- r4 : the target to call (can be any Object).
   // -----------------------------------
+  Label stack_overflow;
 
   // Calculate number of arguments (add one for receiver).
   __ addi(r6, r3, Operand(1));
 
-  // Push the arguments.
-  Generate_InterpreterPushArgs(masm, r5, r6, r7);
+  // Push the arguments. r5, r6, r7 will be modified.
+  Generate_InterpreterPushArgs(masm, r6, r5, r6, r7, &stack_overflow);
 
   // Call the target.
   if (function_type == CallableType::kJSFunction) {
@@ -1227,16 +1259,26 @@
                                               tail_call_mode),
             RelocInfo::CODE_TARGET);
   }
+
+  __ bind(&stack_overflow);
+  {
+    __ TailCallRuntime(Runtime::kThrowStackOverflow);
+    // Unreachable Code.
+    __ bkpt(0);
+  }
 }
 
 // static
-void Builtins::Generate_InterpreterPushArgsAndConstruct(MacroAssembler* masm) {
+void Builtins::Generate_InterpreterPushArgsAndConstructImpl(
+    MacroAssembler* masm, CallableType construct_type) {
   // ----------- S t a t e -------------
   // -- r3 : argument count (not including receiver)
   // -- r6 : new target
   // -- r4 : constructor to call
-  // -- r5 : address of the first argument
+  // -- r5 : allocation site feedback if available, undefined otherwise.
+  // -- r7 : address of the first argument
   // -----------------------------------
+  Label stack_overflow;
 
   // Push a slot for the receiver to be constructed.
   __ li(r0, Operand::Zero());
@@ -1246,11 +1288,64 @@
   Label skip;
   __ cmpi(r3, Operand::Zero());
   __ beq(&skip);
-  Generate_InterpreterPushArgs(masm, r5, r3, r7);
+  // Push the arguments. r7 and r8 will be modified.
+  Generate_InterpreterPushArgs(masm, r3, r7, r3, r8, &stack_overflow);
   __ bind(&skip);
 
-  // Call the constructor with r3, r4, and r6 unmodified.
-  __ Jump(masm->isolate()->builtins()->Construct(), RelocInfo::CODE_TARGET);
+  __ AssertUndefinedOrAllocationSite(r5, r8);
+  if (construct_type == CallableType::kJSFunction) {
+    __ AssertFunction(r4);
+
+    // Tail call to the function-specific construct stub (still in the caller
+    // context at this point).
+    __ LoadP(r7, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset));
+    __ LoadP(r7, FieldMemOperand(r7, SharedFunctionInfo::kConstructStubOffset));
+    // Jump to the construct function.
+    __ addi(ip, r7, Operand(Code::kHeaderSize - kHeapObjectTag));
+    __ Jump(ip);
+
+  } else {
+    DCHECK_EQ(construct_type, CallableType::kAny);
+    // Call the constructor with r3, r4, and r6 unmodified.
+    __ Jump(masm->isolate()->builtins()->Construct(), RelocInfo::CODE_TARGET);
+  }
+
+  __ bind(&stack_overflow);
+  {
+    __ TailCallRuntime(Runtime::kThrowStackOverflow);
+    // Unreachable Code.
+    __ bkpt(0);
+  }
+}
+
+// static
+void Builtins::Generate_InterpreterPushArgsAndConstructArray(
+    MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  // -- r3 : argument count (not including receiver)
+  // -- r4 : target to call verified to be Array function
+  // -- r5 : allocation site feedback if available, undefined otherwise.
+  // -- r6 : address of the first argument
+  // -----------------------------------
+  Label stack_overflow;
+
+  __ addi(r7, r3, Operand(1));  // Add one for receiver.
+
+  // Push the arguments. r6 and r8 will be modified.
+  Generate_InterpreterPushArgs(masm, r7, r6, r7, r8, &stack_overflow);
+
+  // Array constructor expects constructor in r6. It is same as r4 here.
+  __ mr(r6, r4);
+
+  ArrayConstructorStub stub(masm->isolate());
+  __ TailCallStub(&stub);
+
+  __ bind(&stack_overflow);
+  {
+    __ TailCallRuntime(Runtime::kThrowStackOverflow);
+    // Unreachable code.
+    __ bkpt(0);
+  }
 }
 
 void Builtins::Generate_InterpreterEnterBytecodeDispatch(MacroAssembler* masm) {
@@ -1842,61 +1937,6 @@
 }
 
 // static
-void Builtins::Generate_DatePrototype_GetField(MacroAssembler* masm,
-                                               int field_index) {
-  // ----------- S t a t e -------------
-  //  -- r3    : number of arguments
-  //  -- r4    : function
-  //  -- cp    : context
-  //  -- lr    : return address
-  //  -- sp[0] : receiver
-  // -----------------------------------
-
-  // 1. Pop receiver into r3 and check that it's actually a JSDate object.
-  Label receiver_not_date;
-  {
-    __ Pop(r3);
-    __ JumpIfSmi(r3, &receiver_not_date);
-    __ CompareObjectType(r3, r5, r6, JS_DATE_TYPE);
-    __ bne(&receiver_not_date);
-  }
-
-  // 2. Load the specified date field, falling back to the runtime as necessary.
-  if (field_index == JSDate::kDateValue) {
-    __ LoadP(r3, FieldMemOperand(r3, JSDate::kValueOffset));
-  } else {
-    if (field_index < JSDate::kFirstUncachedField) {
-      Label stamp_mismatch;
-      __ mov(r4, Operand(ExternalReference::date_cache_stamp(masm->isolate())));
-      __ LoadP(r4, MemOperand(r4));
-      __ LoadP(ip, FieldMemOperand(r3, JSDate::kCacheStampOffset));
-      __ cmp(r4, ip);
-      __ bne(&stamp_mismatch);
-      __ LoadP(r3, FieldMemOperand(
-                       r3, JSDate::kValueOffset + field_index * kPointerSize));
-      __ Ret();
-      __ bind(&stamp_mismatch);
-    }
-    FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
-    __ PrepareCallCFunction(2, r4);
-    __ LoadSmiLiteral(r4, Smi::FromInt(field_index));
-    __ CallCFunction(
-        ExternalReference::get_date_field_function(masm->isolate()), 2);
-  }
-  __ Ret();
-
-  // 3. Raise a TypeError if the receiver is not a date.
-  __ bind(&receiver_not_date);
-  {
-    FrameScope scope(masm, StackFrame::MANUAL);
-    __ push(r3);
-    __ LoadSmiLiteral(r3, Smi::FromInt(0));
-    __ EnterBuiltinFrame(cp, r4, r3);
-    __ CallRuntime(Runtime::kThrowNotDateError);
-  }
-}
-
-// static
 void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
   // ----------- S t a t e -------------
   //  -- r3    : argc
@@ -2151,27 +2191,6 @@
   }
 }
 
-static void ArgumentAdaptorStackCheck(MacroAssembler* masm,
-                                      Label* stack_overflow) {
-  // ----------- S t a t e -------------
-  //  -- r3 : actual number of arguments
-  //  -- r4 : function (passed through to callee)
-  //  -- r5 : expected number of arguments
-  //  -- r6 : new target (passed through to callee)
-  // -----------------------------------
-  // Check the stack for overflow. We are not trying to catch
-  // interruptions (e.g. debug break and preemption) here, so the "real stack
-  // limit" is checked.
-  __ LoadRoot(r8, Heap::kRealStackLimitRootIndex);
-  // Make r8 the space we have left. The stack might already be overflowed
-  // here which will cause r8 to become negative.
-  __ sub(r8, sp, r8);
-  // Check if the arguments will overflow the stack.
-  __ ShiftLeftImm(r0, r5, Operand(kPointerSizeLog2));
-  __ cmp(r8, r0);
-  __ ble(stack_overflow);  // Signed comparison.
-}
-
 static void EnterArgumentsAdaptorFrame(MacroAssembler* masm) {
   __ SmiTag(r3);
   __ LoadSmiLiteral(r7, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
@@ -2433,7 +2452,9 @@
   Label class_constructor;
   __ LoadP(r5, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset));
   __ lwz(r6, FieldMemOperand(r5, SharedFunctionInfo::kCompilerHintsOffset));
-  __ TestBitMask(r6, SharedFunctionInfo::kClassConstructorBits, r0);
+  __ TestBitMask(r6, FunctionKind::kClassConstructor
+                         << SharedFunctionInfo::kFunctionKindShift,
+                 r0);
   __ bne(&class_constructor, cr0);
 
   // Enter the context of the function; ToObject has to run in the function
@@ -2861,22 +2882,6 @@
   __ TailCallRuntime(Runtime::kAbort);
 }
 
-// static
-void Builtins::Generate_ToNumber(MacroAssembler* masm) {
-  // The ToNumber stub takes one argument in r3.
-  STATIC_ASSERT(kSmiTag == 0);
-  __ TestIfSmi(r3, r0);
-  __ Ret(eq, cr0);
-
-  __ CompareObjectType(r3, r4, r4, HEAP_NUMBER_TYPE);
-  // r3: receiver
-  // r4: receiver instance type
-  __ Ret(eq);
-
-  __ Jump(masm->isolate()->builtins()->NonNumberToNumber(),
-          RelocInfo::CODE_TARGET);
-}
-
 void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
   // ----------- S t a t e -------------
   //  -- r3 : actual number of arguments
@@ -2897,7 +2902,7 @@
   {  // Enough parameters: actual >= expected
     __ bind(&enough);
     EnterArgumentsAdaptorFrame(masm);
-    ArgumentAdaptorStackCheck(masm, &stack_overflow);
+    Generate_StackOverflowCheck(masm, r5, r8, &stack_overflow);
 
     // Calculate copy start address into r3 and copy end address into r7.
     // r3: actual number of arguments as a smi
@@ -2935,7 +2940,7 @@
     __ bind(&too_few);
 
     EnterArgumentsAdaptorFrame(masm);
-    ArgumentAdaptorStackCheck(masm, &stack_overflow);
+    Generate_StackOverflowCheck(masm, r5, r8, &stack_overflow);
 
     // Calculate copy start address into r0 and copy end address is fp.
     // r3: actual number of arguments as a smi
diff --git a/src/builtins/s390/builtins-s390.cc b/src/builtins/s390/builtins-s390.cc
index c68fcc3..91ae2c0 100644
--- a/src/builtins/s390/builtins-s390.cc
+++ b/src/builtins/s390/builtins-s390.cc
@@ -396,10 +396,9 @@
   __ bind(&to_string);
   {
     FrameScope scope(masm, StackFrame::MANUAL);
-    ToStringStub stub(masm->isolate());
     __ SmiTag(r4);
     __ EnterBuiltinFrame(cp, r3, r4);
-    __ CallStub(&stub);
+    __ Call(masm->isolate()->builtins()->ToString(), RelocInfo::CODE_TARGET);
     __ LeaveBuiltinFrame(cp, r3, r4);
     __ SmiUntag(r4);
   }
@@ -459,12 +458,11 @@
     __ bind(&convert);
     {
       FrameScope scope(masm, StackFrame::MANUAL);
-      ToStringStub stub(masm->isolate());
       __ SmiTag(r8);
       __ EnterBuiltinFrame(cp, r3, r8);
       __ Push(r5);
       __ LoadRR(r2, r4);
-      __ CallStub(&stub);
+      __ Call(masm->isolate()->builtins()->ToString(), RelocInfo::CODE_TARGET);
       __ LoadRR(r4, r2);
       __ Pop(r5);
       __ LeaveBuiltinFrame(cp, r3, r8);
@@ -1087,6 +1085,17 @@
   __ CmpP(r2, Operand(masm->CodeObject()));  // Self-reference to this code.
   __ bne(&switch_to_different_code_kind);
 
+  // Increment invocation count for the function.
+  __ LoadP(r6, FieldMemOperand(r3, JSFunction::kLiteralsOffset));
+  __ LoadP(r6, FieldMemOperand(r6, LiteralsArray::kFeedbackVectorOffset));
+  __ LoadP(r1, FieldMemOperand(r6, TypeFeedbackVector::kInvocationCountIndex *
+                                           kPointerSize +
+                                       TypeFeedbackVector::kHeaderSize));
+  __ AddSmiLiteral(r1, r1, Smi::FromInt(1), r0);
+  __ StoreP(r1, FieldMemOperand(r6, TypeFeedbackVector::kInvocationCountIndex *
+                                            kPointerSize +
+                                        TypeFeedbackVector::kHeaderSize));
+
   // Check function data field is actually a BytecodeArray object.
   if (FLAG_debug_code) {
     __ TestIfSmi(kInterpreterBytecodeArrayRegister);
@@ -1191,8 +1200,29 @@
   __ Ret();
 }
 
-static void Generate_InterpreterPushArgs(MacroAssembler* masm, Register index,
-                                         Register count, Register scratch) {
+static void Generate_StackOverflowCheck(MacroAssembler* masm, Register num_args,
+                                        Register scratch,
+                                        Label* stack_overflow) {
+  // Check the stack for overflow. We are not trying to catch
+  // interruptions (e.g. debug break and preemption) here, so the "real stack
+  // limit" is checked.
+  __ LoadRoot(scratch, Heap::kRealStackLimitRootIndex);
+  // Make scratch the space we have left. The stack might already be overflowed
+  // here which will cause scratch to become negative.
+  __ SubP(scratch, sp, scratch);
+  // Check if the arguments will overflow the stack.
+  __ ShiftLeftP(r0, num_args, Operand(kPointerSizeLog2));
+  __ CmpP(scratch, r0);
+  __ ble(stack_overflow);  // Signed comparison.
+}
+
+static void Generate_InterpreterPushArgs(MacroAssembler* masm,
+                                         Register num_args, Register index,
+                                         Register count, Register scratch,
+                                         Label* stack_overflow) {
+  // Add a stack check before pushing arguments.
+  Generate_StackOverflowCheck(masm, num_args, scratch, stack_overflow);
+
   Label loop;
   __ AddP(index, index, Operand(kPointerSize));  // Bias up for LoadPU
   __ LoadRR(r0, count);
@@ -1215,12 +1245,13 @@
   //          they are to be pushed onto the stack.
   //  -- r3 : the target to call (can be any Object).
   // -----------------------------------
+  Label stack_overflow;
 
   // Calculate number of arguments (AddP one for receiver).
   __ AddP(r5, r2, Operand(1));
 
   // Push the arguments.
-  Generate_InterpreterPushArgs(masm, r4, r5, r6);
+  Generate_InterpreterPushArgs(masm, r5, r4, r5, r6, &stack_overflow);
 
   // Call the target.
   if (function_type == CallableType::kJSFunction) {
@@ -1233,16 +1264,26 @@
                                               tail_call_mode),
             RelocInfo::CODE_TARGET);
   }
+
+  __ bind(&stack_overflow);
+  {
+    __ TailCallRuntime(Runtime::kThrowStackOverflow);
+    // Unreachable Code.
+    __ bkpt(0);
+  }
 }
 
 // static
-void Builtins::Generate_InterpreterPushArgsAndConstruct(MacroAssembler* masm) {
+void Builtins::Generate_InterpreterPushArgsAndConstructImpl(
+    MacroAssembler* masm, CallableType construct_type) {
   // ----------- S t a t e -------------
   // -- r2 : argument count (not including receiver)
   // -- r5 : new target
   // -- r3 : constructor to call
-  // -- r4 : address of the first argument
+  // -- r4 : allocation site feedback if available, undefined otherwise.
+  // -- r6 : address of the first argument
   // -----------------------------------
+  Label stack_overflow;
 
   // Push a slot for the receiver to be constructed.
   __ LoadImmP(r0, Operand::Zero());
@@ -1252,11 +1293,63 @@
   Label skip;
   __ CmpP(r2, Operand::Zero());
   __ beq(&skip);
-  Generate_InterpreterPushArgs(masm, r4, r2, r6);
+  Generate_InterpreterPushArgs(masm, r2, r6, r2, r7, &stack_overflow);
   __ bind(&skip);
 
-  // Call the constructor with r2, r3, and r5 unmodified.
-  __ Jump(masm->isolate()->builtins()->Construct(), RelocInfo::CODE_TARGET);
+  __ AssertUndefinedOrAllocationSite(r4, r7);
+  if (construct_type == CallableType::kJSFunction) {
+    __ AssertFunction(r3);
+
+    // Tail call to the function-specific construct stub (still in the caller
+    // context at this point).
+    __ LoadP(r6, FieldMemOperand(r3, JSFunction::kSharedFunctionInfoOffset));
+    __ LoadP(r6, FieldMemOperand(r6, SharedFunctionInfo::kConstructStubOffset));
+    // Jump to the construct function.
+    __ AddP(ip, r6, Operand(Code::kHeaderSize - kHeapObjectTag));
+    __ Jump(ip);
+
+  } else {
+    DCHECK_EQ(construct_type, CallableType::kAny);
+    // Call the constructor with r2, r3, and r5 unmodified.
+    __ Jump(masm->isolate()->builtins()->Construct(), RelocInfo::CODE_TARGET);
+  }
+
+  __ bind(&stack_overflow);
+  {
+    __ TailCallRuntime(Runtime::kThrowStackOverflow);
+    // Unreachable Code.
+    __ bkpt(0);
+  }
+}
+
+// static
+void Builtins::Generate_InterpreterPushArgsAndConstructArray(
+    MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  // -- r2 : argument count (not including receiver)
+  // -- r3 : target to call verified to be Array function
+  // -- r4 : allocation site feedback if available, undefined otherwise.
+  // -- r5 : address of the first argument
+  // -----------------------------------
+  Label stack_overflow;
+
+  __ AddP(r6, r2, Operand(1));  // Add one for receiver.
+
+  // Push the arguments. r5 and r7 will be modified.
+  Generate_InterpreterPushArgs(masm, r6, r5, r6, r7, &stack_overflow);
+
+  // Array constructor expects constructor in r5. It is same as r3 here.
+  __ LoadRR(r5, r3);
+
+  ArrayConstructorStub stub(masm->isolate());
+  __ TailCallStub(&stub);
+
+  __ bind(&stack_overflow);
+  {
+    __ TailCallRuntime(Runtime::kThrowStackOverflow);
+    // Unreachable Code.
+    __ bkpt(0);
+  }
 }
 
 void Builtins::Generate_InterpreterEnterBytecodeDispatch(MacroAssembler* masm) {
@@ -1844,62 +1937,6 @@
 }
 
 // static
-void Builtins::Generate_DatePrototype_GetField(MacroAssembler* masm,
-                                               int field_index) {
-  // ----------- S t a t e -------------
-  //  -- r2    : number of arguments
-  //  -- r3    : function
-  //  -- cp    : context
-
-  //  -- lr    : return address
-  //  -- sp[0] : receiver
-  // -----------------------------------
-
-  // 1. Pop receiver into r2 and check that it's actually a JSDate object.
-  Label receiver_not_date;
-  {
-    __ Pop(r2);
-    __ JumpIfSmi(r2, &receiver_not_date);
-    __ CompareObjectType(r2, r4, r5, JS_DATE_TYPE);
-    __ bne(&receiver_not_date);
-  }
-
-  // 2. Load the specified date field, falling back to the runtime as necessary.
-  if (field_index == JSDate::kDateValue) {
-    __ LoadP(r2, FieldMemOperand(r2, JSDate::kValueOffset));
-  } else {
-    if (field_index < JSDate::kFirstUncachedField) {
-      Label stamp_mismatch;
-      __ mov(r3, Operand(ExternalReference::date_cache_stamp(masm->isolate())));
-      __ LoadP(r3, MemOperand(r3));
-      __ LoadP(ip, FieldMemOperand(r2, JSDate::kCacheStampOffset));
-      __ CmpP(r3, ip);
-      __ bne(&stamp_mismatch);
-      __ LoadP(r2, FieldMemOperand(
-                       r2, JSDate::kValueOffset + field_index * kPointerSize));
-      __ Ret();
-      __ bind(&stamp_mismatch);
-    }
-    FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
-    __ PrepareCallCFunction(2, r3);
-    __ LoadSmiLiteral(r3, Smi::FromInt(field_index));
-    __ CallCFunction(
-        ExternalReference::get_date_field_function(masm->isolate()), 2);
-  }
-  __ Ret();
-
-  // 3. Raise a TypeError if the receiver is not a date.
-  __ bind(&receiver_not_date);
-  {
-    FrameScope scope(masm, StackFrame::MANUAL);
-    __ push(r2);
-    __ LoadSmiLiteral(r2, Smi::FromInt(0));
-    __ EnterBuiltinFrame(cp, r3, r2);
-    __ CallRuntime(Runtime::kThrowNotDateError);
-  }
-}
-
-// static
 void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
   // ----------- S t a t e -------------
   //  -- r2    : argc
@@ -2154,27 +2191,6 @@
   }
 }
 
-static void ArgumentAdaptorStackCheck(MacroAssembler* masm,
-                                      Label* stack_overflow) {
-  // ----------- S t a t e -------------
-  //  -- r2 : actual number of arguments
-  //  -- r3 : function (passed through to callee)
-  //  -- r4 : expected number of arguments
-  //  -- r5 : new target (passed through to callee)
-  // -----------------------------------
-  // Check the stack for overflow. We are not trying to catch
-  // interruptions (e.g. debug break and preemption) here, so the "real stack
-  // limit" is checked.
-  __ LoadRoot(r7, Heap::kRealStackLimitRootIndex);
-  // Make r7 the space we have left. The stack might already be overflowed
-  // here which will cause r7 to become negative.
-  __ SubP(r7, sp, r7);
-  // Check if the arguments will overflow the stack.
-  __ ShiftLeftP(r0, r4, Operand(kPointerSizeLog2));
-  __ CmpP(r7, r0);
-  __ ble(stack_overflow);  // Signed comparison.
-}
-
 static void EnterArgumentsAdaptorFrame(MacroAssembler* masm) {
   __ SmiTag(r2);
   __ LoadSmiLiteral(r6, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
@@ -2445,7 +2461,9 @@
   Label class_constructor;
   __ LoadP(r4, FieldMemOperand(r3, JSFunction::kSharedFunctionInfoOffset));
   __ LoadlW(r5, FieldMemOperand(r4, SharedFunctionInfo::kCompilerHintsOffset));
-  __ TestBitMask(r5, SharedFunctionInfo::kClassConstructorBits, r0);
+  __ TestBitMask(r5, FunctionKind::kClassConstructor
+                         << SharedFunctionInfo::kFunctionKindShift,
+                 r0);
   __ bne(&class_constructor);
 
   // Enter the context of the function; ToObject has to run in the function
@@ -2875,22 +2893,6 @@
   __ TailCallRuntime(Runtime::kAbort);
 }
 
-// static
-void Builtins::Generate_ToNumber(MacroAssembler* masm) {
-  // The ToNumber stub takes one argument in r2.
-  STATIC_ASSERT(kSmiTag == 0);
-  __ TestIfSmi(r2);
-  __ Ret(eq);
-
-  __ CompareObjectType(r2, r3, r3, HEAP_NUMBER_TYPE);
-  // r2: receiver
-  // r3: receiver instance type
-  __ Ret(eq);
-
-  __ Jump(masm->isolate()->builtins()->NonNumberToNumber(),
-          RelocInfo::CODE_TARGET);
-}
-
 void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
   // ----------- S t a t e -------------
   //  -- r2 : actual number of arguments
@@ -2911,7 +2913,7 @@
   {  // Enough parameters: actual >= expected
     __ bind(&enough);
     EnterArgumentsAdaptorFrame(masm);
-    ArgumentAdaptorStackCheck(masm, &stack_overflow);
+    Generate_StackOverflowCheck(masm, r4, r7, &stack_overflow);
 
     // Calculate copy start address into r2 and copy end address into r6.
     // r2: actual number of arguments as a smi
@@ -2949,7 +2951,7 @@
     __ bind(&too_few);
 
     EnterArgumentsAdaptorFrame(masm);
-    ArgumentAdaptorStackCheck(masm, &stack_overflow);
+    Generate_StackOverflowCheck(masm, r4, r7, &stack_overflow);
 
     // Calculate copy start address into r0 and copy end address is fp.
     // r2: actual number of arguments as a smi
diff --git a/src/builtins/x64/builtins-x64.cc b/src/builtins/x64/builtins-x64.cc
index 1536604..beae2d2 100644
--- a/src/builtins/x64/builtins-x64.cc
+++ b/src/builtins/x64/builtins-x64.cc
@@ -672,6 +672,15 @@
   __ cmpp(rcx, FieldOperand(rax, SharedFunctionInfo::kCodeOffset));
   __ j(not_equal, &switch_to_different_code_kind);
 
+  // Increment invocation count for the function.
+  __ movp(rcx, FieldOperand(rdi, JSFunction::kLiteralsOffset));
+  __ movp(rcx, FieldOperand(rcx, LiteralsArray::kFeedbackVectorOffset));
+  __ SmiAddConstant(
+      FieldOperand(rcx,
+                   TypeFeedbackVector::kInvocationCountIndex * kPointerSize +
+                       TypeFeedbackVector::kHeaderSize),
+      Smi::FromInt(1));
+
   // Check function data field is actually a BytecodeArray object.
   if (FLAG_debug_code) {
     __ AssertNotSmi(kInterpreterBytecodeArrayRegister);
@@ -782,33 +791,44 @@
   __ ret(0);
 }
 
+static void Generate_StackOverflowCheck(MacroAssembler* masm, Register num_args,
+                                        Register scratch1, Register scratch2,
+                                        Label* stack_overflow) {
+  // Check the stack for overflow. We are not trying to catch
+  // interruptions (e.g. debug break and preemption) here, so the "real stack
+  // limit" is checked.
+  __ LoadRoot(scratch1, Heap::kRealStackLimitRootIndex);
+  __ movp(scratch2, rsp);
+  // Make scratch2 the space we have left. The stack might already be overflowed
+  // here which will cause scratch2 to become negative.
+  __ subp(scratch2, scratch1);
+  // Make scratch1 the space we need for the array when it is unrolled onto the
+  // stack.
+  __ movp(scratch1, num_args);
+  __ shlp(scratch1, Immediate(kPointerSizeLog2));
+  // Check if the arguments will overflow the stack.
+  __ cmpp(scratch2, scratch1);
+  __ j(less_equal, stack_overflow);  // Signed comparison.
+}
+
 static void Generate_InterpreterPushArgs(MacroAssembler* masm,
-                                         bool push_receiver) {
-  // ----------- S t a t e -------------
-  //  -- rax : the number of arguments (not including the receiver)
-  //  -- rbx : the address of the first argument to be pushed. Subsequent
-  //           arguments should be consecutive above this, in the same order as
-  //           they are to be pushed onto the stack.
-  // -----------------------------------
-
+                                         Register num_args,
+                                         Register start_address,
+                                         Register scratch) {
   // Find the address of the last argument.
-  __ movp(rcx, rax);
-  if (push_receiver) {
-    __ addp(rcx, Immediate(1));  // Add one for receiver.
-  }
-
-  __ shlp(rcx, Immediate(kPointerSizeLog2));
-  __ negp(rcx);
-  __ addp(rcx, rbx);
+  __ Move(scratch, num_args);
+  __ shlp(scratch, Immediate(kPointerSizeLog2));
+  __ negp(scratch);
+  __ addp(scratch, start_address);
 
   // Push the arguments.
   Label loop_header, loop_check;
   __ j(always, &loop_check);
   __ bind(&loop_header);
-  __ Push(Operand(rbx, 0));
-  __ subp(rbx, Immediate(kPointerSize));
+  __ Push(Operand(start_address, 0));
+  __ subp(start_address, Immediate(kPointerSize));
   __ bind(&loop_check);
-  __ cmpp(rbx, rcx);
+  __ cmpp(start_address, scratch);
   __ j(greater, &loop_header, Label::kNear);
 }
 
@@ -823,11 +843,20 @@
   //           they are to be pushed onto the stack.
   //  -- rdi : the target to call (can be any Object).
   // -----------------------------------
+  Label stack_overflow;
+
+  // Number of values to be pushed.
+  __ Move(rcx, rax);
+  __ addp(rcx, Immediate(1));  // Add one for receiver.
+
+  // Add a stack check before pushing arguments.
+  Generate_StackOverflowCheck(masm, rcx, rdx, r8, &stack_overflow);
 
   // Pop return address to allow tail-call after pushing arguments.
   __ PopReturnAddressTo(kScratchRegister);
 
-  Generate_InterpreterPushArgs(masm, true);
+  // rbx and rdx will be modified.
+  Generate_InterpreterPushArgs(masm, rcx, rbx, rdx);
 
   // Call the target.
   __ PushReturnAddressFrom(kScratchRegister);  // Re-push return address.
@@ -842,19 +871,33 @@
                                               tail_call_mode),
             RelocInfo::CODE_TARGET);
   }
+
+  // Throw stack overflow exception.
+  __ bind(&stack_overflow);
+  {
+    __ TailCallRuntime(Runtime::kThrowStackOverflow);
+    // This should be unreachable.
+    __ int3();
+  }
 }
 
 // static
-void Builtins::Generate_InterpreterPushArgsAndConstruct(MacroAssembler* masm) {
+void Builtins::Generate_InterpreterPushArgsAndConstructImpl(
+    MacroAssembler* masm, CallableType construct_type) {
   // ----------- S t a t e -------------
   //  -- rax : the number of arguments (not including the receiver)
   //  -- rdx : the new target (either the same as the constructor or
   //           the JSFunction on which new was invoked initially)
   //  -- rdi : the constructor to call (can be any Object)
-  //  -- rbx : the address of the first argument to be pushed. Subsequent
+  //  -- rbx : the allocation site feedback if available, undefined otherwise
+  //  -- rcx : the address of the first argument to be pushed. Subsequent
   //           arguments should be consecutive above this, in the same order as
   //           they are to be pushed onto the stack.
   // -----------------------------------
+  Label stack_overflow;
+
+  // Add a stack check before pushing arguments.
+  Generate_StackOverflowCheck(masm, rax, r8, r9, &stack_overflow);
 
   // Pop return address to allow tail-call after pushing arguments.
   __ PopReturnAddressTo(kScratchRegister);
@@ -862,13 +905,80 @@
   // Push slot for the receiver to be constructed.
   __ Push(Immediate(0));
 
-  Generate_InterpreterPushArgs(masm, false);
+  // rcx and r8 will be modified.
+  Generate_InterpreterPushArgs(masm, rax, rcx, r8);
 
   // Push return address in preparation for the tail-call.
   __ PushReturnAddressFrom(kScratchRegister);
 
-  // Call the constructor (rax, rdx, rdi passed on).
-  __ Jump(masm->isolate()->builtins()->Construct(), RelocInfo::CODE_TARGET);
+  __ AssertUndefinedOrAllocationSite(rbx);
+  if (construct_type == CallableType::kJSFunction) {
+    // Tail call to the function-specific construct stub (still in the caller
+    // context at this point).
+    __ AssertFunction(rdi);
+
+    __ movp(rcx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
+    __ movp(rcx, FieldOperand(rcx, SharedFunctionInfo::kConstructStubOffset));
+    __ leap(rcx, FieldOperand(rcx, Code::kHeaderSize));
+    // Jump to the constructor function (rax, rbx, rdx passed on).
+    __ jmp(rcx);
+  } else {
+    DCHECK_EQ(construct_type, CallableType::kAny);
+    // Call the constructor (rax, rdx, rdi passed on).
+    __ Jump(masm->isolate()->builtins()->Construct(), RelocInfo::CODE_TARGET);
+  }
+
+  // Throw stack overflow exception.
+  __ bind(&stack_overflow);
+  {
+    __ TailCallRuntime(Runtime::kThrowStackOverflow);
+    // This should be unreachable.
+    __ int3();
+  }
+}
+
+// static
+void Builtins::Generate_InterpreterPushArgsAndConstructArray(
+    MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- rax : the number of arguments (not including the receiver)
+  //  -- rdx : the target to call checked to be Array function.
+  //  -- rbx : the allocation site feedback
+  //  -- rcx : the address of the first argument to be pushed. Subsequent
+  //           arguments should be consecutive above this, in the same order as
+  //           they are to be pushed onto the stack.
+  // -----------------------------------
+  Label stack_overflow;
+
+  // Number of values to be pushed.
+  __ Move(r8, rax);
+  __ addp(r8, Immediate(1));  // Add one for receiver.
+
+  // Add a stack check before pushing arguments.
+  Generate_StackOverflowCheck(masm, r8, rdi, r9, &stack_overflow);
+
+  // Pop return address to allow tail-call after pushing arguments.
+  __ PopReturnAddressTo(kScratchRegister);
+
+  // rcx and rdi will be modified.
+  Generate_InterpreterPushArgs(masm, r8, rcx, rdi);
+
+  // Push return address in preparation for the tail-call.
+  __ PushReturnAddressFrom(kScratchRegister);
+
+  // Array constructor expects constructor in rdi. It is same as rdx here.
+  __ Move(rdi, rdx);
+
+  ArrayConstructorStub stub(masm->isolate());
+  __ TailCallStub(&stub);
+
+  // Throw stack overflow exception.
+  __ bind(&stack_overflow);
+  {
+    __ TailCallRuntime(Runtime::kThrowStackOverflow);
+    // This should be unreachable.
+    __ int3();
+  }
 }
 
 void Builtins::Generate_InterpreterEnterBytecodeDispatch(MacroAssembler* masm) {
@@ -1275,60 +1385,6 @@
 }
 
 // static
-void Builtins::Generate_DatePrototype_GetField(MacroAssembler* masm,
-                                               int field_index) {
-  // ----------- S t a t e -------------
-  //  -- rax    : number of arguments
-  //  -- rdi    : function
-  //  -- rsi    : context
-  //  -- rsp[0] : return address
-  //  -- rsp[8] : receiver
-  // -----------------------------------
-
-  // 1. Load receiver into rax and check that it's actually a JSDate object.
-  Label receiver_not_date;
-  {
-    StackArgumentsAccessor args(rsp, 0);
-    __ movp(rax, args.GetReceiverOperand());
-    __ JumpIfSmi(rax, &receiver_not_date);
-    __ CmpObjectType(rax, JS_DATE_TYPE, rbx);
-    __ j(not_equal, &receiver_not_date);
-  }
-
-  // 2. Load the specified date field, falling back to the runtime as necessary.
-  if (field_index == JSDate::kDateValue) {
-    __ movp(rax, FieldOperand(rax, JSDate::kValueOffset));
-  } else {
-    if (field_index < JSDate::kFirstUncachedField) {
-      Label stamp_mismatch;
-      __ Load(rdx, ExternalReference::date_cache_stamp(masm->isolate()));
-      __ cmpp(rdx, FieldOperand(rax, JSDate::kCacheStampOffset));
-      __ j(not_equal, &stamp_mismatch, Label::kNear);
-      __ movp(rax, FieldOperand(
-                       rax, JSDate::kValueOffset + field_index * kPointerSize));
-      __ ret(1 * kPointerSize);
-      __ bind(&stamp_mismatch);
-    }
-    FrameScope scope(masm, StackFrame::INTERNAL);
-    __ PrepareCallCFunction(2);
-    __ Move(arg_reg_1, rax);
-    __ Move(arg_reg_2, Smi::FromInt(field_index));
-    __ CallCFunction(
-        ExternalReference::get_date_field_function(masm->isolate()), 2);
-  }
-  __ ret(1 * kPointerSize);
-
-  // 3. Raise a TypeError if the receiver is not a date.
-  __ bind(&receiver_not_date);
-  {
-    FrameScope scope(masm, StackFrame::MANUAL);
-    __ Move(rbx, Smi::FromInt(0));
-    __ EnterBuiltinFrame(rsi, rdi, rbx);
-    __ CallRuntime(Runtime::kThrowNotDateError);
-  }
-}
-
-// static
 void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
   // ----------- S t a t e -------------
   //  -- rax     : argc
@@ -1948,9 +2004,8 @@
   __ bind(&to_string);
   {
     FrameScope scope(masm, StackFrame::MANUAL);
-    ToStringStub stub(masm->isolate());
     __ EnterBuiltinFrame(rsi, rdi, r8);
-    __ CallStub(&stub);
+    __ Call(masm->isolate()->builtins()->ToString(), RelocInfo::CODE_TARGET);
     __ LeaveBuiltinFrame(rsi, rdi, r8);
   }
   __ jmp(&drop_frame_and_ret, Label::kNear);
@@ -2017,11 +2072,10 @@
     __ bind(&convert);
     {
       FrameScope scope(masm, StackFrame::MANUAL);
-      ToStringStub stub(masm->isolate());
       __ EnterBuiltinFrame(rsi, rdi, r8);
       __ Push(rdx);
       __ Move(rax, rbx);
-      __ CallStub(&stub);
+      __ Call(masm->isolate()->builtins()->ToString(), RelocInfo::CODE_TARGET);
       __ Move(rbx, rax);
       __ Pop(rdx);
       __ LeaveBuiltinFrame(rsi, rdi, r8);
@@ -2061,32 +2115,6 @@
   }
 }
 
-static void ArgumentsAdaptorStackCheck(MacroAssembler* masm,
-                                       Label* stack_overflow) {
-  // ----------- S t a t e -------------
-  //  -- rax : actual number of arguments
-  //  -- rbx : expected number of arguments
-  //  -- rdx : new target (passed through to callee)
-  //  -- rdi : function (passed through to callee)
-  // -----------------------------------
-  // Check the stack for overflow. We are not trying to catch
-  // interruptions (e.g. debug break and preemption) here, so the "real stack
-  // limit" is checked.
-  Label okay;
-  __ LoadRoot(r8, Heap::kRealStackLimitRootIndex);
-  __ movp(rcx, rsp);
-  // Make rcx the space we have left. The stack might already be overflowed
-  // here which will cause rcx to become negative.
-  __ subp(rcx, r8);
-  // Make r8 the space we need for the array when it is unrolled onto the
-  // stack.
-  __ movp(r8, rbx);
-  __ shlp(r8, Immediate(kPointerSizeLog2));
-  // Check if the arguments will overflow the stack.
-  __ cmpp(rcx, r8);
-  __ j(less_equal, stack_overflow);  // Signed comparison.
-}
-
 static void EnterArgumentsAdaptorFrame(MacroAssembler* masm) {
   __ pushq(rbp);
   __ movp(rbp, rsp);
@@ -2161,25 +2189,6 @@
   __ TailCallRuntime(Runtime::kAbort);
 }
 
-// static
-void Builtins::Generate_ToNumber(MacroAssembler* masm) {
-  // The ToNumber stub takes one argument in rax.
-  Label not_smi;
-  __ JumpIfNotSmi(rax, &not_smi, Label::kNear);
-  __ Ret();
-  __ bind(&not_smi);
-
-  Label not_heap_number;
-  __ CompareRoot(FieldOperand(rax, HeapObject::kMapOffset),
-                 Heap::kHeapNumberMapRootIndex);
-  __ j(not_equal, &not_heap_number, Label::kNear);
-  __ Ret();
-  __ bind(&not_heap_number);
-
-  __ Jump(masm->isolate()->builtins()->NonNumberToNumber(),
-          RelocInfo::CODE_TARGET);
-}
-
 void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
   // ----------- S t a t e -------------
   //  -- rax : actual number of arguments
@@ -2201,7 +2210,8 @@
   {  // Enough parameters: Actual >= expected.
     __ bind(&enough);
     EnterArgumentsAdaptorFrame(masm);
-    ArgumentsAdaptorStackCheck(masm, &stack_overflow);
+    // The registers rcx and r8 will be modified. The register rbx is only read.
+    Generate_StackOverflowCheck(masm, rbx, rcx, r8, &stack_overflow);
 
     // Copy receiver and all expected arguments.
     const int offset = StandardFrameConstants::kCallerSPOffset;
@@ -2222,7 +2232,8 @@
     __ bind(&too_few);
 
     EnterArgumentsAdaptorFrame(masm);
-    ArgumentsAdaptorStackCheck(masm, &stack_overflow);
+    // The registers rcx and r8 will be modified. The register rbx is only read.
+    Generate_StackOverflowCheck(masm, rbx, rcx, r8, &stack_overflow);
 
     // Copy receiver and all actual arguments.
     const int offset = StandardFrameConstants::kCallerSPOffset;
diff --git a/src/builtins/x87/builtins-x87.cc b/src/builtins/x87/builtins-x87.cc
index 9c46f20..8e096a3 100644
--- a/src/builtins/x87/builtins-x87.cc
+++ b/src/builtins/x87/builtins-x87.cc
@@ -591,6 +591,13 @@
   __ cmp(ecx, FieldOperand(eax, SharedFunctionInfo::kCodeOffset));
   __ j(not_equal, &switch_to_different_code_kind);
 
+  // Increment invocation count for the function.
+  __ EmitLoadTypeFeedbackVector(ecx);
+  __ add(FieldOperand(ecx,
+                      TypeFeedbackVector::kInvocationCountIndex * kPointerSize +
+                          TypeFeedbackVector::kHeaderSize),
+         Immediate(Smi::FromInt(1)));
+
   // Check function data field is actually a BytecodeArray object.
   if (FLAG_debug_code) {
     __ AssertNotSmi(kInterpreterBytecodeArrayRegister);
@@ -704,20 +711,47 @@
   __ ret(0);
 }
 
+static void Generate_StackOverflowCheck(MacroAssembler* masm, Register num_args,
+                                        Register scratch1, Register scratch2,
+                                        Label* stack_overflow,
+                                        bool include_receiver = false) {
+  // Check the stack for overflow. We are not trying to catch
+  // interruptions (e.g. debug break and preemption) here, so the "real stack
+  // limit" is checked.
+  ExternalReference real_stack_limit =
+      ExternalReference::address_of_real_stack_limit(masm->isolate());
+  __ mov(scratch1, Operand::StaticVariable(real_stack_limit));
+  // Make scratch2 the space we have left. The stack might already be overflowed
+  // here which will cause scratch2 to become negative.
+  __ mov(scratch2, esp);
+  __ sub(scratch2, scratch1);
+  // Make scratch1 the space we need for the array when it is unrolled onto the
+  // stack.
+  __ mov(scratch1, num_args);
+  if (include_receiver) {
+    __ add(scratch1, Immediate(1));
+  }
+  __ shl(scratch1, kPointerSizeLog2);
+  // Check if the arguments will overflow the stack.
+  __ cmp(scratch2, scratch1);
+  __ j(less_equal, stack_overflow);  // Signed comparison.
+}
+
 static void Generate_InterpreterPushArgs(MacroAssembler* masm,
-                                         Register array_limit) {
+                                         Register array_limit,
+                                         Register start_address) {
   // ----------- S t a t e -------------
-  //  -- ebx : Pointer to the last argument in the args array.
+  //  -- start_address : Pointer to the last argument in the args array.
   //  -- array_limit : Pointer to one before the first argument in the
   //                   args array.
   // -----------------------------------
   Label loop_header, loop_check;
   __ jmp(&loop_check);
   __ bind(&loop_header);
-  __ Push(Operand(ebx, 0));
-  __ sub(ebx, Immediate(kPointerSize));
+  __ Push(Operand(start_address, 0));
+  __ sub(start_address, Immediate(kPointerSize));
   __ bind(&loop_check);
-  __ cmp(ebx, array_limit);
+  __ cmp(start_address, array_limit);
   __ j(greater, &loop_header, Label::kNear);
 }
 
@@ -732,18 +766,26 @@
   //           they are to be pushed onto the stack.
   //  -- edi : the target to call (can be any Object).
   // -----------------------------------
+  Label stack_overflow;
+  // Compute the expected number of arguments.
+  __ mov(ecx, eax);
+  __ add(ecx, Immediate(1));  // Add one for receiver.
+
+  // Add a stack check before pushing the arguments. We need an extra register
+  // to perform a stack check. So push it onto the stack temporarily. This
+  // might cause stack overflow, but it will be detected by the check.
+  __ Push(edi);
+  Generate_StackOverflowCheck(masm, ecx, edx, edi, &stack_overflow);
+  __ Pop(edi);
 
   // Pop return address to allow tail-call after pushing arguments.
   __ Pop(edx);
 
   // Find the address of the last argument.
-  __ mov(ecx, eax);
-  __ add(ecx, Immediate(1));  // Add one for receiver.
   __ shl(ecx, kPointerSizeLog2);
   __ neg(ecx);
   __ add(ecx, ebx);
-
-  Generate_InterpreterPushArgs(masm, ecx);
+  Generate_InterpreterPushArgs(masm, ecx, ebx);
 
   // Call the target.
   __ Push(edx);  // Re-push return address.
@@ -758,43 +800,210 @@
                                               tail_call_mode),
             RelocInfo::CODE_TARGET);
   }
+
+  __ bind(&stack_overflow);
+  {
+    // Pop the temporary registers, so that return address is on top of stack.
+    __ Pop(edi);
+
+    __ TailCallRuntime(Runtime::kThrowStackOverflow);
+
+    // This should be unreachable.
+    __ int3();
+  }
 }
 
+namespace {
+
+// This function modifies start_addr, and only reads the contents of num_args
+// register. scratch1 and scratch2 are used as temporary registers. Their
+// original values are restored after the use.
+void Generate_InterpreterPushArgsAndReturnAddress(
+    MacroAssembler* masm, Register num_args, Register start_addr,
+    Register scratch1, Register scratch2, bool receiver_in_args,
+    int num_slots_above_ret_addr, Label* stack_overflow) {
+  // We have to move return address and the temporary registers above it
+  // before we can copy arguments onto the stack. To achieve this:
+  // Step 1: Increment the stack pointer by num_args + 1 (for receiver).
+  // Step 2: Move the return address and values above it to the top of stack.
+  // Step 3: Copy the arguments into the correct locations.
+  //  current stack    =====>    required stack layout
+  // |             |            | scratch1      | (2) <-- esp(1)
+  // |             |            | ....          | (2)
+  // |             |            | scratch-n     | (2)
+  // |             |            | return addr   | (2)
+  // |             |            | arg N         | (3)
+  // | scratch1    | <-- esp    | ....          |
+  // | ....        |            | arg 0         |
+  // | scratch-n   |            | arg 0         |
+  // | return addr |            | receiver slot |
+
+  // Check for stack overflow before we increment the stack pointer.
+  Generate_StackOverflowCheck(masm, num_args, scratch1, scratch2,
+                              stack_overflow, true);
+
+// Step 1 - Update the stack pointer. scratch1 already contains the required
+// increment to the stack. i.e. num_args + 1 stack slots. This is computed in
+// the Generate_StackOverflowCheck.
+
+#ifdef _MSC_VER
+  // TODO(mythria): Move it to macro assembler.
+  // In windows, we cannot increment the stack size by more than one page
+  // (minimum page size is 4KB) without accessing at least one byte on the
+  // page. Check this:
+  // https://msdn.microsoft.com/en-us/library/aa227153(v=vs.60).aspx.
+  const int page_size = 4 * 1024;
+  Label check_offset, update_stack_pointer;
+  __ bind(&check_offset);
+  __ cmp(scratch1, page_size);
+  __ j(less, &update_stack_pointer);
+  __ sub(esp, Immediate(page_size));
+  // Just to touch the page, before we increment further.
+  __ mov(Operand(esp, 0), Immediate(0));
+  __ sub(scratch1, Immediate(page_size));
+  __ jmp(&check_offset);
+  __ bind(&update_stack_pointer);
+#endif
+
+  __ sub(esp, scratch1);
+
+  // Step 2: Move the return address and slots above it to the correct locations.
+  // Move from top to bottom, otherwise we may overwrite when num_args = 0 or 1,
+  // basically when the source and destination overlap. We at least need one
+  // extra slot for receiver, so no extra checks are required to avoid copy.
+  for (int i = 0; i < num_slots_above_ret_addr + 1; i++) {
+    __ mov(scratch1,
+           Operand(esp, num_args, times_pointer_size, (i + 1) * kPointerSize));
+    __ mov(Operand(esp, i * kPointerSize), scratch1);
+  }
+
+  // Step 3 copy arguments to correct locations.
+  if (receiver_in_args) {
+    __ mov(scratch1, num_args);
+    __ add(scratch1, Immediate(1));
+  } else {
+    // Slot meant for receiver contains return address. Reset it so that
+    // we will not incorrectly interpret return address as an object.
+    __ mov(Operand(esp, num_args, times_pointer_size,
+                   (num_slots_above_ret_addr + 1) * kPointerSize),
+           Immediate(0));
+    __ mov(scratch1, num_args);
+  }
+
+  Label loop_header, loop_check;
+  __ jmp(&loop_check);
+  __ bind(&loop_header);
+  __ mov(scratch2, Operand(start_addr, 0));
+  __ mov(Operand(esp, scratch1, times_pointer_size,
+                 num_slots_above_ret_addr * kPointerSize),
+         scratch2);
+  __ sub(start_addr, Immediate(kPointerSize));
+  __ sub(scratch1, Immediate(1));
+  __ bind(&loop_check);
+  __ cmp(scratch1, Immediate(0));
+  __ j(greater, &loop_header, Label::kNear);
+}
+
+}  // end anonymous namespace
+
 // static
-void Builtins::Generate_InterpreterPushArgsAndConstruct(MacroAssembler* masm) {
+void Builtins::Generate_InterpreterPushArgsAndConstructImpl(
+    MacroAssembler* masm, CallableType construct_type) {
   // ----------- S t a t e -------------
   //  -- eax : the number of arguments (not including the receiver)
   //  -- edx : the new target
   //  -- edi : the constructor
-  //  -- ebx : the address of the first argument to be pushed. Subsequent
+  //  -- ebx : allocation site feedback (if available or undefined)
+  //  -- ecx : the address of the first argument to be pushed. Subsequent
   //           arguments should be consecutive above this, in the same order as
   //           they are to be pushed onto the stack.
   // -----------------------------------
-
-  // Pop return address to allow tail-call after pushing arguments.
-  __ Pop(ecx);
-
-  // Push edi in the slot meant for receiver. We need an extra register
-  // so store edi temporarily on stack.
+  Label stack_overflow;
+  // We need two scratch registers. Push edi and edx onto stack.
   __ Push(edi);
+  __ Push(edx);
 
-  // Find the address of the last argument.
-  __ mov(edi, eax);
-  __ neg(edi);
-  __ shl(edi, kPointerSizeLog2);
-  __ add(edi, ebx);
+  // Push arguments and move return address to the top of stack.
+  // The eax register is readonly. The ecx register will be modified. The edx
+  // and edi registers will be modified but restored to their original values.
+  Generate_InterpreterPushArgsAndReturnAddress(masm, eax, ecx, edx, edi, false,
+                                               2, &stack_overflow);
 
-  Generate_InterpreterPushArgs(masm, edi);
+  // Restore edi and edx
+  __ Pop(edx);
+  __ Pop(edi);
 
-  // Restore the constructor from slot on stack. It was pushed at the slot
-  // meant for receiver.
-  __ mov(edi, Operand(esp, eax, times_pointer_size, 0));
+  __ AssertUndefinedOrAllocationSite(ebx);
+  if (construct_type == CallableType::kJSFunction) {
+    // Tail call to the function-specific construct stub (still in the caller
+    // context at this point).
+    __ AssertFunction(edi);
 
-  // Re-push return address.
-  __ Push(ecx);
+    __ mov(ecx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
+    __ mov(ecx, FieldOperand(ecx, SharedFunctionInfo::kConstructStubOffset));
+    __ lea(ecx, FieldOperand(ecx, Code::kHeaderSize));
+    __ jmp(ecx);
+  } else {
+    DCHECK_EQ(construct_type, CallableType::kAny);
 
-  // Call the constructor with unmodified eax, edi, ebi values.
-  __ Jump(masm->isolate()->builtins()->Construct(), RelocInfo::CODE_TARGET);
+    // Call the constructor with unmodified eax, edi, edx values.
+    __ Jump(masm->isolate()->builtins()->Construct(), RelocInfo::CODE_TARGET);
+  }
+
+  __ bind(&stack_overflow);
+  {
+    // Pop the temporary registers, so that return address is on top of stack.
+    __ Pop(edx);
+    __ Pop(edi);
+
+    __ TailCallRuntime(Runtime::kThrowStackOverflow);
+
+    // This should be unreachable.
+    __ int3();
+  }
+}
+
+// static
+void Builtins::Generate_InterpreterPushArgsAndConstructArray(
+    MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- eax : the number of arguments (not including the receiver)
+  //  -- edx : the target to call checked to be Array function.
+  //  -- ebx : the allocation site feedback
+  //  -- ecx : the address of the first argument to be pushed. Subsequent
+  //           arguments should be consecutive above this, in the same order as
+  //           they are to be pushed onto the stack.
+  // -----------------------------------
+  Label stack_overflow;
+  // We need two scratch registers. Register edi is available, push edx onto
+  // stack.
+  __ Push(edx);
+
+  // Push arguments and move return address to the top of stack.
+  // The eax register is readonly. The ecx register will be modified. The edx
+  // and edi registers will be modified but restored to their original values.
+  Generate_InterpreterPushArgsAndReturnAddress(masm, eax, ecx, edx, edi, true,
+                                               1, &stack_overflow);
+
+  // Restore edx.
+  __ Pop(edx);
+
+  // Array constructor expects constructor in edi. It is the same as edx here.
+  __ Move(edi, edx);
+
+  ArrayConstructorStub stub(masm->isolate());
+  __ TailCallStub(&stub);
+
+  __ bind(&stack_overflow);
+  {
+    // Pop the temporary registers, so that return address is on top of stack.
+    __ Pop(edx);
+
+    __ TailCallRuntime(Runtime::kThrowStackOverflow);
+
+    // This should be unreachable.
+    __ int3();
+  }
 }
 
 void Builtins::Generate_InterpreterEnterBytecodeDispatch(MacroAssembler* masm) {
@@ -1223,61 +1432,6 @@
 }
 
 // static
-void Builtins::Generate_DatePrototype_GetField(MacroAssembler* masm,
-                                               int field_index) {
-  // ----------- S t a t e -------------
-  //  -- eax    : number of arguments
-  //  -- edi    : function
-  //  -- esi    : context
-  //  -- esp[0] : return address
-  //  -- esp[4] : receiver
-  // -----------------------------------
-
-  // 1. Load receiver into eax and check that it's actually a JSDate object.
-  Label receiver_not_date;
-  {
-    __ mov(eax, Operand(esp, kPointerSize));
-    __ JumpIfSmi(eax, &receiver_not_date);
-    __ CmpObjectType(eax, JS_DATE_TYPE, ebx);
-    __ j(not_equal, &receiver_not_date);
-  }
-
-  // 2. Load the specified date field, falling back to the runtime as necessary.
-  if (field_index == JSDate::kDateValue) {
-    __ mov(eax, FieldOperand(eax, JSDate::kValueOffset));
-  } else {
-    if (field_index < JSDate::kFirstUncachedField) {
-      Label stamp_mismatch;
-      __ mov(edx, Operand::StaticVariable(
-                      ExternalReference::date_cache_stamp(masm->isolate())));
-      __ cmp(edx, FieldOperand(eax, JSDate::kCacheStampOffset));
-      __ j(not_equal, &stamp_mismatch, Label::kNear);
-      __ mov(eax, FieldOperand(
-                      eax, JSDate::kValueOffset + field_index * kPointerSize));
-      __ ret(1 * kPointerSize);
-      __ bind(&stamp_mismatch);
-    }
-    FrameScope scope(masm, StackFrame::INTERNAL);
-    __ PrepareCallCFunction(2, ebx);
-    __ mov(Operand(esp, 0), eax);
-    __ mov(Operand(esp, 1 * kPointerSize),
-           Immediate(Smi::FromInt(field_index)));
-    __ CallCFunction(
-        ExternalReference::get_date_field_function(masm->isolate()), 2);
-  }
-  __ ret(1 * kPointerSize);
-
-  // 3. Raise a TypeError if the receiver is not a date.
-  __ bind(&receiver_not_date);
-  {
-    FrameScope scope(masm, StackFrame::MANUAL);
-    __ Move(ebx, Immediate(0));
-    __ EnterBuiltinFrame(esi, edi, ebx);
-    __ CallRuntime(Runtime::kThrowNotDateError);
-  }
-}
-
-// static
 void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
   // ----------- S t a t e -------------
   //  -- eax     : argc
@@ -1904,10 +2058,9 @@
   __ bind(&to_string);
   {
     FrameScope scope(masm, StackFrame::MANUAL);
-    ToStringStub stub(masm->isolate());
     __ SmiTag(ebx);
     __ EnterBuiltinFrame(esi, edi, ebx);
-    __ CallStub(&stub);
+    __ Call(masm->isolate()->builtins()->ToString(), RelocInfo::CODE_TARGET);
     __ LeaveBuiltinFrame(esi, edi, ebx);
     __ SmiUntag(ebx);
   }
@@ -1971,11 +2124,10 @@
     __ bind(&convert);
     {
       FrameScope scope(masm, StackFrame::MANUAL);
-      ToStringStub stub(masm->isolate());
       __ SmiTag(ebx);
       __ EnterBuiltinFrame(esi, edi, ebx);
       __ Push(edx);
-      __ CallStub(&stub);
+      __ Call(masm->isolate()->builtins()->ToString(), RelocInfo::CODE_TARGET);
       __ Pop(edx);
       __ LeaveBuiltinFrame(esi, edi, ebx);
       __ SmiUntag(ebx);
@@ -2026,32 +2178,6 @@
   }
 }
 
-static void ArgumentsAdaptorStackCheck(MacroAssembler* masm,
-                                       Label* stack_overflow) {
-  // ----------- S t a t e -------------
-  //  -- eax : actual number of arguments
-  //  -- ebx : expected number of arguments
-  //  -- edx : new target (passed through to callee)
-  // -----------------------------------
-  // Check the stack for overflow. We are not trying to catch
-  // interruptions (e.g. debug break and preemption) here, so the "real stack
-  // limit" is checked.
-  ExternalReference real_stack_limit =
-      ExternalReference::address_of_real_stack_limit(masm->isolate());
-  __ mov(edi, Operand::StaticVariable(real_stack_limit));
-  // Make ecx the space we have left. The stack might already be overflowed
-  // here which will cause ecx to become negative.
-  __ mov(ecx, esp);
-  __ sub(ecx, edi);
-  // Make edi the space we need for the array when it is unrolled onto the
-  // stack.
-  __ mov(edi, ebx);
-  __ shl(edi, kPointerSizeLog2);
-  // Check if the arguments will overflow the stack.
-  __ cmp(ecx, edi);
-  __ j(less_equal, stack_overflow);  // Signed comparison.
-}
-
 static void EnterArgumentsAdaptorFrame(MacroAssembler* masm) {
   __ push(ebp);
   __ mov(ebp, esp);
@@ -2767,24 +2893,6 @@
   __ TailCallRuntime(Runtime::kAbort);
 }
 
-// static
-void Builtins::Generate_ToNumber(MacroAssembler* masm) {
-  // The ToNumber stub takes one argument in eax.
-  Label not_smi;
-  __ JumpIfNotSmi(eax, &not_smi, Label::kNear);
-  __ Ret();
-  __ bind(&not_smi);
-
-  Label not_heap_number;
-  __ CompareMap(eax, masm->isolate()->factory()->heap_number_map());
-  __ j(not_equal, &not_heap_number, Label::kNear);
-  __ Ret();
-  __ bind(&not_heap_number);
-
-  __ Jump(masm->isolate()->builtins()->NonNumberToNumber(),
-          RelocInfo::CODE_TARGET);
-}
-
 void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
   // ----------- S t a t e -------------
   //  -- eax : actual number of arguments
@@ -2805,7 +2913,9 @@
   {  // Enough parameters: Actual >= expected.
     __ bind(&enough);
     EnterArgumentsAdaptorFrame(masm);
-    ArgumentsAdaptorStackCheck(masm, &stack_overflow);
+    // edi is used as a scratch register. It should be restored from the frame
+    // when needed.
+    Generate_StackOverflowCheck(masm, ebx, ecx, edi, &stack_overflow);
 
     // Copy receiver and all expected arguments.
     const int offset = StandardFrameConstants::kCallerSPOffset;
@@ -2825,9 +2935,10 @@
 
   {  // Too few parameters: Actual < expected.
     __ bind(&too_few);
-
     EnterArgumentsAdaptorFrame(masm);
-    ArgumentsAdaptorStackCheck(masm, &stack_overflow);
+    // edi is used as a scratch register. It should be restored from the frame
+    // when needed.
+    Generate_StackOverflowCheck(masm, ebx, ecx, edi, &stack_overflow);
 
     // Remember expected arguments in ecx.
     __ mov(ecx, ebx);
diff --git a/src/checks.h b/src/checks.h
index 80404e8..0d7eed3 100644
--- a/src/checks.h
+++ b/src/checks.h
@@ -7,6 +7,7 @@
 
 #include "include/v8.h"
 #include "src/base/logging.h"
+#include "src/globals.h"
 
 namespace v8 {
 
@@ -17,10 +18,10 @@
 #ifdef ENABLE_SLOW_DCHECKS
 #define SLOW_DCHECK(condition) \
   CHECK(!v8::internal::FLAG_enable_slow_asserts || (condition))
-extern bool FLAG_enable_slow_asserts;
+V8_EXPORT_PRIVATE extern bool FLAG_enable_slow_asserts;
 #else
 #define SLOW_DCHECK(condition) ((void) 0)
-const bool FLAG_enable_slow_asserts = false;
+static const bool FLAG_enable_slow_asserts = false;
 #endif
 
 }  // namespace internal
diff --git a/src/code-events.h b/src/code-events.h
index 9ae1cae..94f7dbd 100644
--- a/src/code-events.h
+++ b/src/code-events.h
@@ -7,6 +7,7 @@
 
 #include <unordered_set>
 
+#include "src/base/platform/mutex.h"
 #include "src/globals.h"
 
 namespace v8 {
@@ -114,13 +115,16 @@
   CodeEventDispatcher() {}
 
   bool AddListener(CodeEventListener* listener) {
+    base::LockGuard<base::Mutex> guard(&mutex_);
     return listeners_.insert(listener).second;
   }
   void RemoveListener(CodeEventListener* listener) {
+    base::LockGuard<base::Mutex> guard(&mutex_);
     listeners_.erase(listener);
   }
 
-#define CODE_EVENT_DISPATCH(code) \
+#define CODE_EVENT_DISPATCH(code)              \
+  base::LockGuard<base::Mutex> guard(&mutex_); \
   for (auto it = listeners_.begin(); it != listeners_.end(); ++it) (*it)->code
 
   void CodeCreateEvent(LogEventsAndTags tag, AbstractCode* code,
@@ -173,6 +177,7 @@
 
  private:
   std::unordered_set<CodeEventListener*> listeners_;
+  base::Mutex mutex_;
 
   DISALLOW_COPY_AND_ASSIGN(CodeEventDispatcher);
 };
diff --git a/src/code-factory.cc b/src/code-factory.cc
index 018f21d..7448591 100644
--- a/src/code-factory.cc
+++ b/src/code-factory.cc
@@ -82,6 +82,10 @@
 
 // static
 Callable CodeFactory::KeyedLoadIC_Megamorphic(Isolate* isolate) {
+  if (FLAG_tf_load_ic_stub) {
+    return Callable(isolate->builtins()->KeyedLoadIC_Megamorphic_TF(),
+                    LoadWithVectorDescriptor(isolate));
+  }
   return Callable(isolate->builtins()->KeyedLoadIC_Megamorphic(),
                   LoadWithVectorDescriptor(isolate));
 }
@@ -104,6 +108,10 @@
 
 // static
 Callable CodeFactory::StoreIC(Isolate* isolate, LanguageMode language_mode) {
+  if (FLAG_tf_store_ic_stub) {
+    StoreICTrampolineTFStub stub(isolate, StoreICState(language_mode));
+    return make_callable(stub);
+  }
   StoreICTrampolineStub stub(isolate, StoreICState(language_mode));
   return make_callable(stub);
 }
@@ -111,6 +119,10 @@
 // static
 Callable CodeFactory::StoreICInOptimizedCode(Isolate* isolate,
                                              LanguageMode language_mode) {
+  if (FLAG_tf_store_ic_stub) {
+    StoreICTFStub stub(isolate, StoreICState(language_mode));
+    return make_callable(stub);
+  }
   StoreICStub stub(isolate, StoreICState(language_mode));
   return make_callable(stub);
 }
@@ -179,14 +191,14 @@
 
 // static
 Callable CodeFactory::ToString(Isolate* isolate) {
-  ToStringStub stub(isolate);
-  return make_callable(stub);
+  return Callable(isolate->builtins()->ToString(),
+                  TypeConversionDescriptor(isolate));
 }
 
 // static
 Callable CodeFactory::ToName(Isolate* isolate) {
-  ToNameStub stub(isolate);
-  return make_callable(stub);
+  return Callable(isolate->builtins()->ToName(),
+                  TypeConversionDescriptor(isolate));
 }
 
 // static
@@ -228,6 +240,12 @@
 }
 
 // static
+Callable CodeFactory::OrdinaryHasInstance(Isolate* isolate) {
+  return Callable(isolate->builtins()->OrdinaryHasInstance(),
+                  CompareDescriptor(isolate));
+}
+
+// static
 Callable CodeFactory::RegExpConstructResult(Isolate* isolate) {
   RegExpConstructResultStub stub(isolate);
   return make_callable(stub);
@@ -398,38 +416,38 @@
 
 // static
 Callable CodeFactory::StringEqual(Isolate* isolate) {
-  StringEqualStub stub(isolate);
-  return make_callable(stub);
+  return Callable(isolate->builtins()->StringEqual(),
+                  CompareDescriptor(isolate));
 }
 
 // static
 Callable CodeFactory::StringNotEqual(Isolate* isolate) {
-  StringNotEqualStub stub(isolate);
-  return make_callable(stub);
+  return Callable(isolate->builtins()->StringNotEqual(),
+                  CompareDescriptor(isolate));
 }
 
 // static
 Callable CodeFactory::StringLessThan(Isolate* isolate) {
-  StringLessThanStub stub(isolate);
-  return make_callable(stub);
+  return Callable(isolate->builtins()->StringLessThan(),
+                  CompareDescriptor(isolate));
 }
 
 // static
 Callable CodeFactory::StringLessThanOrEqual(Isolate* isolate) {
-  StringLessThanOrEqualStub stub(isolate);
-  return make_callable(stub);
+  return Callable(isolate->builtins()->StringLessThanOrEqual(),
+                  CompareDescriptor(isolate));
 }
 
 // static
 Callable CodeFactory::StringGreaterThan(Isolate* isolate) {
-  StringGreaterThanStub stub(isolate);
-  return make_callable(stub);
+  return Callable(isolate->builtins()->StringGreaterThan(),
+                  CompareDescriptor(isolate));
 }
 
 // static
 Callable CodeFactory::StringGreaterThanOrEqual(Isolate* isolate) {
-  StringGreaterThanOrEqualStub stub(isolate);
-  return make_callable(stub);
+  return Callable(isolate->builtins()->StringGreaterThanOrEqual(),
+                  CompareDescriptor(isolate));
 }
 
 // static
@@ -594,9 +612,17 @@
 }
 
 // static
-Callable CodeFactory::InterpreterPushArgsAndConstruct(Isolate* isolate) {
-  return Callable(isolate->builtins()->InterpreterPushArgsAndConstruct(),
-                  InterpreterPushArgsAndConstructDescriptor(isolate));
+Callable CodeFactory::InterpreterPushArgsAndConstruct(
+    Isolate* isolate, CallableType function_type) {
+  return Callable(
+      isolate->builtins()->InterpreterPushArgsAndConstruct(function_type),
+      InterpreterPushArgsAndConstructDescriptor(isolate));
+}
+
+// static
+Callable CodeFactory::InterpreterPushArgsAndConstructArray(Isolate* isolate) {
+  return Callable(isolate->builtins()->InterpreterPushArgsAndConstructArray(),
+                  InterpreterPushArgsAndConstructArrayDescriptor(isolate));
 }
 
 // static
diff --git a/src/code-factory.h b/src/code-factory.h
index 40b1ea4..59f069e 100644
--- a/src/code-factory.h
+++ b/src/code-factory.h
@@ -84,6 +84,8 @@
                                       OrdinaryToPrimitiveHint hint);
   static Callable NumberToString(Isolate* isolate);
 
+  static Callable OrdinaryHasInstance(Isolate* isolate);
+
   static Callable RegExpConstructResult(Isolate* isolate);
   static Callable RegExpExec(Isolate* isolate);
 
@@ -160,7 +162,9 @@
   static Callable InterpreterPushArgsAndCall(
       Isolate* isolate, TailCallMode tail_call_mode,
       CallableType function_type = CallableType::kAny);
-  static Callable InterpreterPushArgsAndConstruct(Isolate* isolate);
+  static Callable InterpreterPushArgsAndConstruct(
+      Isolate* isolate, CallableType function_type = CallableType::kAny);
+  static Callable InterpreterPushArgsAndConstructArray(Isolate* isolate);
   static Callable InterpreterCEntry(Isolate* isolate, int result_size = 1);
   static Callable InterpreterOnStackReplacement(Isolate* isolate);
 };
diff --git a/src/code-stub-assembler.cc b/src/code-stub-assembler.cc
index 06552ba..016814c 100644
--- a/src/code-stub-assembler.cc
+++ b/src/code-stub-assembler.cc
@@ -38,41 +38,23 @@
 #endif
 }
 
-Node* CodeStubAssembler::BooleanMapConstant() {
-  return HeapConstant(isolate()->factory()->boolean_map());
-}
-
-Node* CodeStubAssembler::EmptyStringConstant() {
-  return LoadRoot(Heap::kempty_stringRootIndex);
-}
-
-Node* CodeStubAssembler::HeapNumberMapConstant() {
-  return HeapConstant(isolate()->factory()->heap_number_map());
-}
-
 Node* CodeStubAssembler::NoContextConstant() {
   return SmiConstant(Smi::FromInt(0));
 }
 
-Node* CodeStubAssembler::MinusZeroConstant() {
-  return LoadRoot(Heap::kMinusZeroValueRootIndex);
-}
+#define HEAP_CONSTANT_ACCESSOR(rootName, name)     \
+  Node* CodeStubAssembler::name##Constant() {      \
+    return LoadRoot(Heap::k##rootName##RootIndex); \
+  }
+HEAP_CONSTANT_LIST(HEAP_CONSTANT_ACCESSOR);
+#undef HEAP_CONSTANT_ACCESSOR
 
-Node* CodeStubAssembler::NanConstant() {
-  return LoadRoot(Heap::kNanValueRootIndex);
-}
-
-Node* CodeStubAssembler::NullConstant() {
-  return LoadRoot(Heap::kNullValueRootIndex);
-}
-
-Node* CodeStubAssembler::UndefinedConstant() {
-  return LoadRoot(Heap::kUndefinedValueRootIndex);
-}
-
-Node* CodeStubAssembler::TheHoleConstant() {
-  return LoadRoot(Heap::kTheHoleValueRootIndex);
-}
+#define HEAP_CONSTANT_TEST(rootName, name)         \
+  Node* CodeStubAssembler::Is##name(Node* value) { \
+    return WordEqual(value, name##Constant());     \
+  }
+HEAP_CONSTANT_LIST(HEAP_CONSTANT_TEST);
+#undef HEAP_CONSTANT_TEST
 
 Node* CodeStubAssembler::HashSeed() {
   return LoadAndUntagToWord32Root(Heap::kHashSeedRootIndex);
@@ -86,7 +68,7 @@
   if (mode == SMI_PARAMETERS) {
     return SmiConstant(Smi::FromInt(value));
   } else {
-    DCHECK_EQ(INTEGER_PARAMETERS, mode);
+    DCHECK(mode == INTEGER_PARAMETERS || mode == INTPTR_PARAMETERS);
     return IntPtrConstant(value);
   }
 }
@@ -284,7 +266,7 @@
 
 Node* CodeStubAssembler::SmiFromWord32(Node* value) {
   value = ChangeInt32ToIntPtr(value);
-  return WordShl(value, SmiShiftBitsConstant());
+  return BitcastWordToTaggedSigned(WordShl(value, SmiShiftBitsConstant()));
 }
 
 Node* CodeStubAssembler::SmiTag(Node* value) {
@@ -292,15 +274,15 @@
   if (ToInt32Constant(value, constant_value) && Smi::IsValid(constant_value)) {
     return SmiConstant(Smi::FromInt(constant_value));
   }
-  return WordShl(value, SmiShiftBitsConstant());
+  return BitcastWordToTaggedSigned(WordShl(value, SmiShiftBitsConstant()));
 }
 
 Node* CodeStubAssembler::SmiUntag(Node* value) {
-  return WordSar(value, SmiShiftBitsConstant());
+  return WordSar(BitcastTaggedToWord(value), SmiShiftBitsConstant());
 }
 
 Node* CodeStubAssembler::SmiToWord32(Node* value) {
-  Node* result = WordSar(value, SmiShiftBitsConstant());
+  Node* result = SmiUntag(value);
   if (Is64()) {
     result = TruncateInt64ToInt32(result);
   }
@@ -325,10 +307,18 @@
 
 Node* CodeStubAssembler::SmiEqual(Node* a, Node* b) { return WordEqual(a, b); }
 
+Node* CodeStubAssembler::SmiAbove(Node* a, Node* b) {
+  return UintPtrGreaterThan(a, b);
+}
+
 Node* CodeStubAssembler::SmiAboveOrEqual(Node* a, Node* b) {
   return UintPtrGreaterThanOrEqual(a, b);
 }
 
+Node* CodeStubAssembler::SmiBelow(Node* a, Node* b) {
+  return UintPtrLessThan(a, b);
+}
+
 Node* CodeStubAssembler::SmiLessThan(Node* a, Node* b) {
   return IntPtrLessThan(a, b);
 }
@@ -337,19 +327,12 @@
   return IntPtrLessThanOrEqual(a, b);
 }
 
+Node* CodeStubAssembler::SmiMax(Node* a, Node* b) {
+  return Select(SmiLessThan(a, b), b, a);
+}
+
 Node* CodeStubAssembler::SmiMin(Node* a, Node* b) {
-  // TODO(bmeurer): Consider using Select once available.
-  Variable min(this, MachineRepresentation::kTagged);
-  Label if_a(this), if_b(this), join(this);
-  BranchIfSmiLessThan(a, b, &if_a, &if_b);
-  Bind(&if_a);
-  min.Bind(a);
-  Goto(&join);
-  Bind(&if_b);
-  min.Bind(b);
-  Goto(&join);
-  Bind(&join);
-  return min.value();
+  return Select(SmiLessThan(a, b), a, b);
 }
 
 Node* CodeStubAssembler::SmiMod(Node* a, Node* b) {
@@ -485,80 +468,6 @@
                    IntPtrConstant(0));
 }
 
-void CodeStubAssembler::BranchIfSameValueZero(Node* a, Node* b, Node* context,
-                                              Label* if_true, Label* if_false) {
-  Node* number_map = HeapNumberMapConstant();
-  Label a_isnumber(this), a_isnotnumber(this), b_isnumber(this), a_isnan(this),
-      float_not_equal(this);
-  // If register A and register B are identical, goto `if_true`
-  GotoIf(WordEqual(a, b), if_true);
-  // If either register A or B are Smis, goto `if_false`
-  GotoIf(Word32Or(WordIsSmi(a), WordIsSmi(b)), if_false);
-  // GotoIf(WordIsSmi(b), if_false);
-
-  Node* a_map = LoadMap(a);
-  Node* b_map = LoadMap(b);
-  Branch(WordEqual(a_map, number_map), &a_isnumber, &a_isnotnumber);
-
-  // If both register A and B are HeapNumbers, return true if they are equal,
-  // or if both are NaN
-  Bind(&a_isnumber);
-  {
-    Branch(WordEqual(b_map, number_map), &b_isnumber, if_false);
-
-    Bind(&b_isnumber);
-    Node* a_value = LoadHeapNumberValue(a);
-    Node* b_value = LoadHeapNumberValue(b);
-    BranchIfFloat64Equal(a_value, b_value, if_true, &float_not_equal);
-
-    Bind(&float_not_equal);
-    BranchIfFloat64IsNaN(a_value, &a_isnan, if_false);
-
-    Bind(&a_isnan);
-    BranchIfFloat64IsNaN(a_value, if_true, if_false);
-  }
-
-  Bind(&a_isnotnumber);
-  {
-    Label a_isstring(this), a_isnotstring(this);
-    Node* a_instance_type = LoadMapInstanceType(a_map);
-
-    Branch(Int32LessThan(a_instance_type, Int32Constant(FIRST_NONSTRING_TYPE)),
-           &a_isstring, &a_isnotstring);
-
-    Bind(&a_isstring);
-    {
-      Label b_isstring(this), b_isnotstring(this);
-      Node* b_instance_type = LoadInstanceType(b_map);
-
-      Branch(
-          Int32LessThan(b_instance_type, Int32Constant(FIRST_NONSTRING_TYPE)),
-          &b_isstring, if_false);
-
-      Bind(&b_isstring);
-      {
-        Callable callable = CodeFactory::StringEqual(isolate());
-        Node* result = CallStub(callable, context, a, b);
-        Branch(WordEqual(BooleanConstant(true), result), if_true, if_false);
-      }
-    }
-
-    Bind(&a_isnotstring);
-    {
-      // Check if {lhs} is a Simd128Value.
-      Label a_issimd128value(this);
-      Branch(Word32Equal(a_instance_type, Int32Constant(SIMD128_VALUE_TYPE)),
-             &a_issimd128value, if_false);
-
-      Bind(&a_issimd128value);
-      {
-        // Load the map of {rhs}.
-        BranchIfSimd128Equal(a, a_map, b, b_map, if_true, if_false);
-      }
-    }
-  }
-}
-
 void CodeStubAssembler::BranchIfSimd128Equal(Node* lhs, Node* lhs_map,
                                              Node* rhs, Node* rhs_map,
                                              Label* if_equal,
@@ -630,69 +539,61 @@
   Goto(if_notequal);
 }
 
+void CodeStubAssembler::BranchIfPrototypesHaveNoElements(
+    Node* receiver_map, Label* definitely_no_elements,
+    Label* possibly_elements) {
+  Variable var_map(this, MachineRepresentation::kTagged);
+  var_map.Bind(receiver_map);
+  Label loop_body(this, &var_map);
+  Node* empty_elements = LoadRoot(Heap::kEmptyFixedArrayRootIndex);
+  Goto(&loop_body);
+
+  Bind(&loop_body);
+  {
+    Node* map = var_map.value();
+    Node* prototype = LoadMapPrototype(map);
+    GotoIf(WordEqual(prototype, NullConstant()), definitely_no_elements);
+    Node* prototype_map = LoadMap(prototype);
+    // Pessimistically assume elements if a Proxy, Special API Object,
+    // or JSValue wrapper is found on the prototype chain. After this
+    // instance type check, it's not necessary to check for interceptors or
+    // access checks.
+    GotoIf(Int32LessThanOrEqual(LoadMapInstanceType(prototype_map),
+                                Int32Constant(LAST_CUSTOM_ELEMENTS_RECEIVER)),
+           possibly_elements);
+    GotoIf(WordNotEqual(LoadElements(prototype), empty_elements),
+           possibly_elements);
+    var_map.Bind(prototype_map);
+    Goto(&loop_body);
+  }
+}
+
 void CodeStubAssembler::BranchIfFastJSArray(Node* object, Node* context,
                                             Label* if_true, Label* if_false) {
-  Node* int32_zero = Int32Constant(0);
-  Node* int32_one = Int32Constant(1);
-
-  Node* empty_elements = LoadRoot(Heap::kEmptyFixedArrayRootIndex);
-
-  Variable last_map(this, MachineRepresentation::kTagged);
-  Label check_prototype(this);
-
-  // Bailout if Smi
+  // Bailout if receiver is a Smi.
   GotoIf(WordIsSmi(object), if_false);
 
   Node* map = LoadMap(object);
-  last_map.Bind(map);
 
-  // Bailout if instance type is not JS_ARRAY_TYPE
+  // Bailout if instance type is not JS_ARRAY_TYPE.
   GotoIf(WordNotEqual(LoadMapInstanceType(map), Int32Constant(JS_ARRAY_TYPE)),
          if_false);
 
   Node* bit_field2 = LoadMapBitField2(map);
   Node* elements_kind = BitFieldDecode<Map::ElementsKindBits>(bit_field2);
 
-  // Bailout if slow receiver elements
+  // Bailout if receiver has slow elements.
   GotoIf(
       Int32GreaterThan(elements_kind, Int32Constant(LAST_FAST_ELEMENTS_KIND)),
       if_false);
 
+  // Check prototype chain if receiver does not have packed elements.
   STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == (FAST_SMI_ELEMENTS | 1));
   STATIC_ASSERT(FAST_HOLEY_ELEMENTS == (FAST_ELEMENTS | 1));
   STATIC_ASSERT(FAST_HOLEY_DOUBLE_ELEMENTS == (FAST_DOUBLE_ELEMENTS | 1));
-
-  // Check prototype chain if receiver does not have packed elements
-  Node* holey_elements = Word32And(elements_kind, int32_one);
-  Branch(Word32Equal(holey_elements, int32_zero), if_true, &check_prototype);
-
-  Bind(&check_prototype);
-  {
-    Label loop_body(this, &last_map);
-    Goto(&loop_body);
-    Bind(&loop_body);
-    Node* current_map = last_map.value();
-    Node* proto = LoadObjectField(current_map, Map::kPrototypeOffset);
-
-    // End loop
-    GotoIf(WordEqual(proto, NullConstant()), if_true);
-
-    // ASSERT: proto->IsHeapObject()
-    Node* proto_map = LoadMap(proto);
-
-    // Bailout if a Proxy, API Object, or JSValue wrapper found in prototype
-    // Because of this bailout, it's not necessary to check for interceptors or
-    // access checks on the prototype chain.
-    GotoIf(Int32LessThanOrEqual(LoadMapInstanceType(proto_map),
-                                Int32Constant(LAST_CUSTOM_ELEMENTS_RECEIVER)),
-           if_false);
-
-    // Bailout if prototype contains non-empty elements
-    GotoUnless(WordEqual(LoadElements(proto), empty_elements), if_false);
-
-    last_map.Bind(proto_map);
-    Goto(&loop_body);
-  }
+  Node* holey_elements = Word32And(elements_kind, Int32Constant(1));
+  GotoIf(Word32Equal(holey_elements, Int32Constant(0)), if_true);
+  BranchIfPrototypesHaveNoElements(map, if_true, if_false);
 }
 
 Node* CodeStubAssembler::AllocateRawUnaligned(Node* size_in_bytes,
@@ -859,9 +760,8 @@
     // types, the HeapNumber type and everything else.
     GotoIf(Word32Equal(value_instance_type, Int32Constant(HEAP_NUMBER_TYPE)),
            &if_valueisheapnumber);
-    Branch(
-        Int32LessThan(value_instance_type, Int32Constant(FIRST_NONSTRING_TYPE)),
-        &if_valueisstring, &if_valueisother);
+    Branch(IsStringInstanceType(value_instance_type), &if_valueisstring,
+           &if_valueisother);
 
     Bind(&if_valueisstring);
     {
@@ -1008,6 +908,10 @@
   return LoadObjectField(object, JSObject::kElementsOffset);
 }
 
+Node* CodeStubAssembler::LoadJSArrayLength(compiler::Node* array) {
+  return LoadObjectField(array, JSArray::kLengthOffset);
+}
+
 Node* CodeStubAssembler::LoadFixedArrayBaseLength(compiler::Node* array) {
   return LoadObjectField(array, FixedArrayBase::kLengthOffset);
 }
@@ -1032,6 +936,11 @@
   return LoadObjectField(map, Map::kInstanceTypeOffset, MachineType::Uint8());
 }
 
+Node* CodeStubAssembler::LoadMapElementsKind(Node* map) {
+  Node* bit_field2 = LoadMapBitField2(map);
+  return BitFieldDecode<Map::ElementsKindBits>(bit_field2);
+}
+
 Node* CodeStubAssembler::LoadMapDescriptors(Node* map) {
   return LoadObjectField(map, Map::kDescriptorsOffset);
 }
@@ -1041,7 +950,8 @@
 }
 
 Node* CodeStubAssembler::LoadMapInstanceSize(Node* map) {
-  return LoadObjectField(map, Map::kInstanceSizeOffset, MachineType::Uint8());
+  return ChangeUint32ToWord(
+      LoadObjectField(map, Map::kInstanceSizeOffset, MachineType::Uint8()));
 }
 
 Node* CodeStubAssembler::LoadMapInobjectProperties(Node* map) {
@@ -1049,9 +959,19 @@
   STATIC_ASSERT(LAST_JS_OBJECT_TYPE == LAST_TYPE);
   Assert(Int32GreaterThanOrEqual(LoadMapInstanceType(map),
                                  Int32Constant(FIRST_JS_OBJECT_TYPE)));
-  return LoadObjectField(
+  return ChangeUint32ToWord(LoadObjectField(
       map, Map::kInObjectPropertiesOrConstructorFunctionIndexOffset,
-      MachineType::Uint8());
+      MachineType::Uint8()));
+}
+
+Node* CodeStubAssembler::LoadMapConstructorFunctionIndex(Node* map) {
+  // See Map::GetConstructorFunctionIndex() for details.
+  STATIC_ASSERT(FIRST_PRIMITIVE_TYPE == FIRST_TYPE);
+  Assert(Int32LessThanOrEqual(LoadMapInstanceType(map),
+                              Int32Constant(LAST_PRIMITIVE_TYPE)));
+  return ChangeUint32ToWord(LoadObjectField(
+      map, Map::kInObjectPropertiesOrConstructorFunctionIndexOffset,
+      MachineType::Uint8()));
 }
 
 Node* CodeStubAssembler::LoadMapConstructor(Node* map) {
@@ -1081,7 +1001,7 @@
 Node* CodeStubAssembler::LoadNameHash(Node* name, Label* if_hash_not_computed) {
   Node* hash_field = LoadNameHashField(name);
   if (if_hash_not_computed != nullptr) {
-    GotoIf(WordEqual(
+    GotoIf(Word32Equal(
                Word32And(hash_field, Int32Constant(Name::kHashNotComputedMask)),
                Int32Constant(0)),
            if_hash_not_computed);
@@ -1105,19 +1025,6 @@
   return value;
 }
 
-Node* CodeStubAssembler::AllocateUninitializedFixedArray(Node* length) {
-  Node* header_size = IntPtrConstant(FixedArray::kHeaderSize);
-  Node* data_size = WordShl(length, IntPtrConstant(kPointerSizeLog2));
-  Node* total_size = IntPtrAdd(data_size, header_size);
-
-  Node* result = Allocate(total_size, kNone);
-  StoreMapNoWriteBarrier(result, LoadRoot(Heap::kFixedArrayMapRootIndex));
-  StoreObjectFieldNoWriteBarrier(result, FixedArray::kLengthOffset,
-      SmiTag(length));
-
-  return result;
-}
-
 Node* CodeStubAssembler::LoadFixedArrayElement(Node* object, Node* index_node,
                                                int additional_offset,
                                                ParameterMode parameter_mode) {
@@ -1149,29 +1056,57 @@
 
 Node* CodeStubAssembler::LoadFixedDoubleArrayElement(
     Node* object, Node* index_node, MachineType machine_type,
-    int additional_offset, ParameterMode parameter_mode) {
+    int additional_offset, ParameterMode parameter_mode, Label* if_hole) {
   int32_t header_size =
       FixedDoubleArray::kHeaderSize + additional_offset - kHeapObjectTag;
   Node* offset = ElementOffsetFromIndex(index_node, FAST_HOLEY_DOUBLE_ELEMENTS,
                                         parameter_mode, header_size);
-  return Load(machine_type, object, offset);
+  return LoadDoubleWithHoleCheck(object, offset, if_hole, machine_type);
+}
+
+Node* CodeStubAssembler::LoadDoubleWithHoleCheck(Node* base, Node* offset,
+                                                 Label* if_hole,
+                                                 MachineType machine_type) {
+  if (if_hole) {
+    // TODO(ishell): Compare only the upper part for the hole once the
+    // compiler is able to fold addition of already complex |offset| with
+    // |kIeeeDoubleExponentWordOffset| into one addressing mode.
+    if (Is64()) {
+      Node* element = Load(MachineType::Uint64(), base, offset);
+      GotoIf(Word64Equal(element, Int64Constant(kHoleNanInt64)), if_hole);
+    } else {
+      Node* element_upper = Load(
+          MachineType::Uint32(), base,
+          IntPtrAdd(offset, IntPtrConstant(kIeeeDoubleExponentWordOffset)));
+      GotoIf(Word32Equal(element_upper, Int32Constant(kHoleNanUpper32)),
+             if_hole);
+    }
+  }
+  if (machine_type.IsNone()) {
+    // This means the actual value is not needed.
+    return nullptr;
+  }
+  return Load(machine_type, base, offset);
+}
+
+Node* CodeStubAssembler::LoadContextElement(Node* context, int slot_index) {
+  int offset = Context::SlotOffset(slot_index);
+  return Load(MachineType::AnyTagged(), context, IntPtrConstant(offset));
 }
 
 Node* CodeStubAssembler::LoadNativeContext(Node* context) {
-  return LoadFixedArrayElement(context,
-                               Int32Constant(Context::NATIVE_CONTEXT_INDEX));
+  return LoadContextElement(context, Context::NATIVE_CONTEXT_INDEX);
 }
 
 Node* CodeStubAssembler::LoadJSArrayElementsMap(ElementsKind kind,
                                                 Node* native_context) {
   return LoadFixedArrayElement(native_context,
-                               Int32Constant(Context::ArrayMapIndex(kind)));
+                               IntPtrConstant(Context::ArrayMapIndex(kind)));
 }
 
 Node* CodeStubAssembler::StoreHeapNumberValue(Node* object, Node* value) {
-  return StoreNoWriteBarrier(
-      MachineRepresentation::kFloat64, object,
-      IntPtrConstant(HeapNumber::kValueOffset - kHeapObjectTag), value);
+  return StoreObjectFieldNoWriteBarrier(object, HeapNumber::kValueOffset, value,
+                                        MachineRepresentation::kFloat64);
 }
 
 Node* CodeStubAssembler::StoreObjectField(
@@ -1180,12 +1115,32 @@
                IntPtrConstant(offset - kHeapObjectTag), value);
 }
 
+Node* CodeStubAssembler::StoreObjectField(Node* object, Node* offset,
+                                          Node* value) {
+  int const_offset;
+  if (ToInt32Constant(offset, const_offset)) {
+    return StoreObjectField(object, const_offset, value);
+  }
+  return Store(MachineRepresentation::kTagged, object,
+               IntPtrSub(offset, IntPtrConstant(kHeapObjectTag)), value);
+}
+
 Node* CodeStubAssembler::StoreObjectFieldNoWriteBarrier(
     Node* object, int offset, Node* value, MachineRepresentation rep) {
   return StoreNoWriteBarrier(rep, object,
                              IntPtrConstant(offset - kHeapObjectTag), value);
 }
 
+Node* CodeStubAssembler::StoreObjectFieldNoWriteBarrier(
+    Node* object, Node* offset, Node* value, MachineRepresentation rep) {
+  int const_offset;
+  if (ToInt32Constant(offset, const_offset)) {
+    return StoreObjectFieldNoWriteBarrier(object, const_offset, value, rep);
+  }
+  return StoreNoWriteBarrier(
+      rep, object, IntPtrSub(offset, IntPtrConstant(kHeapObjectTag)), value);
+}
+
 Node* CodeStubAssembler::StoreMapNoWriteBarrier(Node* object, Node* map) {
   return StoreNoWriteBarrier(
       MachineRepresentation::kTagged, object,
@@ -1227,14 +1182,19 @@
   return StoreNoWriteBarrier(rep, object, offset, value);
 }
 
-Node* CodeStubAssembler::AllocateHeapNumber() {
+Node* CodeStubAssembler::AllocateHeapNumber(MutableMode mode) {
   Node* result = Allocate(HeapNumber::kSize, kNone);
-  StoreMapNoWriteBarrier(result, HeapNumberMapConstant());
+  Heap::RootListIndex heap_map_index =
+      mode == IMMUTABLE ? Heap::kHeapNumberMapRootIndex
+                        : Heap::kMutableHeapNumberMapRootIndex;
+  Node* map = LoadRoot(heap_map_index);
+  StoreMapNoWriteBarrier(result, map);
   return result;
 }
 
-Node* CodeStubAssembler::AllocateHeapNumberWithValue(Node* value) {
-  Node* result = AllocateHeapNumber();
+Node* CodeStubAssembler::AllocateHeapNumberWithValue(Node* value,
+                                                     MutableMode mode) {
+  Node* result = AllocateHeapNumber(mode);
   StoreHeapNumberValue(result, value);
   return result;
 }
@@ -1261,8 +1221,7 @@
           IntPtrAdd(length, IntPtrConstant(SeqOneByteString::kHeaderSize)),
           IntPtrConstant(kObjectAlignmentMask)),
       IntPtrConstant(~kObjectAlignmentMask));
-  Branch(IntPtrLessThanOrEqual(size,
-                               IntPtrConstant(Page::kMaxRegularHeapObjectSize)),
+  Branch(IntPtrLessThanOrEqual(size, IntPtrConstant(kMaxRegularHeapObjectSize)),
          &if_sizeissmall, &if_notsizeissmall);
 
   Bind(&if_sizeissmall);
@@ -1314,8 +1273,7 @@
                           IntPtrConstant(SeqTwoByteString::kHeaderSize)),
                 IntPtrConstant(kObjectAlignmentMask)),
       IntPtrConstant(~kObjectAlignmentMask));
-  Branch(IntPtrLessThanOrEqual(size,
-                               IntPtrConstant(Page::kMaxRegularHeapObjectSize)),
+  Branch(IntPtrLessThanOrEqual(size, IntPtrConstant(kMaxRegularHeapObjectSize)),
          &if_sizeissmall, &if_notsizeissmall);
 
   Bind(&if_sizeissmall);
@@ -1345,51 +1303,166 @@
   return var_result.value();
 }
 
-Node* CodeStubAssembler::AllocateJSArray(ElementsKind kind, Node* array_map,
-                                         Node* capacity_node, Node* length_node,
-                                         compiler::Node* allocation_site,
-                                         ParameterMode mode) {
-  bool is_double = IsFastDoubleElementsKind(kind);
-  int base_size = JSArray::kSize + FixedArray::kHeaderSize;
-  int elements_offset = JSArray::kSize;
+Node* CodeStubAssembler::AllocateSlicedOneByteString(Node* length, Node* parent,
+                                                     Node* offset) {
+  Node* result = Allocate(SlicedString::kSize);
+  Node* map = LoadRoot(Heap::kSlicedOneByteStringMapRootIndex);
+  StoreMapNoWriteBarrier(result, map);
+  StoreObjectFieldNoWriteBarrier(result, SlicedString::kLengthOffset, length,
+                                 MachineRepresentation::kTagged);
+  StoreObjectFieldNoWriteBarrier(result, SlicedString::kHashFieldOffset,
+                                 Int32Constant(String::kEmptyHashField),
+                                 MachineRepresentation::kWord32);
+  StoreObjectFieldNoWriteBarrier(result, SlicedString::kParentOffset, parent,
+                                 MachineRepresentation::kTagged);
+  StoreObjectFieldNoWriteBarrier(result, SlicedString::kOffsetOffset, offset,
+                                 MachineRepresentation::kTagged);
+  return result;
+}
 
-  Comment("begin allocation of JSArray");
+Node* CodeStubAssembler::AllocateSlicedTwoByteString(Node* length, Node* parent,
+                                                     Node* offset) {
+  Node* result = Allocate(SlicedString::kSize);
+  Node* map = LoadRoot(Heap::kSlicedStringMapRootIndex);
+  StoreMapNoWriteBarrier(result, map);
+  StoreObjectFieldNoWriteBarrier(result, SlicedString::kLengthOffset, length,
+                                 MachineRepresentation::kTagged);
+  StoreObjectFieldNoWriteBarrier(result, SlicedString::kHashFieldOffset,
+                                 Int32Constant(String::kEmptyHashField),
+                                 MachineRepresentation::kWord32);
+  StoreObjectFieldNoWriteBarrier(result, SlicedString::kParentOffset, parent,
+                                 MachineRepresentation::kTagged);
+  StoreObjectFieldNoWriteBarrier(result, SlicedString::kOffsetOffset, offset,
+                                 MachineRepresentation::kTagged);
+  return result;
+}
+
+Node* CodeStubAssembler::AllocateRegExpResult(Node* context, Node* length,
+                                              Node* index, Node* input) {
+  Node* const max_length =
+      SmiConstant(Smi::FromInt(JSArray::kInitialMaxFastElementArray));
+  Assert(SmiLessThanOrEqual(length, max_length));
+
+  // Allocate the JSRegExpResult.
+  // TODO(jgruber): Fold JSArray and FixedArray allocations, then remove
+  // unneeded store of elements.
+  Node* const result = Allocate(JSRegExpResult::kSize);
+
+  // TODO(jgruber): Store map as Heap constant?
+  Node* const native_context = LoadNativeContext(context);
+  Node* const map =
+      LoadContextElement(native_context, Context::REGEXP_RESULT_MAP_INDEX);
+  StoreMapNoWriteBarrier(result, map);
+
+  // Initialize the header before allocating the elements.
+  Node* const empty_array = EmptyFixedArrayConstant();
+  DCHECK(Heap::RootIsImmortalImmovable(Heap::kEmptyFixedArrayRootIndex));
+  StoreObjectFieldNoWriteBarrier(result, JSArray::kPropertiesOffset,
+                                 empty_array);
+  StoreObjectFieldNoWriteBarrier(result, JSArray::kElementsOffset, empty_array);
+  StoreObjectFieldNoWriteBarrier(result, JSArray::kLengthOffset, length);
+
+  StoreObjectFieldNoWriteBarrier(result, JSRegExpResult::kIndexOffset, index);
+  StoreObjectField(result, JSRegExpResult::kInputOffset, input);
+
+  Node* const zero = IntPtrConstant(0);
+  Node* const length_intptr = SmiUntag(length);
+  const ElementsKind elements_kind = FAST_ELEMENTS;
+  const ParameterMode parameter_mode = INTPTR_PARAMETERS;
+
+  Node* const elements =
+      AllocateFixedArray(elements_kind, length_intptr, parameter_mode);
+  StoreObjectField(result, JSArray::kElementsOffset, elements);
+
+  // Fill in the elements with undefined.
+  FillFixedArrayWithValue(elements_kind, elements, zero, length_intptr,
+                          Heap::kUndefinedValueRootIndex, parameter_mode);
+
+  return result;
+}
+
+Node* CodeStubAssembler::AllocateUninitializedJSArrayWithoutElements(
+    ElementsKind kind, Node* array_map, Node* length, Node* allocation_site) {
+  Comment("begin allocation of JSArray without elements");
+  int base_size = JSArray::kSize;
+  if (allocation_site != nullptr) {
+    base_size += AllocationMemento::kSize;
+  }
+
+  Node* size = IntPtrConstant(base_size);
+  Node* array = AllocateUninitializedJSArray(kind, array_map, length,
+                                             allocation_site, size);
+  return array;
+}
+
+std::pair<Node*, Node*>
+CodeStubAssembler::AllocateUninitializedJSArrayWithElements(
+    ElementsKind kind, Node* array_map, Node* length, Node* allocation_site,
+    Node* capacity, ParameterMode capacity_mode) {
+  Comment("begin allocation of JSArray with elements");
+  int base_size = JSArray::kSize;
 
   if (allocation_site != nullptr) {
     base_size += AllocationMemento::kSize;
-    elements_offset += AllocationMemento::kSize;
   }
 
-  Node* total_size =
-      ElementOffsetFromIndex(capacity_node, kind, mode, base_size);
+  int elements_offset = base_size;
 
-  // Allocate both array and elements object, and initialize the JSArray.
-  Heap* heap = isolate()->heap();
-  Node* array = Allocate(total_size);
+  // Compute space for elements
+  base_size += FixedArray::kHeaderSize;
+  Node* size = ElementOffsetFromIndex(capacity, kind, capacity_mode, base_size);
+
+  Node* array = AllocateUninitializedJSArray(kind, array_map, length,
+                                             allocation_site, size);
+
+  Node* elements = InnerAllocate(array, elements_offset);
+  StoreObjectField(array, JSObject::kElementsOffset, elements);
+
+  return {array, elements};
+}
+
+Node* CodeStubAssembler::AllocateUninitializedJSArray(ElementsKind kind,
+                                                      Node* array_map,
+                                                      Node* length,
+                                                      Node* allocation_site,
+                                                      Node* size_in_bytes) {
+  Node* array = Allocate(size_in_bytes);
+
+  Comment("write JSArray headers");
   StoreMapNoWriteBarrier(array, array_map);
-  Node* empty_properties = LoadRoot(Heap::kEmptyFixedArrayRootIndex);
-  StoreObjectFieldNoWriteBarrier(array, JSArray::kPropertiesOffset,
-                                 empty_properties);
-  StoreObjectFieldNoWriteBarrier(
-      array, JSArray::kLengthOffset,
-      mode == SMI_PARAMETERS ? length_node : SmiTag(length_node));
+
+  StoreObjectFieldNoWriteBarrier(array, JSArray::kLengthOffset, length);
+
+  StoreObjectFieldRoot(array, JSArray::kPropertiesOffset,
+                       Heap::kEmptyFixedArrayRootIndex);
 
   if (allocation_site != nullptr) {
     InitializeAllocationMemento(array, JSArray::kSize, allocation_site);
   }
+  return array;
+}
 
+Node* CodeStubAssembler::AllocateJSArray(ElementsKind kind, Node* array_map,
+                                         Node* capacity, Node* length,
+                                         Node* allocation_site,
+                                         ParameterMode capacity_mode) {
+  bool is_double = IsFastDoubleElementsKind(kind);
+
+  // Allocate both array and elements object, and initialize the JSArray.
+  Node *array, *elements;
+  std::tie(array, elements) = AllocateUninitializedJSArrayWithElements(
+      kind, array_map, length, allocation_site, capacity, capacity_mode);
   // Setup elements object.
-  Node* elements = InnerAllocate(array, elements_offset);
-  StoreObjectFieldNoWriteBarrier(array, JSArray::kElementsOffset, elements);
+  Heap* heap = isolate()->heap();
   Handle<Map> elements_map(is_double ? heap->fixed_double_array_map()
                                      : heap->fixed_array_map());
   StoreMapNoWriteBarrier(elements, HeapConstant(elements_map));
-  StoreObjectFieldNoWriteBarrier(
-      elements, FixedArray::kLengthOffset,
-      mode == SMI_PARAMETERS ? capacity_node : SmiTag(capacity_node));
+  StoreObjectFieldNoWriteBarrier(elements, FixedArray::kLengthOffset,
+                                 TagParameter(capacity, capacity_mode));
 
-  FillFixedArrayWithHole(kind, elements, IntPtrConstant(0), capacity_node,
-                         mode);
+  // Fill in the elements with holes.
+  FillFixedArrayWithValue(kind, elements, IntPtrConstant(0), capacity,
+                          Heap::kTheHoleValueRootIndex, capacity_mode);
 
   return array;
 }
@@ -1398,7 +1471,7 @@
                                             Node* capacity_node,
                                             ParameterMode mode,
                                             AllocationFlags flags) {
-  Node* total_size = GetFixedAarrayAllocationSize(capacity_node, kind, mode);
+  Node* total_size = GetFixedArrayAllocationSize(capacity_node, kind, mode);
 
   // Allocate both array and elements object, and initialize the JSArray.
   Node* array = Allocate(total_size, flags);
@@ -1411,24 +1484,24 @@
   } else {
     StoreMapNoWriteBarrier(array, HeapConstant(map));
   }
-  StoreObjectFieldNoWriteBarrier(
-      array, FixedArray::kLengthOffset,
-      mode == INTEGER_PARAMETERS ? SmiTag(capacity_node) : capacity_node);
+  StoreObjectFieldNoWriteBarrier(array, FixedArray::kLengthOffset,
+                                 TagParameter(capacity_node, mode));
   return array;
 }
 
-void CodeStubAssembler::FillFixedArrayWithHole(ElementsKind kind,
-                                               compiler::Node* array,
-                                               compiler::Node* from_node,
-                                               compiler::Node* to_node,
-                                               ParameterMode mode) {
-  int const first_element_offset = FixedArray::kHeaderSize - kHeapObjectTag;
-  Heap* heap = isolate()->heap();
-  Node* hole = HeapConstant(Handle<HeapObject>(heap->the_hole_value()));
+void CodeStubAssembler::FillFixedArrayWithValue(
+    ElementsKind kind, Node* array, Node* from_node, Node* to_node,
+    Heap::RootListIndex value_root_index, ParameterMode mode) {
+  bool is_double = IsFastDoubleElementsKind(kind);
+  DCHECK(value_root_index == Heap::kTheHoleValueRootIndex ||
+         value_root_index == Heap::kUndefinedValueRootIndex);
+  DCHECK_IMPLIES(is_double, value_root_index == Heap::kTheHoleValueRootIndex);
+  STATIC_ASSERT(kHoleNanLower32 == kHoleNanUpper32);
   Node* double_hole =
       Is64() ? Int64Constant(kHoleNanInt64) : Int32Constant(kHoleNanLower32);
-  DCHECK_EQ(kHoleNanLower32, kHoleNanUpper32);
-  bool is_double = IsFastDoubleElementsKind(kind);
+  Node* value = LoadRoot(value_root_index);
+
+  const int first_element_offset = FixedArray::kHeaderSize - kHeapObjectTag;
   int32_t to;
   bool constant_to = ToInt32Constant(to_node, to);
   int32_t from;
@@ -1436,8 +1509,9 @@
   if (constant_to && constant_from &&
       (to - from) <= kElementLoopUnrollThreshold) {
     for (int i = from; i < to; ++i) {
+      Node* index = IntPtrConstant(i);
       if (is_double) {
-        Node* offset = ElementOffsetFromIndex(Int32Constant(i), kind, mode,
+        Node* offset = ElementOffsetFromIndex(index, kind, INTPTR_PARAMETERS,
                                               first_element_offset);
         // Don't use doubles to store the hole double, since manipulating the
         // signaling NaN used for the hole in C++, e.g. with bit_cast, will
@@ -1453,14 +1527,14 @@
         } else {
           StoreNoWriteBarrier(MachineRepresentation::kWord32, array, offset,
                               double_hole);
-          offset = ElementOffsetFromIndex(Int32Constant(i), kind, mode,
+          offset = ElementOffsetFromIndex(index, kind, INTPTR_PARAMETERS,
                                           first_element_offset + kPointerSize);
           StoreNoWriteBarrier(MachineRepresentation::kWord32, array, offset,
                               double_hole);
         }
       } else {
-        StoreFixedArrayElement(array, Int32Constant(i), hole,
-                               SKIP_WRITE_BARRIER);
+        StoreFixedArrayElement(array, index, value, SKIP_WRITE_BARRIER,
+                               INTPTR_PARAMETERS);
       }
     }
   } else {
@@ -1477,8 +1551,8 @@
     Bind(&decrement);
     current.Bind(IntPtrSub(
         current.value(),
-        Int32Constant(IsFastDoubleElementsKind(kind) ? kDoubleSize
-                                                     : kPointerSize)));
+        IntPtrConstant(IsFastDoubleElementsKind(kind) ? kDoubleSize
+                                                      : kPointerSize)));
     if (is_double) {
       // Don't use doubles to store the hole double, since manipulating the
       // signaling NaN used for the hole in C++, e.g. with bit_cast, will
@@ -1494,15 +1568,13 @@
       } else {
         StoreNoWriteBarrier(MachineRepresentation::kWord32, current.value(),
                             Int32Constant(first_element_offset), double_hole);
-        StoreNoWriteBarrier(
-            MachineRepresentation::kWord32,
-            IntPtrAdd(current.value(),
-                      Int32Constant(kPointerSize + first_element_offset)),
-            double_hole);
+        StoreNoWriteBarrier(MachineRepresentation::kWord32, current.value(),
+                            Int32Constant(kPointerSize + first_element_offset),
+                            double_hole);
       }
     } else {
-      StoreNoWriteBarrier(MachineRepresentation::kTagged, current.value(),
-                          IntPtrConstant(first_element_offset), hole);
+      StoreNoWriteBarrier(MachineType::PointerRepresentation(), current.value(),
+                          IntPtrConstant(first_element_offset), value);
     }
     Node* compare = WordNotEqual(current.value(), limit);
     Branch(compare, &decrement, &done);
@@ -1511,50 +1583,236 @@
   }
 }
 
-void CodeStubAssembler::CopyFixedArrayElements(ElementsKind kind,
-                                               compiler::Node* from_array,
-                                               compiler::Node* to_array,
-                                               compiler::Node* element_count,
-                                               WriteBarrierMode barrier_mode,
-                                               ParameterMode mode) {
-  Label test(this);
-  Label done(this);
-  bool double_elements = IsFastDoubleElementsKind(kind);
-  bool needs_write_barrier =
-      barrier_mode == UPDATE_WRITE_BARRIER && IsFastObjectElementsKind(kind);
-  Node* limit_offset = ElementOffsetFromIndex(
-      IntPtrConstant(0), kind, mode, FixedArray::kHeaderSize - kHeapObjectTag);
-  Variable current_offset(this, MachineType::PointerRepresentation());
-  current_offset.Bind(ElementOffsetFromIndex(
-      element_count, kind, mode, FixedArray::kHeaderSize - kHeapObjectTag));
-  Label decrement(this, &current_offset);
+void CodeStubAssembler::CopyFixedArrayElements(
+    ElementsKind from_kind, Node* from_array, ElementsKind to_kind,
+    Node* to_array, Node* element_count, Node* capacity,
+    WriteBarrierMode barrier_mode, ParameterMode mode) {
+  STATIC_ASSERT(FixedArray::kHeaderSize == FixedDoubleArray::kHeaderSize);
+  const int first_element_offset = FixedArray::kHeaderSize - kHeapObjectTag;
+  Comment("[ CopyFixedArrayElements");
 
-  Branch(WordEqual(current_offset.value(), limit_offset), &done, &decrement);
+  // Typed array elements are not supported.
+  DCHECK(!IsFixedTypedArrayElementsKind(from_kind));
+  DCHECK(!IsFixedTypedArrayElementsKind(to_kind));
+
+  Label done(this);
+  bool from_double_elements = IsFastDoubleElementsKind(from_kind);
+  bool to_double_elements = IsFastDoubleElementsKind(to_kind);
+  bool element_size_matches =
+      Is64() ||
+      IsFastDoubleElementsKind(from_kind) == IsFastDoubleElementsKind(to_kind);
+  bool doubles_to_objects_conversion =
+      IsFastDoubleElementsKind(from_kind) && IsFastObjectElementsKind(to_kind);
+  bool needs_write_barrier =
+      doubles_to_objects_conversion || (barrier_mode == UPDATE_WRITE_BARRIER &&
+                                        IsFastObjectElementsKind(to_kind));
+  Node* double_hole =
+      Is64() ? Int64Constant(kHoleNanInt64) : Int32Constant(kHoleNanLower32);
+
+  if (doubles_to_objects_conversion) {
+    // If the copy might trigger a GC, make sure that the FixedArray is
+    // pre-initialized with holes to make sure that it's always in a
+    // consistent state.
+    FillFixedArrayWithValue(to_kind, to_array, IntPtrOrSmiConstant(0, mode),
+                            capacity, Heap::kTheHoleValueRootIndex, mode);
+  } else if (element_count != capacity) {
+    FillFixedArrayWithValue(to_kind, to_array, element_count, capacity,
+                            Heap::kTheHoleValueRootIndex, mode);
+  }
+
+  Node* limit_offset = ElementOffsetFromIndex(
+      IntPtrOrSmiConstant(0, mode), from_kind, mode, first_element_offset);
+  Variable var_from_offset(this, MachineType::PointerRepresentation());
+  var_from_offset.Bind(ElementOffsetFromIndex(element_count, from_kind, mode,
+                                              first_element_offset));
+  // This second variable is used only when the element sizes of source and
+  // destination arrays do not match.
+  Variable var_to_offset(this, MachineType::PointerRepresentation());
+  if (element_size_matches) {
+    var_to_offset.Bind(var_from_offset.value());
+  } else {
+    var_to_offset.Bind(ElementOffsetFromIndex(element_count, to_kind, mode,
+                                              first_element_offset));
+  }
+
+  Variable* vars[] = {&var_from_offset, &var_to_offset};
+  Label decrement(this, 2, vars);
+
+  Branch(WordEqual(var_from_offset.value(), limit_offset), &done, &decrement);
 
   Bind(&decrement);
   {
-    current_offset.Bind(IntPtrSub(
-        current_offset.value(),
-        IntPtrConstant(double_elements ? kDoubleSize : kPointerSize)));
+    Node* from_offset = IntPtrSub(
+        var_from_offset.value(),
+        IntPtrConstant(from_double_elements ? kDoubleSize : kPointerSize));
+    var_from_offset.Bind(from_offset);
 
-    Node* value =
-        Load(double_elements ? MachineType::Float64() : MachineType::Pointer(),
-             from_array, current_offset.value());
+    Node* to_offset;
+    if (element_size_matches) {
+      to_offset = from_offset;
+    } else {
+      to_offset = IntPtrSub(
+          var_to_offset.value(),
+          IntPtrConstant(to_double_elements ? kDoubleSize : kPointerSize));
+      var_to_offset.Bind(to_offset);
+    }
+
+    Label next_iter(this), store_double_hole(this);
+    Label* if_hole;
+    if (doubles_to_objects_conversion) {
+      // The target elements array is already preinitialized with holes, so we
+      // can just proceed with the next iteration.
+      if_hole = &next_iter;
+    } else if (IsFastDoubleElementsKind(to_kind)) {
+      if_hole = &store_double_hole;
+    } else {
+      // In all the other cases don't check for holes and copy the data as is.
+      if_hole = nullptr;
+    }
+
+    Node* value = LoadElementAndPrepareForStore(
+        from_array, var_from_offset.value(), from_kind, to_kind, if_hole);
+
     if (needs_write_barrier) {
-      Store(MachineRepresentation::kTagged, to_array,
-            current_offset.value(), value);
-    } else if (double_elements) {
-      StoreNoWriteBarrier(MachineRepresentation::kFloat64, to_array,
-                          current_offset.value(), value);
+      Store(MachineRepresentation::kTagged, to_array, to_offset, value);
+    } else if (to_double_elements) {
+      StoreNoWriteBarrier(MachineRepresentation::kFloat64, to_array, to_offset,
+                          value);
     } else {
       StoreNoWriteBarrier(MachineType::PointerRepresentation(), to_array,
-                          current_offset.value(), value);
+                          to_offset, value);
     }
-    Node* compare = WordNotEqual(current_offset.value(), limit_offset);
+    Goto(&next_iter);
+
+    if (if_hole == &store_double_hole) {
+      Bind(&store_double_hole);
+      // Don't use doubles to store the hole double, since manipulating the
+      // signaling NaN used for the hole in C++, e.g. with bit_cast, will
+      // change its value on ia32 (the x87 stack is used to return values
+      // and stores to the stack silently clear the signalling bit).
+      //
+      // TODO(danno): When we have a Float32/Float64 wrapper class that
+      // preserves double bits during manipulation, remove this code/change
+      // this to an indexed Float64 store.
+      if (Is64()) {
+        StoreNoWriteBarrier(MachineRepresentation::kWord64, to_array, to_offset,
+                            double_hole);
+      } else {
+        StoreNoWriteBarrier(MachineRepresentation::kWord32, to_array, to_offset,
+                            double_hole);
+        StoreNoWriteBarrier(MachineRepresentation::kWord32, to_array,
+                            IntPtrAdd(to_offset, IntPtrConstant(kPointerSize)),
+                            double_hole);
+      }
+      Goto(&next_iter);
+    }
+
+    Bind(&next_iter);
+    Node* compare = WordNotEqual(from_offset, limit_offset);
     Branch(compare, &decrement, &done);
   }
 
   Bind(&done);
+  IncrementCounter(isolate()->counters()->inlined_copied_elements(), 1);
+  Comment("] CopyFixedArrayElements");
+}
+
+void CodeStubAssembler::CopyStringCharacters(compiler::Node* from_string,
+                                             compiler::Node* to_string,
+                                             compiler::Node* from_index,
+                                             compiler::Node* character_count,
+                                             String::Encoding encoding) {
+  Label out(this);
+
+  // Nothing to do for zero characters.
+
+  GotoIf(SmiLessThanOrEqual(character_count, SmiConstant(Smi::FromInt(0))),
+         &out);
+
+  // Calculate offsets into the strings.
+
+  Node* from_offset;
+  Node* limit_offset;
+  Node* to_offset;
+
+  {
+    Node* byte_count = SmiUntag(character_count);
+    Node* from_byte_index = SmiUntag(from_index);
+    if (encoding == String::ONE_BYTE_ENCODING) {
+      const int offset = SeqOneByteString::kHeaderSize - kHeapObjectTag;
+      from_offset = IntPtrAdd(IntPtrConstant(offset), from_byte_index);
+      limit_offset = IntPtrAdd(from_offset, byte_count);
+      to_offset = IntPtrConstant(offset);
+    } else {
+      STATIC_ASSERT(2 == sizeof(uc16));
+      byte_count = WordShl(byte_count, 1);
+      from_byte_index = WordShl(from_byte_index, 1);
+
+      const int offset = SeqTwoByteString::kHeaderSize - kHeapObjectTag;
+      from_offset = IntPtrAdd(IntPtrConstant(offset), from_byte_index);
+      limit_offset = IntPtrAdd(from_offset, byte_count);
+      to_offset = IntPtrConstant(offset);
+    }
+  }
+
+  Variable var_from_offset(this, MachineType::PointerRepresentation());
+  Variable var_to_offset(this, MachineType::PointerRepresentation());
+
+  var_from_offset.Bind(from_offset);
+  var_to_offset.Bind(to_offset);
+
+  Variable* vars[] = {&var_from_offset, &var_to_offset};
+  Label decrement(this, 2, vars);
+
+  Label loop(this, 2, vars);
+  Goto(&loop);
+  Bind(&loop);
+  {
+    from_offset = var_from_offset.value();
+    to_offset = var_to_offset.value();
+
+    // TODO(jgruber): We could make this faster through larger copy unit sizes.
+    Node* value = Load(MachineType::Uint8(), from_string, from_offset);
+    StoreNoWriteBarrier(MachineRepresentation::kWord8, to_string, to_offset,
+                        value);
+
+    Node* new_from_offset = IntPtrAdd(from_offset, IntPtrConstant(1));
+    var_from_offset.Bind(new_from_offset);
+    var_to_offset.Bind(IntPtrAdd(to_offset, IntPtrConstant(1)));
+
+    Branch(WordNotEqual(new_from_offset, limit_offset), &loop, &out);
+  }
+
+  Bind(&out);
+}
+
+Node* CodeStubAssembler::LoadElementAndPrepareForStore(Node* array,
+                                                       Node* offset,
+                                                       ElementsKind from_kind,
+                                                       ElementsKind to_kind,
+                                                       Label* if_hole) {
+  if (IsFastDoubleElementsKind(from_kind)) {
+    Node* value =
+        LoadDoubleWithHoleCheck(array, offset, if_hole, MachineType::Float64());
+    if (!IsFastDoubleElementsKind(to_kind)) {
+      value = AllocateHeapNumberWithValue(value);
+    }
+    return value;
+
+  } else {
+    Node* value = Load(MachineType::Pointer(), array, offset);
+    if (if_hole) {
+      GotoIf(WordEqual(value, TheHoleConstant()), if_hole);
+    }
+    if (IsFastDoubleElementsKind(to_kind)) {
+      if (IsFastSmiElementsKind(from_kind)) {
+        value = SmiToFloat64(value);
+      } else {
+        value = LoadHeapNumberValue(value);
+      }
+    }
+    return value;
+  }
 }
 
 Node* CodeStubAssembler::CalculateNewElementsCapacity(Node* old_capacity,
@@ -1563,7 +1821,7 @@
   Node* new_capacity = IntPtrAdd(half_old_capacity, old_capacity);
   Node* unconditioned_result =
       IntPtrAdd(new_capacity, IntPtrOrSmiConstant(16, mode));
-  if (mode == INTEGER_PARAMETERS) {
+  if (mode == INTEGER_PARAMETERS || mode == INTPTR_PARAMETERS) {
     return unconditioned_result;
   } else {
     int const kSmiShiftBits = kSmiShiftSize + kSmiTagSize;
@@ -1572,50 +1830,64 @@
   }
 }
 
-Node* CodeStubAssembler::CheckAndGrowElementsCapacity(Node* context,
-                                                      Node* elements,
-                                                      ElementsKind kind,
-                                                      Node* key, Label* fail) {
+Node* CodeStubAssembler::TryGrowElementsCapacity(Node* object, Node* elements,
+                                                 ElementsKind kind, Node* key,
+                                                 Label* bailout) {
   Node* capacity = LoadFixedArrayBaseLength(elements);
 
-  // On 32-bit platforms, there is a slight performance advantage to doing all
-  // of the arithmetic for the new backing store with SMIs, since it's possible
-  // to save a few tag/untag operations without paying an extra expense when
-  // calculating array offset (the smi math can be folded away) and there are
-  // fewer live ranges. Thus only convert |capacity| and |key| to untagged value
-  // on 64-bit platforms.
-  ParameterMode mode = Is64() ? INTEGER_PARAMETERS : SMI_PARAMETERS;
-  if (mode == INTEGER_PARAMETERS) {
-    capacity = SmiUntag(capacity);
-    key = SmiUntag(key);
-  }
+  ParameterMode mode = OptimalParameterMode();
+  capacity = UntagParameter(capacity, mode);
+  key = UntagParameter(key, mode);
+
+  return TryGrowElementsCapacity(object, elements, kind, key, capacity, mode,
+                                 bailout);
+}
+
+Node* CodeStubAssembler::TryGrowElementsCapacity(Node* object, Node* elements,
+                                                 ElementsKind kind, Node* key,
+                                                 Node* capacity,
+                                                 ParameterMode mode,
+                                                 Label* bailout) {
+  Comment("TryGrowElementsCapacity");
 
   // If the gap growth is too big, fall back to the runtime.
   Node* max_gap = IntPtrOrSmiConstant(JSObject::kMaxGap, mode);
   Node* max_capacity = IntPtrAdd(capacity, max_gap);
-  GotoIf(UintPtrGreaterThanOrEqual(key, max_capacity), fail);
+  GotoIf(UintPtrGreaterThanOrEqual(key, max_capacity), bailout);
 
-  // Calculate the capacity of the new backing tore
+  // Calculate the capacity of the new backing store.
   Node* new_capacity = CalculateNewElementsCapacity(
       IntPtrAdd(key, IntPtrOrSmiConstant(1, mode)), mode);
+  return GrowElementsCapacity(object, elements, kind, kind, capacity,
+                              new_capacity, mode, bailout);
+}
 
+Node* CodeStubAssembler::GrowElementsCapacity(
+    Node* object, Node* elements, ElementsKind from_kind, ElementsKind to_kind,
+    Node* capacity, Node* new_capacity, ParameterMode mode, Label* bailout) {
+  Comment("[ GrowElementsCapacity");
   // If size of the allocation for the new capacity doesn't fit in a page
-  // that we can bump-pointer allocate from, fall back to the runtime,
-  int max_size = FixedArrayBase::GetMaxLengthForNewSpaceAllocation(kind);
+  // that we can bump-pointer allocate from, fall back to the runtime.
+  int max_size = FixedArrayBase::GetMaxLengthForNewSpaceAllocation(to_kind);
   GotoIf(UintPtrGreaterThanOrEqual(new_capacity,
                                    IntPtrOrSmiConstant(max_size, mode)),
-         fail);
+         bailout);
 
   // Allocate the new backing store.
-  Node* new_elements = AllocateFixedArray(kind, new_capacity, mode);
+  Node* new_elements = AllocateFixedArray(to_kind, new_capacity, mode);
 
   // Fill in the added capacity in the new store with holes.
-  FillFixedArrayWithHole(kind, new_elements, capacity, new_capacity, mode);
+  FillFixedArrayWithValue(to_kind, new_elements, capacity, new_capacity,
+                          Heap::kTheHoleValueRootIndex, mode);
 
   // Copy the elements from the old elements store to the new.
-  CopyFixedArrayElements(kind, elements, new_elements, capacity,
-                         SKIP_WRITE_BARRIER, mode);
+  // The size-check above guarantees that the |new_elements| is allocated
+  // in new space so we can skip the write barrier.
+  CopyFixedArrayElements(from_kind, elements, to_kind, new_elements, capacity,
+                         new_capacity, SKIP_WRITE_BARRIER, mode);
 
+  StoreObjectField(object, JSObject::kElementsOffset, new_elements);
+  Comment("] GrowElementsCapacity");
   return new_elements;
 }
 
@@ -1874,9 +2146,8 @@
 
     // Check if the {value} is already String.
     Label if_valueisnotstring(this, Label::kDeferred);
-    Branch(
-        Int32LessThan(value_instance_type, Int32Constant(FIRST_NONSTRING_TYPE)),
-        &if_valueisstring, &if_valueisnotstring);
+    Branch(IsStringInstanceType(value_instance_type), &if_valueisstring,
+           &if_valueisnotstring);
     Bind(&if_valueisnotstring);
     {
       // Check if the {value} is null.
@@ -1969,9 +2240,7 @@
               &done_loop);
           break;
         case PrimitiveType::kString:
-          GotoIf(Int32LessThan(value_instance_type,
-                               Int32Constant(FIRST_NONSTRING_TYPE)),
-                 &done_loop);
+          GotoIf(IsStringInstanceType(value_instance_type), &done_loop);
           break;
         case PrimitiveType::kSymbol:
           GotoIf(Word32Equal(value_instance_type, Int32Constant(SYMBOL_TYPE)),
@@ -1995,6 +2264,45 @@
   return var_value.value();
 }
 
+Node* CodeStubAssembler::ThrowIfNotInstanceType(Node* context, Node* value,
+                                                InstanceType instance_type,
+                                                char const* method_name) {
+  Label out(this), throw_exception(this, Label::kDeferred);
+  Variable var_value_map(this, MachineRepresentation::kTagged);
+
+  GotoIf(WordIsSmi(value), &throw_exception);
+
+  // Load the instance type of the {value}.
+  var_value_map.Bind(LoadMap(value));
+  Node* const value_instance_type = LoadMapInstanceType(var_value_map.value());
+
+  Branch(Word32Equal(value_instance_type, Int32Constant(instance_type)), &out,
+         &throw_exception);
+
+  // The {value} is not a compatible receiver for this method.
+  Bind(&throw_exception);
+  CallRuntime(
+      Runtime::kThrowIncompatibleMethodReceiver, context,
+      HeapConstant(factory()->NewStringFromAsciiChecked(method_name, TENURED)),
+      value);
+  var_value_map.Bind(UndefinedConstant());
+  Goto(&out);  // Never reached.
+
+  Bind(&out);
+  return var_value_map.value();
+}
+
+Node* CodeStubAssembler::IsStringInstanceType(Node* instance_type) {
+  STATIC_ASSERT(INTERNALIZED_STRING_TYPE == FIRST_TYPE);
+  return Int32LessThan(instance_type, Int32Constant(FIRST_NONSTRING_TYPE));
+}
+
+Node* CodeStubAssembler::IsJSReceiverInstanceType(Node* instance_type) {
+  STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
+  return Int32GreaterThanOrEqual(instance_type,
+                                 Int32Constant(FIRST_JS_RECEIVER_TYPE));
+}
+
 Node* CodeStubAssembler::StringCharCodeAt(Node* string, Node* index) {
   // Translate the {index} into a Word.
   index = SmiToWord(index);
@@ -2102,14 +2410,14 @@
         Bind(&if_stringisexternal);
         {
           // Check if the {string} is a short external string.
-          Label if_stringisshort(this),
-              if_stringisnotshort(this, Label::kDeferred);
+          Label if_stringisnotshort(this),
+              if_stringisshort(this, Label::kDeferred);
           Branch(Word32Equal(Word32And(string_instance_type,
                                        Int32Constant(kShortExternalStringMask)),
                              Int32Constant(0)),
-                 &if_stringisshort, &if_stringisnotshort);
+                 &if_stringisnotshort, &if_stringisshort);
 
-          Bind(&if_stringisshort);
+          Bind(&if_stringisnotshort);
           {
             // Load the actual resource data from the {string}.
             Node* string_resource_data =
@@ -2139,7 +2447,7 @@
             }
           }
 
-          Bind(&if_stringisnotshort);
+          Bind(&if_stringisshort);
           {
             // The {string} might be compressed, call the runtime.
             var_result.Bind(SmiToWord32(
@@ -2224,6 +2532,586 @@
   return var_result.value();
 }
 
+namespace {
+
+// A wrapper around CopyStringCharacters which determines the correct string
+// encoding, allocates a corresponding sequential string, and then copies the
+// given character range using CopyStringCharacters.
+// |from_string| must be a sequential string. |from_index| and
+// |character_count| must be Smis s.t.
+// 0 <= |from_index| <= |from_index| + |character_count| < from_string.length.
+Node* AllocAndCopyStringCharacters(CodeStubAssembler* a, Node* context,
+                                   Node* from, Node* from_instance_type,
+                                   Node* from_index, Node* character_count) {
+  typedef CodeStubAssembler::Label Label;
+  typedef CodeStubAssembler::Variable Variable;
+
+  Label end(a), two_byte_sequential(a);
+  Variable var_result(a, MachineRepresentation::kTagged);
+
+  STATIC_ASSERT((kOneByteStringTag & kStringEncodingMask) != 0);
+  a->GotoIf(a->Word32Equal(a->Word32And(from_instance_type,
+                                        a->Int32Constant(kStringEncodingMask)),
+                           a->Int32Constant(0)),
+            &two_byte_sequential);
+
+  // The subject string is a sequential one-byte string.
+  {
+    Node* result =
+        a->AllocateSeqOneByteString(context, a->SmiToWord(character_count));
+    a->CopyStringCharacters(from, result, from_index, character_count,
+                            String::ONE_BYTE_ENCODING);
+    var_result.Bind(result);
+
+    a->Goto(&end);
+  }
+
+  // The subject string is a sequential two-byte string.
+  a->Bind(&two_byte_sequential);
+  {
+    Node* result =
+        a->AllocateSeqTwoByteString(context, a->SmiToWord(character_count));
+    a->CopyStringCharacters(from, result, from_index, character_count,
+                            String::TWO_BYTE_ENCODING);
+    var_result.Bind(result);
+
+    a->Goto(&end);
+  }
+
+  a->Bind(&end);
+  return var_result.value();
+}
+
+}  // namespace
+
+Node* CodeStubAssembler::SubString(Node* context, Node* string, Node* from,
+                                   Node* to) {
+  Label end(this);
+  Label runtime(this);
+
+  Variable var_instance_type(this, MachineRepresentation::kWord8);  // Int32.
+  Variable var_result(this, MachineRepresentation::kTagged);        // String.
+  Variable var_from(this, MachineRepresentation::kTagged);          // Smi.
+  Variable var_string(this, MachineRepresentation::kTagged);        // String.
+
+  var_instance_type.Bind(Int32Constant(0));
+  var_string.Bind(string);
+  var_from.Bind(from);
+
+  // Make sure first argument is a string.
+
+  // Bailout if receiver is a Smi.
+  GotoIf(WordIsSmi(string), &runtime);
+
+  // Load the instance type of the {string}.
+  Node* const instance_type = LoadInstanceType(string);
+  var_instance_type.Bind(instance_type);
+
+  // Check if {string} is a String.
+  GotoUnless(IsStringInstanceType(instance_type), &runtime);
+
+  // Make sure that both from and to are non-negative smis.
+
+  GotoUnless(WordIsPositiveSmi(from), &runtime);
+  GotoUnless(WordIsPositiveSmi(to), &runtime);
+
+  Node* const substr_length = SmiSub(to, from);
+  Node* const string_length = LoadStringLength(string);
+
+  // Begin dispatching based on substring length.
+
+  Label original_string_or_invalid_length(this);
+  GotoIf(SmiAboveOrEqual(substr_length, string_length),
+         &original_string_or_invalid_length);
+
+  // A real substring (substr_length < string_length).
+
+  Label single_char(this);
+  GotoIf(SmiEqual(substr_length, SmiConstant(Smi::FromInt(1))), &single_char);
+
+  // TODO(jgruber): Add an additional case for substring of length == 0?
+
+  // Deal with different string types: update the index if necessary
+  // and put the underlying string into var_string.
+
+  // If the string is not indirect, it can only be sequential or external.
+  STATIC_ASSERT(kIsIndirectStringMask == (kSlicedStringTag & kConsStringTag));
+  STATIC_ASSERT(kIsIndirectStringMask != 0);
+  Label underlying_unpacked(this);
+  GotoIf(Word32Equal(
+             Word32And(instance_type, Int32Constant(kIsIndirectStringMask)),
+             Int32Constant(0)),
+         &underlying_unpacked);
+
+  // The subject string is either a sliced or cons string.
+
+  Label sliced_string(this);
+  GotoIf(Word32NotEqual(
+             Word32And(instance_type, Int32Constant(kSlicedNotConsMask)),
+             Int32Constant(0)),
+         &sliced_string);
+
+  // Cons string.  Check whether it is flat, then fetch first part.
+  // Flat cons strings have an empty second part.
+  {
+    GotoIf(WordNotEqual(LoadObjectField(string, ConsString::kSecondOffset),
+                        EmptyStringConstant()),
+           &runtime);
+
+    Node* first_string_part = LoadObjectField(string, ConsString::kFirstOffset);
+    var_string.Bind(first_string_part);
+    var_instance_type.Bind(LoadInstanceType(first_string_part));
+
+    Goto(&underlying_unpacked);
+  }
+
+  Bind(&sliced_string);
+  {
+    // Fetch parent and correct start index by offset.
+    Node* sliced_offset = LoadObjectField(string, SlicedString::kOffsetOffset);
+    var_from.Bind(SmiAdd(from, sliced_offset));
+
+    Node* slice_parent = LoadObjectField(string, SlicedString::kParentOffset);
+    var_string.Bind(slice_parent);
+
+    Node* slice_parent_instance_type = LoadInstanceType(slice_parent);
+    var_instance_type.Bind(slice_parent_instance_type);
+
+    Goto(&underlying_unpacked);
+  }
+
+  // The subject string can only be external or sequential string of either
+  // encoding at this point.
+  Label external_string(this);
+  Bind(&underlying_unpacked);
+  {
+    if (FLAG_string_slices) {
+      Label copy_routine(this);
+
+      // Short slice.  Copy instead of slicing.
+      GotoIf(SmiLessThan(substr_length,
+                         SmiConstant(Smi::FromInt(SlicedString::kMinLength))),
+             &copy_routine);
+
+      // Allocate new sliced string.
+
+      Label two_byte_slice(this);
+      STATIC_ASSERT((kStringEncodingMask & kOneByteStringTag) != 0);
+      STATIC_ASSERT((kStringEncodingMask & kTwoByteStringTag) == 0);
+
+      Counters* counters = isolate()->counters();
+      IncrementCounter(counters->sub_string_native(), 1);
+
+      GotoIf(Word32Equal(Word32And(var_instance_type.value(),
+                                   Int32Constant(kStringEncodingMask)),
+                         Int32Constant(0)),
+             &two_byte_slice);
+
+      var_result.Bind(AllocateSlicedOneByteString(
+          substr_length, var_string.value(), var_from.value()));
+      Goto(&end);
+
+      Bind(&two_byte_slice);
+
+      var_result.Bind(AllocateSlicedTwoByteString(
+          substr_length, var_string.value(), var_from.value()));
+      Goto(&end);
+
+      Bind(&copy_routine);
+    }
+
+    // The subject string can only be external or sequential string of either
+    // encoding at this point.
+    STATIC_ASSERT(kExternalStringTag != 0);
+    STATIC_ASSERT(kSeqStringTag == 0);
+    GotoUnless(Word32Equal(Word32And(var_instance_type.value(),
+                                     Int32Constant(kExternalStringTag)),
+                           Int32Constant(0)),
+               &external_string);
+
+    var_result.Bind(AllocAndCopyStringCharacters(
+        this, context, var_string.value(), var_instance_type.value(),
+        var_from.value(), substr_length));
+
+    Counters* counters = isolate()->counters();
+    IncrementCounter(counters->sub_string_native(), 1);
+
+    Goto(&end);
+  }
+
+  // Handle external string.
+  Bind(&external_string);
+  {
+    // Rule out short external strings.
+    STATIC_ASSERT(kShortExternalStringTag != 0);
+    GotoIf(Word32NotEqual(Word32And(var_instance_type.value(),
+                                    Int32Constant(kShortExternalStringMask)),
+                          Int32Constant(0)),
+           &runtime);
+
+    // Move the pointer so that offset-wise, it looks like a sequential string.
+    STATIC_ASSERT(SeqTwoByteString::kHeaderSize ==
+                  SeqOneByteString::kHeaderSize);
+
+    Node* resource_data = LoadObjectField(var_string.value(),
+                                          ExternalString::kResourceDataOffset);
+    Node* const fake_sequential_string = IntPtrSub(
+        resource_data,
+        IntPtrConstant(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
+
+    var_result.Bind(AllocAndCopyStringCharacters(
+        this, context, fake_sequential_string, var_instance_type.value(),
+        var_from.value(), substr_length));
+
+    Counters* counters = isolate()->counters();
+    IncrementCounter(counters->sub_string_native(), 1);
+
+    Goto(&end);
+  }
+
+  // Substrings of length 1 are generated through CharCodeAt and FromCharCode.
+  Bind(&single_char);
+  {
+    Node* char_code = StringCharCodeAt(var_string.value(), var_from.value());
+    var_result.Bind(StringFromCharCode(char_code));
+    Goto(&end);
+  }
+
+  Bind(&original_string_or_invalid_length);
+  {
+    // Longer than original string's length or negative: unsafe arguments.
+    GotoIf(SmiAbove(substr_length, string_length), &runtime);
+
+    // Equal length - check if {from, to} == {0, str.length}.
+    GotoIf(SmiAbove(from, SmiConstant(Smi::FromInt(0))), &runtime);
+
+    // Return the original string (substr_length == string_length).
+
+    Counters* counters = isolate()->counters();
+    IncrementCounter(counters->sub_string_native(), 1);
+
+    var_result.Bind(string);
+    Goto(&end);
+  }
+
+  // Fall back to a runtime call.
+  Bind(&runtime);
+  {
+    var_result.Bind(
+        CallRuntime(Runtime::kSubString, context, string, from, to));
+    Goto(&end);
+  }
+
+  Bind(&end);
+  return var_result.value();
+}
+
+Node* CodeStubAssembler::StringFromCodePoint(compiler::Node* codepoint,
+                                             UnicodeEncoding encoding) {
+  Variable var_result(this, MachineRepresentation::kTagged);
+  var_result.Bind(EmptyStringConstant());
+
+  Label if_isword16(this), if_isword32(this), return_result(this);
+
+  Branch(Uint32LessThan(codepoint, Int32Constant(0x10000)), &if_isword16,
+         &if_isword32);
+
+  Bind(&if_isword16);
+  {
+    var_result.Bind(StringFromCharCode(codepoint));
+    Goto(&return_result);
+  }
+
+  Bind(&if_isword32);
+  {
+    switch (encoding) {
+      case UnicodeEncoding::UTF16:
+        break;
+      case UnicodeEncoding::UTF32: {
+        // Convert UTF32 to UTF16 code units, and store as a 32-bit word.
+        Node* lead_offset = Int32Constant(0xD800 - (0x10000 >> 10));
+
+        // lead = (codepoint >> 10) + LEAD_OFFSET
+        Node* lead =
+            Int32Add(WordShr(codepoint, Int32Constant(10)), lead_offset);
+
+        // trail = (codepoint & 0x3FF) + 0xDC00;
+        Node* trail = Int32Add(Word32And(codepoint, Int32Constant(0x3FF)),
+                               Int32Constant(0xDC00));
+
+        // codepoint = (trail << 16) | lead;
+        codepoint = Word32Or(WordShl(trail, Int32Constant(16)), lead);
+        break;
+      }
+    }
+
+    Node* value = AllocateSeqTwoByteString(2);
+    StoreNoWriteBarrier(
+        MachineRepresentation::kWord32, value,
+        IntPtrConstant(SeqTwoByteString::kHeaderSize - kHeapObjectTag),
+        codepoint);
+    var_result.Bind(value);
+    Goto(&return_result);
+  }
+
+  Bind(&return_result);
+  return var_result.value();
+}
+
+Node* CodeStubAssembler::StringToNumber(Node* context, Node* input) {
+  Label runtime(this, Label::kDeferred);
+  Label end(this);
+
+  Variable var_result(this, MachineRepresentation::kTagged);
+
+  // Check if string has a cached array index.
+  Node* hash = LoadNameHashField(input);
+  Node* bit =
+      Word32And(hash, Int32Constant(String::kContainsCachedArrayIndexMask));
+  GotoIf(Word32NotEqual(bit, Int32Constant(0)), &runtime);
+
+  var_result.Bind(SmiTag(BitFieldDecode<String::ArrayIndexValueBits>(hash)));
+  Goto(&end);
+
+  Bind(&runtime);
+  {
+    var_result.Bind(CallRuntime(Runtime::kStringToNumber, context, input));
+    Goto(&end);
+  }
+
+  Bind(&end);
+  return var_result.value();
+}
+
+Node* CodeStubAssembler::ToName(Node* context, Node* value) {
+  typedef CodeStubAssembler::Label Label;
+  typedef CodeStubAssembler::Variable Variable;
+
+  Label end(this);
+  Variable var_result(this, MachineRepresentation::kTagged);
+
+  Label is_number(this);
+  GotoIf(WordIsSmi(value), &is_number);
+
+  Label not_name(this);
+  Node* value_instance_type = LoadInstanceType(value);
+  STATIC_ASSERT(FIRST_NAME_TYPE == FIRST_TYPE);
+  GotoIf(Int32GreaterThan(value_instance_type, Int32Constant(LAST_NAME_TYPE)),
+         &not_name);
+
+  var_result.Bind(value);
+  Goto(&end);
+
+  Bind(&is_number);
+  {
+    Callable callable = CodeFactory::NumberToString(isolate());
+    var_result.Bind(CallStub(callable, context, value));
+    Goto(&end);
+  }
+
+  Bind(&not_name);
+  {
+    GotoIf(Word32Equal(value_instance_type, Int32Constant(HEAP_NUMBER_TYPE)),
+           &is_number);
+
+    Label not_oddball(this);
+    GotoIf(Word32NotEqual(value_instance_type, Int32Constant(ODDBALL_TYPE)),
+           &not_oddball);
+
+    var_result.Bind(LoadObjectField(value, Oddball::kToStringOffset));
+    Goto(&end);
+
+    Bind(&not_oddball);
+    {
+      var_result.Bind(CallRuntime(Runtime::kToName, context, value));
+      Goto(&end);
+    }
+  }
+
+  Bind(&end);
+  return var_result.value();
+}
+
+Node* CodeStubAssembler::NonNumberToNumber(Node* context, Node* input) {
+  // Assert input is a HeapObject (not smi or heap number)
+  Assert(Word32BinaryNot(WordIsSmi(input)));
+  Assert(Word32NotEqual(LoadMap(input), HeapNumberMapConstant()));
+
+  // We might need to loop once here due to ToPrimitive conversions.
+  Variable var_input(this, MachineRepresentation::kTagged);
+  Variable var_result(this, MachineRepresentation::kTagged);
+  Label loop(this, &var_input);
+  Label end(this);
+  var_input.Bind(input);
+  Goto(&loop);
+  Bind(&loop);
+  {
+    // Load the current {input} value (known to be a HeapObject).
+    Node* input = var_input.value();
+
+    // Dispatch on the {input} instance type.
+    Node* input_instance_type = LoadInstanceType(input);
+    Label if_inputisstring(this), if_inputisoddball(this),
+        if_inputisreceiver(this, Label::kDeferred),
+        if_inputisother(this, Label::kDeferred);
+    GotoIf(IsStringInstanceType(input_instance_type), &if_inputisstring);
+    GotoIf(Word32Equal(input_instance_type, Int32Constant(ODDBALL_TYPE)),
+           &if_inputisoddball);
+    Branch(IsJSReceiverInstanceType(input_instance_type), &if_inputisreceiver,
+           &if_inputisother);
+
+    Bind(&if_inputisstring);
+    {
+      // The {input} is a String, use the fast stub to convert it to a Number.
+      var_result.Bind(StringToNumber(context, input));
+      Goto(&end);
+    }
+
+    Bind(&if_inputisoddball);
+    {
+      // The {input} is an Oddball, we just need to load the Number value of it.
+      var_result.Bind(LoadObjectField(input, Oddball::kToNumberOffset));
+      Goto(&end);
+    }
+
+    Bind(&if_inputisreceiver);
+    {
+      // The {input} is a JSReceiver, we need to convert it to a Primitive first
+      // using the ToPrimitive type conversion, preferably yielding a Number.
+      Callable callable = CodeFactory::NonPrimitiveToPrimitive(
+          isolate(), ToPrimitiveHint::kNumber);
+      Node* result = CallStub(callable, context, input);
+
+      // Check if the {result} is already a Number.
+      Label if_resultisnumber(this), if_resultisnotnumber(this);
+      GotoIf(WordIsSmi(result), &if_resultisnumber);
+      Node* result_map = LoadMap(result);
+      Branch(WordEqual(result_map, HeapNumberMapConstant()), &if_resultisnumber,
+             &if_resultisnotnumber);
+
+      Bind(&if_resultisnumber);
+      {
+        // The ToPrimitive conversion already gave us a Number, so we're done.
+        var_result.Bind(result);
+        Goto(&end);
+      }
+
+      Bind(&if_resultisnotnumber);
+      {
+        // We now have a Primitive {result}, but it's not yet a Number.
+        var_input.Bind(result);
+        Goto(&loop);
+      }
+    }
+
+    Bind(&if_inputisother);
+    {
+      // The {input} is something else (i.e. Symbol or Simd128Value), let the
+      // runtime figure out the correct exception.
+      // Note: We cannot tail call to the runtime here, as js-to-wasm
+      // trampolines also use this code currently, and they declare all
+      // outgoing parameters as untagged, while we would push a tagged
+      // object here.
+      var_result.Bind(CallRuntime(Runtime::kToNumber, context, input));
+      Goto(&end);
+    }
+  }
+
+  Bind(&end);
+  return var_result.value();
+}
+
+Node* CodeStubAssembler::ToNumber(Node* context, Node* input) {
+  Variable var_result(this, MachineRepresentation::kTagged);
+  Label end(this);
+
+  Label not_smi(this, Label::kDeferred);
+  GotoUnless(WordIsSmi(input), &not_smi);
+  var_result.Bind(input);
+  Goto(&end);
+
+  Bind(&not_smi);
+  {
+    Label not_heap_number(this, Label::kDeferred);
+    Node* input_map = LoadMap(input);
+    GotoIf(Word32NotEqual(input_map, HeapNumberMapConstant()),
+           &not_heap_number);
+
+    var_result.Bind(input);
+    Goto(&end);
+
+    Bind(&not_heap_number);
+    {
+      var_result.Bind(NonNumberToNumber(context, input));
+      Goto(&end);
+    }
+  }
+
+  Bind(&end);
+  return var_result.value();
+}
+
+Node* CodeStubAssembler::ToInteger(Node* context, Node* input,
+                                   ToIntegerTruncationMode mode) {
+  // We might need to loop once for ToNumber conversion.
+  Variable var_arg(this, MachineRepresentation::kTagged);
+  Label loop(this, &var_arg), out(this);
+  var_arg.Bind(input);
+  Goto(&loop);
+  Bind(&loop);
+  {
+    // Shared entry points.
+    Label return_zero(this, Label::kDeferred);
+
+    // Load the current {arg} value.
+    Node* arg = var_arg.value();
+
+    // Check if {arg} is a Smi.
+    GotoIf(WordIsSmi(arg), &out);
+
+    // Check if {arg} is a HeapNumber.
+    Label if_argisheapnumber(this),
+        if_argisnotheapnumber(this, Label::kDeferred);
+    Branch(WordEqual(LoadMap(arg), HeapNumberMapConstant()),
+           &if_argisheapnumber, &if_argisnotheapnumber);
+
+    Bind(&if_argisheapnumber);
+    {
+      // Load the floating-point value of {arg}.
+      Node* arg_value = LoadHeapNumberValue(arg);
+
+      // Check if {arg} is NaN.
+      GotoUnless(Float64Equal(arg_value, arg_value), &return_zero);
+
+      // Truncate {arg} towards zero.
+      Node* value = Float64Trunc(arg_value);
+
+      if (mode == kTruncateMinusZero) {
+        // Truncate -0.0 to 0.
+        GotoIf(Float64Equal(value, Float64Constant(0.0)), &return_zero);
+      }
+
+      var_arg.Bind(ChangeFloat64ToTagged(value));
+      Goto(&out);
+    }
+
+    Bind(&if_argisnotheapnumber);
+    {
+      // Need to convert {arg} to a Number first.
+      Callable callable = CodeFactory::NonNumberToNumber(isolate());
+      var_arg.Bind(CallStub(callable, context, arg));
+      Goto(&loop);
+    }
+
+    Bind(&return_zero);
+    var_arg.Bind(SmiConstant(Smi::FromInt(0)));
+    Goto(&out);
+  }
+
+  Bind(&out);
+  return var_arg.value();
+}
+
 Node* CodeStubAssembler::BitFieldDecode(Node* word32, uint32_t shift,
                                         uint32_t mask) {
   return Word32Shr(Word32And(word32, Int32Constant(mask)),
@@ -2265,54 +3153,49 @@
 void CodeStubAssembler::TryToName(Node* key, Label* if_keyisindex,
                                   Variable* var_index, Label* if_keyisunique,
                                   Label* if_bailout) {
-  DCHECK_EQ(MachineRepresentation::kWord32, var_index->rep());
+  DCHECK_EQ(MachineType::PointerRepresentation(), var_index->rep());
   Comment("TryToName");
 
-  Label if_keyissmi(this), if_keyisnotsmi(this);
-  Branch(WordIsSmi(key), &if_keyissmi, &if_keyisnotsmi);
-  Bind(&if_keyissmi);
-  {
-    // Negative smi keys are named properties. Handle in the runtime.
-    GotoUnless(WordIsPositiveSmi(key), if_bailout);
+  Label if_hascachedindex(this), if_keyisnotindex(this);
+  // Handle Smi and HeapNumber keys.
+  var_index->Bind(TryToIntptr(key, &if_keyisnotindex));
+  Goto(if_keyisindex);
 
-    var_index->Bind(SmiToWord32(key));
-    Goto(if_keyisindex);
-  }
-
-  Bind(&if_keyisnotsmi);
-
+  Bind(&if_keyisnotindex);
   Node* key_instance_type = LoadInstanceType(key);
   // Symbols are unique.
   GotoIf(Word32Equal(key_instance_type, Int32Constant(SYMBOL_TYPE)),
          if_keyisunique);
-
-  Label if_keyisinternalized(this);
-  Node* bits =
-      WordAnd(key_instance_type,
-              Int32Constant(kIsNotStringMask | kIsNotInternalizedMask));
-  Branch(Word32Equal(bits, Int32Constant(kStringTag | kInternalizedTag)),
-         &if_keyisinternalized, if_bailout);
-  Bind(&if_keyisinternalized);
-
-  // Check whether the key is an array index passed in as string. Handle
-  // uniform with smi keys if so.
-  // TODO(verwaest): Also support non-internalized strings.
+  // Miss if |key| is not a String.
+  STATIC_ASSERT(FIRST_NAME_TYPE == FIRST_TYPE);
+  GotoUnless(IsStringInstanceType(key_instance_type), if_bailout);
+  // |key| is a String. Check if it has a cached array index.
   Node* hash = LoadNameHashField(key);
-  Node* bit = Word32And(hash, Int32Constant(Name::kIsNotArrayIndexMask));
-  GotoIf(Word32NotEqual(bit, Int32Constant(0)), if_keyisunique);
-  // Key is an index. Check if it is small enough to be encoded in the
-  // hash_field. Handle too big array index in runtime.
-  bit = Word32And(hash, Int32Constant(Name::kContainsCachedArrayIndexMask));
-  GotoIf(Word32NotEqual(bit, Int32Constant(0)), if_bailout);
+  Node* contains_index =
+      Word32And(hash, Int32Constant(Name::kContainsCachedArrayIndexMask));
+  GotoIf(Word32Equal(contains_index, Int32Constant(0)), &if_hascachedindex);
+  // No cached array index. If the string knows that it contains an index,
+  // then it must be an uncacheable index. Handle this case in the runtime.
+  Node* not_an_index =
+      Word32And(hash, Int32Constant(Name::kIsNotArrayIndexMask));
+  GotoIf(Word32Equal(not_an_index, Int32Constant(0)), if_bailout);
+  // Finally, check if |key| is internalized.
+  STATIC_ASSERT(kNotInternalizedTag != 0);
+  Node* not_internalized =
+      Word32And(key_instance_type, Int32Constant(kIsNotInternalizedMask));
+  GotoIf(Word32NotEqual(not_internalized, Int32Constant(0)), if_bailout);
+  Goto(if_keyisunique);
+
+  Bind(&if_hascachedindex);
   var_index->Bind(BitFieldDecode<Name::ArrayIndexValueBits>(hash));
   Goto(if_keyisindex);
 }
 
 template <typename Dictionary>
 Node* CodeStubAssembler::EntryToIndex(Node* entry, int field_index) {
-  Node* entry_index = Int32Mul(entry, Int32Constant(Dictionary::kEntrySize));
-  return Int32Add(entry_index,
-                  Int32Constant(Dictionary::kElementsStartIndex + field_index));
+  Node* entry_index = IntPtrMul(entry, IntPtrConstant(Dictionary::kEntrySize));
+  return IntPtrAdd(entry_index, IntPtrConstant(Dictionary::kElementsStartIndex +
+                                               field_index));
 }
 
 template <typename Dictionary>
@@ -2321,34 +3204,36 @@
                                              Variable* var_name_index,
                                              Label* if_not_found,
                                              int inlined_probes) {
-  DCHECK_EQ(MachineRepresentation::kWord32, var_name_index->rep());
+  DCHECK_EQ(MachineType::PointerRepresentation(), var_name_index->rep());
   Comment("NameDictionaryLookup");
 
-  Node* capacity = LoadAndUntagToWord32FixedArrayElement(
-      dictionary, Int32Constant(Dictionary::kCapacityIndex));
-  Node* mask = Int32Sub(capacity, Int32Constant(1));
-  Node* hash = LoadNameHash(unique_name);
+  Node* capacity = SmiUntag(LoadFixedArrayElement(
+      dictionary, IntPtrConstant(Dictionary::kCapacityIndex), 0,
+      INTPTR_PARAMETERS));
+  Node* mask = IntPtrSub(capacity, IntPtrConstant(1));
+  Node* hash = ChangeUint32ToWord(LoadNameHash(unique_name));
 
   // See Dictionary::FirstProbe().
-  Node* count = Int32Constant(0);
-  Node* entry = Word32And(hash, mask);
+  Node* count = IntPtrConstant(0);
+  Node* entry = WordAnd(hash, mask);
 
   for (int i = 0; i < inlined_probes; i++) {
     Node* index = EntryToIndex<Dictionary>(entry);
     var_name_index->Bind(index);
 
-    Node* current = LoadFixedArrayElement(dictionary, index);
+    Node* current =
+        LoadFixedArrayElement(dictionary, index, 0, INTPTR_PARAMETERS);
     GotoIf(WordEqual(current, unique_name), if_found);
 
     // See Dictionary::NextProbe().
-    count = Int32Constant(i + 1);
-    entry = Word32And(Int32Add(entry, count), mask);
+    count = IntPtrConstant(i + 1);
+    entry = WordAnd(IntPtrAdd(entry, count), mask);
   }
 
   Node* undefined = UndefinedConstant();
 
-  Variable var_count(this, MachineRepresentation::kWord32);
-  Variable var_entry(this, MachineRepresentation::kWord32);
+  Variable var_count(this, MachineType::PointerRepresentation());
+  Variable var_entry(this, MachineType::PointerRepresentation());
   Variable* loop_vars[] = {&var_count, &var_entry, var_name_index};
   Label loop(this, 3, loop_vars);
   var_count.Bind(count);
@@ -2362,13 +3247,14 @@
     Node* index = EntryToIndex<Dictionary>(entry);
     var_name_index->Bind(index);
 
-    Node* current = LoadFixedArrayElement(dictionary, index);
+    Node* current =
+        LoadFixedArrayElement(dictionary, index, 0, INTPTR_PARAMETERS);
     GotoIf(WordEqual(current, undefined), if_not_found);
     GotoIf(WordEqual(current, unique_name), if_found);
 
     // See Dictionary::NextProbe().
-    count = Int32Add(count, Int32Constant(1));
-    entry = Word32And(Int32Add(entry, count), mask);
+    count = IntPtrAdd(count, IntPtrConstant(1));
+    entry = WordAnd(IntPtrAdd(entry, count), mask);
 
     var_count.Bind(count);
     var_entry.Bind(entry);
@@ -2397,34 +3283,36 @@
 }
 
 template <typename Dictionary>
-void CodeStubAssembler::NumberDictionaryLookup(Node* dictionary, Node* key,
+void CodeStubAssembler::NumberDictionaryLookup(Node* dictionary,
+                                               Node* intptr_index,
                                                Label* if_found,
                                                Variable* var_entry,
                                                Label* if_not_found) {
-  DCHECK_EQ(MachineRepresentation::kWord32, var_entry->rep());
+  DCHECK_EQ(MachineType::PointerRepresentation(), var_entry->rep());
   Comment("NumberDictionaryLookup");
 
-  Node* capacity = LoadAndUntagToWord32FixedArrayElement(
-      dictionary, Int32Constant(Dictionary::kCapacityIndex));
-  Node* mask = Int32Sub(capacity, Int32Constant(1));
+  Node* capacity = SmiUntag(LoadFixedArrayElement(
+      dictionary, IntPtrConstant(Dictionary::kCapacityIndex), 0,
+      INTPTR_PARAMETERS));
+  Node* mask = IntPtrSub(capacity, IntPtrConstant(1));
 
-  Node* seed;
+  Node* int32_seed;
   if (Dictionary::ShapeT::UsesSeed) {
-    seed = HashSeed();
+    int32_seed = HashSeed();
   } else {
-    seed = Int32Constant(kZeroHashSeed);
+    int32_seed = Int32Constant(kZeroHashSeed);
   }
-  Node* hash = ComputeIntegerHash(key, seed);
-  Node* key_as_float64 = ChangeUint32ToFloat64(key);
+  Node* hash = ChangeUint32ToWord(ComputeIntegerHash(intptr_index, int32_seed));
+  Node* key_as_float64 = RoundIntPtrToFloat64(intptr_index);
 
   // See Dictionary::FirstProbe().
-  Node* count = Int32Constant(0);
-  Node* entry = Word32And(hash, mask);
+  Node* count = IntPtrConstant(0);
+  Node* entry = WordAnd(hash, mask);
 
   Node* undefined = UndefinedConstant();
   Node* the_hole = TheHoleConstant();
 
-  Variable var_count(this, MachineRepresentation::kWord32);
+  Variable var_count(this, MachineType::PointerRepresentation());
   Variable* loop_vars[] = {&var_count, var_entry};
   Label loop(this, 2, loop_vars);
   var_count.Bind(count);
@@ -2436,7 +3324,8 @@
     Node* entry = var_entry->value();
 
     Node* index = EntryToIndex<Dictionary>(entry);
-    Node* current = LoadFixedArrayElement(dictionary, index);
+    Node* current =
+        LoadFixedArrayElement(dictionary, index, 0, INTPTR_PARAMETERS);
     GotoIf(WordEqual(current, undefined), if_not_found);
     Label next_probe(this);
     {
@@ -2444,8 +3333,8 @@
       Branch(WordIsSmi(current), &if_currentissmi, &if_currentisnotsmi);
       Bind(&if_currentissmi);
       {
-        Node* current_value = SmiToWord32(current);
-        Branch(Word32Equal(current_value, key), if_found, &next_probe);
+        Node* current_value = SmiUntag(current);
+        Branch(WordEqual(current_value, intptr_index), if_found, &next_probe);
       }
       Bind(&if_currentisnotsmi);
       {
@@ -2459,8 +3348,8 @@
 
     Bind(&next_probe);
     // See Dictionary::NextProbe().
-    count = Int32Add(count, Int32Constant(1));
-    entry = Word32And(Int32Add(entry, count), mask);
+    count = IntPtrAdd(count, IntPtrConstant(1));
+    entry = WordAnd(IntPtrAdd(entry, count), mask);
 
     var_count.Bind(count);
     var_entry->Bind(entry);
@@ -2468,13 +3357,39 @@
   }
 }
 
+void CodeStubAssembler::DescriptorLookupLinear(Node* unique_name,
+                                               Node* descriptors, Node* nof,
+                                               Label* if_found,
+                                               Variable* var_name_index,
+                                               Label* if_not_found) {
+  Variable var_descriptor(this, MachineType::PointerRepresentation());
+  Label loop(this, &var_descriptor);
+  var_descriptor.Bind(IntPtrConstant(0));
+  Goto(&loop);
+
+  Bind(&loop);
+  {
+    Node* index = var_descriptor.value();
+    Node* name_offset = IntPtrConstant(DescriptorArray::ToKeyIndex(0));
+    Node* factor = IntPtrConstant(DescriptorArray::kDescriptorSize);
+    GotoIf(WordEqual(index, nof), if_not_found);
+    Node* name_index = IntPtrAdd(name_offset, IntPtrMul(index, factor));
+    Node* candidate_name =
+        LoadFixedArrayElement(descriptors, name_index, 0, INTPTR_PARAMETERS);
+    var_name_index->Bind(name_index);
+    GotoIf(WordEqual(candidate_name, unique_name), if_found);
+    var_descriptor.Bind(IntPtrAdd(index, IntPtrConstant(1)));
+    Goto(&loop);
+  }
+}
+
 void CodeStubAssembler::TryLookupProperty(
     Node* object, Node* map, Node* instance_type, Node* unique_name,
     Label* if_found_fast, Label* if_found_dict, Label* if_found_global,
     Variable* var_meta_storage, Variable* var_name_index, Label* if_not_found,
     Label* if_bailout) {
   DCHECK_EQ(MachineRepresentation::kTagged, var_meta_storage->rep());
-  DCHECK_EQ(MachineRepresentation::kWord32, var_name_index->rep());
+  DCHECK_EQ(MachineType::PointerRepresentation(), var_name_index->rep());
 
   Label if_objectisspecial(this);
   STATIC_ASSERT(JS_GLOBAL_OBJECT_TYPE <= LAST_SPECIAL_RECEIVER_TYPE);
@@ -2494,36 +3409,18 @@
   Bind(&if_isfastmap);
   {
     Comment("DescriptorArrayLookup");
-    Node* nof = BitFieldDecode<Map::NumberOfOwnDescriptorsBits>(bit_field3);
+    Node* nof = BitFieldDecodeWord<Map::NumberOfOwnDescriptorsBits>(bit_field3);
     // Bail out to the runtime for large numbers of own descriptors. The stub
     // only does linear search, which becomes too expensive in that case.
     {
       static const int32_t kMaxLinear = 210;
-      GotoIf(Int32GreaterThan(nof, Int32Constant(kMaxLinear)), if_bailout);
+      GotoIf(UintPtrGreaterThan(nof, IntPtrConstant(kMaxLinear)), if_bailout);
     }
     Node* descriptors = LoadMapDescriptors(map);
     var_meta_storage->Bind(descriptors);
 
-    Variable var_descriptor(this, MachineRepresentation::kWord32);
-    Label loop(this, &var_descriptor);
-    var_descriptor.Bind(Int32Constant(0));
-    Goto(&loop);
-    Bind(&loop);
-    {
-      Node* index = var_descriptor.value();
-      Node* name_offset = Int32Constant(DescriptorArray::ToKeyIndex(0));
-      Node* factor = Int32Constant(DescriptorArray::kDescriptorSize);
-      GotoIf(Word32Equal(index, nof), if_not_found);
-
-      Node* name_index = Int32Add(name_offset, Int32Mul(index, factor));
-      Node* name = LoadFixedArrayElement(descriptors, name_index);
-
-      var_name_index->Bind(name_index);
-      GotoIf(WordEqual(name, unique_name), if_found_fast);
-
-      var_descriptor.Bind(Int32Add(index, Int32Constant(1)));
-      Goto(&loop);
-    }
+    DescriptorLookupLinear(unique_name, descriptors, nof, if_found_fast,
+                           var_name_index, if_not_found);
   }
   Bind(&if_isslowmap);
   {
@@ -2562,7 +3459,7 @@
                                           Label* if_bailout) {
   Comment("TryHasOwnProperty");
   Variable var_meta_storage(this, MachineRepresentation::kTagged);
-  Variable var_name_index(this, MachineRepresentation::kWord32);
+  Variable var_name_index(this, MachineType::PointerRepresentation());
 
   Label if_found_global(this);
   TryLookupProperty(object, map, instance_type, unique_name, if_found, if_found,
@@ -2608,7 +3505,7 @@
   Bind(&if_in_field);
   {
     Node* field_index =
-        BitFieldDecode<PropertyDetails::FieldIndexField>(details);
+        BitFieldDecodeWord<PropertyDetails::FieldIndexField>(details);
     Node* representation =
         BitFieldDecode<PropertyDetails::RepresentationField>(details);
 
@@ -2617,15 +3514,15 @@
     Label if_inobject(this), if_backing_store(this);
     Variable var_double_value(this, MachineRepresentation::kFloat64);
     Label rebox_double(this, &var_double_value);
-    BranchIfInt32LessThan(field_index, inobject_properties, &if_inobject,
-                          &if_backing_store);
+    BranchIfUintPtrLessThan(field_index, inobject_properties, &if_inobject,
+                            &if_backing_store);
     Bind(&if_inobject);
     {
       Comment("if_inobject");
-      Node* field_offset = ChangeInt32ToIntPtr(
-          Int32Mul(Int32Sub(LoadMapInstanceSize(map),
-                            Int32Sub(inobject_properties, field_index)),
-                   Int32Constant(kPointerSize)));
+      Node* field_offset =
+          IntPtrMul(IntPtrSub(LoadMapInstanceSize(map),
+                              IntPtrSub(inobject_properties, field_index)),
+                    IntPtrConstant(kPointerSize));
 
       Label if_double(this), if_tagged(this);
       BranchIfWord32NotEqual(representation,
@@ -2652,7 +3549,7 @@
     {
       Comment("if_backing_store");
       Node* properties = LoadProperties(object);
-      field_index = Int32Sub(field_index, inobject_properties);
+      field_index = IntPtrSub(field_index, inobject_properties);
       Node* value = LoadFixedArrayElement(properties, field_index);
 
       Label if_double(this), if_tagged(this);
@@ -2739,6 +3636,52 @@
   Comment("] LoadPropertyFromGlobalDictionary");
 }
 
+// |value| is the property backing store's contents, which is either a value
+// or an accessor pair, as specified by |details|.
+// Returns either the original value, or the result of the getter call.
+Node* CodeStubAssembler::CallGetterIfAccessor(Node* value, Node* details,
+                                              Node* context, Node* receiver,
+                                              Label* if_bailout) {
+  Variable var_value(this, MachineRepresentation::kTagged);
+  var_value.Bind(value);
+  Label done(this);
+
+  Node* kind = BitFieldDecode<PropertyDetails::KindField>(details);
+  GotoIf(Word32Equal(kind, Int32Constant(kData)), &done);
+
+  // Accessor case.
+  {
+    Node* accessor_pair = value;
+    GotoIf(Word32Equal(LoadInstanceType(accessor_pair),
+                       Int32Constant(ACCESSOR_INFO_TYPE)),
+           if_bailout);
+    AssertInstanceType(accessor_pair, ACCESSOR_PAIR_TYPE);
+    Node* getter = LoadObjectField(accessor_pair, AccessorPair::kGetterOffset);
+    Node* getter_map = LoadMap(getter);
+    Node* instance_type = LoadMapInstanceType(getter_map);
+    // FunctionTemplateInfo getters are not supported yet.
+    GotoIf(
+        Word32Equal(instance_type, Int32Constant(FUNCTION_TEMPLATE_INFO_TYPE)),
+        if_bailout);
+
+    // Return undefined if the {getter} is not callable.
+    var_value.Bind(UndefinedConstant());
+    GotoIf(Word32Equal(Word32And(LoadMapBitField(getter_map),
+                                 Int32Constant(1 << Map::kIsCallable)),
+                       Int32Constant(0)),
+           &done);
+
+    // Call the accessor.
+    Callable callable = CodeFactory::Call(isolate());
+    Node* result = CallJS(callable, context, getter, receiver);
+    var_value.Bind(result);
+    Goto(&done);
+  }
+
+  Bind(&done);
+  return var_value.value();
+}
+
 void CodeStubAssembler::TryGetOwnProperty(
     Node* context, Node* receiver, Node* object, Node* map, Node* instance_type,
     Node* unique_name, Label* if_found_value, Variable* var_value,
@@ -2747,7 +3690,7 @@
   Comment("TryGetOwnProperty");
 
   Variable var_meta_storage(this, MachineRepresentation::kTagged);
-  Variable var_entry(this, MachineRepresentation::kWord32);
+  Variable var_entry(this, MachineType::PointerRepresentation());
 
   Label if_found_fast(this), if_found_dict(this), if_found_global(this);
 
@@ -2786,59 +3729,28 @@
   // Here we have details and value which could be an accessor.
   Bind(&if_found);
   {
-    Node* details = var_details.value();
-    Node* kind = BitFieldDecode<PropertyDetails::KindField>(details);
-
-    Label if_accessor(this);
-    Branch(Word32Equal(kind, Int32Constant(kData)), if_found_value,
-           &if_accessor);
-    Bind(&if_accessor);
-    {
-      Node* accessor_pair = var_value->value();
-      GotoIf(Word32Equal(LoadInstanceType(accessor_pair),
-                         Int32Constant(ACCESSOR_INFO_TYPE)),
-             if_bailout);
-      AssertInstanceType(accessor_pair, ACCESSOR_PAIR_TYPE);
-      Node* getter =
-          LoadObjectField(accessor_pair, AccessorPair::kGetterOffset);
-      Node* getter_map = LoadMap(getter);
-      Node* instance_type = LoadMapInstanceType(getter_map);
-      // FunctionTemplateInfo getters are not supported yet.
-      GotoIf(Word32Equal(instance_type,
-                         Int32Constant(FUNCTION_TEMPLATE_INFO_TYPE)),
-             if_bailout);
-
-      // Return undefined if the {getter} is not callable.
-      var_value->Bind(UndefinedConstant());
-      GotoIf(Word32Equal(Word32And(LoadMapBitField(getter_map),
-                                   Int32Constant(1 << Map::kIsCallable)),
-                         Int32Constant(0)),
-             if_found_value);
-
-      // Call the accessor.
-      Callable callable = CodeFactory::Call(isolate());
-      Node* result = CallJS(callable, context, getter, receiver);
-      var_value->Bind(result);
-      Goto(if_found_value);
-    }
+    Node* value = CallGetterIfAccessor(var_value->value(), var_details.value(),
+                                       context, receiver, if_bailout);
+    var_value->Bind(value);
+    Goto(if_found_value);
   }
 }
 
 void CodeStubAssembler::TryLookupElement(Node* object, Node* map,
-                                         Node* instance_type, Node* index,
-                                         Label* if_found, Label* if_not_found,
+                                         Node* instance_type,
+                                         Node* intptr_index, Label* if_found,
+                                         Label* if_not_found,
                                          Label* if_bailout) {
   // Handle special objects in runtime.
   GotoIf(Int32LessThanOrEqual(instance_type,
                               Int32Constant(LAST_SPECIAL_RECEIVER_TYPE)),
          if_bailout);
 
-  Node* bit_field2 = LoadMapBitField2(map);
-  Node* elements_kind = BitFieldDecode<Map::ElementsKindBits>(bit_field2);
+  Node* elements_kind = LoadMapElementsKind(map);
 
   // TODO(verwaest): Support other elements kinds as well.
   Label if_isobjectorsmi(this), if_isdouble(this), if_isdictionary(this),
-      if_isfaststringwrapper(this), if_isslowstringwrapper(this);
+      if_isfaststringwrapper(this), if_isslowstringwrapper(this), if_oob(this);
   // clang-format off
   int32_t values[] = {
       // Handled by {if_isobjectorsmi}.
@@ -2873,9 +3785,10 @@
     Node* elements = LoadElements(object);
     Node* length = LoadAndUntagFixedArrayBaseLength(elements);
 
-    GotoUnless(Uint32LessThan(index, length), if_not_found);
+    GotoUnless(UintPtrLessThan(intptr_index, length), &if_oob);
 
-    Node* element = LoadFixedArrayElement(elements, index);
+    Node* element =
+        LoadFixedArrayElement(elements, intptr_index, 0, INTPTR_PARAMETERS);
     Node* the_hole = TheHoleConstant();
     Branch(WordEqual(element, the_hole), if_not_found, if_found);
   }
@@ -2884,48 +3797,45 @@
     Node* elements = LoadElements(object);
     Node* length = LoadAndUntagFixedArrayBaseLength(elements);
 
-    GotoUnless(Uint32LessThan(index, length), if_not_found);
+    GotoUnless(UintPtrLessThan(intptr_index, length), &if_oob);
 
-    if (kPointerSize == kDoubleSize) {
-      Node* element =
-          LoadFixedDoubleArrayElement(elements, index, MachineType::Uint64());
-      Node* the_hole = Int64Constant(kHoleNanInt64);
-      Branch(Word64Equal(element, the_hole), if_not_found, if_found);
-    } else {
-      Node* element_upper =
-          LoadFixedDoubleArrayElement(elements, index, MachineType::Uint32(),
-                                      kIeeeDoubleExponentWordOffset);
-      Branch(Word32Equal(element_upper, Int32Constant(kHoleNanUpper32)),
-             if_not_found, if_found);
-    }
+    // Check if the element is a double hole, but don't load it.
+    LoadFixedDoubleArrayElement(elements, intptr_index, MachineType::None(), 0,
+                                INTPTR_PARAMETERS, if_not_found);
+    Goto(if_found);
   }
   Bind(&if_isdictionary);
   {
-    Variable var_entry(this, MachineRepresentation::kWord32);
+    Variable var_entry(this, MachineType::PointerRepresentation());
     Node* elements = LoadElements(object);
-    NumberDictionaryLookup<SeededNumberDictionary>(elements, index, if_found,
-                                                   &var_entry, if_not_found);
+    NumberDictionaryLookup<SeededNumberDictionary>(
+        elements, intptr_index, if_found, &var_entry, if_not_found);
   }
   Bind(&if_isfaststringwrapper);
   {
     AssertInstanceType(object, JS_VALUE_TYPE);
     Node* string = LoadJSValueValue(object);
-    Assert(Int32LessThan(LoadInstanceType(string),
-                         Int32Constant(FIRST_NONSTRING_TYPE)));
+    Assert(IsStringInstanceType(LoadInstanceType(string)));
     Node* length = LoadStringLength(string);
-    GotoIf(Uint32LessThan(index, SmiToWord32(length)), if_found);
+    GotoIf(UintPtrLessThan(intptr_index, SmiUntag(length)), if_found);
     Goto(&if_isobjectorsmi);
   }
   Bind(&if_isslowstringwrapper);
   {
     AssertInstanceType(object, JS_VALUE_TYPE);
     Node* string = LoadJSValueValue(object);
-    Assert(Int32LessThan(LoadInstanceType(string),
-                         Int32Constant(FIRST_NONSTRING_TYPE)));
+    Assert(IsStringInstanceType(LoadInstanceType(string)));
     Node* length = LoadStringLength(string);
-    GotoIf(Uint32LessThan(index, SmiToWord32(length)), if_found);
+    GotoIf(UintPtrLessThan(intptr_index, SmiUntag(length)), if_found);
     Goto(&if_isdictionary);
   }
+  Bind(&if_oob);
+  {
+    // Positive OOB indices mean "not found", negative indices must be
+    // converted to property names.
+    GotoIf(IntPtrLessThan(intptr_index, IntPtrConstant(0)), if_bailout);
+    Goto(if_not_found);
+  }
 }
 
 // Instantiate template methods to workaround GCC compilation issue.
@@ -2955,7 +3865,7 @@
     Bind(&if_objectisreceiver);
   }
 
-  Variable var_index(this, MachineRepresentation::kWord32);
+  Variable var_index(this, MachineType::PointerRepresentation());
 
   Label if_keyisindex(this), if_iskeyunique(this);
   TryToName(key, &if_keyisindex, &var_index, &if_iskeyunique, if_bailout);
@@ -3183,19 +4093,22 @@
                                                           ElementsKind kind,
                                                           ParameterMode mode,
                                                           int base_size) {
-  bool is_double = IsFastDoubleElementsKind(kind);
-  int element_size_shift = is_double ? kDoubleSizeLog2 : kPointerSizeLog2;
+  int element_size_shift = ElementsKindToShiftSize(kind);
   int element_size = 1 << element_size_shift;
   int const kSmiShiftBits = kSmiShiftSize + kSmiTagSize;
-  int32_t index = 0;
+  intptr_t index = 0;
   bool constant_index = false;
   if (mode == SMI_PARAMETERS) {
     element_size_shift -= kSmiShiftBits;
-    intptr_t temp = 0;
-    constant_index = ToIntPtrConstant(index_node, temp);
-    index = temp >> kSmiShiftBits;
+    constant_index = ToIntPtrConstant(index_node, index);
+    index = index >> kSmiShiftBits;
+  } else if (mode == INTEGER_PARAMETERS) {
+    int32_t temp = 0;
+    constant_index = ToInt32Constant(index_node, temp);
+    index = static_cast<intptr_t>(temp);
   } else {
-    constant_index = ToInt32Constant(index_node, index);
+    DCHECK(mode == INTPTR_PARAMETERS);
+    constant_index = ToIntPtrConstant(index_node, index);
   }
   if (constant_index) {
     return IntPtrConstant(base_size + element_size * index);
@@ -3225,32 +4138,16 @@
 void CodeStubAssembler::UpdateFeedback(compiler::Node* feedback,
                                        compiler::Node* type_feedback_vector,
                                        compiler::Node* slot_id) {
-  Label combine_feedback(this), record_feedback(this), end(this);
-
+  // This method is used for binary op and compare feedback. These
+  // vector nodes are initialized with a smi 0, so we can simply OR
+  // our new feedback in place.
+  // TODO(interpreter): Consider passing the feedback as Smi already to avoid
+  // the tagging completely.
   Node* previous_feedback =
       LoadFixedArrayElement(type_feedback_vector, slot_id);
-  Node* is_uninitialized = WordEqual(
-      previous_feedback,
-      HeapConstant(TypeFeedbackVector::UninitializedSentinel(isolate())));
-  BranchIf(is_uninitialized, &record_feedback, &combine_feedback);
-
-  Bind(&record_feedback);
-  {
-    StoreFixedArrayElement(type_feedback_vector, slot_id, SmiTag(feedback),
-                           SKIP_WRITE_BARRIER);
-    Goto(&end);
-  }
-
-  Bind(&combine_feedback);
-  {
-    Node* untagged_previous_feedback = SmiUntag(previous_feedback);
-    Node* combined_feedback = Word32Or(untagged_previous_feedback, feedback);
-    StoreFixedArrayElement(type_feedback_vector, slot_id,
-                           SmiTag(combined_feedback), SKIP_WRITE_BARRIER);
-    Goto(&end);
-  }
-
-  Bind(&end);
+  Node* combined_feedback = SmiOr(previous_feedback, SmiFromWord32(feedback));
+  StoreFixedArrayElement(type_feedback_vector, slot_id, combined_feedback,
+                         SKIP_WRITE_BARRIER);
 }
 
 compiler::Node* CodeStubAssembler::LoadReceiverMap(compiler::Node* receiver) {
@@ -3275,23 +4172,23 @@
 }
 
 compiler::Node* CodeStubAssembler::TryMonomorphicCase(
-    const LoadICParameters* p, compiler::Node* receiver_map, Label* if_handler,
-    Variable* var_handler, Label* if_miss) {
+    compiler::Node* slot, compiler::Node* vector, compiler::Node* receiver_map,
+    Label* if_handler, Variable* var_handler, Label* if_miss) {
   DCHECK_EQ(MachineRepresentation::kTagged, var_handler->rep());
 
   // TODO(ishell): add helper class that hides offset computations for a series
   // of loads.
   int32_t header_size = FixedArray::kHeaderSize - kHeapObjectTag;
-  Node* offset = ElementOffsetFromIndex(p->slot, FAST_HOLEY_ELEMENTS,
+  Node* offset = ElementOffsetFromIndex(slot, FAST_HOLEY_ELEMENTS,
                                         SMI_PARAMETERS, header_size);
-  Node* feedback = Load(MachineType::AnyTagged(), p->vector, offset);
+  Node* feedback = Load(MachineType::AnyTagged(), vector, offset);
 
   // Try to quickly handle the monomorphic case without knowing for sure
   // if we have a weak cell in feedback. We do know it's safe to look
   // at WeakCell::kValueOffset.
   GotoUnless(WordEqual(receiver_map, LoadWeakCellValue(feedback)), if_miss);
 
-  Node* handler = Load(MachineType::AnyTagged(), p->vector,
+  Node* handler = Load(MachineType::AnyTagged(), vector,
                        IntPtrAdd(offset, IntPtrConstant(kPointerSize)));
 
   var_handler->Bind(handler);
@@ -3300,9 +4197,8 @@
 }
 
 void CodeStubAssembler::HandlePolymorphicCase(
-    const LoadICParameters* p, compiler::Node* receiver_map,
-    compiler::Node* feedback, Label* if_handler, Variable* var_handler,
-    Label* if_miss, int unroll_count) {
+    compiler::Node* receiver_map, compiler::Node* feedback, Label* if_handler,
+    Variable* var_handler, Label* if_miss, int unroll_count) {
   DCHECK_EQ(MachineRepresentation::kTagged, var_handler->rep());
 
   // Iterate {feedback} array.
@@ -3310,13 +4206,13 @@
 
   for (int i = 0; i < unroll_count; i++) {
     Label next_entry(this);
-    Node* cached_map = LoadWeakCellValue(
-        LoadFixedArrayElement(feedback, Int32Constant(i * kEntrySize)));
+    Node* cached_map = LoadWeakCellValue(LoadFixedArrayElement(
+        feedback, IntPtrConstant(i * kEntrySize), 0, INTPTR_PARAMETERS));
     GotoIf(WordNotEqual(receiver_map, cached_map), &next_entry);
 
     // Found, now call handler.
-    Node* handler =
-        LoadFixedArrayElement(feedback, Int32Constant(i * kEntrySize + 1));
+    Node* handler = LoadFixedArrayElement(
+        feedback, IntPtrConstant(i * kEntrySize + 1), 0, INTPTR_PARAMETERS);
     var_handler->Bind(handler);
     Goto(if_handler);
 
@@ -3325,28 +4221,29 @@
   Node* length = LoadAndUntagFixedArrayBaseLength(feedback);
 
   // Loop from {unroll_count}*kEntrySize to {length}.
-  Variable var_index(this, MachineRepresentation::kWord32);
+  Variable var_index(this, MachineType::PointerRepresentation());
   Label loop(this, &var_index);
-  var_index.Bind(Int32Constant(unroll_count * kEntrySize));
+  var_index.Bind(IntPtrConstant(unroll_count * kEntrySize));
   Goto(&loop);
   Bind(&loop);
   {
     Node* index = var_index.value();
-    GotoIf(Int32GreaterThanOrEqual(index, length), if_miss);
+    GotoIf(UintPtrGreaterThanOrEqual(index, length), if_miss);
 
-    Node* cached_map =
-        LoadWeakCellValue(LoadFixedArrayElement(feedback, index));
+    Node* cached_map = LoadWeakCellValue(
+        LoadFixedArrayElement(feedback, index, 0, INTPTR_PARAMETERS));
 
     Label next_entry(this);
     GotoIf(WordNotEqual(receiver_map, cached_map), &next_entry);
 
     // Found, now call handler.
-    Node* handler = LoadFixedArrayElement(feedback, index, kPointerSize);
+    Node* handler =
+        LoadFixedArrayElement(feedback, index, kPointerSize, INTPTR_PARAMETERS);
     var_handler->Bind(handler);
     Goto(if_handler);
 
     Bind(&next_entry);
-    var_index.Bind(Int32Add(index, Int32Constant(kEntrySize)));
+    var_index.Bind(IntPtrAdd(index, IntPtrConstant(kEntrySize)));
     Goto(&loop);
   }
 }
@@ -3357,7 +4254,7 @@
   STATIC_ASSERT(StubCache::kCacheIndexShift == Name::kHashShift);
   // Compute the hash of the name (use entire hash field).
   Node* hash_field = LoadNameHashField(name);
-  Assert(WordEqual(
+  Assert(Word32Equal(
       Word32And(hash_field, Int32Constant(Name::kHashNotComputedMask)),
       Int32Constant(0)));
 
@@ -3369,7 +4266,7 @@
   hash = Word32Xor(hash, Int32Constant(StubCache::kPrimaryMagic));
   uint32_t mask = (StubCache::kPrimaryTableSize - 1)
                   << StubCache::kCacheIndexShift;
-  return Word32And(hash, Int32Constant(mask));
+  return ChangeUint32ToWord(Word32And(hash, Int32Constant(mask)));
 }
 
 compiler::Node* CodeStubAssembler::StubCacheSecondaryOffset(
@@ -3381,7 +4278,7 @@
   hash = Int32Add(hash, Int32Constant(StubCache::kSecondaryMagic));
   int32_t mask = (StubCache::kSecondaryTableSize - 1)
                  << StubCache::kCacheIndexShift;
-  return Word32And(hash, Int32Constant(mask));
+  return ChangeUint32ToWord(Word32And(hash, Int32Constant(mask)));
 }
 
 enum CodeStubAssembler::StubCacheTable : int {
@@ -3406,7 +4303,7 @@
   // The {table_offset} holds the entry offset times four (due to masking
   // and shifting optimizations).
   const int kMultiplier = sizeof(StubCache::Entry) >> Name::kHashShift;
-  entry_offset = Int32Mul(entry_offset, Int32Constant(kMultiplier));
+  entry_offset = IntPtrMul(entry_offset, IntPtrConstant(kMultiplier));
 
   // Check that the key in the entry matches the name.
   Node* key_base =
@@ -3419,13 +4316,13 @@
                                   stub_cache->key_reference(table).address());
   Node* entry_map =
       Load(MachineType::Pointer(), key_base,
-           Int32Add(entry_offset, Int32Constant(kPointerSize * 2)));
+           IntPtrAdd(entry_offset, IntPtrConstant(kPointerSize * 2)));
   GotoIf(WordNotEqual(map, entry_map), if_miss);
 
   DCHECK_EQ(kPointerSize, stub_cache->value_reference(table).address() -
                               stub_cache->key_reference(table).address());
   Node* code = Load(MachineType::Pointer(), key_base,
-                    Int32Add(entry_offset, Int32Constant(kPointerSize)));
+                    IntPtrAdd(entry_offset, IntPtrConstant(kPointerSize)));
 
   // We found the handler.
   var_handler->Bind(code);
@@ -3489,41 +4386,43 @@
   return var_intptr_key.value();
 }
 
-// |is_jsarray| should be non-zero for JSArrays.
-void CodeStubAssembler::EmitBoundsCheck(Node* object, Node* elements,
-                                        Node* intptr_key, Node* is_jsarray,
-                                        Label* miss) {
-  Variable var_length(this, MachineRepresentation::kTagged);
+void CodeStubAssembler::EmitFastElementsBoundsCheck(Node* object,
+                                                    Node* elements,
+                                                    Node* intptr_index,
+                                                    Node* is_jsarray_condition,
+                                                    Label* miss) {
+  Variable var_length(this, MachineType::PointerRepresentation());
   Label if_array(this), length_loaded(this, &var_length);
-  GotoUnless(WordEqual(is_jsarray, IntPtrConstant(0)), &if_array);
+  GotoIf(is_jsarray_condition, &if_array);
   {
     var_length.Bind(SmiUntag(LoadFixedArrayBaseLength(elements)));
     Goto(&length_loaded);
   }
   Bind(&if_array);
   {
-    var_length.Bind(SmiUntag(LoadObjectField(object, JSArray::kLengthOffset)));
+    var_length.Bind(SmiUntag(LoadJSArrayLength(object)));
     Goto(&length_loaded);
   }
   Bind(&length_loaded);
-  GotoUnless(UintPtrLessThan(intptr_key, var_length.value()), miss);
+  GotoUnless(UintPtrLessThan(intptr_index, var_length.value()), miss);
 }
 
-// |key| should be untagged (int32).
 void CodeStubAssembler::EmitElementLoad(Node* object, Node* elements,
-                                        Node* elements_kind, Node* key,
+                                        Node* elements_kind, Node* intptr_index,
+                                        Node* is_jsarray_condition,
                                         Label* if_hole, Label* rebox_double,
                                         Variable* var_double_value,
-                                        Label* miss) {
+                                        Label* unimplemented_elements_kind,
+                                        Label* out_of_bounds, Label* miss) {
   Label if_typed_array(this), if_fast_packed(this), if_fast_holey(this),
-      if_fast_double(this), if_fast_holey_double(this),
-      unimplemented_elements_kind(this);
-  STATIC_ASSERT(LAST_ELEMENTS_KIND == LAST_FIXED_TYPED_ARRAY_ELEMENTS_KIND);
+      if_fast_double(this), if_fast_holey_double(this), if_nonfast(this),
+      if_dictionary(this), unreachable(this);
   GotoIf(
-      IntPtrGreaterThanOrEqual(
-          elements_kind, IntPtrConstant(FIRST_FIXED_TYPED_ARRAY_ELEMENTS_KIND)),
-      &if_typed_array);
+      IntPtrGreaterThan(elements_kind, IntPtrConstant(LAST_FAST_ELEMENTS_KIND)),
+      &if_nonfast);
 
+  EmitFastElementsBoundsCheck(object, elements, intptr_index,
+                              is_jsarray_condition, out_of_bounds);
   int32_t kinds[] = {// Handled by if_fast_packed.
                      FAST_SMI_ELEMENTS, FAST_ELEMENTS,
                      // Handled by if_fast_holey.
@@ -3540,28 +4439,20 @@
                      &if_fast_double,
                      // FAST_HOLEY_DOUBLE_ELEMENTS
                      &if_fast_holey_double};
-  Switch(elements_kind, &unimplemented_elements_kind, kinds, labels,
+  Switch(elements_kind, unimplemented_elements_kind, kinds, labels,
          arraysize(kinds));
-  Bind(&unimplemented_elements_kind);
-  {
-    // Crash if we get here.
-    DebugBreak();
-    Goto(miss);
-  }
 
   Bind(&if_fast_packed);
   {
     Comment("fast packed elements");
-    // TODO(jkummerow): The Load*Element helpers add movsxlq instructions
-    // on x64 which we don't need here, because |key| is an IntPtr already.
-    // Do something about that.
-    Return(LoadFixedArrayElement(elements, key));
+    Return(LoadFixedArrayElement(elements, intptr_index, 0, INTPTR_PARAMETERS));
   }
 
   Bind(&if_fast_holey);
   {
     Comment("fast holey elements");
-    Node* element = LoadFixedArrayElement(elements, key);
+    Node* element =
+        LoadFixedArrayElement(elements, intptr_index, 0, INTPTR_PARAMETERS);
     GotoIf(WordEqual(element, TheHoleConstant()), if_hole);
     Return(element);
   }
@@ -3569,30 +4460,56 @@
   Bind(&if_fast_double);
   {
     Comment("packed double elements");
-    var_double_value->Bind(
-        LoadFixedDoubleArrayElement(elements, key, MachineType::Float64()));
+    var_double_value->Bind(LoadFixedDoubleArrayElement(
+        elements, intptr_index, MachineType::Float64(), 0, INTPTR_PARAMETERS));
     Goto(rebox_double);
   }
 
   Bind(&if_fast_holey_double);
   {
     Comment("holey double elements");
-    if (kPointerSize == kDoubleSize) {
-      Node* raw_element =
-          LoadFixedDoubleArrayElement(elements, key, MachineType::Uint64());
-      Node* the_hole = Int64Constant(kHoleNanInt64);
-      GotoIf(Word64Equal(raw_element, the_hole), if_hole);
-    } else {
-      Node* element_upper = LoadFixedDoubleArrayElement(
-          elements, key, MachineType::Uint32(), kIeeeDoubleExponentWordOffset);
-      GotoIf(Word32Equal(element_upper, Int32Constant(kHoleNanUpper32)),
-             if_hole);
-    }
-    var_double_value->Bind(
-        LoadFixedDoubleArrayElement(elements, key, MachineType::Float64()));
+    Node* value = LoadFixedDoubleArrayElement(elements, intptr_index,
+                                              MachineType::Float64(), 0,
+                                              INTPTR_PARAMETERS, if_hole);
+    var_double_value->Bind(value);
     Goto(rebox_double);
   }
 
+  Bind(&if_nonfast);
+  {
+    STATIC_ASSERT(LAST_ELEMENTS_KIND == LAST_FIXED_TYPED_ARRAY_ELEMENTS_KIND);
+    GotoIf(IntPtrGreaterThanOrEqual(
+               elements_kind,
+               IntPtrConstant(FIRST_FIXED_TYPED_ARRAY_ELEMENTS_KIND)),
+           &if_typed_array);
+    GotoIf(IntPtrEqual(elements_kind, IntPtrConstant(DICTIONARY_ELEMENTS)),
+           &if_dictionary);
+    Goto(unimplemented_elements_kind);
+  }
+
+  Bind(&if_dictionary);
+  {
+    Comment("dictionary elements");
+    GotoIf(IntPtrLessThan(intptr_index, IntPtrConstant(0)), out_of_bounds);
+    Variable var_entry(this, MachineType::PointerRepresentation());
+    Label if_found(this);
+    NumberDictionaryLookup<SeededNumberDictionary>(
+        elements, intptr_index, &if_found, &var_entry, if_hole);
+    Bind(&if_found);
+    // Check that the value is a data property.
+    Node* details_index = EntryToIndex<SeededNumberDictionary>(
+        var_entry.value(), SeededNumberDictionary::kEntryDetailsIndex);
+    Node* details = SmiToWord32(
+        LoadFixedArrayElement(elements, details_index, 0, INTPTR_PARAMETERS));
+    Node* kind = BitFieldDecode<PropertyDetails::KindField>(details);
+    // TODO(jkummerow): Support accessors without missing?
+    GotoUnless(Word32Equal(kind, Int32Constant(kData)), miss);
+    // Finally, load the value.
+    Node* value_index = EntryToIndex<SeededNumberDictionary>(
+        var_entry.value(), SeededNumberDictionary::kEntryValueIndex);
+    Return(LoadFixedArrayElement(elements, value_index, 0, INTPTR_PARAMETERS));
+  }
+
   Bind(&if_typed_array);
   {
     Comment("typed elements");
@@ -3603,6 +4520,12 @@
     Node* neutered_bit =
         Word32And(bitfield, Int32Constant(JSArrayBuffer::WasNeutered::kMask));
     GotoUnless(Word32Equal(neutered_bit, Int32Constant(0)), miss);
+
+    // Bounds check.
+    Node* length =
+        SmiUntag(LoadObjectField(object, JSTypedArray::kLengthOffset));
+    GotoUnless(UintPtrLessThan(intptr_index, length), out_of_bounds);
+
     // Backing store = external_pointer + base_pointer.
     Node* external_pointer =
         LoadObjectField(elements, FixedTypedArrayBase::kExternalPointerOffset,
@@ -3632,43 +4555,43 @@
     Bind(&uint8_elements);
     {
       Comment("UINT8_ELEMENTS");  // Handles UINT8_CLAMPED_ELEMENTS too.
-      Return(SmiTag(Load(MachineType::Uint8(), backing_store, key)));
+      Return(SmiTag(Load(MachineType::Uint8(), backing_store, intptr_index)));
     }
     Bind(&int8_elements);
     {
       Comment("INT8_ELEMENTS");
-      Return(SmiTag(Load(MachineType::Int8(), backing_store, key)));
+      Return(SmiTag(Load(MachineType::Int8(), backing_store, intptr_index)));
     }
     Bind(&uint16_elements);
     {
       Comment("UINT16_ELEMENTS");
-      Node* index = WordShl(key, IntPtrConstant(1));
+      Node* index = WordShl(intptr_index, IntPtrConstant(1));
       Return(SmiTag(Load(MachineType::Uint16(), backing_store, index)));
     }
     Bind(&int16_elements);
     {
       Comment("INT16_ELEMENTS");
-      Node* index = WordShl(key, IntPtrConstant(1));
+      Node* index = WordShl(intptr_index, IntPtrConstant(1));
       Return(SmiTag(Load(MachineType::Int16(), backing_store, index)));
     }
     Bind(&uint32_elements);
     {
       Comment("UINT32_ELEMENTS");
-      Node* index = WordShl(key, IntPtrConstant(2));
+      Node* index = WordShl(intptr_index, IntPtrConstant(2));
       Node* element = Load(MachineType::Uint32(), backing_store, index);
       Return(ChangeUint32ToTagged(element));
     }
     Bind(&int32_elements);
     {
       Comment("INT32_ELEMENTS");
-      Node* index = WordShl(key, IntPtrConstant(2));
+      Node* index = WordShl(intptr_index, IntPtrConstant(2));
       Node* element = Load(MachineType::Int32(), backing_store, index);
       Return(ChangeInt32ToTagged(element));
     }
     Bind(&float32_elements);
     {
       Comment("FLOAT32_ELEMENTS");
-      Node* index = WordShl(key, IntPtrConstant(2));
+      Node* index = WordShl(intptr_index, IntPtrConstant(2));
       Node* element = Load(MachineType::Float32(), backing_store, index);
       var_double_value->Bind(ChangeFloat32ToFloat64(element));
       Goto(rebox_double);
@@ -3676,7 +4599,7 @@
     Bind(&float64_elements);
     {
       Comment("FLOAT64_ELEMENTS");
-      Node* index = WordShl(key, IntPtrConstant(3));
+      Node* index = WordShl(intptr_index, IntPtrConstant(3));
       Node* element = Load(MachineType::Float64(), backing_store, index);
       var_double_value->Bind(element);
       Goto(rebox_double);
@@ -3707,17 +4630,26 @@
           &property);
 
       Comment("element_load");
-      Node* key = TryToIntptr(p->name, miss);
+      Node* intptr_index = TryToIntptr(p->name, miss);
       Node* elements = LoadElements(p->receiver);
       Node* is_jsarray =
           WordAnd(handler_word, IntPtrConstant(KeyedLoadIsJsArray::kMask));
-      EmitBoundsCheck(p->receiver, elements, key, is_jsarray, miss);
-      Label if_hole(this);
-
+      Node* is_jsarray_condition = WordNotEqual(is_jsarray, IntPtrConstant(0));
       Node* elements_kind = BitFieldDecode<KeyedLoadElementsKind>(handler_word);
+      Label if_hole(this), unimplemented_elements_kind(this);
+      Label* out_of_bounds = miss;
+      EmitElementLoad(p->receiver, elements, elements_kind, intptr_index,
+                      is_jsarray_condition, &if_hole, &rebox_double,
+                      &var_double_value, &unimplemented_elements_kind,
+                      out_of_bounds, miss);
 
-      EmitElementLoad(p->receiver, elements, elements_kind, key, &if_hole,
-                      &rebox_double, &var_double_value, miss);
+      Bind(&unimplemented_elements_kind);
+      {
+        // Smi handlers should only be installed for supported elements kinds.
+        // Crash if we get here.
+        DebugBreak();
+        Goto(miss);
+      }
 
       Bind(&if_hole);
       {
@@ -3799,8 +4731,9 @@
   Node* receiver_map = LoadReceiverMap(p->receiver);
 
   // Check monomorphic case.
-  Node* feedback = TryMonomorphicCase(p, receiver_map, &if_handler,
-                                      &var_handler, &try_polymorphic);
+  Node* feedback =
+      TryMonomorphicCase(p->slot, p->vector, receiver_map, &if_handler,
+                         &var_handler, &try_polymorphic);
   Bind(&if_handler);
   {
     HandleLoadICHandlerCase(p, var_handler.value(), &miss);
@@ -3810,10 +4743,9 @@
   {
     // Check polymorphic case.
     Comment("LoadIC_try_polymorphic");
-    GotoUnless(
-        WordEqual(LoadMap(feedback), LoadRoot(Heap::kFixedArrayMapRootIndex)),
-        &try_megamorphic);
-    HandlePolymorphicCase(p, receiver_map, feedback, &if_handler, &var_handler,
+    GotoUnless(WordEqual(LoadMap(feedback), FixedArrayMapConstant()),
+               &try_megamorphic);
+    HandlePolymorphicCase(receiver_map, feedback, &if_handler, &var_handler,
                           &miss, 2);
   }
 
@@ -3845,8 +4777,9 @@
   Node* receiver_map = LoadReceiverMap(p->receiver);
 
   // Check monomorphic case.
-  Node* feedback = TryMonomorphicCase(p, receiver_map, &if_handler,
-                                      &var_handler, &try_polymorphic);
+  Node* feedback =
+      TryMonomorphicCase(p->slot, p->vector, receiver_map, &if_handler,
+                         &var_handler, &try_polymorphic);
   Bind(&if_handler);
   {
     HandleLoadICHandlerCase(p, var_handler.value(), &miss, kSupportElements);
@@ -3856,10 +4789,9 @@
   {
     // Check polymorphic case.
     Comment("KeyedLoadIC_try_polymorphic");
-    GotoUnless(
-        WordEqual(LoadMap(feedback), LoadRoot(Heap::kFixedArrayMapRootIndex)),
-        &try_megamorphic);
-    HandlePolymorphicCase(p, receiver_map, feedback, &if_handler, &var_handler,
+    GotoUnless(WordEqual(LoadMap(feedback), FixedArrayMapConstant()),
+               &try_megamorphic);
+    HandlePolymorphicCase(receiver_map, feedback, &if_handler, &var_handler,
                           &miss, 2);
   }
 
@@ -3885,8 +4817,8 @@
         p->slot, FAST_HOLEY_ELEMENTS, SMI_PARAMETERS,
         FixedArray::kHeaderSize + kPointerSize - kHeapObjectTag);
     Node* array = Load(MachineType::AnyTagged(), p->vector, offset);
-    HandlePolymorphicCase(p, receiver_map, array, &if_handler, &var_handler,
-                          &miss, 1);
+    HandlePolymorphicCase(receiver_map, array, &if_handler, &var_handler, &miss,
+                          1);
   }
   Bind(&miss);
   {
@@ -3896,6 +4828,210 @@
   }
 }
 
+void CodeStubAssembler::KeyedLoadICGeneric(const LoadICParameters* p) {
+  Variable var_index(this, MachineType::PointerRepresentation());
+  Variable var_details(this, MachineRepresentation::kWord32);
+  Variable var_value(this, MachineRepresentation::kTagged);
+  Label if_index(this), if_unique_name(this), if_element_hole(this),
+      if_oob(this), slow(this), stub_cache_miss(this),
+      if_property_dictionary(this), if_found_on_receiver(this);
+
+  Node* receiver = p->receiver;
+  GotoIf(WordIsSmi(receiver), &slow);
+  Node* receiver_map = LoadMap(receiver);
+  Node* instance_type = LoadMapInstanceType(receiver_map);
+  // Receivers requiring non-standard element accesses (interceptors, access
+  // checks, strings and string wrappers, proxies) are handled in the runtime.
+  GotoIf(Int32LessThanOrEqual(instance_type,
+                              Int32Constant(LAST_CUSTOM_ELEMENTS_RECEIVER)),
+         &slow);
+
+  Node* key = p->name;
+  TryToName(key, &if_index, &var_index, &if_unique_name, &slow);
+
+  Bind(&if_index);
+  {
+    Comment("integer index");
+    Node* index = var_index.value();
+    Node* elements = LoadElements(receiver);
+    Node* elements_kind = LoadMapElementsKind(receiver_map);
+    Node* is_jsarray_condition =
+        Word32Equal(instance_type, Int32Constant(JS_ARRAY_TYPE));
+    Variable var_double_value(this, MachineRepresentation::kFloat64);
+    Label rebox_double(this, &var_double_value);
+
+    // Unimplemented elements kinds fall back to a runtime call.
+    Label* unimplemented_elements_kind = &slow;
+    IncrementCounter(isolate()->counters()->ic_keyed_load_generic_smi(), 1);
+    EmitElementLoad(receiver, elements, elements_kind, index,
+                    is_jsarray_condition, &if_element_hole, &rebox_double,
+                    &var_double_value, unimplemented_elements_kind, &if_oob,
+                    &slow);
+
+    Bind(&rebox_double);
+    Return(AllocateHeapNumberWithValue(var_double_value.value()));
+  }
+
+  Bind(&if_oob);
+  {
+    Comment("out of bounds");
+    Node* index = var_index.value();
+    // Negative keys can't take the fast OOB path.
+    GotoIf(IntPtrLessThan(index, IntPtrConstant(0)), &slow);
+    // Positive OOB indices are effectively the same as hole loads.
+    Goto(&if_element_hole);
+  }
+
+  Bind(&if_element_hole);
+  {
+    Comment("found the hole");
+    Label return_undefined(this);
+    BranchIfPrototypesHaveNoElements(receiver_map, &return_undefined, &slow);
+
+    Bind(&return_undefined);
+    Return(UndefinedConstant());
+  }
+
+  Node* properties = nullptr;
+  Bind(&if_unique_name);
+  {
+    Comment("key is unique name");
+    // Check if the receiver has fast or slow properties.
+    properties = LoadProperties(receiver);
+    Node* properties_map = LoadMap(properties);
+    GotoIf(WordEqual(properties_map, LoadRoot(Heap::kHashTableMapRootIndex)),
+           &if_property_dictionary);
+
+    // Try looking up the property on the receiver; if unsuccessful, look
+    // for a handler in the stub cache.
+    Comment("DescriptorArray lookup");
+
+    // Skip linear search if there are too many descriptors.
+    // TODO(jkummerow): Consider implementing binary search.
+    // See also TryLookupProperty() which has the same limitation.
+    const int32_t kMaxLinear = 210;
+    Label stub_cache(this);
+    Node* bitfield3 = LoadMapBitField3(receiver_map);
+    Node* nof = BitFieldDecodeWord<Map::NumberOfOwnDescriptorsBits>(bitfield3);
+    GotoIf(UintPtrGreaterThan(nof, IntPtrConstant(kMaxLinear)), &stub_cache);
+    Node* descriptors = LoadMapDescriptors(receiver_map);
+    Variable var_name_index(this, MachineType::PointerRepresentation());
+    Label if_descriptor_found(this);
+    DescriptorLookupLinear(key, descriptors, nof, &if_descriptor_found,
+                           &var_name_index, &stub_cache);
+
+    Bind(&if_descriptor_found);
+    {
+      LoadPropertyFromFastObject(receiver, receiver_map, descriptors,
+                                 var_name_index.value(), &var_details,
+                                 &var_value);
+      Goto(&if_found_on_receiver);
+    }
+
+    Bind(&stub_cache);
+    {
+      Comment("stub cache probe for fast property load");
+      Variable var_handler(this, MachineRepresentation::kTagged);
+      Label found_handler(this, &var_handler), stub_cache_miss(this);
+      TryProbeStubCache(isolate()->load_stub_cache(), receiver, key,
+                        &found_handler, &var_handler, &stub_cache_miss);
+      Bind(&found_handler);
+      { HandleLoadICHandlerCase(p, var_handler.value(), &slow); }
+
+      Bind(&stub_cache_miss);
+      {
+        Comment("KeyedLoadGeneric_miss");
+        TailCallRuntime(Runtime::kKeyedLoadIC_Miss, p->context, p->receiver,
+                        p->name, p->slot, p->vector);
+      }
+    }
+  }
+
+  Bind(&if_property_dictionary);
+  {
+    Comment("dictionary property load");
+    // We checked for LAST_CUSTOM_ELEMENTS_RECEIVER before, which rules out
+    // seeing global objects here (which would need special handling).
+
+    Variable var_name_index(this, MachineType::PointerRepresentation());
+    Label dictionary_found(this, &var_name_index);
+    NameDictionaryLookup<NameDictionary>(properties, key, &dictionary_found,
+                                         &var_name_index, &slow);
+    Bind(&dictionary_found);
+    {
+      LoadPropertyFromNameDictionary(properties, var_name_index.value(),
+                                     &var_details, &var_value);
+      Goto(&if_found_on_receiver);
+    }
+  }
+
+  Bind(&if_found_on_receiver);
+  {
+    Node* value = CallGetterIfAccessor(var_value.value(), var_details.value(),
+                                       p->context, receiver, &slow);
+    IncrementCounter(isolate()->counters()->ic_keyed_load_generic_symbol(), 1);
+    Return(value);
+  }
+
+  Bind(&slow);
+  {
+    Comment("KeyedLoadGeneric_slow");
+    IncrementCounter(isolate()->counters()->ic_keyed_load_generic_slow(), 1);
+    // TODO(jkummerow): Should we use the GetProperty TF stub instead?
+    TailCallRuntime(Runtime::kKeyedGetProperty, p->context, p->receiver,
+                    p->name);
+  }
+}
+
+void CodeStubAssembler::StoreIC(const StoreICParameters* p) {
+  Variable var_handler(this, MachineRepresentation::kTagged);
+  // TODO(ishell): defer blocks when it works.
+  Label if_handler(this, &var_handler), try_polymorphic(this),
+      try_megamorphic(this /*, Label::kDeferred*/),
+      miss(this /*, Label::kDeferred*/);
+
+  Node* receiver_map = LoadReceiverMap(p->receiver);
+
+  // Check monomorphic case.
+  Node* feedback =
+      TryMonomorphicCase(p->slot, p->vector, receiver_map, &if_handler,
+                         &var_handler, &try_polymorphic);
+  Bind(&if_handler);
+  {
+    Comment("StoreIC_if_handler");
+    StoreWithVectorDescriptor descriptor(isolate());
+    TailCallStub(descriptor, var_handler.value(), p->context, p->receiver,
+                 p->name, p->value, p->slot, p->vector);
+  }
+
+  Bind(&try_polymorphic);
+  {
+    // Check polymorphic case.
+    Comment("StoreIC_try_polymorphic");
+    GotoUnless(
+        WordEqual(LoadMap(feedback), LoadRoot(Heap::kFixedArrayMapRootIndex)),
+        &try_megamorphic);
+    HandlePolymorphicCase(receiver_map, feedback, &if_handler, &var_handler,
+                          &miss, 2);
+  }
+
+  Bind(&try_megamorphic);
+  {
+    // Check megamorphic case.
+    GotoUnless(
+        WordEqual(feedback, LoadRoot(Heap::kmegamorphic_symbolRootIndex)),
+        &miss);
+
+    TryProbeStubCache(isolate()->store_stub_cache(), p->receiver, p->name,
+                      &if_handler, &var_handler, &miss);
+  }
+  Bind(&miss);
+  {
+    TailCallRuntime(Runtime::kStoreIC_Miss, p->context, p->value, p->slot,
+                    p->vector, p->receiver, p->name);
+  }
+}
+
 void CodeStubAssembler::LoadGlobalIC(const LoadICParameters* p) {
   Label try_handler(this), miss(this);
   Node* weak_cell =
@@ -3921,8 +5057,8 @@
     AssertInstanceType(handler, CODE_TYPE);
     LoadWithVectorDescriptor descriptor(isolate());
     Node* native_context = LoadNativeContext(p->context);
-    Node* receiver = LoadFixedArrayElement(
-        native_context, Int32Constant(Context::EXTENSION_INDEX));
+    Node* receiver =
+        LoadContextElement(native_context, Context::EXTENSION_INDEX);
     Node* fake_name = IntPtrConstant(0);
     TailCallStub(descriptor, handler, p->context, receiver, fake_name, p->slot,
                  p->vector);
@@ -3934,6 +5070,573 @@
   }
 }
 
+void CodeStubAssembler::ExtendPropertiesBackingStore(compiler::Node* object) {
+  Node* properties = LoadProperties(object);
+  Node* length = LoadFixedArrayBaseLength(properties);
+
+  ParameterMode mode = OptimalParameterMode();
+  length = UntagParameter(length, mode);
+
+  Node* delta = IntPtrOrSmiConstant(JSObject::kFieldsAdded, mode);
+  Node* new_capacity = IntPtrAdd(length, delta);
+
+  // Grow properties array.
+  ElementsKind kind = FAST_ELEMENTS;
+  DCHECK(kMaxNumberOfDescriptors + JSObject::kFieldsAdded <
+         FixedArrayBase::GetMaxLengthForNewSpaceAllocation(kind));
+  // The size of a new properties backing store is guaranteed to be small
+  // enough that the new backing store will be allocated in new space.
+  Assert(UintPtrLessThan(new_capacity, IntPtrConstant(kMaxNumberOfDescriptors +
+                                                      JSObject::kFieldsAdded)));
+
+  Node* new_properties = AllocateFixedArray(kind, new_capacity, mode);
+
+  FillFixedArrayWithValue(kind, new_properties, length, new_capacity,
+                          Heap::kUndefinedValueRootIndex, mode);
+
+  // |new_properties| is guaranteed to be in new space, so we can skip
+  // the write barrier.
+  CopyFixedArrayElements(kind, properties, new_properties, length,
+                         SKIP_WRITE_BARRIER, mode);
+
+  StoreObjectField(object, JSObject::kPropertiesOffset, new_properties);
+}
+
+Node* CodeStubAssembler::PrepareValueForWrite(Node* value,
+                                              Representation representation,
+                                              Label* bailout) {
+  if (representation.IsDouble()) {
+    Variable var_value(this, MachineRepresentation::kFloat64);
+    Label if_smi(this), if_heap_object(this), done(this);
+    Branch(WordIsSmi(value), &if_smi, &if_heap_object);
+    Bind(&if_smi);
+    {
+      var_value.Bind(SmiToFloat64(value));
+      Goto(&done);
+    }
+    Bind(&if_heap_object);
+    {
+      GotoUnless(
+          Word32Equal(LoadInstanceType(value), Int32Constant(HEAP_NUMBER_TYPE)),
+          bailout);
+      var_value.Bind(LoadHeapNumberValue(value));
+      Goto(&done);
+    }
+    Bind(&done);
+    value = var_value.value();
+  } else if (representation.IsHeapObject()) {
+    // Field type is checked by the handler, here we only check if the value
+    // is a heap object.
+    GotoIf(WordIsSmi(value), bailout);
+  } else if (representation.IsSmi()) {
+    GotoUnless(WordIsSmi(value), bailout);
+  } else {
+    DCHECK(representation.IsTagged());
+  }
+  return value;
+}
+
+void CodeStubAssembler::StoreNamedField(Node* object, FieldIndex index,
+                                        Representation representation,
+                                        Node* value, bool transition_to_field) {
+  DCHECK_EQ(index.is_double(), representation.IsDouble());
+
+  StoreNamedField(object, IntPtrConstant(index.offset()), index.is_inobject(),
+                  representation, value, transition_to_field);
+}
+
+void CodeStubAssembler::StoreNamedField(Node* object, Node* offset,
+                                        bool is_inobject,
+                                        Representation representation,
+                                        Node* value, bool transition_to_field) {
+  bool store_value_as_double = representation.IsDouble();
+  Node* property_storage = object;
+  if (!is_inobject) {
+    property_storage = LoadProperties(object);
+  }
+
+  if (representation.IsDouble()) {
+    if (!FLAG_unbox_double_fields || !is_inobject) {
+      if (transition_to_field) {
+        Node* heap_number = AllocateHeapNumberWithValue(value, MUTABLE);
+        // Store the new mutable heap number into the object.
+        value = heap_number;
+        store_value_as_double = false;
+      } else {
+        // Load the heap number.
+        property_storage = LoadObjectField(property_storage, offset);
+        // Store the double value into it.
+        offset = IntPtrConstant(HeapNumber::kValueOffset);
+      }
+    }
+  }
+
+  if (store_value_as_double) {
+    StoreObjectFieldNoWriteBarrier(property_storage, offset, value,
+                                   MachineRepresentation::kFloat64);
+  } else if (representation.IsSmi()) {
+    StoreObjectFieldNoWriteBarrier(property_storage, offset, value);
+  } else {
+    StoreObjectField(property_storage, offset, value);
+  }
+}
+
+Node* CodeStubAssembler::EmitKeyedSloppyArguments(Node* receiver, Node* key,
+                                                  Node* value, Label* bailout) {
+  // Mapped arguments are actual arguments. Unmapped arguments are values added
+  // to the arguments object after it was created for the call. Mapped arguments
+  // are stored in the context at indexes given by elements[key + 2]. Unmapped
+  // arguments are stored as regular indexed properties in the arguments array,
+  // held at elements[1]. See NewSloppyArguments() in runtime.cc for a detailed
+  // look at argument object construction.
+  //
+  // The sloppy arguments elements array has a special format:
+  //
+  // 0: context
+  // 1: unmapped arguments array
+  // 2: mapped_index0,
+  // 3: mapped_index1,
+  // ...
+  //
+  // length is 2 + min(number_of_actual_arguments, number_of_formal_arguments).
+  // If key + 2 >= elements.length then attempt to look in the unmapped
+  // arguments array (given by elements[1]) and return the value at key, missing
+  // to the runtime if the unmapped arguments array is not a fixed array or if
+  // key >= unmapped_arguments_array.length.
+  //
+  // Otherwise, t = elements[key + 2]. If t is the hole, then look up the value
+  // in the unmapped arguments array, as described above. Otherwise, t is a Smi
+  // index into the context array given at elements[0]. Return the value at
+  // context[t].
+
+  bool is_load = value == nullptr;
+
+  GotoUnless(WordIsSmi(key), bailout);
+  key = SmiUntag(key);
+  GotoIf(IntPtrLessThan(key, IntPtrConstant(0)), bailout);
+
+  Node* elements = LoadElements(receiver);
+  Node* elements_length = LoadAndUntagFixedArrayBaseLength(elements);
+
+  Variable var_result(this, MachineRepresentation::kTagged);
+  if (!is_load) {
+    var_result.Bind(value);
+  }
+  Label if_mapped(this), if_unmapped(this), end(this, &var_result);
+  Node* intptr_two = IntPtrConstant(2);
+  Node* adjusted_length = IntPtrSub(elements_length, intptr_two);
+
+  GotoIf(UintPtrGreaterThanOrEqual(key, adjusted_length), &if_unmapped);
+
+  Node* mapped_index = LoadFixedArrayElement(
+      elements, IntPtrAdd(key, intptr_two), 0, INTPTR_PARAMETERS);
+  Branch(WordEqual(mapped_index, TheHoleConstant()), &if_unmapped, &if_mapped);
+
+  Bind(&if_mapped);
+  {
+    Assert(WordIsSmi(mapped_index));
+    mapped_index = SmiUntag(mapped_index);
+    Node* the_context = LoadFixedArrayElement(elements, IntPtrConstant(0), 0,
+                                              INTPTR_PARAMETERS);
+    // Assert that we can use LoadFixedArrayElement/StoreFixedArrayElement
+    // methods for accessing Context.
+    STATIC_ASSERT(Context::kHeaderSize == FixedArray::kHeaderSize);
+    DCHECK_EQ(Context::SlotOffset(0) + kHeapObjectTag,
+              FixedArray::OffsetOfElementAt(0));
+    if (is_load) {
+      Node* result = LoadFixedArrayElement(the_context, mapped_index, 0,
+                                           INTPTR_PARAMETERS);
+      Assert(WordNotEqual(result, TheHoleConstant()));
+      var_result.Bind(result);
+    } else {
+      StoreFixedArrayElement(the_context, mapped_index, value,
+                             UPDATE_WRITE_BARRIER, INTPTR_PARAMETERS);
+    }
+    Goto(&end);
+  }
+
+  Bind(&if_unmapped);
+  {
+    Node* backing_store = LoadFixedArrayElement(elements, IntPtrConstant(1), 0,
+                                                INTPTR_PARAMETERS);
+    GotoIf(WordNotEqual(LoadMap(backing_store), FixedArrayMapConstant()),
+           bailout);
+
+    Node* backing_store_length =
+        LoadAndUntagFixedArrayBaseLength(backing_store);
+    GotoIf(UintPtrGreaterThanOrEqual(key, backing_store_length), bailout);
+
+    // The key falls into unmapped range.
+    if (is_load) {
+      Node* result =
+          LoadFixedArrayElement(backing_store, key, 0, INTPTR_PARAMETERS);
+      GotoIf(WordEqual(result, TheHoleConstant()), bailout);
+      var_result.Bind(result);
+    } else {
+      StoreFixedArrayElement(backing_store, key, value, UPDATE_WRITE_BARRIER,
+                             INTPTR_PARAMETERS);
+    }
+    Goto(&end);
+  }
+
+  Bind(&end);
+  return var_result.value();
+}
+
+Node* CodeStubAssembler::LoadScriptContext(Node* context, int context_index) {
+  Node* native_context = LoadNativeContext(context);
+  Node* script_context_table =
+      LoadContextElement(native_context, Context::SCRIPT_CONTEXT_TABLE_INDEX);
+
+  int offset =
+      ScriptContextTable::GetContextOffset(context_index) - kHeapObjectTag;
+  return Load(MachineType::AnyTagged(), script_context_table,
+              IntPtrConstant(offset));
+}
+
+Node* CodeStubAssembler::ClampedToUint8(Node* int32_value) {
+  Label done(this);
+  Node* int32_zero = Int32Constant(0);
+  Node* int32_255 = Int32Constant(255);
+  Variable var_value(this, MachineRepresentation::kWord32);
+  var_value.Bind(int32_value);
+  GotoIf(Uint32LessThanOrEqual(int32_value, int32_255), &done);
+  var_value.Bind(int32_zero);
+  GotoIf(Int32LessThan(int32_value, int32_zero), &done);
+  var_value.Bind(int32_255);
+  Goto(&done);
+  Bind(&done);
+  return var_value.value();
+}
+
+namespace {
+
+// Converts a typed array elements kind to a machine representation.
+MachineRepresentation ElementsKindToMachineRepresentation(ElementsKind kind) {
+  switch (kind) {
+    case UINT8_CLAMPED_ELEMENTS:
+    case UINT8_ELEMENTS:
+    case INT8_ELEMENTS:
+      return MachineRepresentation::kWord8;
+    case UINT16_ELEMENTS:
+    case INT16_ELEMENTS:
+      return MachineRepresentation::kWord16;
+    case UINT32_ELEMENTS:
+    case INT32_ELEMENTS:
+      return MachineRepresentation::kWord32;
+    case FLOAT32_ELEMENTS:
+      return MachineRepresentation::kFloat32;
+    case FLOAT64_ELEMENTS:
+      return MachineRepresentation::kFloat64;
+    default:
+      UNREACHABLE();
+      return MachineRepresentation::kNone;
+  }
+}
+
+}  // namespace
+
+void CodeStubAssembler::StoreElement(Node* elements, ElementsKind kind,
+                                     Node* index, Node* value,
+                                     ParameterMode mode) {
+  if (IsFixedTypedArrayElementsKind(kind)) {
+    if (kind == UINT8_CLAMPED_ELEMENTS) {
+      value = ClampedToUint8(value);
+    }
+    Node* offset = ElementOffsetFromIndex(index, kind, mode, 0);
+    MachineRepresentation rep = ElementsKindToMachineRepresentation(kind);
+    StoreNoWriteBarrier(rep, elements, offset, value);
+    return;
+  }
+
+  WriteBarrierMode barrier_mode =
+      IsFastSmiElementsKind(kind) ? SKIP_WRITE_BARRIER : UPDATE_WRITE_BARRIER;
+  if (IsFastDoubleElementsKind(kind)) {
+    // Make sure we do not store signalling NaNs into double arrays.
+    value = Float64SilenceNaN(value);
+    StoreFixedDoubleArrayElement(elements, index, value, mode);
+  } else {
+    StoreFixedArrayElement(elements, index, value, barrier_mode, mode);
+  }
+}
+
+void CodeStubAssembler::EmitElementStore(Node* object, Node* key, Node* value,
+                                         bool is_jsarray,
+                                         ElementsKind elements_kind,
+                                         KeyedAccessStoreMode store_mode,
+                                         Label* bailout) {
+  Node* elements = LoadElements(object);
+  if (IsFastSmiOrObjectElementsKind(elements_kind) &&
+      store_mode != STORE_NO_TRANSITION_HANDLE_COW) {
+    // Bailout in case of COW elements.
+    GotoIf(WordNotEqual(LoadMap(elements),
+                        LoadRoot(Heap::kFixedArrayMapRootIndex)),
+           bailout);
+  }
+  // TODO(ishell): introduce TryToIntPtrOrSmi() and use OptimalParameterMode().
+  ParameterMode parameter_mode = INTPTR_PARAMETERS;
+  key = TryToIntptr(key, bailout);
+
+  if (IsFixedTypedArrayElementsKind(elements_kind)) {
+    Label done(this);
+    // TODO(ishell): call ToNumber() on value and don't bailout but be careful
+    // to call it only once if we decide to bailout because of bounds checks.
+
+    if (IsFixedFloatElementsKind(elements_kind)) {
+      // TODO(ishell): move float32 truncation into PrepareValueForWrite.
+      value = PrepareValueForWrite(value, Representation::Double(), bailout);
+      if (elements_kind == FLOAT32_ELEMENTS) {
+        value = TruncateFloat64ToFloat32(value);
+      }
+    } else {
+      // TODO(ishell): It's fine for word8/16/32 to truncate the result.
+      value = TryToIntptr(value, bailout);
+    }
+
+    // There must be no allocations between the buffer load and
+    // the actual store to the backing store, because GC may decide that
+    // the buffer is not alive or move the elements.
+    // TODO(ishell): introduce DisallowHeapAllocationCode scope here.
+
+    // Check if buffer has been neutered.
+    Node* buffer = LoadObjectField(object, JSArrayBufferView::kBufferOffset);
+    Node* bitfield = LoadObjectField(buffer, JSArrayBuffer::kBitFieldOffset,
+                                     MachineType::Uint32());
+    Node* neutered_bit =
+        Word32And(bitfield, Int32Constant(JSArrayBuffer::WasNeutered::kMask));
+    GotoUnless(Word32Equal(neutered_bit, Int32Constant(0)), bailout);
+
+    // Bounds check.
+    Node* length = UntagParameter(
+        LoadObjectField(object, JSTypedArray::kLengthOffset), parameter_mode);
+
+    if (store_mode == STORE_NO_TRANSITION_IGNORE_OUT_OF_BOUNDS) {
+      // Skip the store if we write beyond the length.
+      GotoUnless(IntPtrLessThan(key, length), &done);
+      // ... but bailout if the key is negative.
+    } else {
+      DCHECK_EQ(STANDARD_STORE, store_mode);
+    }
+    GotoUnless(UintPtrLessThan(key, length), bailout);
+
+    // Backing store = external_pointer + base_pointer.
+    Node* external_pointer =
+        LoadObjectField(elements, FixedTypedArrayBase::kExternalPointerOffset,
+                        MachineType::Pointer());
+    Node* base_pointer =
+        LoadObjectField(elements, FixedTypedArrayBase::kBasePointerOffset);
+    Node* backing_store = IntPtrAdd(external_pointer, base_pointer);
+    StoreElement(backing_store, elements_kind, key, value, parameter_mode);
+    Goto(&done);
+
+    Bind(&done);
+    return;
+  }
+  DCHECK(IsFastSmiOrObjectElementsKind(elements_kind) ||
+         IsFastDoubleElementsKind(elements_kind));
+
+  Node* length = is_jsarray ? LoadObjectField(object, JSArray::kLengthOffset)
+                            : LoadFixedArrayBaseLength(elements);
+  length = UntagParameter(length, parameter_mode);
+
+  // In case value is stored into a fast smi array, ensure that the value is
+  // a smi before manipulating the backing store. Otherwise the backing store
+  // may be left in an invalid state.
+  if (IsFastSmiElementsKind(elements_kind)) {
+    GotoUnless(WordIsSmi(value), bailout);
+  } else if (IsFastDoubleElementsKind(elements_kind)) {
+    value = PrepareValueForWrite(value, Representation::Double(), bailout);
+  }
+
+  if (IsGrowStoreMode(store_mode)) {
+    elements = CheckForCapacityGrow(object, elements, elements_kind, length,
+                                    key, parameter_mode, is_jsarray, bailout);
+  } else {
+    GotoUnless(UintPtrLessThan(key, length), bailout);
+
+    if ((store_mode == STORE_NO_TRANSITION_HANDLE_COW) &&
+        IsFastSmiOrObjectElementsKind(elements_kind)) {
+      elements = CopyElementsOnWrite(object, elements, elements_kind, length,
+                                     parameter_mode, bailout);
+    }
+  }
+  StoreElement(elements, elements_kind, key, value, parameter_mode);
+}
+
+Node* CodeStubAssembler::CheckForCapacityGrow(Node* object, Node* elements,
+                                              ElementsKind kind, Node* length,
+                                              Node* key, ParameterMode mode,
+                                              bool is_js_array,
+                                              Label* bailout) {
+  Variable checked_elements(this, MachineRepresentation::kTagged);
+  Label grow_case(this), no_grow_case(this), done(this);
+
+  Node* condition;
+  if (IsHoleyElementsKind(kind)) {
+    condition = UintPtrGreaterThanOrEqual(key, length);
+  } else {
+    condition = WordEqual(key, length);
+  }
+  Branch(condition, &grow_case, &no_grow_case);
+
+  Bind(&grow_case);
+  {
+    Node* current_capacity =
+        UntagParameter(LoadFixedArrayBaseLength(elements), mode);
+
+    checked_elements.Bind(elements);
+
+    Label fits_capacity(this);
+    GotoIf(UintPtrLessThan(key, current_capacity), &fits_capacity);
+    {
+      Node* new_elements = TryGrowElementsCapacity(
+          object, elements, kind, key, current_capacity, mode, bailout);
+
+      checked_elements.Bind(new_elements);
+      Goto(&fits_capacity);
+    }
+    Bind(&fits_capacity);
+
+    if (is_js_array) {
+      Node* new_length = IntPtrAdd(key, IntPtrOrSmiConstant(1, mode));
+      StoreObjectFieldNoWriteBarrier(object, JSArray::kLengthOffset,
+                                     TagParameter(new_length, mode));
+    }
+    Goto(&done);
+  }
+
+  Bind(&no_grow_case);
+  {
+    GotoUnless(UintPtrLessThan(key, length), bailout);
+    checked_elements.Bind(elements);
+    Goto(&done);
+  }
+
+  Bind(&done);
+  return checked_elements.value();
+}
+
+Node* CodeStubAssembler::CopyElementsOnWrite(Node* object, Node* elements,
+                                             ElementsKind kind, Node* length,
+                                             ParameterMode mode,
+                                             Label* bailout) {
+  Variable new_elements_var(this, MachineRepresentation::kTagged);
+  Label done(this);
+
+  new_elements_var.Bind(elements);
+  GotoUnless(
+      WordEqual(LoadMap(elements), LoadRoot(Heap::kFixedCOWArrayMapRootIndex)),
+      &done);
+  {
+    Node* capacity = UntagParameter(LoadFixedArrayBaseLength(elements), mode);
+    Node* new_elements = GrowElementsCapacity(object, elements, kind, kind,
+                                              length, capacity, mode, bailout);
+
+    new_elements_var.Bind(new_elements);
+    Goto(&done);
+  }
+
+  Bind(&done);
+  return new_elements_var.value();
+}
+
+void CodeStubAssembler::TransitionElementsKind(
+    compiler::Node* object, compiler::Node* map, ElementsKind from_kind,
+    ElementsKind to_kind, bool is_jsarray, Label* bailout) {
+  DCHECK(!IsFastHoleyElementsKind(from_kind) ||
+         IsFastHoleyElementsKind(to_kind));
+  if (AllocationSite::GetMode(from_kind, to_kind) == TRACK_ALLOCATION_SITE) {
+    TrapAllocationMemento(object, bailout);
+  }
+
+  if (!IsSimpleMapChangeTransition(from_kind, to_kind)) {
+    Comment("Non-simple map transition");
+    Node* elements = LoadElements(object);
+
+    Node* empty_fixed_array =
+        HeapConstant(isolate()->factory()->empty_fixed_array());
+
+    Label done(this);
+    GotoIf(WordEqual(elements, empty_fixed_array), &done);
+
+    // TODO(ishell): Use OptimalParameterMode().
+    ParameterMode mode = INTPTR_PARAMETERS;
+    Node* elements_length = SmiUntag(LoadFixedArrayBaseLength(elements));
+    Node* array_length =
+        is_jsarray ? SmiUntag(LoadObjectField(object, JSArray::kLengthOffset))
+                   : elements_length;
+
+    GrowElementsCapacity(object, elements, from_kind, to_kind, array_length,
+                         elements_length, mode, bailout);
+    Goto(&done);
+    Bind(&done);
+  }
+
+  StoreObjectField(object, JSObject::kMapOffset, map);
+}
+
+void CodeStubAssembler::TrapAllocationMemento(Node* object,
+                                              Label* memento_found) {
+  Comment("[ TrapAllocationMemento");
+  Label no_memento_found(this);
+  Label top_check(this), map_check(this);
+
+  Node* new_space_top_address = ExternalConstant(
+      ExternalReference::new_space_allocation_top_address(isolate()));
+  const int kMementoMapOffset = JSArray::kSize - kHeapObjectTag;
+  const int kMementoEndOffset = kMementoMapOffset + AllocationMemento::kSize;
+
+  // Bail out if the object is not in new space.
+  Node* object_page = PageFromAddress(object);
+  {
+    const int mask =
+        (1 << MemoryChunk::IN_FROM_SPACE) | (1 << MemoryChunk::IN_TO_SPACE);
+    Node* page_flags = Load(MachineType::IntPtr(), object_page);
+    GotoIf(
+        WordEqual(WordAnd(page_flags, IntPtrConstant(mask)), IntPtrConstant(0)),
+        &no_memento_found);
+  }
+
+  Node* memento_end = IntPtrAdd(object, IntPtrConstant(kMementoEndOffset));
+  Node* memento_end_page = PageFromAddress(memento_end);
+
+  Node* new_space_top = Load(MachineType::Pointer(), new_space_top_address);
+  Node* new_space_top_page = PageFromAddress(new_space_top);
+
+  // If the object is in new space, we need to check whether it and the
+  // respective potential memento object are on the same page as the current top.
+  GotoIf(WordEqual(memento_end_page, new_space_top_page), &top_check);
+
+  // The object is on a different page than allocation top. Bail out if the
+  // object sits on the page boundary as no memento can follow and we cannot
+  // touch the memory following it.
+  Branch(WordEqual(object_page, memento_end_page), &map_check,
+         &no_memento_found);
+
+  // If top is on the same page as the current object, we need to check whether
+  // we are below top.
+  Bind(&top_check);
+  {
+    Branch(UintPtrGreaterThan(memento_end, new_space_top), &no_memento_found,
+           &map_check);
+  }
+
+  // Memento map check.
+  Bind(&map_check);
+  {
+    Node* memento_map = LoadObjectField(object, kMementoMapOffset);
+    Branch(
+        WordEqual(memento_map, LoadRoot(Heap::kAllocationMementoMapRootIndex)),
+        memento_found, &no_memento_found);
+  }
+  Bind(&no_memento_found);
+  Comment("] TrapAllocationMemento");
+}
+
+Node* CodeStubAssembler::PageFromAddress(Node* address) {
+  return WordAnd(address, IntPtrConstant(~Page::kPageAlignmentMask));
+}
+
 Node* CodeStubAssembler::EnumLength(Node* map) {
   Node* bitfield_3 = LoadMapBitField3(map);
   Node* enum_length = BitFieldDecode<Map::EnumLengthBits>(bitfield_3);
@@ -4001,6 +5704,52 @@
   }
 }
 
+Node* CodeStubAssembler::CreateAllocationSiteInFeedbackVector(
+    Node* feedback_vector, Node* slot) {
+  Node* size = IntPtrConstant(AllocationSite::kSize);
+  Node* site = Allocate(size, CodeStubAssembler::kPretenured);
+
+  // Store the map
+  StoreObjectFieldRoot(site, AllocationSite::kMapOffset,
+                       Heap::kAllocationSiteMapRootIndex);
+  Node* kind = SmiConstant(Smi::FromInt(GetInitialFastElementsKind()));
+  StoreObjectFieldNoWriteBarrier(site, AllocationSite::kTransitionInfoOffset,
+                                 kind);
+
+  // Unlike literals, constructed arrays don't have nested sites
+  Node* zero = IntPtrConstant(0);
+  StoreObjectFieldNoWriteBarrier(site, AllocationSite::kNestedSiteOffset, zero);
+
+  // Pretenuring calculation field.
+  StoreObjectFieldNoWriteBarrier(site, AllocationSite::kPretenureDataOffset,
+                                 zero);
+
+  // Pretenuring memento creation count field.
+  StoreObjectFieldNoWriteBarrier(
+      site, AllocationSite::kPretenureCreateCountOffset, zero);
+
+  // Store an empty fixed array for the code dependency.
+  StoreObjectFieldRoot(site, AllocationSite::kDependentCodeOffset,
+                       Heap::kEmptyFixedArrayRootIndex);
+
+  // Link the object to the allocation site list
+  Node* site_list = ExternalConstant(
+      ExternalReference::allocation_sites_list_address(isolate()));
+  Node* next_site = LoadBufferObject(site_list, 0);
+
+  // TODO(mvstanton): This is a store to a weak pointer, which we may want to
+  // mark as such in order to skip the write barrier, once we have a unified
+  // system for weakness. For now we decided to keep it like this because having
+  // an initial write barrier backed store makes this pointer strong until the
+  // next GC, and allocation sites are designed to survive several GCs anyway.
+  StoreObjectField(site, AllocationSite::kWeakNextOffset, next_site);
+  StoreNoWriteBarrier(MachineRepresentation::kTagged, site_list, site);
+
+  StoreFixedArrayElement(feedback_vector, slot, site, UPDATE_WRITE_BARRIER,
+                         CodeStubAssembler::SMI_PARAMETERS);
+  return site;
+}
+
 Node* CodeStubAssembler::CreateWeakCellInFeedbackVector(Node* feedback_vector,
                                                         Node* slot,
                                                         Node* value) {
diff --git a/src/code-stub-assembler.h b/src/code-stub-assembler.h
index 4bad541..c55f48c 100644
--- a/src/code-stub-assembler.h
+++ b/src/code-stub-assembler.h
@@ -19,6 +19,20 @@
 
 enum class PrimitiveType { kBoolean, kNumber, kString, kSymbol };
 
+#define HEAP_CONSTANT_LIST(V)                 \
+  V(BooleanMap, BooleanMap)                   \
+  V(empty_string, EmptyString)                \
+  V(EmptyFixedArray, EmptyFixedArray)         \
+  V(FixedArrayMap, FixedArrayMap)             \
+  V(FixedCOWArrayMap, FixedCOWArrayMap)       \
+  V(FixedDoubleArrayMap, FixedDoubleArrayMap) \
+  V(HeapNumberMap, HeapNumberMap)             \
+  V(MinusZeroValue, MinusZero)                \
+  V(NanValue, Nan)                            \
+  V(NullValue, Null)                          \
+  V(TheHoleValue, TheHole)                    \
+  V(UndefinedValue, Undefined)
+
 // Provides JavaScript-specific "macro-assembler" functionality on top of the
 // CodeAssembler. By factoring the JavaScript-isms out of the CodeAssembler,
 // it's possible to add JavaScript-specific useful CodeAssembler "macros"
@@ -46,17 +60,40 @@
 
   typedef base::Flags<AllocationFlag> AllocationFlags;
 
-  enum ParameterMode { INTEGER_PARAMETERS, SMI_PARAMETERS };
+  // TODO(ishell): Fix all loads/stores from arrays by int32 offsets/indices
+  // and eventually remove INTEGER_PARAMETERS in favour of INTPTR_PARAMETERS.
+  enum ParameterMode { INTEGER_PARAMETERS, SMI_PARAMETERS, INTPTR_PARAMETERS };
 
-  compiler::Node* BooleanMapConstant();
-  compiler::Node* EmptyStringConstant();
-  compiler::Node* HeapNumberMapConstant();
+  // On 32-bit platforms, there is a slight performance advantage to doing all
+  // of the array offset/index arithmetic with SMIs, since it's possible
+  // to save a few tag/untag operations without paying an extra expense when
+  // calculating array offset (the smi math can be folded away) and there are
+  // fewer live ranges. Thus only convert indices to untagged value on 64-bit
+  // platforms.
+  ParameterMode OptimalParameterMode() const {
+    return Is64() ? INTPTR_PARAMETERS : SMI_PARAMETERS;
+  }
+
+  compiler::Node* UntagParameter(compiler::Node* value, ParameterMode mode) {
+    if (mode != SMI_PARAMETERS) value = SmiUntag(value);
+    return value;
+  }
+
+  compiler::Node* TagParameter(compiler::Node* value, ParameterMode mode) {
+    if (mode != SMI_PARAMETERS) value = SmiTag(value);
+    return value;
+  }
+
   compiler::Node* NoContextConstant();
-  compiler::Node* NanConstant();
-  compiler::Node* NullConstant();
-  compiler::Node* MinusZeroConstant();
-  compiler::Node* UndefinedConstant();
-  compiler::Node* TheHoleConstant();
+#define HEAP_CONSTANT_ACCESSOR(rootName, name) compiler::Node* name##Constant();
+  HEAP_CONSTANT_LIST(HEAP_CONSTANT_ACCESSOR)
+#undef HEAP_CONSTANT_ACCESSOR
+
+#define HEAP_CONSTANT_TEST(rootName, name) \
+  compiler::Node* Is##name(compiler::Node* value);
+  HEAP_CONSTANT_LIST(HEAP_CONSTANT_TEST)
+#undef HEAP_CONSTANT_TEST
+
   compiler::Node* HashSeed();
   compiler::Node* StaleRegisterConstant();
 
@@ -86,14 +123,20 @@
   compiler::Node* SmiSub(compiler::Node* a, compiler::Node* b);
   compiler::Node* SmiSubWithOverflow(compiler::Node* a, compiler::Node* b);
   compiler::Node* SmiEqual(compiler::Node* a, compiler::Node* b);
+  compiler::Node* SmiAbove(compiler::Node* a, compiler::Node* b);
   compiler::Node* SmiAboveOrEqual(compiler::Node* a, compiler::Node* b);
+  compiler::Node* SmiBelow(compiler::Node* a, compiler::Node* b);
   compiler::Node* SmiLessThan(compiler::Node* a, compiler::Node* b);
   compiler::Node* SmiLessThanOrEqual(compiler::Node* a, compiler::Node* b);
+  compiler::Node* SmiMax(compiler::Node* a, compiler::Node* b);
   compiler::Node* SmiMin(compiler::Node* a, compiler::Node* b);
   // Computes a % b for Smi inputs a and b; result is not necessarily a Smi.
   compiler::Node* SmiMod(compiler::Node* a, compiler::Node* b);
   // Computes a * b for Smi inputs a and b; result is not necessarily a Smi.
   compiler::Node* SmiMul(compiler::Node* a, compiler::Node* b);
+  compiler::Node* SmiOr(compiler::Node* a, compiler::Node* b) {
+    return WordOr(a, b);
+  }
 
   // Allocate an object of the given size.
   compiler::Node* Allocate(compiler::Node* size, AllocationFlags flags = kNone);
@@ -106,7 +149,7 @@
 
   // Check a value for smi-ness
   compiler::Node* WordIsSmi(compiler::Node* a);
-  // Check that the value is a positive smi.
+  // Check that the value is a non-negative smi.
   compiler::Node* WordIsPositiveSmi(compiler::Node* a);
 
   void BranchIfSmiEqual(compiler::Node* a, compiler::Node* b, Label* if_true,
@@ -143,10 +186,6 @@
                          if_notequal);
   }
 
-  void BranchIfSameValueZero(compiler::Node* a, compiler::Node* b,
-                             compiler::Node* context, Label* if_true,
-                             Label* if_false);
-
   void BranchIfFastJSArray(compiler::Node* object, compiler::Node* context,
                            Label* if_true, Label* if_false);
 
@@ -188,6 +227,8 @@
   compiler::Node* LoadProperties(compiler::Node* object);
   // Load the elements backing store of a JSObject.
   compiler::Node* LoadElements(compiler::Node* object);
+  // Load the length of a JSArray instance.
+  compiler::Node* LoadJSArrayLength(compiler::Node* array);
   // Load the length of a fixed array base instance.
   compiler::Node* LoadFixedArrayBaseLength(compiler::Node* array);
   // Load the length of a fixed array base instance.
@@ -200,6 +241,8 @@
   compiler::Node* LoadMapBitField3(compiler::Node* map);
   // Load the instance type of a map.
   compiler::Node* LoadMapInstanceType(compiler::Node* map);
+  // Load the ElementsKind of a map.
+  compiler::Node* LoadMapElementsKind(compiler::Node* map);
   // Load the instance descriptors of a map.
   compiler::Node* LoadMapDescriptors(compiler::Node* map);
   // Load the prototype of a map.
@@ -208,13 +251,16 @@
   compiler::Node* LoadMapInstanceSize(compiler::Node* map);
   // Load the inobject properties count of a Map (valid only for JSObjects).
   compiler::Node* LoadMapInobjectProperties(compiler::Node* map);
+  // Load the constructor function index of a Map (only for primitive maps).
+  compiler::Node* LoadMapConstructorFunctionIndex(compiler::Node* map);
   // Load the constructor of a Map (equivalent to Map::GetConstructor()).
   compiler::Node* LoadMapConstructor(compiler::Node* map);
 
-  // Load the hash field of a name.
+  // Load the hash field of a name as an uint32 value.
   compiler::Node* LoadNameHashField(compiler::Node* name);
-  // Load the hash value of a name. If {if_hash_not_computed} label
-  // is specified then it also checks if hash is actually computed.
+  // Load the hash value of a name as an uint32 value.
+  // If {if_hash_not_computed} label is specified then it also checks if
+  // hash is actually computed.
   compiler::Node* LoadNameHash(compiler::Node* name,
                                Label* if_hash_not_computed = nullptr);
 
@@ -226,25 +272,30 @@
   compiler::Node* LoadWeakCellValue(compiler::Node* weak_cell,
                                     Label* if_cleared = nullptr);
 
-  compiler::Node* AllocateUninitializedFixedArray(compiler::Node* length);
-
   // Load an array element from a FixedArray.
   compiler::Node* LoadFixedArrayElement(
-      compiler::Node* object, compiler::Node* int32_index,
-      int additional_offset = 0,
+      compiler::Node* object, compiler::Node* index, int additional_offset = 0,
       ParameterMode parameter_mode = INTEGER_PARAMETERS);
   // Load an array element from a FixedArray, untag it and return it as Word32.
   compiler::Node* LoadAndUntagToWord32FixedArrayElement(
-      compiler::Node* object, compiler::Node* int32_index,
-      int additional_offset = 0,
+      compiler::Node* object, compiler::Node* index, int additional_offset = 0,
       ParameterMode parameter_mode = INTEGER_PARAMETERS);
   // Load an array element from a FixedDoubleArray.
   compiler::Node* LoadFixedDoubleArrayElement(
-      compiler::Node* object, compiler::Node* int32_index,
-      MachineType machine_type, int additional_offset = 0,
-      ParameterMode parameter_mode = INTEGER_PARAMETERS);
+      compiler::Node* object, compiler::Node* index, MachineType machine_type,
+      int additional_offset = 0,
+      ParameterMode parameter_mode = INTEGER_PARAMETERS,
+      Label* if_hole = nullptr);
+
+  // Load Float64 value by |base| + |offset| address. If the value is a double
+  // hole then jump to |if_hole|. If |machine_type| is None then only the hole
+  // check is generated.
+  compiler::Node* LoadDoubleWithHoleCheck(
+      compiler::Node* base, compiler::Node* offset, Label* if_hole,
+      MachineType machine_type = MachineType::Float64());
 
   // Context manipulation
+  compiler::Node* LoadContextElement(compiler::Node* context, int slot_index);
   compiler::Node* LoadNativeContext(compiler::Node* context);
 
   compiler::Node* LoadJSArrayElementsMap(ElementsKind kind,
@@ -256,9 +307,15 @@
   // Store a field to an object on the heap.
   compiler::Node* StoreObjectField(
       compiler::Node* object, int offset, compiler::Node* value);
+  compiler::Node* StoreObjectField(compiler::Node* object,
+                                   compiler::Node* offset,
+                                   compiler::Node* value);
   compiler::Node* StoreObjectFieldNoWriteBarrier(
       compiler::Node* object, int offset, compiler::Node* value,
       MachineRepresentation rep = MachineRepresentation::kTagged);
+  compiler::Node* StoreObjectFieldNoWriteBarrier(
+      compiler::Node* object, compiler::Node* offset, compiler::Node* value,
+      MachineRepresentation rep = MachineRepresentation::kTagged);
   // Store the Map of an HeapObject.
   compiler::Node* StoreMapNoWriteBarrier(compiler::Node* object,
                                          compiler::Node* map);
@@ -275,9 +332,10 @@
       ParameterMode parameter_mode = INTEGER_PARAMETERS);
 
   // Allocate a HeapNumber without initializing its value.
-  compiler::Node* AllocateHeapNumber();
+  compiler::Node* AllocateHeapNumber(MutableMode mode = IMMUTABLE);
   // Allocate a HeapNumber with a specific value.
-  compiler::Node* AllocateHeapNumberWithValue(compiler::Node* value);
+  compiler::Node* AllocateHeapNumberWithValue(compiler::Node* value,
+                                              MutableMode mode = IMMUTABLE);
   // Allocate a SeqOneByteString with the given length.
   compiler::Node* AllocateSeqOneByteString(int length);
   compiler::Node* AllocateSeqOneByteString(compiler::Node* context,
@@ -286,37 +344,122 @@
   compiler::Node* AllocateSeqTwoByteString(int length);
   compiler::Node* AllocateSeqTwoByteString(compiler::Node* context,
                                            compiler::Node* length);
-  // Allocated an JSArray
-  compiler::Node* AllocateJSArray(ElementsKind kind, compiler::Node* array_map,
-                                  compiler::Node* capacity,
-                                  compiler::Node* length,
-                                  compiler::Node* allocation_site = nullptr,
-                                  ParameterMode mode = INTEGER_PARAMETERS);
+
+  // Allocate a SlicedOneByteString with the given length, parent and offset.
+  // |length| and |offset| are expected to be tagged.
+  compiler::Node* AllocateSlicedOneByteString(compiler::Node* length,
+                                              compiler::Node* parent,
+                                              compiler::Node* offset);
+  // Allocate a SlicedTwoByteString with the given length, parent and offset.
+  // |length| and |offset| are expected to be tagged.
+  compiler::Node* AllocateSlicedTwoByteString(compiler::Node* length,
+                                              compiler::Node* parent,
+                                              compiler::Node* offset);
+
+  // Allocate a RegExpResult with the given length (the number of captures,
+  // including the match itself), index (the index where the match starts),
+  // and input string. |length| and |index| are expected to be tagged, and
+  // |input| must be a string.
+  compiler::Node* AllocateRegExpResult(compiler::Node* context,
+                                       compiler::Node* length,
+                                       compiler::Node* index,
+                                       compiler::Node* input);
+
+  // Allocate a JSArray without elements and initialize the header fields.
+  compiler::Node* AllocateUninitializedJSArrayWithoutElements(
+      ElementsKind kind, compiler::Node* array_map, compiler::Node* length,
+      compiler::Node* allocation_site);
+  // Allocate and return a JSArray with initialized header fields and its
+  // uninitialized elements.
+  // The ParameterMode argument is only used for the capacity parameter.
+  std::pair<compiler::Node*, compiler::Node*>
+  AllocateUninitializedJSArrayWithElements(
+      ElementsKind kind, compiler::Node* array_map, compiler::Node* length,
+      compiler::Node* allocation_site, compiler::Node* capacity,
+      ParameterMode capacity_mode = INTEGER_PARAMETERS);
+  // Allocate a JSArray and fill elements with the hole.
+  // The ParameterMode argument is only used for the capacity parameter.
+  compiler::Node* AllocateJSArray(
+      ElementsKind kind, compiler::Node* array_map, compiler::Node* capacity,
+      compiler::Node* length, compiler::Node* allocation_site = nullptr,
+      ParameterMode capacity_mode = INTEGER_PARAMETERS);
 
   compiler::Node* AllocateFixedArray(ElementsKind kind,
                                      compiler::Node* capacity,
                                      ParameterMode mode = INTEGER_PARAMETERS,
                                      AllocationFlags flags = kNone);
 
-  void FillFixedArrayWithHole(ElementsKind kind, compiler::Node* array,
-                              compiler::Node* from_index,
-                              compiler::Node* to_index,
-                              ParameterMode mode = INTEGER_PARAMETERS);
+  void FillFixedArrayWithValue(ElementsKind kind, compiler::Node* array,
+                               compiler::Node* from_index,
+                               compiler::Node* to_index,
+                               Heap::RootListIndex value_root_index,
+                               ParameterMode mode = INTEGER_PARAMETERS);
 
+  // Copies all elements from |from_array| of |length| size to
+  // |to_array| of the same size respecting the elements kind.
   void CopyFixedArrayElements(
       ElementsKind kind, compiler::Node* from_array, compiler::Node* to_array,
-      compiler::Node* element_count,
+      compiler::Node* length,
+      WriteBarrierMode barrier_mode = UPDATE_WRITE_BARRIER,
+      ParameterMode mode = INTEGER_PARAMETERS) {
+    CopyFixedArrayElements(kind, from_array, kind, to_array, length, length,
+                           barrier_mode, mode);
+  }
+
+  // Copies |element_count| elements from |from_array| to |to_array| of
+  // |capacity| size respecting both array's elements kinds.
+  void CopyFixedArrayElements(
+      ElementsKind from_kind, compiler::Node* from_array, ElementsKind to_kind,
+      compiler::Node* to_array, compiler::Node* element_count,
+      compiler::Node* capacity,
       WriteBarrierMode barrier_mode = UPDATE_WRITE_BARRIER,
       ParameterMode mode = INTEGER_PARAMETERS);
 
+  // Copies |character_count| elements from |from_string| to |to_string|
+  // starting at the |from_index|'th character. |from_index| and
+  // |character_count| must be Smis s.t.
+  // 0 <= |from_index| <= |from_index| + |character_count| < from_string.length.
+  void CopyStringCharacters(compiler::Node* from_string,
+                            compiler::Node* to_string,
+                            compiler::Node* from_index,
+                            compiler::Node* character_count,
+                            String::Encoding encoding);
+
+  // Loads an element from |array| of |from_kind| elements by given |offset|
+  // (NOTE: not index!), does a hole check if |if_hole| is provided and
+  // converts the value so that it becomes ready for storing to array of
+  // |to_kind| elements.
+  compiler::Node* LoadElementAndPrepareForStore(compiler::Node* array,
+                                                compiler::Node* offset,
+                                                ElementsKind from_kind,
+                                                ElementsKind to_kind,
+                                                Label* if_hole);
+
   compiler::Node* CalculateNewElementsCapacity(
       compiler::Node* old_capacity, ParameterMode mode = INTEGER_PARAMETERS);
 
-  compiler::Node* CheckAndGrowElementsCapacity(compiler::Node* context,
-                                               compiler::Node* elements,
-                                               ElementsKind kind,
-                                               compiler::Node* key,
-                                               Label* fail);
+  // Tries to grow the |elements| array of given |object| to store the |key|
+  // or bails out if the growing gap is too big. Returns new elements.
+  compiler::Node* TryGrowElementsCapacity(compiler::Node* object,
+                                          compiler::Node* elements,
+                                          ElementsKind kind,
+                                          compiler::Node* key, Label* bailout);
+
+  // Tries to grow the |capacity|-length |elements| array of given |object|
+  // to store the |key| or bails out if the growing gap is too big. Returns
+  // new elements.
+  compiler::Node* TryGrowElementsCapacity(compiler::Node* object,
+                                          compiler::Node* elements,
+                                          ElementsKind kind,
+                                          compiler::Node* key,
+                                          compiler::Node* capacity,
+                                          ParameterMode mode, Label* bailout);
+
+  // Grows elements capacity of given object. Returns new elements.
+  compiler::Node* GrowElementsCapacity(
+      compiler::Node* object, compiler::Node* elements, ElementsKind from_kind,
+      ElementsKind to_kind, compiler::Node* capacity,
+      compiler::Node* new_capacity, ParameterMode mode, Label* bailout);
 
   // Allocation site manipulation
   void InitializeAllocationMemento(compiler::Node* base_allocation,
@@ -347,19 +490,67 @@
                               PrimitiveType primitive_type,
                               char const* method_name);
 
+  // Throws a TypeError for {method_name} if {value} is not of the given
+  // instance type. Returns {value}'s map.
+  compiler::Node* ThrowIfNotInstanceType(compiler::Node* context,
+                                         compiler::Node* value,
+                                         InstanceType instance_type,
+                                         char const* method_name);
+
+  // Type checks.
+  compiler::Node* IsStringInstanceType(compiler::Node* instance_type);
+  compiler::Node* IsJSReceiverInstanceType(compiler::Node* instance_type);
+
   // String helpers.
   // Load a character from a String (might flatten a ConsString).
   compiler::Node* StringCharCodeAt(compiler::Node* string,
                                    compiler::Node* smi_index);
   // Return the single character string with only {code}.
   compiler::Node* StringFromCharCode(compiler::Node* code);
+  // Return a new string object which holds a substring containing the range
+  // [from,to[ of string.  |from| and |to| are expected to be tagged.
+  compiler::Node* SubString(compiler::Node* context, compiler::Node* string,
+                            compiler::Node* from, compiler::Node* to);
 
-  // Returns a node that is true if the given bit is set in |word32|.
+  compiler::Node* StringFromCodePoint(compiler::Node* codepoint,
+                                      UnicodeEncoding encoding);
+
+  // Type conversion helpers.
+  // Convert a String to a Number.
+  compiler::Node* StringToNumber(compiler::Node* context,
+                                 compiler::Node* input);
+  // Convert an object to a name.
+  compiler::Node* ToName(compiler::Node* context, compiler::Node* input);
+  // Convert a Non-Number object to a Number.
+  compiler::Node* NonNumberToNumber(compiler::Node* context,
+                                    compiler::Node* input);
+  // Convert any object to a Number.
+  compiler::Node* ToNumber(compiler::Node* context, compiler::Node* input);
+
+  enum ToIntegerTruncationMode {
+    kNoTruncation,
+    kTruncateMinusZero,
+  };
+
+  // Convert any object to an Integer.
+  compiler::Node* ToInteger(compiler::Node* context, compiler::Node* input,
+                            ToIntegerTruncationMode mode = kNoTruncation);
+
+  // Returns a node that contains a decoded (unsigned!) value of a bit
+  // field |T| in |word32|. Returns result as an uint32 node.
   template <typename T>
   compiler::Node* BitFieldDecode(compiler::Node* word32) {
     return BitFieldDecode(word32, T::kShift, T::kMask);
   }
 
+  // Returns a node that contains a decoded (unsigned!) value of a bit
+  // field |T| in |word32|. Returns result as a word-size node.
+  template <typename T>
+  compiler::Node* BitFieldDecodeWord(compiler::Node* word32) {
+    return ChangeUint32ToWord(BitFieldDecode<T>(word32));
+  }
+
+  // Decodes an unsigned (!) value from |word32| to an uint32 node.
   compiler::Node* BitFieldDecode(compiler::Node* word32, uint32_t shift,
                                  uint32_t mask);
 
@@ -399,9 +590,9 @@
   compiler::Node* ComputeIntegerHash(compiler::Node* key, compiler::Node* seed);
 
   template <typename Dictionary>
-  void NumberDictionaryLookup(compiler::Node* dictionary, compiler::Node* key,
-                              Label* if_found, Variable* var_entry,
-                              Label* if_not_found);
+  void NumberDictionaryLookup(compiler::Node* dictionary,
+                              compiler::Node* intptr_index, Label* if_found,
+                              Variable* var_entry, Label* if_not_found);
 
   // Tries to check if {object} has own {unique_name} property.
   void TryHasOwnProperty(compiler::Node* object, compiler::Node* map,
@@ -454,9 +645,9 @@
                          Label* if_not_found, Label* if_bailout);
 
   void TryLookupElement(compiler::Node* object, compiler::Node* map,
-                        compiler::Node* instance_type, compiler::Node* index,
-                        Label* if_found, Label* if_not_found,
-                        Label* if_bailout);
+                        compiler::Node* instance_type,
+                        compiler::Node* intptr_index, Label* if_found,
+                        Label* if_not_found, Label* if_bailout);
 
   // This is a type of a lookup in holder generator function. In case of a
   // property lookup the {key} is guaranteed to be a unique name and in case of
@@ -484,7 +675,7 @@
                                       compiler::Node* callable,
                                       compiler::Node* object);
 
-  // LoadIC helpers.
+  // Load/StoreIC helpers.
   struct LoadICParameters {
     LoadICParameters(compiler::Node* context, compiler::Node* receiver,
                      compiler::Node* name, compiler::Node* slot,
@@ -502,6 +693,15 @@
     compiler::Node* vector;
   };
 
+  struct StoreICParameters : public LoadICParameters {
+    StoreICParameters(compiler::Node* context, compiler::Node* receiver,
+                      compiler::Node* name, compiler::Node* value,
+                      compiler::Node* slot, compiler::Node* vector)
+        : LoadICParameters(context, receiver, name, slot, vector),
+          value(value) {}
+    compiler::Node* value;
+  };
+
   // Load type feedback vector from the stub caller's frame.
   compiler::Node* LoadTypeFeedbackVectorForStub();
 
@@ -513,12 +713,12 @@
   compiler::Node* LoadReceiverMap(compiler::Node* receiver);
 
   // Checks monomorphic case. Returns {feedback} entry of the vector.
-  compiler::Node* TryMonomorphicCase(const LoadICParameters* p,
+  compiler::Node* TryMonomorphicCase(compiler::Node* slot,
+                                     compiler::Node* vector,
                                      compiler::Node* receiver_map,
                                      Label* if_handler, Variable* var_handler,
                                      Label* if_miss);
-  void HandlePolymorphicCase(const LoadICParameters* p,
-                             compiler::Node* receiver_map,
+  void HandlePolymorphicCase(compiler::Node* receiver_map,
                              compiler::Node* feedback, Label* if_handler,
                              Variable* var_handler, Label* if_miss,
                              int unroll_count);
@@ -543,9 +743,75 @@
                          compiler::Node* name, Label* if_handler,
                          Variable* var_handler, Label* if_miss);
 
+  // Extends properties backing store by JSObject::kFieldsAdded elements.
+  void ExtendPropertiesBackingStore(compiler::Node* object);
+
+  compiler::Node* PrepareValueForWrite(compiler::Node* value,
+                                       Representation representation,
+                                       Label* bailout);
+
+  void StoreNamedField(compiler::Node* object, FieldIndex index,
+                       Representation representation, compiler::Node* value,
+                       bool transition_to_field);
+
+  void StoreNamedField(compiler::Node* object, compiler::Node* offset,
+                       bool is_inobject, Representation representation,
+                       compiler::Node* value, bool transition_to_field);
+
+  // Emits keyed sloppy arguments load. Returns either the loaded value.
+  compiler::Node* LoadKeyedSloppyArguments(compiler::Node* receiver,
+                                           compiler::Node* key,
+                                           Label* bailout) {
+    return EmitKeyedSloppyArguments(receiver, key, nullptr, bailout);
+  }
+
+  // Emits keyed sloppy arguments store.
+  void StoreKeyedSloppyArguments(compiler::Node* receiver, compiler::Node* key,
+                                 compiler::Node* value, Label* bailout) {
+    DCHECK_NOT_NULL(value);
+    EmitKeyedSloppyArguments(receiver, key, value, bailout);
+  }
+
+  // Loads script context from the script context table.
+  compiler::Node* LoadScriptContext(compiler::Node* context, int context_index);
+
+  compiler::Node* ClampedToUint8(compiler::Node* int32_value);
+
+  // Store value to an elements array with given elements kind.
+  void StoreElement(compiler::Node* elements, ElementsKind kind,
+                    compiler::Node* index, compiler::Node* value,
+                    ParameterMode mode);
+
+  void EmitElementStore(compiler::Node* object, compiler::Node* key,
+                        compiler::Node* value, bool is_jsarray,
+                        ElementsKind elements_kind,
+                        KeyedAccessStoreMode store_mode, Label* bailout);
+
+  compiler::Node* CheckForCapacityGrow(compiler::Node* object,
+                                       compiler::Node* elements,
+                                       ElementsKind kind,
+                                       compiler::Node* length,
+                                       compiler::Node* key, ParameterMode mode,
+                                       bool is_js_array, Label* bailout);
+
+  compiler::Node* CopyElementsOnWrite(compiler::Node* object,
+                                      compiler::Node* elements,
+                                      ElementsKind kind, compiler::Node* length,
+                                      ParameterMode mode, Label* bailout);
+
   void LoadIC(const LoadICParameters* p);
   void LoadGlobalIC(const LoadICParameters* p);
   void KeyedLoadIC(const LoadICParameters* p);
+  void KeyedLoadICGeneric(const LoadICParameters* p);
+  void StoreIC(const StoreICParameters* p);
+
+  void TransitionElementsKind(compiler::Node* object, compiler::Node* map,
+                              ElementsKind from_kind, ElementsKind to_kind,
+                              bool is_jsarray, Label* bailout);
+
+  void TrapAllocationMemento(compiler::Node* object, Label* memento_found);
+
+  compiler::Node* PageFromAddress(compiler::Node* address);
 
   // Get the enumerable length from |map| and return the result as a Smi.
   compiler::Node* EnumLength(compiler::Node* map);
@@ -562,9 +828,13 @@
       compiler::Node* feedback_vector, compiler::Node* slot,
       compiler::Node* value);
 
-  compiler::Node* GetFixedAarrayAllocationSize(compiler::Node* element_count,
-                                               ElementsKind kind,
-                                               ParameterMode mode) {
+  // Create a new AllocationSite and install it into a feedback vector.
+  compiler::Node* CreateAllocationSiteInFeedbackVector(
+      compiler::Node* feedback_vector, compiler::Node* slot);
+
+  compiler::Node* GetFixedArrayAllocationSize(compiler::Node* element_count,
+                                              ElementsKind kind,
+                                              ParameterMode mode) {
     return ElementOffsetFromIndex(element_count, kind, mode,
                                   FixedArray::kHeaderSize);
   }
@@ -572,17 +842,34 @@
  private:
   enum ElementSupport { kOnlyProperties, kSupportElements };
 
+  void DescriptorLookupLinear(compiler::Node* unique_name,
+                              compiler::Node* descriptors, compiler::Node* nof,
+                              Label* if_found, Variable* var_name_index,
+                              Label* if_not_found);
+  compiler::Node* CallGetterIfAccessor(compiler::Node* value,
+                                       compiler::Node* details,
+                                       compiler::Node* context,
+                                       compiler::Node* receiver,
+                                       Label* if_bailout);
+
   void HandleLoadICHandlerCase(
       const LoadICParameters* p, compiler::Node* handler, Label* miss,
       ElementSupport support_elements = kOnlyProperties);
   compiler::Node* TryToIntptr(compiler::Node* key, Label* miss);
-  void EmitBoundsCheck(compiler::Node* object, compiler::Node* elements,
-                       compiler::Node* intptr_key, compiler::Node* is_jsarray,
-                       Label* miss);
+  void EmitFastElementsBoundsCheck(compiler::Node* object,
+                                   compiler::Node* elements,
+                                   compiler::Node* intptr_index,
+                                   compiler::Node* is_jsarray_condition,
+                                   Label* miss);
   void EmitElementLoad(compiler::Node* object, compiler::Node* elements,
                        compiler::Node* elements_kind, compiler::Node* key,
-                       Label* if_hole, Label* rebox_double,
-                       Variable* var_double_value, Label* miss);
+                       compiler::Node* is_jsarray_condition, Label* if_hole,
+                       Label* rebox_double, Variable* var_double_value,
+                       Label* unimplemented_elements_kind, Label* out_of_bounds,
+                       Label* miss);
+  void BranchIfPrototypesHaveNoElements(compiler::Node* receiver_map,
+                                        Label* definitely_no_elements,
+                                        Label* possibly_elements);
 
   compiler::Node* ElementOffsetFromIndex(compiler::Node* index,
                                          ElementsKind kind, ParameterMode mode,
@@ -596,9 +883,23 @@
                                        AllocationFlags flags,
                                        compiler::Node* top_adddress,
                                        compiler::Node* limit_address);
+  // Allocate and return a JSArray of given total size in bytes with header
+  // fields initialized.
+  compiler::Node* AllocateUninitializedJSArray(ElementsKind kind,
+                                               compiler::Node* array_map,
+                                               compiler::Node* length,
+                                               compiler::Node* allocation_site,
+                                               compiler::Node* size_in_bytes);
 
   compiler::Node* SmiShiftBitsConstant();
 
+  // Emits keyed sloppy arguments load if the |value| is nullptr or store
+  // otherwise. Returns either the loaded value or |value|.
+  compiler::Node* EmitKeyedSloppyArguments(compiler::Node* receiver,
+                                           compiler::Node* key,
+                                           compiler::Node* value,
+                                           Label* bailout);
+
   static const int kElementLoopUnrollThreshold = 8;
 };
 
diff --git a/src/code-stubs-hydrogen.cc b/src/code-stubs-hydrogen.cc
index fa7a49e..a294d56 100644
--- a/src/code-stubs-hydrogen.cc
+++ b/src/code-stubs-hydrogen.cc
@@ -7,6 +7,7 @@
 #include <memory>
 
 #include "src/bailout-reason.h"
+#include "src/code-factory.h"
 #include "src/crankshaft/hydrogen.h"
 #include "src/crankshaft/lithium.h"
 #include "src/field-index.h"
@@ -37,7 +38,7 @@
 class CodeStubGraphBuilderBase : public HGraphBuilder {
  public:
   explicit CodeStubGraphBuilderBase(CompilationInfo* info, CodeStub* code_stub)
-      : HGraphBuilder(info, code_stub->GetCallInterfaceDescriptor()),
+      : HGraphBuilder(info, code_stub->GetCallInterfaceDescriptor(), false),
         arguments_length_(NULL),
         info_(info),
         code_stub_(code_stub),
@@ -59,7 +60,8 @@
     return parameters_[parameter];
   }
   Representation GetParameterRepresentation(int parameter) {
-    return RepresentationFromType(descriptor_.GetParameterType(parameter));
+    return RepresentationFromMachineType(
+        descriptor_.GetParameterType(parameter));
   }
   bool IsParameterCountRegister(int index) const {
     return descriptor_.GetRegisterParameter(index)
@@ -83,10 +85,6 @@
   HValue* BuildPushElement(HValue* object, HValue* argc,
                            HValue* argument_elements, ElementsKind kind);
 
-  HValue* UnmappedCase(HValue* elements, HValue* key, HValue* value);
-  HValue* EmitKeyedSloppyArguments(HValue* receiver, HValue* key,
-                                   HValue* value);
-
   HValue* BuildToString(HValue* input, bool convert);
   HValue* BuildToPrimitive(HValue* input, HValue* input_map);
 
@@ -129,8 +127,8 @@
                               HParameter::STACK_PARAMETER, r);
     } else {
       param = Add<HParameter>(i, HParameter::REGISTER_PARAMETER, r);
-      start_environment->Bind(i, param);
     }
+    start_environment->Bind(i, param);
     parameters_[i] = param;
     if (i < register_param_count && IsParameterCountRegister(i)) {
       param->set_type(HType::Smi());
@@ -334,7 +332,7 @@
 HValue* CodeStubGraphBuilder<NumberToStringStub>::BuildCodeStub() {
   info()->MarkAsSavesCallerDoubles();
   HValue* number = GetParameter(Descriptor::kArgument);
-  return BuildNumberToString(number, Type::Number());
+  return BuildNumberToString(number, AstType::Number());
 }
 
 
@@ -342,119 +340,6 @@
   return DoGenerateCode(this);
 }
 
-
-template <>
-HValue* CodeStubGraphBuilder<FastCloneShallowArrayStub>::BuildCodeStub() {
-  Factory* factory = isolate()->factory();
-  HValue* undefined = graph()->GetConstantUndefined();
-  AllocationSiteMode alloc_site_mode = casted_stub()->allocation_site_mode();
-  HValue* closure = GetParameter(Descriptor::kClosure);
-  HValue* literal_index = GetParameter(Descriptor::kLiteralIndex);
-
-  // TODO(turbofan): This codestub has regressed to need a frame on ia32 at some
-  // point and wasn't caught since it wasn't built in the snapshot. We should
-  // probably just replace with a TurboFan stub rather than fixing it.
-#if !(V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X87)
-  // This stub is very performance sensitive, the generated code must be tuned
-  // so that it doesn't build and eager frame.
-  info()->MarkMustNotHaveEagerFrame();
-#endif
-
-  HValue* literals_array = Add<HLoadNamedField>(
-      closure, nullptr, HObjectAccess::ForLiteralsPointer());
-
-  HInstruction* allocation_site = Add<HLoadKeyed>(
-      literals_array, literal_index, nullptr, nullptr, FAST_ELEMENTS,
-      NEVER_RETURN_HOLE, LiteralsArray::kOffsetToFirstLiteral - kHeapObjectTag);
-  IfBuilder checker(this);
-  checker.IfNot<HCompareObjectEqAndBranch, HValue*>(allocation_site,
-                                                    undefined);
-  checker.Then();
-
-  HObjectAccess access = HObjectAccess::ForAllocationSiteOffset(
-      AllocationSite::kTransitionInfoOffset);
-  HInstruction* boilerplate =
-      Add<HLoadNamedField>(allocation_site, nullptr, access);
-  HValue* elements = AddLoadElements(boilerplate);
-  HValue* capacity = AddLoadFixedArrayLength(elements);
-  IfBuilder zero_capacity(this);
-  zero_capacity.If<HCompareNumericAndBranch>(capacity, graph()->GetConstant0(),
-                                           Token::EQ);
-  zero_capacity.Then();
-  Push(BuildCloneShallowArrayEmpty(boilerplate,
-                                   allocation_site,
-                                   alloc_site_mode));
-  zero_capacity.Else();
-  IfBuilder if_fixed_cow(this);
-  if_fixed_cow.If<HCompareMap>(elements, factory->fixed_cow_array_map());
-  if_fixed_cow.Then();
-  Push(BuildCloneShallowArrayCow(boilerplate,
-                                 allocation_site,
-                                 alloc_site_mode,
-                                 FAST_ELEMENTS));
-  if_fixed_cow.Else();
-  IfBuilder if_fixed(this);
-  if_fixed.If<HCompareMap>(elements, factory->fixed_array_map());
-  if_fixed.Then();
-  Push(BuildCloneShallowArrayNonEmpty(boilerplate,
-                                      allocation_site,
-                                      alloc_site_mode,
-                                      FAST_ELEMENTS));
-
-  if_fixed.Else();
-  Push(BuildCloneShallowArrayNonEmpty(boilerplate,
-                                      allocation_site,
-                                      alloc_site_mode,
-                                      FAST_DOUBLE_ELEMENTS));
-  if_fixed.End();
-  if_fixed_cow.End();
-  zero_capacity.End();
-
-  checker.ElseDeopt(DeoptimizeReason::kUninitializedBoilerplateLiterals);
-  checker.End();
-
-  return environment()->Pop();
-}
-
-
-Handle<Code> FastCloneShallowArrayStub::GenerateCode() {
-  return DoGenerateCode(this);
-}
-
-template <>
-HValue* CodeStubGraphBuilder<LoadScriptContextFieldStub>::BuildCodeStub() {
-  int context_index = casted_stub()->context_index();
-  int slot_index = casted_stub()->slot_index();
-
-  HValue* script_context = BuildGetScriptContext(context_index);
-  return Add<HLoadNamedField>(script_context, nullptr,
-                              HObjectAccess::ForContextSlot(slot_index));
-}
-
-
-Handle<Code> LoadScriptContextFieldStub::GenerateCode() {
-  return DoGenerateCode(this);
-}
-
-
-template <>
-HValue* CodeStubGraphBuilder<StoreScriptContextFieldStub>::BuildCodeStub() {
-  int context_index = casted_stub()->context_index();
-  int slot_index = casted_stub()->slot_index();
-
-  HValue* script_context = BuildGetScriptContext(context_index);
-  Add<HStoreNamedField>(script_context,
-                        HObjectAccess::ForContextSlot(slot_index),
-                        GetParameter(2), STORE_TO_INITIALIZED_ENTRY);
-  // TODO(ishell): Remove this unused stub.
-  return GetParameter(2);
-}
-
-
-Handle<Code> StoreScriptContextFieldStub::GenerateCode() {
-  return DoGenerateCode(this);
-}
-
 HValue* CodeStubGraphBuilderBase::BuildPushElement(HValue* object, HValue* argc,
                                                    HValue* argument_elements,
                                                    ElementsKind kind) {
@@ -505,6 +390,7 @@
 HValue* CodeStubGraphBuilder<FastArrayPushStub>::BuildCodeStub() {
   // TODO(verwaest): Fix deoptimizer messages.
   HValue* argc = GetArgumentsLength();
+
   HInstruction* argument_elements = Add<HArgumentsElements>(false, false);
   HInstruction* object = Add<HAccessArgumentsAt>(argument_elements, argc,
                                                  graph()->GetConstantMinus1());
@@ -904,155 +790,6 @@
 Handle<Code> LoadConstantStub::GenerateCode() { return DoGenerateCode(this); }
 
 
-HValue* CodeStubGraphBuilderBase::UnmappedCase(HValue* elements, HValue* key,
-                                               HValue* value) {
-  HValue* result = NULL;
-  HInstruction* backing_store =
-      Add<HLoadKeyed>(elements, graph()->GetConstant1(), nullptr, nullptr,
-                      FAST_ELEMENTS, ALLOW_RETURN_HOLE);
-  Add<HCheckMaps>(backing_store, isolate()->factory()->fixed_array_map());
-  HValue* backing_store_length = Add<HLoadNamedField>(
-      backing_store, nullptr, HObjectAccess::ForFixedArrayLength());
-  IfBuilder in_unmapped_range(this);
-  in_unmapped_range.If<HCompareNumericAndBranch>(key, backing_store_length,
-                                                 Token::LT);
-  in_unmapped_range.Then();
-  {
-    if (value == NULL) {
-      result = Add<HLoadKeyed>(backing_store, key, nullptr, nullptr,
-                               FAST_HOLEY_ELEMENTS, NEVER_RETURN_HOLE);
-    } else {
-      Add<HStoreKeyed>(backing_store, key, value, nullptr, FAST_HOLEY_ELEMENTS);
-    }
-  }
-  in_unmapped_range.ElseDeopt(DeoptimizeReason::kOutsideOfRange);
-  in_unmapped_range.End();
-  return result;
-}
-
-
-HValue* CodeStubGraphBuilderBase::EmitKeyedSloppyArguments(HValue* receiver,
-                                                           HValue* key,
-                                                           HValue* value) {
-  // Mapped arguments are actual arguments. Unmapped arguments are values added
-  // to the arguments object after it was created for the call. Mapped arguments
-  // are stored in the context at indexes given by elements[key + 2]. Unmapped
-  // arguments are stored as regular indexed properties in the arguments array,
-  // held at elements[1]. See NewSloppyArguments() in runtime.cc for a detailed
-  // look at argument object construction.
-  //
-  // The sloppy arguments elements array has a special format:
-  //
-  // 0: context
-  // 1: unmapped arguments array
-  // 2: mapped_index0,
-  // 3: mapped_index1,
-  // ...
-  //
-  // length is 2 + min(number_of_actual_arguments, number_of_formal_arguments).
-  // If key + 2 >= elements.length then attempt to look in the unmapped
-  // arguments array (given by elements[1]) and return the value at key, missing
-  // to the runtime if the unmapped arguments array is not a fixed array or if
-  // key >= unmapped_arguments_array.length.
-  //
-  // Otherwise, t = elements[key + 2]. If t is the hole, then look up the value
-  // in the unmapped arguments array, as described above. Otherwise, t is a Smi
-  // index into the context array given at elements[0]. Return the value at
-  // context[t].
-
-  bool is_load = value == NULL;
-
-  key = AddUncasted<HForceRepresentation>(key, Representation::Smi());
-  IfBuilder positive_smi(this);
-  positive_smi.If<HCompareNumericAndBranch>(key, graph()->GetConstant0(),
-                                            Token::LT);
-  positive_smi.ThenDeopt(DeoptimizeReason::kKeyIsNegative);
-  positive_smi.End();
-
-  HValue* constant_two = Add<HConstant>(2);
-  HValue* elements = AddLoadElements(receiver, nullptr);
-  HValue* elements_length = Add<HLoadNamedField>(
-      elements, nullptr, HObjectAccess::ForFixedArrayLength());
-  HValue* adjusted_length = AddUncasted<HSub>(elements_length, constant_two);
-  IfBuilder in_range(this);
-  in_range.If<HCompareNumericAndBranch>(key, adjusted_length, Token::LT);
-  in_range.Then();
-  {
-    HValue* index = AddUncasted<HAdd>(key, constant_two);
-    HInstruction* mapped_index =
-        Add<HLoadKeyed>(elements, index, nullptr, nullptr, FAST_HOLEY_ELEMENTS,
-                        ALLOW_RETURN_HOLE);
-
-    IfBuilder is_valid(this);
-    is_valid.IfNot<HCompareObjectEqAndBranch>(mapped_index,
-                                              graph()->GetConstantHole());
-    is_valid.Then();
-    {
-      // TODO(mvstanton): I'd like to assert from this point, that if the
-      // mapped_index is not the hole that it is indeed, a smi. An unnecessary
-      // smi check is being emitted.
-      HValue* the_context = Add<HLoadKeyed>(elements, graph()->GetConstant0(),
-                                            nullptr, nullptr, FAST_ELEMENTS);
-      STATIC_ASSERT(Context::kHeaderSize == FixedArray::kHeaderSize);
-      if (is_load) {
-        HValue* result =
-            Add<HLoadKeyed>(the_context, mapped_index, nullptr, nullptr,
-                            FAST_ELEMENTS, ALLOW_RETURN_HOLE);
-        environment()->Push(result);
-      } else {
-        DCHECK(value != NULL);
-        Add<HStoreKeyed>(the_context, mapped_index, value, nullptr,
-                         FAST_ELEMENTS);
-        environment()->Push(value);
-      }
-    }
-    is_valid.Else();
-    {
-      HValue* result = UnmappedCase(elements, key, value);
-      environment()->Push(is_load ? result : value);
-    }
-    is_valid.End();
-  }
-  in_range.Else();
-  {
-    HValue* result = UnmappedCase(elements, key, value);
-    environment()->Push(is_load ? result : value);
-  }
-  in_range.End();
-
-  return environment()->Pop();
-}
-
-
-template <>
-HValue* CodeStubGraphBuilder<KeyedLoadSloppyArgumentsStub>::BuildCodeStub() {
-  HValue* receiver = GetParameter(Descriptor::kReceiver);
-  HValue* key = GetParameter(Descriptor::kName);
-
-  return EmitKeyedSloppyArguments(receiver, key, NULL);
-}
-
-
-Handle<Code> KeyedLoadSloppyArgumentsStub::GenerateCode() {
-  return DoGenerateCode(this);
-}
-
-
-template <>
-HValue* CodeStubGraphBuilder<KeyedStoreSloppyArgumentsStub>::BuildCodeStub() {
-  HValue* receiver = GetParameter(Descriptor::kReceiver);
-  HValue* key = GetParameter(Descriptor::kName);
-  HValue* value = GetParameter(Descriptor::kValue);
-
-  return EmitKeyedSloppyArguments(receiver, key, value);
-}
-
-
-Handle<Code> KeyedStoreSloppyArgumentsStub::GenerateCode() {
-  return DoGenerateCode(this);
-}
-
-
 void CodeStubGraphBuilderBase::BuildStoreNamedField(
     HValue* object, HValue* value, FieldIndex index,
     Representation representation, bool transition_to_field) {
@@ -1099,99 +836,6 @@
 
 
 template <>
-HValue* CodeStubGraphBuilder<StoreFieldStub>::BuildCodeStub() {
-  BuildStoreNamedField(GetParameter(Descriptor::kReceiver),
-                       GetParameter(Descriptor::kValue), casted_stub()->index(),
-                       casted_stub()->representation(), false);
-  return GetParameter(Descriptor::kValue);
-}
-
-
-Handle<Code> StoreFieldStub::GenerateCode() { return DoGenerateCode(this); }
-
-
-template <>
-HValue* CodeStubGraphBuilder<StoreTransitionStub>::BuildCodeStub() {
-  HValue* object = GetParameter(StoreTransitionHelper::ReceiverIndex());
-  HValue* value = GetParameter(StoreTransitionHelper::ValueIndex());
-  StoreTransitionStub::StoreMode store_mode = casted_stub()->store_mode();
-
-  if (store_mode != StoreTransitionStub::StoreMapOnly) {
-    value = GetParameter(StoreTransitionHelper::ValueIndex());
-    Representation representation = casted_stub()->representation();
-    if (representation.IsDouble()) {
-      // In case we are storing a double, assure that the value is a double
-      // before manipulating the properties backing store. Otherwise the actual
-      // store may deopt, leaving the backing store in an overallocated state.
-      value = AddUncasted<HForceRepresentation>(value, representation);
-    }
-  }
-
-  switch (store_mode) {
-    case StoreTransitionStub::ExtendStorageAndStoreMapAndValue: {
-      HValue* properties = Add<HLoadNamedField>(
-          object, nullptr, HObjectAccess::ForPropertiesPointer());
-      HValue* length = AddLoadFixedArrayLength(properties);
-      HValue* delta =
-          Add<HConstant>(static_cast<int32_t>(JSObject::kFieldsAdded));
-      HValue* new_capacity = AddUncasted<HAdd>(length, delta);
-
-      // Grow properties array.
-      ElementsKind kind = FAST_ELEMENTS;
-      Add<HBoundsCheck>(new_capacity,
-                        Add<HConstant>((Page::kMaxRegularHeapObjectSize -
-                                        FixedArray::kHeaderSize) >>
-                                       ElementsKindToShiftSize(kind)));
-
-      // Reuse this code for properties backing store allocation.
-      HValue* new_properties =
-          BuildAllocateAndInitializeArray(kind, new_capacity);
-
-      BuildCopyProperties(properties, new_properties, length, new_capacity);
-
-      Add<HStoreNamedField>(object, HObjectAccess::ForPropertiesPointer(),
-                            new_properties);
-    }
-    // Fall through.
-    case StoreTransitionStub::StoreMapAndValue:
-      // Store the new value into the "extended" object.
-      BuildStoreNamedField(object, value, casted_stub()->index(),
-                           casted_stub()->representation(), true);
-    // Fall through.
-
-    case StoreTransitionStub::StoreMapOnly:
-      // And finally update the map.
-      Add<HStoreNamedField>(object, HObjectAccess::ForMap(),
-                            GetParameter(StoreTransitionHelper::MapIndex()));
-      break;
-  }
-  return value;
-}
-
-
-Handle<Code> StoreTransitionStub::GenerateCode() {
-  return DoGenerateCode(this);
-}
-
-
-template <>
-HValue* CodeStubGraphBuilder<StoreFastElementStub>::BuildCodeStub() {
-  BuildUncheckedMonomorphicElementAccess(
-      GetParameter(Descriptor::kReceiver), GetParameter(Descriptor::kName),
-      GetParameter(Descriptor::kValue), casted_stub()->is_js_array(),
-      casted_stub()->elements_kind(), STORE, NEVER_RETURN_HOLE,
-      casted_stub()->store_mode());
-
-  return GetParameter(Descriptor::kValue);
-}
-
-
-Handle<Code> StoreFastElementStub::GenerateCode() {
-  return DoGenerateCode(this);
-}
-
-
-template <>
 HValue* CodeStubGraphBuilder<TransitionElementsKindStub>::BuildCodeStub() {
   ElementsKind const from_kind = casted_stub()->from_kind();
   ElementsKind const to_kind = casted_stub()->to_kind();
@@ -1262,26 +906,26 @@
   HValue* left = GetParameter(Descriptor::kLeft);
   HValue* right = GetParameter(Descriptor::kRight);
 
-  Type* left_type = state.GetLeftType();
-  Type* right_type = state.GetRightType();
-  Type* result_type = state.GetResultType();
+  AstType* left_type = state.GetLeftType();
+  AstType* right_type = state.GetRightType();
+  AstType* result_type = state.GetResultType();
 
-  DCHECK(!left_type->Is(Type::None()) && !right_type->Is(Type::None()) &&
-         (state.HasSideEffects() || !result_type->Is(Type::None())));
+  DCHECK(!left_type->Is(AstType::None()) && !right_type->Is(AstType::None()) &&
+         (state.HasSideEffects() || !result_type->Is(AstType::None())));
 
   HValue* result = NULL;
   HAllocationMode allocation_mode(NOT_TENURED);
-  if (state.op() == Token::ADD &&
-      (left_type->Maybe(Type::String()) || right_type->Maybe(Type::String())) &&
-      !left_type->Is(Type::String()) && !right_type->Is(Type::String())) {
+  if (state.op() == Token::ADD && (left_type->Maybe(AstType::String()) ||
+                                   right_type->Maybe(AstType::String())) &&
+      !left_type->Is(AstType::String()) && !right_type->Is(AstType::String())) {
     // For the generic add stub a fast case for string addition is performance
     // critical.
-    if (left_type->Maybe(Type::String())) {
+    if (left_type->Maybe(AstType::String())) {
       IfBuilder if_leftisstring(this);
       if_leftisstring.If<HIsStringAndBranch>(left);
       if_leftisstring.Then();
       {
-        Push(BuildBinaryOperation(state.op(), left, right, Type::String(),
+        Push(BuildBinaryOperation(state.op(), left, right, AstType::String(),
                                   right_type, result_type,
                                   state.fixed_right_arg(), allocation_mode));
       }
@@ -1299,7 +943,7 @@
       if_rightisstring.Then();
       {
         Push(BuildBinaryOperation(state.op(), left, right, left_type,
-                                  Type::String(), result_type,
+                                  AstType::String(), result_type,
                                   state.fixed_right_arg(), allocation_mode));
       }
       if_rightisstring.Else();
@@ -1340,9 +984,9 @@
   HValue* left = GetParameter(Descriptor::kLeft);
   HValue* right = GetParameter(Descriptor::kRight);
 
-  Type* left_type = state.GetLeftType();
-  Type* right_type = state.GetRightType();
-  Type* result_type = state.GetResultType();
+  AstType* left_type = state.GetLeftType();
+  AstType* right_type = state.GetRightType();
+  AstType* result_type = state.GetResultType();
   HAllocationMode allocation_mode(allocation_site);
 
   return BuildBinaryOperation(state.op(), left, right, left_type, right_type,
@@ -1363,7 +1007,7 @@
   if_inputissmi.Then();
   {
     // Convert the input smi to a string.
-    Push(BuildNumberToString(input, Type::SignedSmall()));
+    Push(BuildNumberToString(input, AstType::SignedSmall()));
   }
   if_inputissmi.Else();
   {
@@ -1399,10 +1043,10 @@
       }
       if_inputisprimitive.End();
       // Convert the primitive to a string value.
-      ToStringStub stub(isolate());
       HValue* values[] = {context(), Pop()};
-      Push(AddUncasted<HCallWithDescriptor>(Add<HConstant>(stub.GetCode()), 0,
-                                            stub.GetCallInterfaceDescriptor(),
+      Callable toString = CodeFactory::ToString(isolate());
+      Push(AddUncasted<HCallWithDescriptor>(Add<HConstant>(toString.code()), 0,
+                                            toString.descriptor(),
                                             ArrayVector(values)));
     }
     if_inputisstring.End();
@@ -1531,134 +1175,6 @@
 Handle<Code> ToBooleanICStub::GenerateCode() { return DoGenerateCode(this); }
 
 template <>
-HValue* CodeStubGraphBuilder<StoreGlobalStub>::BuildCodeInitializedStub() {
-  StoreGlobalStub* stub = casted_stub();
-  HParameter* value = GetParameter(Descriptor::kValue);
-  if (stub->check_global()) {
-    // Check that the map of the global has not changed: use a placeholder map
-    // that will be replaced later with the global object's map.
-    HParameter* proxy = GetParameter(Descriptor::kReceiver);
-    HValue* proxy_map =
-        Add<HLoadNamedField>(proxy, nullptr, HObjectAccess::ForMap());
-    HValue* global =
-        Add<HLoadNamedField>(proxy_map, nullptr, HObjectAccess::ForPrototype());
-    HValue* map_cell = Add<HConstant>(isolate()->factory()->NewWeakCell(
-        StoreGlobalStub::global_map_placeholder(isolate())));
-    HValue* expected_map = Add<HLoadNamedField>(
-        map_cell, nullptr, HObjectAccess::ForWeakCellValue());
-    HValue* map =
-        Add<HLoadNamedField>(global, nullptr, HObjectAccess::ForMap());
-    IfBuilder map_check(this);
-    map_check.IfNot<HCompareObjectEqAndBranch>(expected_map, map);
-    map_check.ThenDeopt(DeoptimizeReason::kUnknownMap);
-    map_check.End();
-  }
-
-  HValue* weak_cell = Add<HConstant>(isolate()->factory()->NewWeakCell(
-      StoreGlobalStub::property_cell_placeholder(isolate())));
-  HValue* cell = Add<HLoadNamedField>(weak_cell, nullptr,
-                                      HObjectAccess::ForWeakCellValue());
-  Add<HCheckHeapObject>(cell);
-  HObjectAccess access = HObjectAccess::ForPropertyCellValue();
-  // Load the payload of the global parameter cell. A hole indicates that the
-  // cell has been invalidated and that the store must be handled by the
-  // runtime.
-  HValue* cell_contents = Add<HLoadNamedField>(cell, nullptr, access);
-
-  auto cell_type = stub->cell_type();
-  if (cell_type == PropertyCellType::kConstant ||
-      cell_type == PropertyCellType::kUndefined) {
-    // This is always valid for all states a cell can be in.
-    IfBuilder builder(this);
-    builder.If<HCompareObjectEqAndBranch>(cell_contents, value);
-    builder.Then();
-    builder.ElseDeopt(
-        DeoptimizeReason::kUnexpectedCellContentsInConstantGlobalStore);
-    builder.End();
-  } else {
-    IfBuilder builder(this);
-    HValue* hole_value = graph()->GetConstantHole();
-    builder.If<HCompareObjectEqAndBranch>(cell_contents, hole_value);
-    builder.Then();
-    builder.Deopt(DeoptimizeReason::kUnexpectedCellContentsInGlobalStore);
-    builder.Else();
-    // When dealing with constant types, the type may be allowed to change, as
-    // long as optimized code remains valid.
-    if (cell_type == PropertyCellType::kConstantType) {
-      switch (stub->constant_type()) {
-        case PropertyCellConstantType::kSmi:
-          access = access.WithRepresentation(Representation::Smi());
-          break;
-        case PropertyCellConstantType::kStableMap: {
-          // It is sufficient here to check that the value and cell contents
-          // have identical maps, no matter if they are stable or not or if they
-          // are the maps that were originally in the cell or not. If optimized
-          // code will deopt when a cell has a unstable map and if it has a
-          // dependency on a stable map, it will deopt if the map destabilizes.
-          Add<HCheckHeapObject>(value);
-          Add<HCheckHeapObject>(cell_contents);
-          HValue* expected_map = Add<HLoadNamedField>(cell_contents, nullptr,
-                                                      HObjectAccess::ForMap());
-          HValue* map =
-              Add<HLoadNamedField>(value, nullptr, HObjectAccess::ForMap());
-          IfBuilder map_check(this);
-          map_check.IfNot<HCompareObjectEqAndBranch>(expected_map, map);
-          map_check.ThenDeopt(DeoptimizeReason::kUnknownMap);
-          map_check.End();
-          access = access.WithRepresentation(Representation::HeapObject());
-          break;
-        }
-      }
-    }
-    Add<HStoreNamedField>(cell, access, value);
-    builder.End();
-  }
-
-  return value;
-}
-
-
-Handle<Code> StoreGlobalStub::GenerateCode() {
-  return DoGenerateCode(this);
-}
-
-
-template <>
-HValue* CodeStubGraphBuilder<ElementsTransitionAndStoreStub>::BuildCodeStub() {
-  HValue* object = GetParameter(StoreTransitionHelper::ReceiverIndex());
-  HValue* key = GetParameter(StoreTransitionHelper::NameIndex());
-  HValue* value = GetParameter(StoreTransitionHelper::ValueIndex());
-  HValue* map = GetParameter(StoreTransitionHelper::MapIndex());
-
-  if (FLAG_trace_elements_transitions) {
-    // Tracing elements transitions is the job of the runtime.
-    Add<HDeoptimize>(DeoptimizeReason::kTracingElementsTransitions,
-                     Deoptimizer::EAGER);
-  } else {
-    info()->MarkAsSavesCallerDoubles();
-
-    BuildTransitionElementsKind(object, map,
-                                casted_stub()->from_kind(),
-                                casted_stub()->to_kind(),
-                                casted_stub()->is_jsarray());
-
-    BuildUncheckedMonomorphicElementAccess(object, key, value,
-                                           casted_stub()->is_jsarray(),
-                                           casted_stub()->to_kind(),
-                                           STORE, ALLOW_RETURN_HOLE,
-                                           casted_stub()->store_mode());
-  }
-
-  return value;
-}
-
-
-Handle<Code> ElementsTransitionAndStoreStub::GenerateCode() {
-  return DoGenerateCode(this);
-}
-
-
-template <>
 HValue* CodeStubGraphBuilder<LoadDictionaryElementStub>::BuildCodeStub() {
   HValue* receiver = GetParameter(Descriptor::kReceiver);
   HValue* key = GetParameter(Descriptor::kName);
diff --git a/src/code-stubs.cc b/src/code-stubs.cc
index 2b71716..b899943 100644
--- a/src/code-stubs.cc
+++ b/src/code-stubs.cc
@@ -6,6 +6,7 @@
 
 #include <sstream>
 
+#include "src/ast/ast.h"
 #include "src/bootstrapper.h"
 #include "src/code-factory.h"
 #include "src/code-stub-assembler.h"
@@ -14,7 +15,6 @@
 #include "src/ic/handler-compiler.h"
 #include "src/ic/ic.h"
 #include "src/macro-assembler.h"
-#include "src/parsing/parser.h"
 
 namespace v8 {
 namespace internal {
@@ -498,6 +498,140 @@
   assembler->KeyedLoadIC(&p);
 }
 
+void StoreICTrampolineTFStub::GenerateAssembly(
+    CodeStubAssembler* assembler) const {
+  typedef compiler::Node Node;
+
+  Node* receiver = assembler->Parameter(Descriptor::kReceiver);
+  Node* name = assembler->Parameter(Descriptor::kName);
+  Node* value = assembler->Parameter(Descriptor::kValue);
+  Node* slot = assembler->Parameter(Descriptor::kSlot);
+  Node* context = assembler->Parameter(Descriptor::kContext);
+  Node* vector = assembler->LoadTypeFeedbackVectorForStub();
+
+  CodeStubAssembler::StoreICParameters p(context, receiver, name, value, slot,
+                                         vector);
+  assembler->StoreIC(&p);
+}
+
+void StoreICTFStub::GenerateAssembly(CodeStubAssembler* assembler) const {
+  typedef compiler::Node Node;
+
+  Node* receiver = assembler->Parameter(Descriptor::kReceiver);
+  Node* name = assembler->Parameter(Descriptor::kName);
+  Node* value = assembler->Parameter(Descriptor::kValue);
+  Node* slot = assembler->Parameter(Descriptor::kSlot);
+  Node* vector = assembler->Parameter(Descriptor::kVector);
+  Node* context = assembler->Parameter(Descriptor::kContext);
+
+  CodeStubAssembler::StoreICParameters p(context, receiver, name, value, slot,
+                                         vector);
+  assembler->StoreIC(&p);
+}
+
+void StoreMapStub::GenerateAssembly(CodeStubAssembler* assembler) const {
+  typedef compiler::Node Node;
+
+  Node* receiver = assembler->Parameter(Descriptor::kReceiver);
+  Node* map = assembler->Parameter(Descriptor::kMap);
+  Node* value = assembler->Parameter(Descriptor::kValue);
+
+  assembler->StoreObjectField(receiver, JSObject::kMapOffset, map);
+  assembler->Return(value);
+}
+
+void StoreTransitionStub::GenerateAssembly(CodeStubAssembler* assembler) const {
+  typedef CodeStubAssembler::Label Label;
+  typedef compiler::Node Node;
+
+  Node* receiver = assembler->Parameter(Descriptor::kReceiver);
+  Node* name = assembler->Parameter(Descriptor::kName);
+  Node* offset =
+      assembler->SmiUntag(assembler->Parameter(Descriptor::kFieldOffset));
+  Node* value = assembler->Parameter(Descriptor::kValue);
+  Node* map = assembler->Parameter(Descriptor::kMap);
+  Node* slot = assembler->Parameter(Descriptor::kSlot);
+  Node* vector = assembler->Parameter(Descriptor::kVector);
+  Node* context = assembler->Parameter(Descriptor::kContext);
+
+  Label miss(assembler);
+
+  Representation representation = this->representation();
+  assembler->Comment("StoreTransitionStub: is_inobject: %d: representation: %s",
+                     is_inobject(), representation.Mnemonic());
+
+  Node* prepared_value =
+      assembler->PrepareValueForWrite(value, representation, &miss);
+
+  if (store_mode() == StoreTransitionStub::ExtendStorageAndStoreMapAndValue) {
+    assembler->Comment("Extend storage");
+    assembler->ExtendPropertiesBackingStore(receiver);
+  } else {
+    DCHECK(store_mode() == StoreTransitionStub::StoreMapAndValue);
+  }
+
+  // Store the new value into the "extended" object.
+  assembler->Comment("Store value");
+  assembler->StoreNamedField(receiver, offset, is_inobject(), representation,
+                             prepared_value, true);
+
+  // And finally update the map.
+  assembler->Comment("Store map");
+  assembler->StoreObjectField(receiver, JSObject::kMapOffset, map);
+  assembler->Return(value);
+
+  // Only store to tagged field never bails out.
+  if (!representation.IsTagged()) {
+    assembler->Bind(&miss);
+    {
+      assembler->Comment("Miss");
+      assembler->TailCallRuntime(Runtime::kStoreIC_Miss, context, value, slot,
+                                 vector, receiver, name);
+    }
+  }
+}
+
+void ElementsTransitionAndStoreStub::GenerateAssembly(
+    CodeStubAssembler* assembler) const {
+  typedef CodeStubAssembler::Label Label;
+  typedef compiler::Node Node;
+
+  Node* receiver = assembler->Parameter(Descriptor::kReceiver);
+  Node* key = assembler->Parameter(Descriptor::kName);
+  Node* value = assembler->Parameter(Descriptor::kValue);
+  Node* map = assembler->Parameter(Descriptor::kMap);
+  Node* slot = assembler->Parameter(Descriptor::kSlot);
+  Node* vector = assembler->Parameter(Descriptor::kVector);
+  Node* context = assembler->Parameter(Descriptor::kContext);
+
+  assembler->Comment(
+      "ElementsTransitionAndStoreStub: from_kind=%s, to_kind=%s,"
+      " is_jsarray=%d, store_mode=%d",
+      ElementsKindToString(from_kind()), ElementsKindToString(to_kind()),
+      is_jsarray(), store_mode());
+
+  Label miss(assembler);
+
+  if (FLAG_trace_elements_transitions) {
+    // Tracing elements transitions is the job of the runtime.
+    assembler->Goto(&miss);
+  } else {
+    assembler->TransitionElementsKind(receiver, map, from_kind(), to_kind(),
+                                      is_jsarray(), &miss);
+    assembler->EmitElementStore(receiver, key, value, is_jsarray(), to_kind(),
+                                store_mode(), &miss);
+    assembler->Return(value);
+  }
+
+  assembler->Bind(&miss);
+  {
+    assembler->Comment("Miss");
+    assembler->TailCallRuntime(Runtime::kElementsTransitionAndStoreIC_Miss,
+                               context, receiver, key, value, map, slot,
+                               vector);
+  }
+}
+
 void AllocateHeapNumberStub::GenerateAssembly(
     CodeStubAssembler* assembler) const {
   typedef compiler::Node Node;
@@ -599,9 +733,8 @@
         // Check if the {rhs} is a HeapNumber.
         Label if_rhsisnumber(assembler),
             if_rhsisnotnumber(assembler, Label::kDeferred);
-        Node* number_map = assembler->HeapNumberMapConstant();
-        assembler->Branch(assembler->WordEqual(rhs_map, number_map),
-                          &if_rhsisnumber, &if_rhsisnotnumber);
+        assembler->Branch(assembler->IsHeapNumberMap(rhs_map), &if_rhsisnumber,
+                          &if_rhsisnotnumber);
 
         assembler->Bind(&if_rhsisnumber);
         {
@@ -618,9 +751,7 @@
           // Check if the {rhs} is a String.
           Label if_rhsisstring(assembler, Label::kDeferred),
               if_rhsisnotstring(assembler, Label::kDeferred);
-          assembler->Branch(assembler->Int32LessThan(
-                                rhs_instance_type,
-                                assembler->Int32Constant(FIRST_NONSTRING_TYPE)),
+          assembler->Branch(assembler->IsStringInstanceType(rhs_instance_type),
                             &if_rhsisstring, &if_rhsisnotstring);
 
           assembler->Bind(&if_rhsisstring);
@@ -636,9 +767,7 @@
             Label if_rhsisreceiver(assembler, Label::kDeferred),
                 if_rhsisnotreceiver(assembler, Label::kDeferred);
             assembler->Branch(
-                assembler->Int32LessThanOrEqual(
-                    assembler->Int32Constant(FIRST_JS_RECEIVER_TYPE),
-                    rhs_instance_type),
+                assembler->IsJSReceiverInstanceType(rhs_instance_type),
                 &if_rhsisreceiver, &if_rhsisnotreceiver);
 
             assembler->Bind(&if_rhsisreceiver);
@@ -670,9 +799,7 @@
 
       // Check if {lhs} is a String.
       Label if_lhsisstring(assembler), if_lhsisnotstring(assembler);
-      assembler->Branch(assembler->Int32LessThan(
-                            lhs_instance_type,
-                            assembler->Int32Constant(FIRST_NONSTRING_TYPE)),
+      assembler->Branch(assembler->IsStringInstanceType(lhs_instance_type),
                         &if_lhsisstring, &if_lhsisnotstring);
 
       assembler->Bind(&if_lhsisstring);
@@ -714,9 +841,7 @@
             Label if_lhsisreceiver(assembler, Label::kDeferred),
                 if_lhsisnotreceiver(assembler, Label::kDeferred);
             assembler->Branch(
-                assembler->Int32LessThanOrEqual(
-                    assembler->Int32Constant(FIRST_JS_RECEIVER_TYPE),
-                    lhs_instance_type),
+                assembler->IsJSReceiverInstanceType(lhs_instance_type),
                 &if_lhsisreceiver, &if_lhsisnotreceiver);
 
             assembler->Bind(&if_lhsisreceiver);
@@ -746,9 +871,7 @@
 
           // Check if {rhs} is a String.
           Label if_rhsisstring(assembler), if_rhsisnotstring(assembler);
-          assembler->Branch(assembler->Int32LessThan(
-                                rhs_instance_type,
-                                assembler->Int32Constant(FIRST_NONSTRING_TYPE)),
+          assembler->Branch(assembler->IsStringInstanceType(rhs_instance_type),
                             &if_rhsisstring, &if_rhsisnotstring);
 
           assembler->Bind(&if_rhsisstring);
@@ -791,9 +914,7 @@
                 Label if_rhsisreceiver(assembler, Label::kDeferred),
                     if_rhsisnotreceiver(assembler, Label::kDeferred);
                 assembler->Branch(
-                    assembler->Int32LessThanOrEqual(
-                        assembler->Int32Constant(FIRST_JS_RECEIVER_TYPE),
-                        rhs_instance_type),
+                    assembler->IsJSReceiverInstanceType(rhs_instance_type),
                     &if_rhsisreceiver, &if_rhsisnotreceiver);
 
                 assembler->Bind(&if_rhsisreceiver);
@@ -822,9 +943,7 @@
               Label if_lhsisreceiver(assembler, Label::kDeferred),
                   if_lhsisnotreceiver(assembler);
               assembler->Branch(
-                  assembler->Int32LessThanOrEqual(
-                      assembler->Int32Constant(FIRST_JS_RECEIVER_TYPE),
-                      lhs_instance_type),
+                  assembler->IsJSReceiverInstanceType(lhs_instance_type),
                   &if_lhsisreceiver, &if_lhsisnotreceiver);
 
               assembler->Bind(&if_lhsisreceiver);
@@ -842,9 +961,7 @@
                 Label if_rhsisreceiver(assembler, Label::kDeferred),
                     if_rhsisnotreceiver(assembler, Label::kDeferred);
                 assembler->Branch(
-                    assembler->Int32LessThanOrEqual(
-                        assembler->Int32Constant(FIRST_JS_RECEIVER_TYPE),
-                        rhs_instance_type),
+                    assembler->IsJSReceiverInstanceType(rhs_instance_type),
                     &if_rhsisreceiver, &if_rhsisnotreceiver);
 
                 assembler->Bind(&if_rhsisreceiver);
@@ -917,7 +1034,7 @@
 
   // Shared entry for floating point addition.
   Label do_fadd(assembler), end(assembler),
-      call_add_stub(assembler, Label::kDeferred);
+      do_add_any(assembler, Label::kDeferred), call_add_stub(assembler);
   Variable var_fadd_lhs(assembler, MachineRepresentation::kFloat64),
       var_fadd_rhs(assembler, MachineRepresentation::kFloat64),
       var_type_feedback(assembler, MachineRepresentation::kWord32),
@@ -965,9 +1082,7 @@
       Node* rhs_map = assembler->LoadMap(rhs);
 
       // Check if the {rhs} is a HeapNumber.
-      assembler->GotoUnless(
-          assembler->WordEqual(rhs_map, assembler->HeapNumberMapConstant()),
-          &call_add_stub);
+      assembler->GotoUnless(assembler->IsHeapNumberMap(rhs_map), &do_add_any);
 
       var_fadd_lhs.Bind(assembler->SmiToFloat64(lhs));
       var_fadd_rhs.Bind(assembler->LoadHeapNumberValue(rhs));
@@ -977,14 +1092,14 @@
 
   assembler->Bind(&if_lhsisnotsmi);
   {
+    Label check_string(assembler);
+
     // Load the map of {lhs}.
     Node* lhs_map = assembler->LoadMap(lhs);
 
     // Check if {lhs} is a HeapNumber.
     Label if_lhsisnumber(assembler), if_lhsisnotnumber(assembler);
-    assembler->GotoUnless(
-        assembler->WordEqual(lhs_map, assembler->HeapNumberMapConstant()),
-        &call_add_stub);
+    assembler->GotoUnless(assembler->IsHeapNumberMap(lhs_map), &check_string);
 
     // Check if the {rhs} is Smi.
     Label if_rhsissmi(assembler), if_rhsisnotsmi(assembler);
@@ -1003,14 +1118,34 @@
       Node* rhs_map = assembler->LoadMap(rhs);
 
       // Check if the {rhs} is a HeapNumber.
-      Node* number_map = assembler->HeapNumberMapConstant();
-      assembler->GotoUnless(assembler->WordEqual(rhs_map, number_map),
-                            &call_add_stub);
+      assembler->GotoUnless(assembler->IsHeapNumberMap(rhs_map), &do_add_any);
 
       var_fadd_lhs.Bind(assembler->LoadHeapNumberValue(lhs));
       var_fadd_rhs.Bind(assembler->LoadHeapNumberValue(rhs));
       assembler->Goto(&do_fadd);
     }
+
+    assembler->Bind(&check_string);
+    {
+      // Check if the {rhs} is a smi, and exit the string check early if it is.
+      assembler->GotoIf(assembler->WordIsSmi(rhs), &do_add_any);
+
+      Node* lhs_instance_type = assembler->LoadMapInstanceType(lhs_map);
+
+      // Exit unless {lhs} is a string
+      assembler->GotoUnless(assembler->IsStringInstanceType(lhs_instance_type),
+                            &do_add_any);
+
+      Node* rhs_instance_type = assembler->LoadInstanceType(rhs);
+
+      // Exit unless {rhs} is a string
+      assembler->GotoUnless(assembler->IsStringInstanceType(rhs_instance_type),
+                            &do_add_any);
+
+      var_type_feedback.Bind(
+          assembler->Int32Constant(BinaryOperationFeedback::kString));
+      assembler->Goto(&call_add_stub);
+    }
   }
 
   assembler->Bind(&do_fadd);
@@ -1024,10 +1159,15 @@
     assembler->Goto(&end);
   }
 
-  assembler->Bind(&call_add_stub);
+  assembler->Bind(&do_add_any);
   {
     var_type_feedback.Bind(
         assembler->Int32Constant(BinaryOperationFeedback::kAny));
+    assembler->Goto(&call_add_stub);
+  }
+
+  assembler->Bind(&call_add_stub);
+  {
     Callable callable = CodeFactory::Add(assembler->isolate());
     var_result.Bind(assembler->CallStub(callable, context, lhs, rhs));
     assembler->Goto(&end);
@@ -1111,9 +1251,8 @@
         // Check if {rhs} is a HeapNumber.
         Label if_rhsisnumber(assembler),
             if_rhsisnotnumber(assembler, Label::kDeferred);
-        Node* number_map = assembler->HeapNumberMapConstant();
-        assembler->Branch(assembler->WordEqual(rhs_map, number_map),
-                          &if_rhsisnumber, &if_rhsisnotnumber);
+        assembler->Branch(assembler->IsHeapNumberMap(rhs_map), &if_rhsisnumber,
+                          &if_rhsisnotnumber);
 
         assembler->Bind(&if_rhsisnumber);
         {
@@ -1274,9 +1413,8 @@
       Node* rhs_map = assembler->LoadMap(rhs);
 
       // Check if {rhs} is a HeapNumber.
-      assembler->GotoUnless(
-          assembler->WordEqual(rhs_map, assembler->HeapNumberMapConstant()),
-          &call_subtract_stub);
+      assembler->GotoUnless(assembler->IsHeapNumberMap(rhs_map),
+                            &call_subtract_stub);
 
       // Perform a floating point subtraction.
       var_fsub_lhs.Bind(assembler->SmiToFloat64(lhs));
@@ -1291,9 +1429,8 @@
     Node* lhs_map = assembler->LoadMap(lhs);
 
     // Check if the {lhs} is a HeapNumber.
-    assembler->GotoUnless(
-        assembler->WordEqual(lhs_map, assembler->HeapNumberMapConstant()),
-        &call_subtract_stub);
+    assembler->GotoUnless(assembler->IsHeapNumberMap(lhs_map),
+                          &call_subtract_stub);
 
     // Check if the {rhs} is a Smi.
     Label if_rhsissmi(assembler), if_rhsisnotsmi(assembler);
@@ -1313,9 +1450,8 @@
       Node* rhs_map = assembler->LoadMap(rhs);
 
       // Check if the {rhs} is a HeapNumber.
-      assembler->GotoUnless(
-          assembler->WordEqual(rhs_map, assembler->HeapNumberMapConstant()),
-          &call_subtract_stub);
+      assembler->GotoUnless(assembler->IsHeapNumberMap(rhs_map),
+                            &call_subtract_stub);
 
       // Perform a floating point subtraction.
       var_fsub_lhs.Bind(assembler->LoadHeapNumberValue(lhs));
@@ -1713,7 +1849,7 @@
         Node* untagged_result =
             assembler->Int32Div(untagged_dividend, untagged_divisor);
         Node* truncated =
-            assembler->IntPtrMul(untagged_result, untagged_divisor);
+            assembler->Int32Mul(untagged_result, untagged_divisor);
         // Do floating point division if the remainder is not 0.
         assembler->GotoIf(
             assembler->Word32NotEqual(untagged_dividend, truncated), &bailout);
@@ -1916,7 +2052,7 @@
 
       Node* untagged_result =
           assembler->Int32Div(untagged_dividend, untagged_divisor);
-      Node* truncated = assembler->IntPtrMul(untagged_result, untagged_divisor);
+      Node* truncated = assembler->Int32Mul(untagged_result, untagged_divisor);
       // Do floating point division if the remainder is not 0.
       assembler->GotoIf(assembler->Word32NotEqual(untagged_dividend, truncated),
                         &bailout);
@@ -2441,8 +2577,7 @@
       Label if_valueisnumber(assembler),
           if_valuenotnumber(assembler, Label::kDeferred);
       Node* value_map = assembler->LoadMap(value);
-      Node* number_map = assembler->HeapNumberMapConstant();
-      assembler->Branch(assembler->WordEqual(value_map, number_map),
+      assembler->Branch(assembler->IsHeapNumberMap(value_map),
                         &if_valueisnumber, &if_valuenotnumber);
 
       assembler->Bind(&if_valueisnumber);
@@ -2545,8 +2680,7 @@
       Label if_valueisnumber(assembler),
           if_valuenotnumber(assembler, Label::kDeferred);
       Node* value_map = assembler->LoadMap(value);
-      Node* number_map = assembler->HeapNumberMapConstant();
-      assembler->Branch(assembler->WordEqual(value_map, number_map),
+      assembler->Branch(assembler->IsHeapNumberMap(value_map),
                         &if_valueisnumber, &if_valuenotnumber);
 
       assembler->Bind(&if_valueisnumber);
@@ -2587,6 +2721,15 @@
   return result_var.value();
 }
 
+// ES6 section 21.1.3.19 String.prototype.substring ( start, end )
+compiler::Node* SubStringStub::Generate(CodeStubAssembler* assembler,
+                                        compiler::Node* string,
+                                        compiler::Node* from,
+                                        compiler::Node* to,
+                                        compiler::Node* context) {
+  return assembler->SubString(context, string, from, to);
+}
+
 // ES6 section 7.1.13 ToObject (argument)
 void ToObjectStub::GenerateAssembly(CodeStubAssembler* assembler) const {
   typedef compiler::Node Node;
@@ -2601,43 +2744,38 @@
   Node* context = assembler->Parameter(Descriptor::kContext);
 
   Variable constructor_function_index_var(assembler,
-                                          MachineRepresentation::kWord32);
+                                          MachineType::PointerRepresentation());
 
   assembler->Branch(assembler->WordIsSmi(object), &if_number, &if_notsmi);
 
   assembler->Bind(&if_notsmi);
   Node* map = assembler->LoadMap(object);
 
-  assembler->GotoIf(
-      assembler->WordEqual(map, assembler->HeapNumberMapConstant()),
-      &if_number);
+  assembler->GotoIf(assembler->IsHeapNumberMap(map), &if_number);
 
   Node* instance_type = assembler->LoadMapInstanceType(map);
-  assembler->GotoIf(
-      assembler->Int32GreaterThanOrEqual(
-          instance_type, assembler->Int32Constant(FIRST_JS_RECEIVER_TYPE)),
-      &if_jsreceiver);
+  assembler->GotoIf(assembler->IsJSReceiverInstanceType(instance_type),
+                    &if_jsreceiver);
 
-  Node* constructor_function_index = assembler->LoadObjectField(
-      map, Map::kInObjectPropertiesOrConstructorFunctionIndexOffset,
-      MachineType::Uint8());
-  assembler->GotoIf(
-      assembler->Word32Equal(
-          constructor_function_index,
-          assembler->Int32Constant(Map::kNoConstructorFunctionIndex)),
-      &if_noconstructor);
+  Node* constructor_function_index =
+      assembler->LoadMapConstructorFunctionIndex(map);
+  assembler->GotoIf(assembler->WordEqual(constructor_function_index,
+                                         assembler->IntPtrConstant(
+                                             Map::kNoConstructorFunctionIndex)),
+                    &if_noconstructor);
   constructor_function_index_var.Bind(constructor_function_index);
   assembler->Goto(&if_wrapjsvalue);
 
   assembler->Bind(&if_number);
   constructor_function_index_var.Bind(
-      assembler->Int32Constant(Context::NUMBER_FUNCTION_INDEX));
+      assembler->IntPtrConstant(Context::NUMBER_FUNCTION_INDEX));
   assembler->Goto(&if_wrapjsvalue);
 
   assembler->Bind(&if_wrapjsvalue);
   Node* native_context = assembler->LoadNativeContext(context);
   Node* constructor = assembler->LoadFixedArrayElement(
-      native_context, constructor_function_index_var.value());
+      native_context, constructor_function_index_var.value(), 0,
+      CodeStubAssembler::INTPTR_PARAMETERS);
   Node* initial_map = assembler->LoadObjectField(
       constructor, JSFunction::kPrototypeOrInitialMapOffset);
   Node* js_value = assembler->Allocate(JSValue::kSize);
@@ -2679,9 +2817,7 @@
 
   Node* map = assembler->LoadMap(value);
 
-  assembler->GotoIf(
-      assembler->WordEqual(map, assembler->HeapNumberMapConstant()),
-      &return_number);
+  assembler->GotoIf(assembler->IsHeapNumberMap(map), &return_number);
 
   Node* instance_type = assembler->LoadMapInstanceType(map);
 
@@ -2703,15 +2839,11 @@
                                                assembler->Int32Constant(0)),
                         &return_undefined);
 
-  assembler->GotoIf(
-      assembler->Int32GreaterThanOrEqual(
-          instance_type, assembler->Int32Constant(FIRST_JS_RECEIVER_TYPE)),
-      &return_object);
+  assembler->GotoIf(assembler->IsJSReceiverInstanceType(instance_type),
+                    &return_object);
 
-  assembler->GotoIf(
-      assembler->Int32LessThan(instance_type,
-                               assembler->Int32Constant(FIRST_NONSTRING_TYPE)),
-      &return_string);
+  assembler->GotoIf(assembler->IsStringInstanceType(instance_type),
+                    &return_string);
 
 #define SIMD128_BRANCH(TYPE, Type, type, lane_count, lane_type)    \
   Label return_##type(assembler);                                  \
@@ -2908,11 +3040,10 @@
         Node* rhs_map = assembler->LoadMap(rhs);
 
         // Check if the {rhs} is a HeapNumber.
-        Node* number_map = assembler->HeapNumberMapConstant();
         Label if_rhsisnumber(assembler),
             if_rhsisnotnumber(assembler, Label::kDeferred);
-        assembler->Branch(assembler->WordEqual(rhs_map, number_map),
-                          &if_rhsisnumber, &if_rhsisnotnumber);
+        assembler->Branch(assembler->IsHeapNumberMap(rhs_map), &if_rhsisnumber,
+                          &if_rhsisnotnumber);
 
         assembler->Bind(&if_rhsisnumber);
         {
@@ -3028,9 +3159,7 @@
           // Check if {lhs} is a String.
           Label if_lhsisstring(assembler),
               if_lhsisnotstring(assembler, Label::kDeferred);
-          assembler->Branch(assembler->Int32LessThan(
-                                lhs_instance_type,
-                                assembler->Int32Constant(FIRST_NONSTRING_TYPE)),
+          assembler->Branch(assembler->IsStringInstanceType(lhs_instance_type),
                             &if_lhsisstring, &if_lhsisnotstring);
 
           assembler->Bind(&if_lhsisstring);
@@ -3041,10 +3170,9 @@
             // Check if {rhs} is also a String.
             Label if_rhsisstring(assembler, Label::kDeferred),
                 if_rhsisnotstring(assembler, Label::kDeferred);
-            assembler->Branch(assembler->Int32LessThan(
-                                  rhs_instance_type, assembler->Int32Constant(
-                                                         FIRST_NONSTRING_TYPE)),
-                              &if_rhsisstring, &if_rhsisnotstring);
+            assembler->Branch(
+                assembler->IsStringInstanceType(rhs_instance_type),
+                &if_rhsisstring, &if_rhsisnotstring);
 
             assembler->Bind(&if_rhsisstring);
             {
@@ -3088,9 +3216,7 @@
               Label if_rhsisreceiver(assembler, Label::kDeferred),
                   if_rhsisnotreceiver(assembler, Label::kDeferred);
               assembler->Branch(
-                  assembler->Int32LessThanOrEqual(
-                      assembler->Int32Constant(FIRST_JS_RECEIVER_TYPE),
-                      rhs_instance_type),
+                  assembler->IsJSReceiverInstanceType(rhs_instance_type),
                   &if_rhsisreceiver, &if_rhsisnotreceiver);
 
               assembler->Bind(&if_rhsisreceiver);
@@ -3122,9 +3248,7 @@
             Label if_lhsisreceiver(assembler, Label::kDeferred),
                 if_lhsisnotreceiver(assembler, Label::kDeferred);
             assembler->Branch(
-                assembler->Int32LessThanOrEqual(
-                    assembler->Int32Constant(FIRST_JS_RECEIVER_TYPE),
-                    lhs_instance_type),
+                assembler->IsJSReceiverInstanceType(lhs_instance_type),
                 &if_lhsisreceiver, &if_lhsisnotreceiver);
 
             assembler->Bind(&if_lhsisreceiver);
@@ -3218,10 +3342,9 @@
     Node* value_map = assembler->LoadMap(value);
 
     // Check if {value} (and therefore {rhs}) is a HeapNumber.
-    Node* number_map = assembler->HeapNumberMapConstant();
     Label if_valueisnumber(assembler), if_valueisnotnumber(assembler);
-    assembler->Branch(assembler->WordEqual(value_map, number_map),
-                      &if_valueisnumber, &if_valueisnotnumber);
+    assembler->Branch(assembler->IsHeapNumberMap(value_map), &if_valueisnumber,
+                      &if_valueisnotnumber);
 
     assembler->Bind(&if_valueisnumber);
     {
@@ -3342,10 +3465,9 @@
             // Check if the {rhs} is a String.
             Label if_rhsisstring(assembler, Label::kDeferred),
                 if_rhsisnotstring(assembler);
-            assembler->Branch(assembler->Int32LessThan(
-                                  rhs_instance_type, assembler->Int32Constant(
-                                                         FIRST_NONSTRING_TYPE)),
-                              &if_rhsisstring, &if_rhsisnotstring);
+            assembler->Branch(
+                assembler->IsStringInstanceType(rhs_instance_type),
+                &if_rhsisstring, &if_rhsisnotstring);
 
             assembler->Bind(&if_rhsisstring);
             {
@@ -3358,9 +3480,8 @@
             assembler->Bind(&if_rhsisnotstring);
             {
               // Check if the {rhs} is a Boolean.
-              Node* boolean_map = assembler->BooleanMapConstant();
               Label if_rhsisboolean(assembler), if_rhsisnotboolean(assembler);
-              assembler->Branch(assembler->WordEqual(rhs_map, boolean_map),
+              assembler->Branch(assembler->IsBooleanMap(rhs_map),
                                 &if_rhsisboolean, &if_rhsisnotboolean);
 
               assembler->Bind(&if_rhsisboolean);
@@ -3378,9 +3499,7 @@
                 Label if_rhsisreceiver(assembler, Label::kDeferred),
                     if_rhsisnotreceiver(assembler);
                 assembler->Branch(
-                    assembler->Int32LessThanOrEqual(
-                        assembler->Int32Constant(FIRST_JS_RECEIVER_TYPE),
-                        rhs_instance_type),
+                    assembler->IsJSReceiverInstanceType(rhs_instance_type),
                     &if_rhsisreceiver, &if_rhsisnotreceiver);
 
                 assembler->Bind(&if_rhsisreceiver);
@@ -3462,10 +3581,9 @@
             // Check if {rhs} is also a String.
             Label if_rhsisstring(assembler, Label::kDeferred),
                 if_rhsisnotstring(assembler);
-            assembler->Branch(assembler->Int32LessThan(
-                                  rhs_instance_type, assembler->Int32Constant(
-                                                         FIRST_NONSTRING_TYPE)),
-                              &if_rhsisstring, &if_rhsisnotstring);
+            assembler->Branch(
+                assembler->IsStringInstanceType(rhs_instance_type),
+                &if_rhsisstring, &if_rhsisnotstring);
 
             assembler->Bind(&if_rhsisstring);
             {
@@ -3514,9 +3632,7 @@
               Label if_rhsisstring(assembler, Label::kDeferred),
                   if_rhsisnotstring(assembler);
               assembler->Branch(
-                  assembler->Int32LessThan(
-                      rhs_instance_type,
-                      assembler->Int32Constant(FIRST_NONSTRING_TYPE)),
+                  assembler->IsStringInstanceType(rhs_instance_type),
                   &if_rhsisstring, &if_rhsisnotstring);
 
               assembler->Bind(&if_rhsisstring);
@@ -3534,9 +3650,7 @@
                     if_rhsisnotreceiver(assembler);
                 STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE);
                 assembler->Branch(
-                    assembler->Int32LessThanOrEqual(
-                        assembler->Int32Constant(FIRST_JS_RECEIVER_TYPE),
-                        rhs_instance_type),
+                    assembler->IsJSReceiverInstanceType(rhs_instance_type),
                     &if_rhsisreceiver, &if_rhsisnotreceiver);
 
                 assembler->Bind(&if_rhsisreceiver);
@@ -3556,8 +3670,7 @@
                   // Check if {rhs} is a Boolean.
                   Label if_rhsisboolean(assembler),
                       if_rhsisnotboolean(assembler);
-                  Node* boolean_map = assembler->BooleanMapConstant();
-                  assembler->Branch(assembler->WordEqual(rhs_map, boolean_map),
+                  assembler->Branch(assembler->IsBooleanMap(rhs_map),
                                     &if_rhsisboolean, &if_rhsisnotboolean);
 
                   assembler->Bind(&if_rhsisboolean);
@@ -3625,9 +3738,7 @@
             Label if_rhsisreceiver(assembler), if_rhsisnotreceiver(assembler);
             STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE);
             assembler->Branch(
-                assembler->Int32LessThanOrEqual(
-                    assembler->Int32Constant(FIRST_JS_RECEIVER_TYPE),
-                    rhs_instance_type),
+                assembler->IsJSReceiverInstanceType(rhs_instance_type),
                 &if_rhsisreceiver, &if_rhsisnotreceiver);
 
             assembler->Bind(&if_rhsisreceiver);
@@ -3672,9 +3783,7 @@
               Label if_rhsisreceiver(assembler), if_rhsisnotreceiver(assembler);
               STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE);
               assembler->Branch(
-                  assembler->Int32LessThanOrEqual(
-                      assembler->Int32Constant(FIRST_JS_RECEIVER_TYPE),
-                      rhs_instance_type),
+                  assembler->IsJSReceiverInstanceType(rhs_instance_type),
                   &if_rhsisreceiver, &if_rhsisnotreceiver);
 
               assembler->Bind(&if_rhsisreceiver);
@@ -3702,9 +3811,7 @@
             Label if_rhsisreceiver(assembler), if_rhsisnotreceiver(assembler);
             STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE);
             assembler->Branch(
-                assembler->Int32LessThanOrEqual(
-                    assembler->Int32Constant(FIRST_JS_RECEIVER_TYPE),
-                    rhs_instance_type),
+                assembler->IsJSReceiverInstanceType(rhs_instance_type),
                 &if_rhsisreceiver, &if_rhsisnotreceiver);
 
             assembler->Bind(&if_rhsisreceiver);
@@ -3940,9 +4047,7 @@
 
           // Check if {lhs} is a String.
           Label if_lhsisstring(assembler), if_lhsisnotstring(assembler);
-          assembler->Branch(assembler->Int32LessThan(
-                                lhs_instance_type,
-                                assembler->Int32Constant(FIRST_NONSTRING_TYPE)),
+          assembler->Branch(assembler->IsStringInstanceType(lhs_instance_type),
                             &if_lhsisstring, &if_lhsisnotstring);
 
           assembler->Bind(&if_lhsisstring);
@@ -3953,10 +4058,9 @@
             // Check if {rhs} is also a String.
             Label if_rhsisstring(assembler, Label::kDeferred),
                 if_rhsisnotstring(assembler);
-            assembler->Branch(assembler->Int32LessThan(
-                                  rhs_instance_type, assembler->Int32Constant(
-                                                         FIRST_NONSTRING_TYPE)),
-                              &if_rhsisstring, &if_rhsisnotstring);
+            assembler->Branch(
+                assembler->IsStringInstanceType(rhs_instance_type),
+                &if_rhsisstring, &if_rhsisnotstring);
 
             assembler->Bind(&if_rhsisstring);
             {
@@ -4057,365 +4161,6 @@
   return result.value();
 }
 
-void GenerateStringRelationalComparison(CodeStubAssembler* assembler,
-                                        RelationalComparisonMode mode) {
-  typedef CodeStubAssembler::Label Label;
-  typedef compiler::Node Node;
-  typedef CodeStubAssembler::Variable Variable;
-
-  Node* lhs = assembler->Parameter(0);
-  Node* rhs = assembler->Parameter(1);
-  Node* context = assembler->Parameter(2);
-
-  Label if_less(assembler), if_equal(assembler), if_greater(assembler);
-
-  // Fast check to see if {lhs} and {rhs} refer to the same String object.
-  Label if_same(assembler), if_notsame(assembler);
-  assembler->Branch(assembler->WordEqual(lhs, rhs), &if_same, &if_notsame);
-
-  assembler->Bind(&if_same);
-  assembler->Goto(&if_equal);
-
-  assembler->Bind(&if_notsame);
-  {
-    // Load instance types of {lhs} and {rhs}.
-    Node* lhs_instance_type = assembler->LoadInstanceType(lhs);
-    Node* rhs_instance_type = assembler->LoadInstanceType(rhs);
-
-    // Combine the instance types into a single 16-bit value, so we can check
-    // both of them at once.
-    Node* both_instance_types = assembler->Word32Or(
-        lhs_instance_type,
-        assembler->Word32Shl(rhs_instance_type, assembler->Int32Constant(8)));
-
-    // Check that both {lhs} and {rhs} are flat one-byte strings.
-    int const kBothSeqOneByteStringMask =
-        kStringEncodingMask | kStringRepresentationMask |
-        ((kStringEncodingMask | kStringRepresentationMask) << 8);
-    int const kBothSeqOneByteStringTag =
-        kOneByteStringTag | kSeqStringTag |
-        ((kOneByteStringTag | kSeqStringTag) << 8);
-    Label if_bothonebyteseqstrings(assembler),
-        if_notbothonebyteseqstrings(assembler);
-    assembler->Branch(assembler->Word32Equal(
-                          assembler->Word32And(both_instance_types,
-                                               assembler->Int32Constant(
-                                                   kBothSeqOneByteStringMask)),
-                          assembler->Int32Constant(kBothSeqOneByteStringTag)),
-                      &if_bothonebyteseqstrings, &if_notbothonebyteseqstrings);
-
-    assembler->Bind(&if_bothonebyteseqstrings);
-    {
-      // Load the length of {lhs} and {rhs}.
-      Node* lhs_length = assembler->LoadStringLength(lhs);
-      Node* rhs_length = assembler->LoadStringLength(rhs);
-
-      // Determine the minimum length.
-      Node* length = assembler->SmiMin(lhs_length, rhs_length);
-
-      // Compute the effective offset of the first character.
-      Node* begin = assembler->IntPtrConstant(SeqOneByteString::kHeaderSize -
-                                              kHeapObjectTag);
-
-      // Compute the first offset after the string from the length.
-      Node* end = assembler->IntPtrAdd(begin, assembler->SmiUntag(length));
-
-      // Loop over the {lhs} and {rhs} strings to see if they are equal.
-      Variable var_offset(assembler, MachineType::PointerRepresentation());
-      Label loop(assembler, &var_offset);
-      var_offset.Bind(begin);
-      assembler->Goto(&loop);
-      assembler->Bind(&loop);
-      {
-        // Check if {offset} equals {end}.
-        Node* offset = var_offset.value();
-        Label if_done(assembler), if_notdone(assembler);
-        assembler->Branch(assembler->WordEqual(offset, end), &if_done,
-                          &if_notdone);
-
-        assembler->Bind(&if_notdone);
-        {
-          // Load the next characters from {lhs} and {rhs}.
-          Node* lhs_value = assembler->Load(MachineType::Uint8(), lhs, offset);
-          Node* rhs_value = assembler->Load(MachineType::Uint8(), rhs, offset);
-
-          // Check if the characters match.
-          Label if_valueissame(assembler), if_valueisnotsame(assembler);
-          assembler->Branch(assembler->Word32Equal(lhs_value, rhs_value),
-                            &if_valueissame, &if_valueisnotsame);
-
-          assembler->Bind(&if_valueissame);
-          {
-            // Advance to next character.
-            var_offset.Bind(
-                assembler->IntPtrAdd(offset, assembler->IntPtrConstant(1)));
-          }
-          assembler->Goto(&loop);
-
-          assembler->Bind(&if_valueisnotsame);
-          assembler->BranchIf(assembler->Uint32LessThan(lhs_value, rhs_value),
-                              &if_less, &if_greater);
-        }
-
-        assembler->Bind(&if_done);
-        {
-          // All characters up to the min length are equal, decide based on
-          // string length.
-          Label if_lengthisequal(assembler), if_lengthisnotequal(assembler);
-          assembler->Branch(assembler->SmiEqual(lhs_length, rhs_length),
-                            &if_lengthisequal, &if_lengthisnotequal);
-
-          assembler->Bind(&if_lengthisequal);
-          assembler->Goto(&if_equal);
-
-          assembler->Bind(&if_lengthisnotequal);
-          assembler->BranchIfSmiLessThan(lhs_length, rhs_length, &if_less,
-                                         &if_greater);
-        }
-      }
-    }
-
-    assembler->Bind(&if_notbothonebyteseqstrings);
-    {
-      // TODO(bmeurer): Add fast case support for flattened cons strings;
-      // also add support for two byte string relational comparisons.
-      switch (mode) {
-        case kLessThan:
-          assembler->TailCallRuntime(Runtime::kStringLessThan, context, lhs,
-                                     rhs);
-          break;
-        case kLessThanOrEqual:
-          assembler->TailCallRuntime(Runtime::kStringLessThanOrEqual, context,
-                                     lhs, rhs);
-          break;
-        case kGreaterThan:
-          assembler->TailCallRuntime(Runtime::kStringGreaterThan, context, lhs,
-                                     rhs);
-          break;
-        case kGreaterThanOrEqual:
-          assembler->TailCallRuntime(Runtime::kStringGreaterThanOrEqual,
-                                     context, lhs, rhs);
-          break;
-      }
-    }
-  }
-
-  assembler->Bind(&if_less);
-  switch (mode) {
-    case kLessThan:
-    case kLessThanOrEqual:
-      assembler->Return(assembler->BooleanConstant(true));
-      break;
-
-    case kGreaterThan:
-    case kGreaterThanOrEqual:
-      assembler->Return(assembler->BooleanConstant(false));
-      break;
-  }
-
-  assembler->Bind(&if_equal);
-  switch (mode) {
-    case kLessThan:
-    case kGreaterThan:
-      assembler->Return(assembler->BooleanConstant(false));
-      break;
-
-    case kLessThanOrEqual:
-    case kGreaterThanOrEqual:
-      assembler->Return(assembler->BooleanConstant(true));
-      break;
-  }
-
-  assembler->Bind(&if_greater);
-  switch (mode) {
-    case kLessThan:
-    case kLessThanOrEqual:
-      assembler->Return(assembler->BooleanConstant(false));
-      break;
-
-    case kGreaterThan:
-    case kGreaterThanOrEqual:
-      assembler->Return(assembler->BooleanConstant(true));
-      break;
-  }
-}
-
-void GenerateStringEqual(CodeStubAssembler* assembler, ResultMode mode) {
-  // Here's pseudo-code for the algorithm below in case of kDontNegateResult
-  // mode; for kNegateResult mode we properly negate the result.
-  //
-  // if (lhs == rhs) return true;
-  // if (lhs->length() != rhs->length()) return false;
-  // if (lhs->IsInternalizedString() && rhs->IsInternalizedString()) {
-  //   return false;
-  // }
-  // if (lhs->IsSeqOneByteString() && rhs->IsSeqOneByteString()) {
-  //   for (i = 0; i != lhs->length(); ++i) {
-  //     if (lhs[i] != rhs[i]) return false;
-  //   }
-  //   return true;
-  // }
-  // return %StringEqual(lhs, rhs);
-
-  typedef CodeStubAssembler::Label Label;
-  typedef compiler::Node Node;
-  typedef CodeStubAssembler::Variable Variable;
-
-  Node* lhs = assembler->Parameter(0);
-  Node* rhs = assembler->Parameter(1);
-  Node* context = assembler->Parameter(2);
-
-  Label if_equal(assembler), if_notequal(assembler);
-
-  // Fast check to see if {lhs} and {rhs} refer to the same String object.
-  Label if_same(assembler), if_notsame(assembler);
-  assembler->Branch(assembler->WordEqual(lhs, rhs), &if_same, &if_notsame);
-
-  assembler->Bind(&if_same);
-  assembler->Goto(&if_equal);
-
-  assembler->Bind(&if_notsame);
-  {
-    // The {lhs} and {rhs} don't refer to the exact same String object.
-
-    // Load the length of {lhs} and {rhs}.
-    Node* lhs_length = assembler->LoadStringLength(lhs);
-    Node* rhs_length = assembler->LoadStringLength(rhs);
-
-    // Check if the lengths of {lhs} and {rhs} are equal.
-    Label if_lengthisequal(assembler), if_lengthisnotequal(assembler);
-    assembler->Branch(assembler->WordEqual(lhs_length, rhs_length),
-                      &if_lengthisequal, &if_lengthisnotequal);
-
-    assembler->Bind(&if_lengthisequal);
-    {
-      // Load instance types of {lhs} and {rhs}.
-      Node* lhs_instance_type = assembler->LoadInstanceType(lhs);
-      Node* rhs_instance_type = assembler->LoadInstanceType(rhs);
-
-      // Combine the instance types into a single 16-bit value, so we can check
-      // both of them at once.
-      Node* both_instance_types = assembler->Word32Or(
-          lhs_instance_type,
-          assembler->Word32Shl(rhs_instance_type, assembler->Int32Constant(8)));
-
-      // Check if both {lhs} and {rhs} are internalized.
-      int const kBothInternalizedMask =
-          kIsNotInternalizedMask | (kIsNotInternalizedMask << 8);
-      int const kBothInternalizedTag =
-          kInternalizedTag | (kInternalizedTag << 8);
-      Label if_bothinternalized(assembler), if_notbothinternalized(assembler);
-      assembler->Branch(assembler->Word32Equal(
-                            assembler->Word32And(both_instance_types,
-                                                 assembler->Int32Constant(
-                                                     kBothInternalizedMask)),
-                            assembler->Int32Constant(kBothInternalizedTag)),
-                        &if_bothinternalized, &if_notbothinternalized);
-
-      assembler->Bind(&if_bothinternalized);
-      {
-        // Fast negative check for internalized-to-internalized equality.
-        assembler->Goto(&if_notequal);
-      }
-
-      assembler->Bind(&if_notbothinternalized);
-      {
-        // Check that both {lhs} and {rhs} are flat one-byte strings.
-        int const kBothSeqOneByteStringMask =
-            kStringEncodingMask | kStringRepresentationMask |
-            ((kStringEncodingMask | kStringRepresentationMask) << 8);
-        int const kBothSeqOneByteStringTag =
-            kOneByteStringTag | kSeqStringTag |
-            ((kOneByteStringTag | kSeqStringTag) << 8);
-        Label if_bothonebyteseqstrings(assembler),
-            if_notbothonebyteseqstrings(assembler);
-        assembler->Branch(
-            assembler->Word32Equal(
-                assembler->Word32And(
-                    both_instance_types,
-                    assembler->Int32Constant(kBothSeqOneByteStringMask)),
-                assembler->Int32Constant(kBothSeqOneByteStringTag)),
-            &if_bothonebyteseqstrings, &if_notbothonebyteseqstrings);
-
-        assembler->Bind(&if_bothonebyteseqstrings);
-        {
-          // Compute the effective offset of the first character.
-          Node* begin = assembler->IntPtrConstant(
-              SeqOneByteString::kHeaderSize - kHeapObjectTag);
-
-          // Compute the first offset after the string from the length.
-          Node* end =
-              assembler->IntPtrAdd(begin, assembler->SmiUntag(lhs_length));
-
-          // Loop over the {lhs} and {rhs} strings to see if they are equal.
-          Variable var_offset(assembler, MachineType::PointerRepresentation());
-          Label loop(assembler, &var_offset);
-          var_offset.Bind(begin);
-          assembler->Goto(&loop);
-          assembler->Bind(&loop);
-          {
-            // Check if {offset} equals {end}.
-            Node* offset = var_offset.value();
-            Label if_done(assembler), if_notdone(assembler);
-            assembler->Branch(assembler->WordEqual(offset, end), &if_done,
-                              &if_notdone);
-
-            assembler->Bind(&if_notdone);
-            {
-              // Load the next characters from {lhs} and {rhs}.
-              Node* lhs_value =
-                  assembler->Load(MachineType::Uint8(), lhs, offset);
-              Node* rhs_value =
-                  assembler->Load(MachineType::Uint8(), rhs, offset);
-
-              // Check if the characters match.
-              Label if_valueissame(assembler), if_valueisnotsame(assembler);
-              assembler->Branch(assembler->Word32Equal(lhs_value, rhs_value),
-                                &if_valueissame, &if_valueisnotsame);
-
-              assembler->Bind(&if_valueissame);
-              {
-                // Advance to next character.
-                var_offset.Bind(
-                    assembler->IntPtrAdd(offset, assembler->IntPtrConstant(1)));
-              }
-              assembler->Goto(&loop);
-
-              assembler->Bind(&if_valueisnotsame);
-              assembler->Goto(&if_notequal);
-            }
-
-            assembler->Bind(&if_done);
-            assembler->Goto(&if_equal);
-          }
-        }
-
-        assembler->Bind(&if_notbothonebyteseqstrings);
-        {
-          // TODO(bmeurer): Add fast case support for flattened cons strings;
-          // also add support for two byte string equality checks.
-          Runtime::FunctionId function_id = (mode == kDontNegateResult)
-                                                ? Runtime::kStringEqual
-                                                : Runtime::kStringNotEqual;
-          assembler->TailCallRuntime(function_id, context, lhs, rhs);
-        }
-      }
-    }
-
-    assembler->Bind(&if_lengthisnotequal);
-    {
-      // Mismatch in length of {lhs} and {rhs}, cannot be equal.
-      assembler->Goto(&if_notequal);
-    }
-  }
-
-  assembler->Bind(&if_equal);
-  assembler->Return(assembler->BooleanConstant(mode == kDontNegateResult));
-
-  assembler->Bind(&if_notequal);
-  assembler->Return(assembler->BooleanConstant(mode == kNegateResult));
-}
-
 }  // namespace
 
 void LoadApiGetterStub::GenerateAssembly(CodeStubAssembler* assembler) const {
@@ -4427,13 +4172,226 @@
   Node* holder = receiver;
   Node* map = assembler->LoadMap(receiver);
   Node* descriptors = assembler->LoadMapDescriptors(map);
-  Node* offset =
-      assembler->Int32Constant(DescriptorArray::ToValueIndex(index()));
-  Node* callback = assembler->LoadFixedArrayElement(descriptors, offset);
+  Node* value_index =
+      assembler->IntPtrConstant(DescriptorArray::ToValueIndex(index()));
+  Node* callback = assembler->LoadFixedArrayElement(
+      descriptors, value_index, 0, CodeStubAssembler::INTPTR_PARAMETERS);
   assembler->TailCallStub(CodeFactory::ApiGetter(isolate()), context, receiver,
                           holder, callback);
 }
 
+void StoreFieldStub::GenerateAssembly(CodeStubAssembler* assembler) const {
+  typedef CodeStubAssembler::Label Label;
+  typedef compiler::Node Node;
+
+  FieldIndex index = this->index();
+  Representation representation = this->representation();
+
+  assembler->Comment("StoreFieldStub: inobject=%d, offset=%d, rep=%s",
+                     index.is_inobject(), index.offset(),
+                     representation.Mnemonic());
+
+  Node* receiver = assembler->Parameter(Descriptor::kReceiver);
+  Node* name = assembler->Parameter(Descriptor::kName);
+  Node* value = assembler->Parameter(Descriptor::kValue);
+  Node* slot = assembler->Parameter(Descriptor::kSlot);
+  Node* vector = assembler->Parameter(Descriptor::kVector);
+  Node* context = assembler->Parameter(Descriptor::kContext);
+
+  Label miss(assembler);
+
+  Node* prepared_value =
+      assembler->PrepareValueForWrite(value, representation, &miss);
+  assembler->StoreNamedField(receiver, index, representation, prepared_value,
+                             false);
+  assembler->Return(value);
+
+  // Only stores to tagged field can't bailout.
+  if (!representation.IsTagged()) {
+    assembler->Bind(&miss);
+    {
+      assembler->Comment("Miss");
+      assembler->TailCallRuntime(Runtime::kStoreIC_Miss, context, value, slot,
+                                 vector, receiver, name);
+    }
+  }
+}
+
+void StoreGlobalStub::GenerateAssembly(CodeStubAssembler* assembler) const {
+  typedef CodeStubAssembler::Label Label;
+  typedef compiler::Node Node;
+
+  assembler->Comment(
+      "StoreGlobalStub: cell_type=%d, constant_type=%d, check_global=%d",
+      cell_type(), PropertyCellType::kConstantType == cell_type()
+                       ? static_cast<int>(constant_type())
+                       : -1,
+      check_global());
+
+  Node* receiver = assembler->Parameter(Descriptor::kReceiver);
+  Node* name = assembler->Parameter(Descriptor::kName);
+  Node* value = assembler->Parameter(Descriptor::kValue);
+  Node* slot = assembler->Parameter(Descriptor::kSlot);
+  Node* vector = assembler->Parameter(Descriptor::kVector);
+  Node* context = assembler->Parameter(Descriptor::kContext);
+
+  Label miss(assembler);
+
+  if (check_global()) {
+    // Check that the map of the global has not changed: use a placeholder map
+    // that will be replaced later with the global object's map.
+    Node* proxy_map = assembler->LoadMap(receiver);
+    Node* global = assembler->LoadObjectField(proxy_map, Map::kPrototypeOffset);
+    Node* map_cell = assembler->HeapConstant(isolate()->factory()->NewWeakCell(
+        StoreGlobalStub::global_map_placeholder(isolate())));
+    Node* expected_map = assembler->LoadWeakCellValue(map_cell);
+    Node* map = assembler->LoadMap(global);
+    assembler->GotoIf(assembler->WordNotEqual(expected_map, map), &miss);
+  }
+
+  Node* weak_cell = assembler->HeapConstant(isolate()->factory()->NewWeakCell(
+      StoreGlobalStub::property_cell_placeholder(isolate())));
+  Node* cell = assembler->LoadWeakCellValue(weak_cell);
+  assembler->GotoIf(assembler->WordIsSmi(cell), &miss);
+
+  // Load the payload of the global parameter cell. A hole indicates that the
+  // cell has been invalidated and that the store must be handled by the
+  // runtime.
+  Node* cell_contents =
+      assembler->LoadObjectField(cell, PropertyCell::kValueOffset);
+
+  PropertyCellType cell_type = this->cell_type();
+  if (cell_type == PropertyCellType::kConstant ||
+      cell_type == PropertyCellType::kUndefined) {
+    // This is always valid for all states a cell can be in.
+    assembler->GotoIf(assembler->WordNotEqual(cell_contents, value), &miss);
+  } else {
+    assembler->GotoIf(assembler->IsTheHole(cell_contents), &miss);
+
+    // When dealing with constant types, the type may be allowed to change, as
+    // long as optimized code remains valid.
+    bool value_is_smi = false;
+    if (cell_type == PropertyCellType::kConstantType) {
+      switch (constant_type()) {
+        case PropertyCellConstantType::kSmi:
+          assembler->GotoUnless(assembler->WordIsSmi(value), &miss);
+          value_is_smi = true;
+          break;
+        case PropertyCellConstantType::kStableMap: {
+          // It is sufficient here to check that the value and cell contents
+          // have identical maps, no matter if they are stable or not or if they
+          // are the maps that were originally in the cell or not. If optimized
+          // code will deopt when a cell has a unstable map and if it has a
+          // dependency on a stable map, it will deopt if the map destabilizes.
+          assembler->GotoIf(assembler->WordIsSmi(value), &miss);
+          assembler->GotoIf(assembler->WordIsSmi(cell_contents), &miss);
+          Node* expected_map = assembler->LoadMap(cell_contents);
+          Node* map = assembler->LoadMap(value);
+          assembler->GotoIf(assembler->WordNotEqual(expected_map, map), &miss);
+          break;
+        }
+      }
+    }
+    if (value_is_smi) {
+      assembler->StoreObjectFieldNoWriteBarrier(
+          cell, PropertyCell::kValueOffset, value);
+    } else {
+      assembler->StoreObjectField(cell, PropertyCell::kValueOffset, value);
+    }
+  }
+
+  assembler->Return(value);
+
+  assembler->Bind(&miss);
+  {
+    assembler->Comment("Miss");
+    assembler->TailCallRuntime(Runtime::kStoreIC_Miss, context, value, slot,
+                               vector, receiver, name);
+  }
+}
+
+void KeyedLoadSloppyArgumentsStub::GenerateAssembly(
+    CodeStubAssembler* assembler) const {
+  typedef CodeStubAssembler::Label Label;
+  typedef compiler::Node Node;
+
+  Node* receiver = assembler->Parameter(Descriptor::kReceiver);
+  Node* key = assembler->Parameter(Descriptor::kName);
+  Node* slot = assembler->Parameter(Descriptor::kSlot);
+  Node* vector = assembler->Parameter(Descriptor::kVector);
+  Node* context = assembler->Parameter(Descriptor::kContext);
+
+  Label miss(assembler);
+
+  Node* result = assembler->LoadKeyedSloppyArguments(receiver, key, &miss);
+  assembler->Return(result);
+
+  assembler->Bind(&miss);
+  {
+    assembler->Comment("Miss");
+    assembler->TailCallRuntime(Runtime::kKeyedLoadIC_Miss, context, receiver,
+                               key, slot, vector);
+  }
+}
+
+void KeyedStoreSloppyArgumentsStub::GenerateAssembly(
+    CodeStubAssembler* assembler) const {
+  typedef CodeStubAssembler::Label Label;
+  typedef compiler::Node Node;
+
+  Node* receiver = assembler->Parameter(Descriptor::kReceiver);
+  Node* key = assembler->Parameter(Descriptor::kName);
+  Node* value = assembler->Parameter(Descriptor::kValue);
+  Node* slot = assembler->Parameter(Descriptor::kSlot);
+  Node* vector = assembler->Parameter(Descriptor::kVector);
+  Node* context = assembler->Parameter(Descriptor::kContext);
+
+  Label miss(assembler);
+
+  assembler->StoreKeyedSloppyArguments(receiver, key, value, &miss);
+  assembler->Return(value);
+
+  assembler->Bind(&miss);
+  {
+    assembler->Comment("Miss");
+    assembler->TailCallRuntime(Runtime::kKeyedStoreIC_Miss, context, value,
+                               slot, vector, receiver, key);
+  }
+}
+
+void LoadScriptContextFieldStub::GenerateAssembly(
+    CodeStubAssembler* assembler) const {
+  typedef compiler::Node Node;
+
+  assembler->Comment("LoadScriptContextFieldStub: context_index=%d, slot=%d",
+                     context_index(), slot_index());
+
+  Node* context = assembler->Parameter(Descriptor::kContext);
+
+  Node* script_context = assembler->LoadScriptContext(context, context_index());
+  Node* result = assembler->LoadFixedArrayElement(
+      script_context, assembler->IntPtrConstant(slot_index()), 0,
+      CodeStubAssembler::INTPTR_PARAMETERS);
+  assembler->Return(result);
+}
+
+void StoreScriptContextFieldStub::GenerateAssembly(
+    CodeStubAssembler* assembler) const {
+  typedef compiler::Node Node;
+
+  assembler->Comment("StoreScriptContextFieldStub: context_index=%d, slot=%d",
+                     context_index(), slot_index());
+
+  Node* value = assembler->Parameter(Descriptor::kValue);
+  Node* context = assembler->Parameter(Descriptor::kContext);
+
+  Node* script_context = assembler->LoadScriptContext(context, context_index());
+  assembler->StoreFixedArrayElement(
+      script_context, assembler->IntPtrConstant(slot_index()), value,
+      UPDATE_WRITE_BARRIER, CodeStubAssembler::INTPTR_PARAMETERS);
+  assembler->Return(value);
+}
+
 // static
 compiler::Node* LessThanStub::Generate(CodeStubAssembler* assembler,
                                        compiler::Node* lhs, compiler::Node* rhs,
@@ -4499,33 +4457,6 @@
   return GenerateStrictEqual(assembler, kNegateResult, lhs, rhs, context);
 }
 
-void StringEqualStub::GenerateAssembly(CodeStubAssembler* assembler) const {
-  GenerateStringEqual(assembler, kDontNegateResult);
-}
-
-void StringNotEqualStub::GenerateAssembly(CodeStubAssembler* assembler) const {
-  GenerateStringEqual(assembler, kNegateResult);
-}
-
-void StringLessThanStub::GenerateAssembly(CodeStubAssembler* assembler) const {
-  GenerateStringRelationalComparison(assembler, kLessThan);
-}
-
-void StringLessThanOrEqualStub::GenerateAssembly(
-    CodeStubAssembler* assembler) const {
-  GenerateStringRelationalComparison(assembler, kLessThanOrEqual);
-}
-
-void StringGreaterThanStub::GenerateAssembly(
-    CodeStubAssembler* assembler) const {
-  GenerateStringRelationalComparison(assembler, kGreaterThan);
-}
-
-void StringGreaterThanOrEqualStub::GenerateAssembly(
-    CodeStubAssembler* assembler) const {
-  GenerateStringRelationalComparison(assembler, kGreaterThanOrEqual);
-}
-
 void ToLengthStub::GenerateAssembly(CodeStubAssembler* assembler) const {
   typedef CodeStubAssembler::Label Label;
   typedef compiler::Node Node;
@@ -4557,8 +4488,7 @@
     // Check if {len} is a HeapNumber.
     Label if_lenisheapnumber(assembler),
         if_lenisnotheapnumber(assembler, Label::kDeferred);
-    assembler->Branch(assembler->WordEqual(assembler->LoadMap(len),
-                                           assembler->HeapNumberMapConstant()),
+    assembler->Branch(assembler->IsHeapNumberMap(assembler->LoadMap(len)),
                       &if_lenisheapnumber, &if_lenisnotheapnumber);
 
     assembler->Bind(&if_lenisheapnumber);
@@ -4603,64 +4533,12 @@
 }
 
 void ToIntegerStub::GenerateAssembly(CodeStubAssembler* assembler) const {
-  typedef CodeStubAssembler::Label Label;
   typedef compiler::Node Node;
-  typedef CodeStubAssembler::Variable Variable;
 
-  Node* context = assembler->Parameter(1);
+  Node* input = assembler->Parameter(Descriptor::kArgument);
+  Node* context = assembler->Parameter(Descriptor::kContext);
 
-  // We might need to loop once for ToNumber conversion.
-  Variable var_arg(assembler, MachineRepresentation::kTagged);
-  Label loop(assembler, &var_arg);
-  var_arg.Bind(assembler->Parameter(0));
-  assembler->Goto(&loop);
-  assembler->Bind(&loop);
-  {
-    // Shared entry points.
-    Label return_arg(assembler), return_zero(assembler, Label::kDeferred);
-
-    // Load the current {arg} value.
-    Node* arg = var_arg.value();
-
-    // Check if {arg} is a Smi.
-    assembler->GotoIf(assembler->WordIsSmi(arg), &return_arg);
-
-    // Check if {arg} is a HeapNumber.
-    Label if_argisheapnumber(assembler),
-        if_argisnotheapnumber(assembler, Label::kDeferred);
-    assembler->Branch(assembler->WordEqual(assembler->LoadMap(arg),
-                                           assembler->HeapNumberMapConstant()),
-                      &if_argisheapnumber, &if_argisnotheapnumber);
-
-    assembler->Bind(&if_argisheapnumber);
-    {
-      // Load the floating-point value of {arg}.
-      Node* arg_value = assembler->LoadHeapNumberValue(arg);
-
-      // Check if {arg} is NaN.
-      assembler->GotoUnless(assembler->Float64Equal(arg_value, arg_value),
-                            &return_zero);
-
-      // Truncate {arg} towards zero.
-      Node* value = assembler->Float64Trunc(arg_value);
-      var_arg.Bind(assembler->ChangeFloat64ToTagged(value));
-      assembler->Goto(&return_arg);
-    }
-
-    assembler->Bind(&if_argisnotheapnumber);
-    {
-      // Need to convert {arg} to a Number first.
-      Callable callable = CodeFactory::NonNumberToNumber(assembler->isolate());
-      var_arg.Bind(assembler->CallStub(callable, context, arg));
-      assembler->Goto(&loop);
-    }
-
-    assembler->Bind(&return_arg);
-    assembler->Return(var_arg.value());
-
-    assembler->Bind(&return_zero);
-    assembler->Return(assembler->SmiConstant(Smi::FromInt(0)));
-  }
+  assembler->Return(assembler->ToInteger(context, input));
 }
 
 void StoreInterceptorStub::GenerateAssembly(
@@ -4727,15 +4605,13 @@
   typedef compiler::CodeAssembler::Label Label;
   typedef compiler::CodeAssembler::Variable Variable;
 
-  Node* undefined = assembler->UndefinedConstant();
   Node* literals_array =
       assembler->LoadObjectField(closure, JSFunction::kLiteralsOffset);
   Node* allocation_site = assembler->LoadFixedArrayElement(
       literals_array, literals_index,
       LiteralsArray::kFirstLiteralIndex * kPointerSize,
       CodeStubAssembler::SMI_PARAMETERS);
-  assembler->GotoIf(assembler->WordEqual(allocation_site, undefined),
-                    call_runtime);
+  assembler->GotoIf(assembler->IsUndefined(allocation_site), call_runtime);
 
   // Calculate the object and allocation size based on the properties count.
   Node* object_size = assembler->IntPtrAdd(
@@ -4886,14 +4762,10 @@
 
 
 void HandlerStub::InitializeDescriptor(CodeStubDescriptor* descriptor) {
-  if (kind() == Code::STORE_IC) {
-    descriptor->Initialize(FUNCTION_ADDR(Runtime_StoreIC_MissFromStubFailure));
-  } else if (kind() == Code::KEYED_LOAD_IC) {
+  DCHECK(kind() == Code::LOAD_IC || kind() == Code::KEYED_LOAD_IC);
+  if (kind() == Code::KEYED_LOAD_IC) {
     descriptor->Initialize(
         FUNCTION_ADDR(Runtime_KeyedLoadIC_MissFromStubFailure));
-  } else if (kind() == Code::KEYED_STORE_IC) {
-    descriptor->Initialize(
-        FUNCTION_ADDR(Runtime_KeyedStoreIC_MissFromStubFailure));
   }
 }
 
@@ -4908,39 +4780,12 @@
 }
 
 
-void StoreFastElementStub::InitializeDescriptor(
-    CodeStubDescriptor* descriptor) {
-  descriptor->Initialize(
-      FUNCTION_ADDR(Runtime_KeyedStoreIC_MissFromStubFailure));
-}
-
-
-void ElementsTransitionAndStoreStub::InitializeDescriptor(
-    CodeStubDescriptor* descriptor) {
-  descriptor->Initialize(
-      FUNCTION_ADDR(Runtime_ElementsTransitionAndStoreIC_Miss));
-}
-
-void StoreTransitionStub::InitializeDescriptor(CodeStubDescriptor* descriptor) {
-  descriptor->Initialize(
-      FUNCTION_ADDR(Runtime_TransitionStoreIC_MissFromStubFailure));
-}
-
 void NumberToStringStub::InitializeDescriptor(CodeStubDescriptor* descriptor) {
   descriptor->Initialize(
       Runtime::FunctionForId(Runtime::kNumberToString)->entry);
   descriptor->SetMissHandler(Runtime::kNumberToString);
 }
 
-
-void FastCloneShallowArrayStub::InitializeDescriptor(
-    CodeStubDescriptor* descriptor) {
-  FastCloneShallowArrayDescriptor call_descriptor(isolate());
-  descriptor->Initialize(
-      Runtime::FunctionForId(Runtime::kCreateArrayLiteralStubBailout)->entry);
-  descriptor->SetMissHandler(Runtime::kCreateArrayLiteralStubBailout);
-}
-
 void RegExpConstructResultStub::InitializeDescriptor(
     CodeStubDescriptor* descriptor) {
   descriptor->Initialize(
@@ -5088,9 +4933,7 @@
 
   assembler->Bind(&return_to_name);
   {
-    // TODO(cbruni): inline ToName here.
-    Callable callable = CodeFactory::ToName(assembler->isolate());
-    var_result.Bind(assembler->CallStub(callable, context, key));
+    var_result.Bind(assembler->ToName(context, key));
     assembler->Goto(&end);
   }
 
@@ -5193,43 +5036,49 @@
   Label if_normal(assembler), if_generator(assembler), if_async(assembler),
       if_class_constructor(assembler), if_function_without_prototype(assembler),
       load_map(assembler);
-  Variable map_index(assembler, MachineRepresentation::kTagged);
+  Variable map_index(assembler, MachineType::PointerRepresentation());
 
+  STATIC_ASSERT(FunctionKind::kNormalFunction == 0);
   Node* is_not_normal = assembler->Word32And(
       compiler_hints,
-      assembler->Int32Constant(SharedFunctionInfo::kFunctionKindMaskBits));
+      assembler->Int32Constant(SharedFunctionInfo::kAllFunctionKindBitsMask));
   assembler->GotoUnless(is_not_normal, &if_normal);
 
   Node* is_generator = assembler->Word32And(
       compiler_hints,
-      assembler->Int32Constant(1 << SharedFunctionInfo::kIsGeneratorBit));
+      assembler->Int32Constant(FunctionKind::kGeneratorFunction
+                               << SharedFunctionInfo::kFunctionKindShift));
   assembler->GotoIf(is_generator, &if_generator);
 
   Node* is_async = assembler->Word32And(
       compiler_hints,
-      assembler->Int32Constant(1 << SharedFunctionInfo::kIsAsyncFunctionBit));
+      assembler->Int32Constant(FunctionKind::kAsyncFunction
+                               << SharedFunctionInfo::kFunctionKindShift));
   assembler->GotoIf(is_async, &if_async);
 
   Node* is_class_constructor = assembler->Word32And(
       compiler_hints,
-      assembler->Int32Constant(SharedFunctionInfo::kClassConstructorBits));
+      assembler->Int32Constant(FunctionKind::kClassConstructor
+                               << SharedFunctionInfo::kFunctionKindShift));
   assembler->GotoIf(is_class_constructor, &if_class_constructor);
 
   if (FLAG_debug_code) {
     // Function must be a function without a prototype.
     assembler->Assert(assembler->Word32And(
-        compiler_hints, assembler->Int32Constant(
-                            SharedFunctionInfo::kAccessorFunctionBits |
-                            (1 << SharedFunctionInfo::kIsArrowBit) |
-                            (1 << SharedFunctionInfo::kIsConciseMethodBit))));
+        compiler_hints,
+        assembler->Int32Constant((FunctionKind::kAccessorFunction |
+                                  FunctionKind::kArrowFunction |
+                                  FunctionKind::kConciseMethod)
+                                 << SharedFunctionInfo::kFunctionKindShift)));
   }
   assembler->Goto(&if_function_without_prototype);
 
   assembler->Bind(&if_normal);
   {
     map_index.Bind(assembler->Select(
-        is_strict, assembler->Int32Constant(Context::STRICT_FUNCTION_MAP_INDEX),
-        assembler->Int32Constant(Context::SLOPPY_FUNCTION_MAP_INDEX)));
+        is_strict,
+        assembler->IntPtrConstant(Context::STRICT_FUNCTION_MAP_INDEX),
+        assembler->IntPtrConstant(Context::SLOPPY_FUNCTION_MAP_INDEX)));
     assembler->Goto(&load_map);
   }
 
@@ -5237,8 +5086,8 @@
   {
     map_index.Bind(assembler->Select(
         is_strict,
-        assembler->Int32Constant(Context::STRICT_GENERATOR_FUNCTION_MAP_INDEX),
-        assembler->Int32Constant(
+        assembler->IntPtrConstant(Context::STRICT_GENERATOR_FUNCTION_MAP_INDEX),
+        assembler->IntPtrConstant(
             Context::SLOPPY_GENERATOR_FUNCTION_MAP_INDEX)));
     assembler->Goto(&load_map);
   }
@@ -5247,21 +5096,21 @@
   {
     map_index.Bind(assembler->Select(
         is_strict,
-        assembler->Int32Constant(Context::STRICT_ASYNC_FUNCTION_MAP_INDEX),
-        assembler->Int32Constant(Context::SLOPPY_ASYNC_FUNCTION_MAP_INDEX)));
+        assembler->IntPtrConstant(Context::STRICT_ASYNC_FUNCTION_MAP_INDEX),
+        assembler->IntPtrConstant(Context::SLOPPY_ASYNC_FUNCTION_MAP_INDEX)));
     assembler->Goto(&load_map);
   }
 
   assembler->Bind(&if_class_constructor);
   {
     map_index.Bind(
-        assembler->Int32Constant(Context::STRICT_FUNCTION_MAP_INDEX));
+        assembler->IntPtrConstant(Context::STRICT_FUNCTION_MAP_INDEX));
     assembler->Goto(&load_map);
   }
 
   assembler->Bind(&if_function_without_prototype);
   {
-    map_index.Bind(assembler->Int32Constant(
+    map_index.Bind(assembler->IntPtrConstant(
         Context::STRICT_FUNCTION_WITHOUT_PROTOTYPE_MAP_INDEX));
     assembler->Goto(&load_map);
   }
@@ -5272,7 +5121,8 @@
   // as the map of the allocated object.
   Node* native_context = assembler->LoadNativeContext(context);
   Node* map_slot_value =
-      assembler->LoadFixedArrayElement(native_context, map_index.value());
+      assembler->LoadFixedArrayElement(native_context, map_index.value(), 0,
+                                       CodeStubAssembler::INTPTR_PARAMETERS);
   assembler->StoreMapNoWriteBarrier(result, map_slot_value);
 
   // Initialize the rest of the function.
@@ -5405,15 +5255,13 @@
 
   Variable result(assembler, MachineRepresentation::kTagged);
 
-  Node* undefined = assembler->UndefinedConstant();
   Node* literals_array =
       assembler->LoadObjectField(closure, JSFunction::kLiteralsOffset);
   Node* boilerplate = assembler->LoadFixedArrayElement(
       literals_array, literal_index,
       LiteralsArray::kFirstLiteralIndex * kPointerSize,
       CodeStubAssembler::SMI_PARAMETERS);
-  assembler->GotoIf(assembler->WordEqual(boilerplate, undefined),
-                    &call_runtime);
+  assembler->GotoIf(assembler->IsUndefined(boilerplate), &call_runtime);
 
   {
     int size = JSRegExp::kSize + JSRegExp::kInObjectFieldCount * kPointerSize;
@@ -5449,6 +5297,191 @@
       Generate(assembler, closure, literal_index, pattern, flags, context));
 }
 
+namespace {
+
+compiler::Node* NonEmptyShallowClone(CodeStubAssembler* assembler,
+                                     compiler::Node* boilerplate,
+                                     compiler::Node* boilerplate_map,
+                                     compiler::Node* boilerplate_elements,
+                                     compiler::Node* allocation_site,
+                                     compiler::Node* capacity,
+                                     ElementsKind kind) {
+  typedef compiler::Node Node;
+  typedef CodeStubAssembler::ParameterMode ParameterMode;
+
+  ParameterMode param_mode = CodeStubAssembler::SMI_PARAMETERS;
+
+  Node* length = assembler->LoadJSArrayLength(boilerplate);
+
+  if (assembler->Is64()) {
+    capacity = assembler->SmiUntag(capacity);
+    param_mode = CodeStubAssembler::INTEGER_PARAMETERS;
+  }
+
+  Node *array, *elements;
+  std::tie(array, elements) =
+      assembler->AllocateUninitializedJSArrayWithElements(
+          kind, boilerplate_map, length, allocation_site, capacity, param_mode);
+
+  assembler->Comment("copy elements header");
+  for (int offset = 0; offset < FixedArrayBase::kHeaderSize;
+       offset += kPointerSize) {
+    Node* value = assembler->LoadObjectField(boilerplate_elements, offset);
+    assembler->StoreObjectField(elements, offset, value);
+  }
+
+  if (assembler->Is64()) {
+    length = assembler->SmiUntag(length);
+  }
+
+  assembler->Comment("copy boilerplate elements");
+  assembler->CopyFixedArrayElements(kind, boilerplate_elements, elements,
+                                    length, SKIP_WRITE_BARRIER, param_mode);
+  assembler->IncrementCounter(
+      assembler->isolate()->counters()->inlined_copied_elements(), 1);
+
+  return array;
+}
+
+}  // namespace
+
+// static
+compiler::Node* FastCloneShallowArrayStub::Generate(
+    CodeStubAssembler* assembler, compiler::Node* closure,
+    compiler::Node* literal_index, compiler::Node* context,
+    CodeStubAssembler::Label* call_runtime,
+    AllocationSiteMode allocation_site_mode) {
+  typedef CodeStubAssembler::Label Label;
+  typedef CodeStubAssembler::Variable Variable;
+  typedef compiler::Node Node;
+
+  Label zero_capacity(assembler), cow_elements(assembler),
+      fast_elements(assembler), return_result(assembler);
+  Variable result(assembler, MachineRepresentation::kTagged);
+
+  Node* literals_array =
+      assembler->LoadObjectField(closure, JSFunction::kLiteralsOffset);
+  Node* allocation_site = assembler->LoadFixedArrayElement(
+      literals_array, literal_index,
+      LiteralsArray::kFirstLiteralIndex * kPointerSize,
+      CodeStubAssembler::SMI_PARAMETERS);
+
+  assembler->GotoIf(assembler->IsUndefined(allocation_site), call_runtime);
+  allocation_site = assembler->LoadFixedArrayElement(
+      literals_array, literal_index,
+      LiteralsArray::kFirstLiteralIndex * kPointerSize,
+      CodeStubAssembler::SMI_PARAMETERS);
+
+  Node* boilerplate = assembler->LoadObjectField(
+      allocation_site, AllocationSite::kTransitionInfoOffset);
+  Node* boilerplate_map = assembler->LoadMap(boilerplate);
+  Node* boilerplate_elements = assembler->LoadElements(boilerplate);
+  Node* capacity = assembler->LoadFixedArrayBaseLength(boilerplate_elements);
+  allocation_site =
+      allocation_site_mode == TRACK_ALLOCATION_SITE ? allocation_site : nullptr;
+
+  Node* zero = assembler->SmiConstant(Smi::FromInt(0));
+  assembler->GotoIf(assembler->SmiEqual(capacity, zero), &zero_capacity);
+
+  Node* elements_map = assembler->LoadMap(boilerplate_elements);
+  assembler->GotoIf(assembler->IsFixedCOWArrayMap(elements_map), &cow_elements);
+
+  assembler->GotoIf(assembler->IsFixedArrayMap(elements_map), &fast_elements);
+  {
+    assembler->Comment("fast double elements path");
+    if (FLAG_debug_code) {
+      Label correct_elements_map(assembler), abort(assembler, Label::kDeferred);
+      assembler->BranchIf(assembler->IsFixedDoubleArrayMap(elements_map),
+                          &correct_elements_map, &abort);
+
+      assembler->Bind(&abort);
+      {
+        Node* abort_id = assembler->SmiConstant(
+            Smi::FromInt(BailoutReason::kExpectedFixedDoubleArrayMap));
+        assembler->TailCallRuntime(Runtime::kAbort, context, abort_id);
+      }
+      assembler->Bind(&correct_elements_map);
+    }
+
+    Node* array = NonEmptyShallowClone(assembler, boilerplate, boilerplate_map,
+                                       boilerplate_elements, allocation_site,
+                                       capacity, FAST_DOUBLE_ELEMENTS);
+    result.Bind(array);
+    assembler->Goto(&return_result);
+  }
+
+  assembler->Bind(&fast_elements);
+  {
+    assembler->Comment("fast elements path");
+    Node* array = NonEmptyShallowClone(assembler, boilerplate, boilerplate_map,
+                                       boilerplate_elements, allocation_site,
+                                       capacity, FAST_ELEMENTS);
+    result.Bind(array);
+    assembler->Goto(&return_result);
+  }
+
+  Variable length(assembler, MachineRepresentation::kTagged),
+      elements(assembler, MachineRepresentation::kTagged);
+  Label allocate_without_elements(assembler);
+
+  assembler->Bind(&cow_elements);
+  {
+    assembler->Comment("fixed cow path");
+    length.Bind(assembler->LoadJSArrayLength(boilerplate));
+    elements.Bind(boilerplate_elements);
+
+    assembler->Goto(&allocate_without_elements);
+  }
+
+  assembler->Bind(&zero_capacity);
+  {
+    assembler->Comment("zero capacity path");
+    length.Bind(zero);
+    elements.Bind(assembler->LoadRoot(Heap::kEmptyFixedArrayRootIndex));
+
+    assembler->Goto(&allocate_without_elements);
+  }
+
+  assembler->Bind(&allocate_without_elements);
+  {
+    Node* array = assembler->AllocateUninitializedJSArrayWithoutElements(
+        FAST_ELEMENTS, boilerplate_map, length.value(), allocation_site);
+    assembler->StoreObjectField(array, JSObject::kElementsOffset,
+                                elements.value());
+    result.Bind(array);
+    assembler->Goto(&return_result);
+  }
+
+  assembler->Bind(&return_result);
+  return result.value();
+}
+
+void FastCloneShallowArrayStub::GenerateAssembly(
+    CodeStubAssembler* assembler) const {
+  typedef compiler::Node Node;
+  typedef CodeStubAssembler::Label Label;
+  Node* closure = assembler->Parameter(Descriptor::kClosure);
+  Node* literal_index = assembler->Parameter(Descriptor::kLiteralIndex);
+  Node* constant_elements = assembler->Parameter(Descriptor::kConstantElements);
+  Node* context = assembler->Parameter(Descriptor::kContext);
+  Label call_runtime(assembler, Label::kDeferred);
+  assembler->Return(Generate(assembler, closure, literal_index, context,
+                             &call_runtime, allocation_site_mode()));
+
+  assembler->Bind(&call_runtime);
+  {
+    assembler->Comment("call runtime");
+    Node* flags = assembler->SmiConstant(
+        Smi::FromInt(ArrayLiteral::kShallowElements |
+                     (allocation_site_mode() == TRACK_ALLOCATION_SITE
+                          ? 0
+                          : ArrayLiteral::kDisableMementos)));
+    assembler->Return(assembler->CallRuntime(Runtime::kCreateArrayLiteral,
+                                             context, closure, literal_index,
+                                             constant_elements, flags));
+  }
+}
+
 void CreateAllocationSiteStub::GenerateAheadOfTime(Isolate* isolate) {
   CreateAllocationSiteStub stub(isolate);
   stub.GetCode();
@@ -5463,9 +5496,38 @@
 
 void StoreElementStub::Generate(MacroAssembler* masm) {
   DCHECK_EQ(DICTIONARY_ELEMENTS, elements_kind());
-  ElementHandlerCompiler::GenerateStoreSlow(masm);
+  KeyedStoreIC::GenerateSlow(masm);
 }
 
+void StoreFastElementStub::GenerateAssembly(
+    CodeStubAssembler* assembler) const {
+  typedef CodeStubAssembler::Label Label;
+  typedef compiler::Node Node;
+
+  assembler->Comment(
+      "StoreFastElementStub: js_array=%d, elements_kind=%s, store_mode=%d",
+      is_js_array(), ElementsKindToString(elements_kind()), store_mode());
+
+  Node* receiver = assembler->Parameter(Descriptor::kReceiver);
+  Node* key = assembler->Parameter(Descriptor::kName);
+  Node* value = assembler->Parameter(Descriptor::kValue);
+  Node* slot = assembler->Parameter(Descriptor::kSlot);
+  Node* vector = assembler->Parameter(Descriptor::kVector);
+  Node* context = assembler->Parameter(Descriptor::kContext);
+
+  Label miss(assembler);
+
+  assembler->EmitElementStore(receiver, key, value, is_js_array(),
+                              elements_kind(), store_mode(), &miss);
+  assembler->Return(value);
+
+  assembler->Bind(&miss);
+  {
+    assembler->Comment("Miss");
+    assembler->TailCallRuntime(Runtime::kKeyedStoreIC_Miss, context, value,
+                               slot, vector, receiver, key);
+  }
+}
 
 // static
 void StoreFastElementStub::GenerateAheadOfTime(Isolate* isolate) {
@@ -5597,58 +5659,9 @@
 
 void CreateAllocationSiteStub::GenerateAssembly(
     CodeStubAssembler* assembler) const {
-  typedef compiler::Node Node;
-  Node* size = assembler->IntPtrConstant(AllocationSite::kSize);
-  Node* site = assembler->Allocate(size, CodeStubAssembler::kPretenured);
-
-  // Store the map
-  assembler->StoreObjectFieldRoot(site, AllocationSite::kMapOffset,
-                                  Heap::kAllocationSiteMapRootIndex);
-
-  Node* kind =
-      assembler->SmiConstant(Smi::FromInt(GetInitialFastElementsKind()));
-  assembler->StoreObjectFieldNoWriteBarrier(
-      site, AllocationSite::kTransitionInfoOffset, kind);
-
-  // Unlike literals, constructed arrays don't have nested sites
-  Node* zero = assembler->IntPtrConstant(0);
-  assembler->StoreObjectFieldNoWriteBarrier(
-      site, AllocationSite::kNestedSiteOffset, zero);
-
-  // Pretenuring calculation field.
-  assembler->StoreObjectFieldNoWriteBarrier(
-      site, AllocationSite::kPretenureDataOffset, zero);
-
-  // Pretenuring memento creation count field.
-  assembler->StoreObjectFieldNoWriteBarrier(
-      site, AllocationSite::kPretenureCreateCountOffset, zero);
-
-  // Store an empty fixed array for the code dependency.
-  assembler->StoreObjectFieldRoot(site, AllocationSite::kDependentCodeOffset,
-                                  Heap::kEmptyFixedArrayRootIndex);
-
-  // Link the object to the allocation site list
-  Node* site_list = assembler->ExternalConstant(
-      ExternalReference::allocation_sites_list_address(isolate()));
-  Node* next_site = assembler->LoadBufferObject(site_list, 0);
-
-  // TODO(mvstanton): This is a store to a weak pointer, which we may want to
-  // mark as such in order to skip the write barrier, once we have a unified
-  // system for weakness. For now we decided to keep it like this because having
-  // an initial write barrier backed store makes this pointer strong until the
-  // next GC, and allocation sites are designed to survive several GCs anyway.
-  assembler->StoreObjectField(site, AllocationSite::kWeakNextOffset, next_site);
-  assembler->StoreNoWriteBarrier(MachineRepresentation::kTagged, site_list,
-                                 site);
-
-  Node* feedback_vector = assembler->Parameter(Descriptor::kVector);
-  Node* slot = assembler->Parameter(Descriptor::kSlot);
-
-  assembler->StoreFixedArrayElement(feedback_vector, slot, site,
-                                    UPDATE_WRITE_BARRIER,
-                                    CodeStubAssembler::SMI_PARAMETERS);
-
-  assembler->Return(site);
+  assembler->Return(assembler->CreateAllocationSiteInFeedbackVector(
+      assembler->Parameter(Descriptor::kVector),
+      assembler->Parameter(Descriptor::kSlot)));
 }
 
 void CreateWeakCellStub::GenerateAssembly(CodeStubAssembler* assembler) const {
@@ -5674,7 +5687,7 @@
   Node* array = assembler->AllocateJSArray(
       elements_kind(), array_map,
       assembler->IntPtrConstant(JSArray::kPreallocatedArrayElements),
-      assembler->IntPtrConstant(0), allocation_site);
+      assembler->SmiConstant(Smi::FromInt(0)), allocation_site);
   assembler->Return(array);
 }
 
@@ -5687,7 +5700,7 @@
   Node* array = assembler->AllocateJSArray(
       elements_kind(), array_map,
       assembler->IntPtrConstant(JSArray::kPreallocatedArrayElements),
-      assembler->IntPtrConstant(0), nullptr);
+      assembler->SmiConstant(Smi::FromInt(0)), nullptr);
   assembler->Return(array);
 }
 
@@ -5727,8 +5740,8 @@
     int element_size =
         IsFastDoubleElementsKind(elements_kind) ? kDoubleSize : kPointerSize;
     int max_fast_elements =
-        (Page::kMaxRegularHeapObjectSize - FixedArray::kHeaderSize -
-         JSArray::kSize - AllocationMemento::kSize) /
+        (kMaxRegularHeapObjectSize - FixedArray::kHeaderSize - JSArray::kSize -
+         AllocationMemento::kSize) /
         element_size;
     assembler->Branch(
         assembler->SmiAboveOrEqual(
@@ -5796,9 +5809,8 @@
   ElementsKind kind = elements_kind();
 
   Node* elements = assembler->LoadElements(object);
-  Node* new_elements = assembler->CheckAndGrowElementsCapacity(
-      context, elements, kind, key, &runtime);
-  assembler->StoreObjectField(object, JSObject::kElementsOffset, new_elements);
+  Node* new_elements =
+      assembler->TryGrowElementsCapacity(object, elements, kind, key, &runtime);
   assembler->Return(new_elements);
 
   assembler->Bind(&runtime);
@@ -5837,20 +5849,19 @@
 InternalArrayConstructorStub::InternalArrayConstructorStub(Isolate* isolate)
     : PlatformCodeStub(isolate) {}
 
-Representation RepresentationFromType(Type* type) {
-  if (type->Is(Type::UntaggedIntegral())) {
+Representation RepresentationFromMachineType(MachineType type) {
+  if (type == MachineType::Int32()) {
     return Representation::Integer32();
   }
 
-  if (type->Is(Type::TaggedSigned())) {
+  if (type == MachineType::TaggedSigned()) {
     return Representation::Smi();
   }
 
-  if (type->Is(Type::UntaggedPointer())) {
+  if (type == MachineType::Pointer()) {
     return Representation::External();
   }
 
-  DCHECK(!type->Is(Type::Untagged()));
   return Representation::Tagged();
 }
 
diff --git a/src/code-stubs.h b/src/code-stubs.h
index 4793d74..5c83fde 100644
--- a/src/code-stubs.h
+++ b/src/code-stubs.h
@@ -18,6 +18,8 @@
 namespace v8 {
 namespace internal {
 
+class ObjectLiteral;
+
 // List of code stubs used on all platforms.
 #define CODE_STUB_LIST_ALL_PLATFORMS(V)       \
   /* --- PlatformCodeStubs --- */             \
@@ -41,8 +43,6 @@
   V(StoreBufferOverflow)                      \
   V(StoreElement)                             \
   V(SubString)                                \
-  V(ToString)                                 \
-  V(ToName)                                   \
   V(StoreIC)                                  \
   V(KeyedStoreIC)                             \
   V(KeyedLoadIC)                              \
@@ -66,12 +66,8 @@
   V(KeyedStoreICTrampoline)                   \
   V(StoreICTrampoline)                        \
   /* --- HydrogenCodeStubs --- */             \
-  V(ElementsTransitionAndStore)               \
-  V(FastCloneShallowArray)                    \
   V(NumberToString)                           \
   V(StringAdd)                                \
-  V(ToObject)                                 \
-  V(Typeof)                                   \
   /* These builtins w/ JS linkage are */      \
   /* just fast-cases of C++ builtins. They */ \
   /* require varg support from TF */          \
@@ -81,18 +77,10 @@
   /* as part of the new IC system, ask */     \
   /* ishell before doing anything  */         \
   V(KeyedLoadGeneric)                         \
-  V(KeyedLoadSloppyArguments)                 \
-  V(KeyedStoreSloppyArguments)                \
   V(LoadConstant)                             \
   V(LoadDictionaryElement)                    \
   V(LoadFastElement)                          \
   V(LoadField)                                \
-  V(LoadScriptContextField)                   \
-  V(StoreFastElement)                         \
-  V(StoreField)                               \
-  V(StoreGlobal)                              \
-  V(StoreScriptContextField)                  \
-  V(StoreTransition)                          \
   /* These should never be ported to TF */    \
   /* because they are either used only by */  \
   /* FCG/Crankshaft or are deprecated */      \
@@ -140,8 +128,10 @@
   V(InternalArrayNoArgumentConstructor)       \
   V(InternalArraySingleArgumentConstructor)   \
   V(Dec)                                      \
-  V(FastCloneShallowObject)                   \
+  V(ElementsTransitionAndStore)               \
   V(FastCloneRegExp)                          \
+  V(FastCloneShallowArray)                    \
+  V(FastCloneShallowObject)                   \
   V(FastNewClosure)                           \
   V(FastNewFunctionContext)                   \
   V(InstanceOf)                               \
@@ -151,14 +141,12 @@
   V(GreaterThanOrEqual)                       \
   V(Equal)                                    \
   V(NotEqual)                                 \
+  V(KeyedLoadSloppyArguments)                 \
+  V(KeyedStoreSloppyArguments)                \
+  V(LoadScriptContextField)                   \
+  V(StoreScriptContextField)                  \
   V(StrictEqual)                              \
   V(StrictNotEqual)                           \
-  V(StringEqual)                              \
-  V(StringNotEqual)                           \
-  V(StringLessThan)                           \
-  V(StringLessThanOrEqual)                    \
-  V(StringGreaterThan)                        \
-  V(StringGreaterThanOrEqual)                 \
   V(ToInteger)                                \
   V(ToLength)                                 \
   V(HasProperty)                              \
@@ -166,16 +154,25 @@
   V(GetProperty)                              \
   V(LoadICTF)                                 \
   V(KeyedLoadICTF)                            \
+  V(StoreFastElement)                         \
+  V(StoreField)                               \
+  V(StoreGlobal)                              \
+  V(StoreICTF)                                \
   V(StoreInterceptor)                         \
+  V(StoreMap)                                 \
+  V(StoreTransition)                          \
   V(LoadApiGetter)                            \
   V(LoadIndexedInterceptor)                   \
   V(GrowArrayElements)                        \
+  V(ToObject)                                 \
+  V(Typeof)                                   \
   /* These are only called from FGC and */    \
   /* can be removed when we use ignition */   \
   /* only */                                  \
   V(LoadICTrampolineTF)                       \
   V(LoadGlobalICTrampoline)                   \
-  V(KeyedLoadICTrampolineTF)
+  V(KeyedLoadICTrampolineTF)                  \
+  V(StoreICTrampolineTF)
 
 // List of code stubs only used on ARM 32 bits platforms.
 #if V8_TARGET_ARCH_ARM
@@ -487,12 +484,6 @@
     return Descriptor(isolate());                                       \
   }
 
-#define DEFINE_ON_STACK_CALL_INTERFACE_DESCRIPTOR(PARAMETER_COUNT)         \
- public:                                                                   \
-  CallInterfaceDescriptor GetCallInterfaceDescriptor() const override {    \
-    return OnStackArgsDescriptorBase::ForArgs(isolate(), PARAMETER_COUNT); \
-  }
-
 // There are some code stubs we just can't describe right now with a
 // CallInterfaceDescriptor. Isolate behavior for those cases with this macro.
 // An attempt to retrieve a descriptor will fail.
@@ -564,7 +555,7 @@
     return call_descriptor().GetRegisterParameter(index);
   }
 
-  Type* GetParameterType(int index) const {
+  MachineType GetParameterType(int index) const {
     return call_descriptor().GetParameterType(index);
   }
 
@@ -993,57 +984,6 @@
   DEFINE_TURBOFAN_BINARY_OP_CODE_STUB(StrictNotEqual, TurboFanCodeStub);
 };
 
-class StringEqualStub final : public TurboFanCodeStub {
- public:
-  explicit StringEqualStub(Isolate* isolate) : TurboFanCodeStub(isolate) {}
-
-  DEFINE_CALL_INTERFACE_DESCRIPTOR(Compare);
-  DEFINE_TURBOFAN_CODE_STUB(StringEqual, TurboFanCodeStub);
-};
-
-class StringNotEqualStub final : public TurboFanCodeStub {
- public:
-  explicit StringNotEqualStub(Isolate* isolate) : TurboFanCodeStub(isolate) {}
-
-  DEFINE_CALL_INTERFACE_DESCRIPTOR(Compare);
-  DEFINE_TURBOFAN_CODE_STUB(StringNotEqual, TurboFanCodeStub);
-};
-
-class StringLessThanStub final : public TurboFanCodeStub {
- public:
-  explicit StringLessThanStub(Isolate* isolate) : TurboFanCodeStub(isolate) {}
-
-  DEFINE_CALL_INTERFACE_DESCRIPTOR(Compare);
-  DEFINE_TURBOFAN_CODE_STUB(StringLessThan, TurboFanCodeStub);
-};
-
-class StringLessThanOrEqualStub final : public TurboFanCodeStub {
- public:
-  explicit StringLessThanOrEqualStub(Isolate* isolate)
-      : TurboFanCodeStub(isolate) {}
-
-  DEFINE_CALL_INTERFACE_DESCRIPTOR(Compare);
-  DEFINE_TURBOFAN_CODE_STUB(StringLessThanOrEqual, TurboFanCodeStub);
-};
-
-class StringGreaterThanStub final : public TurboFanCodeStub {
- public:
-  explicit StringGreaterThanStub(Isolate* isolate)
-      : TurboFanCodeStub(isolate) {}
-
-  DEFINE_CALL_INTERFACE_DESCRIPTOR(Compare);
-  DEFINE_TURBOFAN_CODE_STUB(StringGreaterThan, TurboFanCodeStub);
-};
-
-class StringGreaterThanOrEqualStub final : public TurboFanCodeStub {
- public:
-  explicit StringGreaterThanOrEqualStub(Isolate* isolate)
-      : TurboFanCodeStub(isolate) {}
-
-  DEFINE_CALL_INTERFACE_DESCRIPTOR(Compare);
-  DEFINE_TURBOFAN_CODE_STUB(StringGreaterThanOrEqual, TurboFanCodeStub);
-};
-
 class ToIntegerStub final : public TurboFanCodeStub {
  public:
   explicit ToIntegerStub(Isolate* isolate) : TurboFanCodeStub(isolate) {}
@@ -1174,7 +1114,7 @@
   // FastNewFunctionContextStub can only allocate closures which fit in the
   // new space.
   STATIC_ASSERT(((kMaximumSlots + Context::MIN_CONTEXT_SLOTS) * kPointerSize +
-                 FixedArray::kHeaderSize) < Page::kMaxRegularHeapObjectSize);
+                 FixedArray::kHeaderSize) < kMaxRegularHeapObjectSize);
 
   DEFINE_CALL_INTERFACE_DESCRIPTOR(FastNewFunctionContext);
   DEFINE_TURBOFAN_CODE_STUB(FastNewFunctionContext, TurboFanCodeStub);
@@ -1269,24 +1209,30 @@
   DEFINE_TURBOFAN_CODE_STUB(FastCloneRegExp, TurboFanCodeStub);
 };
 
-
-class FastCloneShallowArrayStub : public HydrogenCodeStub {
+class FastCloneShallowArrayStub : public TurboFanCodeStub {
  public:
   FastCloneShallowArrayStub(Isolate* isolate,
                             AllocationSiteMode allocation_site_mode)
-      : HydrogenCodeStub(isolate) {
-    set_sub_minor_key(AllocationSiteModeBits::encode(allocation_site_mode));
+      : TurboFanCodeStub(isolate) {
+    minor_key_ = AllocationSiteModeBits::encode(allocation_site_mode);
   }
 
+  static compiler::Node* Generate(CodeStubAssembler* assembler,
+                                  compiler::Node* closure,
+                                  compiler::Node* literal_index,
+                                  compiler::Node* context,
+                                  CodeStubAssembler::Label* call_runtime,
+                                  AllocationSiteMode allocation_site_mode);
+
   AllocationSiteMode allocation_site_mode() const {
-    return AllocationSiteModeBits::decode(sub_minor_key());
+    return AllocationSiteModeBits::decode(minor_key_);
   }
 
  private:
   class AllocationSiteModeBits: public BitField<AllocationSiteMode, 0, 1> {};
 
   DEFINE_CALL_INTERFACE_DESCRIPTOR(FastCloneShallowArray);
-  DEFINE_HYDROGEN_CODE_STUB(FastCloneShallowArray, HydrogenCodeStub);
+  DEFINE_TURBOFAN_CODE_STUB(FastCloneShallowArray, TurboFanCodeStub);
 };
 
 class FastCloneShallowObjectStub : public TurboFanCodeStub {
@@ -1556,35 +1502,36 @@
   DEFINE_HANDLER_CODE_STUB(LoadField, HandlerStub);
 };
 
-
-class KeyedLoadSloppyArgumentsStub : public HandlerStub {
+class KeyedLoadSloppyArgumentsStub : public TurboFanCodeStub {
  public:
   explicit KeyedLoadSloppyArgumentsStub(Isolate* isolate)
-      : HandlerStub(isolate) {}
+      : TurboFanCodeStub(isolate) {}
+
+  Code::Kind GetCodeKind() const override { return Code::HANDLER; }
+  ExtraICState GetExtraICState() const override { return Code::LOAD_IC; }
 
  protected:
-  Code::Kind kind() const override { return Code::KEYED_LOAD_IC; }
-
   DEFINE_CALL_INTERFACE_DESCRIPTOR(LoadWithVector);
-  DEFINE_HANDLER_CODE_STUB(KeyedLoadSloppyArguments, HandlerStub);
+  DEFINE_TURBOFAN_CODE_STUB(KeyedLoadSloppyArguments, TurboFanCodeStub);
 };
 
 
 class CommonStoreModeBits : public BitField<KeyedAccessStoreMode, 0, 3> {};
 
-class KeyedStoreSloppyArgumentsStub : public HandlerStub {
+class KeyedStoreSloppyArgumentsStub : public TurboFanCodeStub {
  public:
   explicit KeyedStoreSloppyArgumentsStub(Isolate* isolate,
                                          KeyedAccessStoreMode mode)
-      : HandlerStub(isolate) {
-    set_sub_minor_key(CommonStoreModeBits::encode(mode));
+      : TurboFanCodeStub(isolate) {
+    minor_key_ = CommonStoreModeBits::encode(mode);
   }
 
- protected:
-  Code::Kind kind() const override { return Code::KEYED_STORE_IC; }
+  Code::Kind GetCodeKind() const override { return Code::HANDLER; }
+  ExtraICState GetExtraICState() const override { return Code::STORE_IC; }
 
+ protected:
   DEFINE_CALL_INTERFACE_DESCRIPTOR(StoreWithVector);
-  DEFINE_HANDLER_CODE_STUB(KeyedStoreSloppyArguments, HandlerStub);
+  DEFINE_TURBOFAN_CODE_STUB(KeyedStoreSloppyArguments, TurboFanCodeStub);
 };
 
 
@@ -1637,161 +1584,107 @@
   DEFINE_TURBOFAN_CODE_STUB(LoadApiGetter, TurboFanCodeStub);
 };
 
-class StoreFieldStub : public HandlerStub {
+class StoreFieldStub : public TurboFanCodeStub {
  public:
   StoreFieldStub(Isolate* isolate, FieldIndex index,
                  Representation representation)
-      : HandlerStub(isolate) {
+      : TurboFanCodeStub(isolate) {
     int property_index_key = index.GetFieldAccessStubKey();
-    uint8_t repr = PropertyDetails::EncodeRepresentation(representation);
-    set_sub_minor_key(StoreFieldByIndexBits::encode(property_index_key) |
-                      RepresentationBits::encode(repr));
+    minor_key_ = StoreFieldByIndexBits::encode(property_index_key) |
+                 RepresentationBits::encode(representation.kind());
   }
 
+  Code::Kind GetCodeKind() const override { return Code::HANDLER; }
+  ExtraICState GetExtraICState() const override { return Code::STORE_IC; }
+
   FieldIndex index() const {
-    int property_index_key = StoreFieldByIndexBits::decode(sub_minor_key());
+    int property_index_key = StoreFieldByIndexBits::decode(minor_key_);
     return FieldIndex::FromFieldAccessStubKey(property_index_key);
   }
 
-  Representation representation() {
-    uint8_t repr = RepresentationBits::decode(sub_minor_key());
-    return PropertyDetails::DecodeRepresentation(repr);
+  Representation representation() const {
+    return Representation::FromKind(RepresentationBits::decode(minor_key_));
   }
 
- protected:
-  Code::Kind kind() const override { return Code::STORE_IC; }
-
  private:
   class StoreFieldByIndexBits : public BitField<int, 0, 13> {};
-  class RepresentationBits : public BitField<uint8_t, 13, 4> {};
+  class RepresentationBits
+      : public BitField<Representation::Kind, StoreFieldByIndexBits::kNext, 4> {
+  };
+  STATIC_ASSERT(Representation::kNumRepresentations - 1 <
+                RepresentationBits::kMax);
 
-  // TODO(ishell): The stub uses only kReceiver and kValue parameters.
   DEFINE_CALL_INTERFACE_DESCRIPTOR(StoreWithVector);
-  DEFINE_HANDLER_CODE_STUB(StoreField, HandlerStub);
+  DEFINE_TURBOFAN_CODE_STUB(StoreField, TurboFanCodeStub);
 };
 
-
-// Register and parameter access methods are specified here instead of in
-// the CallInterfaceDescriptor because the stub uses a different descriptor
-// if FLAG_vector_stores is on.
-class StoreTransitionHelper {
+class StoreMapStub : public TurboFanCodeStub {
  public:
-  static Register ReceiverRegister() {
-    return StoreTransitionDescriptor::ReceiverRegister();
-  }
+  explicit StoreMapStub(Isolate* isolate) : TurboFanCodeStub(isolate) {}
 
-  static Register NameRegister() {
-    return StoreTransitionDescriptor::NameRegister();
-  }
+  Code::Kind GetCodeKind() const override { return Code::HANDLER; }
+  ExtraICState GetExtraICState() const override { return Code::STORE_IC; }
 
-  static Register ValueRegister() {
-    return StoreTransitionDescriptor::ValueRegister();
-  }
-
-  static Register SlotRegister() {
-    return VectorStoreTransitionDescriptor::SlotRegister();
-  }
-
-  static Register VectorRegister() {
-    return VectorStoreTransitionDescriptor::VectorRegister();
-  }
-
-  static Register MapRegister() {
-    return VectorStoreTransitionDescriptor::MapRegister();
-  }
-
-  static int ReceiverIndex() { return StoreTransitionDescriptor::kReceiver; }
-
-  static int NameIndex() { return StoreTransitionDescriptor::kReceiver; }
-
-  static int ValueIndex() { return StoreTransitionDescriptor::kValue; }
-
-  static int MapIndex() {
-    DCHECK(static_cast<int>(VectorStoreTransitionDescriptor::kMap) ==
-           static_cast<int>(StoreTransitionDescriptor::kMap));
-    return StoreTransitionDescriptor::kMap;
-  }
-
-  static int VectorIndex() {
-    if (HasVirtualSlotArg()) {
-      return VectorStoreTransitionDescriptor::kVirtualSlotVector;
-    }
-    return VectorStoreTransitionDescriptor::kVector;
-  }
-
-  // Some platforms don't have a slot arg.
-  static bool HasVirtualSlotArg() {
-    return SlotRegister().is(no_reg);
-  }
+ private:
+  DEFINE_CALL_INTERFACE_DESCRIPTOR(StoreTransition);
+  DEFINE_TURBOFAN_CODE_STUB(StoreMap, TurboFanCodeStub);
 };
 
-
-class StoreTransitionStub : public HandlerStub {
+class StoreTransitionStub : public TurboFanCodeStub {
  public:
   enum StoreMode {
-    StoreMapOnly,
     StoreMapAndValue,
     ExtendStorageAndStoreMapAndValue
   };
 
-  explicit StoreTransitionStub(Isolate* isolate) : HandlerStub(isolate) {
-    set_sub_minor_key(StoreModeBits::encode(StoreMapOnly));
-  }
-
-  StoreTransitionStub(Isolate* isolate, FieldIndex index,
+  StoreTransitionStub(Isolate* isolate, bool is_inobject,
                       Representation representation, StoreMode store_mode)
-      : HandlerStub(isolate) {
-    DCHECK(store_mode != StoreMapOnly);
-    int property_index_key = index.GetFieldAccessStubKey();
-    uint8_t repr = PropertyDetails::EncodeRepresentation(representation);
-    set_sub_minor_key(StoreFieldByIndexBits::encode(property_index_key) |
-                      RepresentationBits::encode(repr) |
-                      StoreModeBits::encode(store_mode));
+      : TurboFanCodeStub(isolate) {
+    minor_key_ = IsInobjectBits::encode(is_inobject) |
+                 RepresentationBits::encode(representation.kind()) |
+                 StoreModeBits::encode(store_mode);
   }
 
-  FieldIndex index() const {
-    DCHECK(store_mode() != StoreMapOnly);
-    int property_index_key = StoreFieldByIndexBits::decode(sub_minor_key());
-    return FieldIndex::FromFieldAccessStubKey(property_index_key);
+  Code::Kind GetCodeKind() const override { return Code::HANDLER; }
+  ExtraICState GetExtraICState() const override { return Code::STORE_IC; }
+
+  bool is_inobject() const { return IsInobjectBits::decode(minor_key_); }
+
+  Representation representation() const {
+    return Representation::FromKind(RepresentationBits::decode(minor_key_));
   }
 
-  Representation representation() {
-    DCHECK(store_mode() != StoreMapOnly);
-    uint8_t repr = RepresentationBits::decode(sub_minor_key());
-    return PropertyDetails::DecodeRepresentation(repr);
-  }
-
-  StoreMode store_mode() const {
-    return StoreModeBits::decode(sub_minor_key());
-  }
-
- protected:
-  Code::Kind kind() const override { return Code::STORE_IC; }
-  void InitializeDescriptor(CodeStubDescriptor* descriptor) override;
+  StoreMode store_mode() const { return StoreModeBits::decode(minor_key_); }
 
  private:
-  class StoreFieldByIndexBits : public BitField<int, 0, 13> {};
-  class RepresentationBits : public BitField<uint8_t, 13, 4> {};
-  class StoreModeBits : public BitField<StoreMode, 17, 2> {};
+  class IsInobjectBits : public BitField<bool, 0, 1> {};
+  class RepresentationBits
+      : public BitField<Representation::Kind, IsInobjectBits::kNext, 4> {};
+  STATIC_ASSERT(Representation::kNumRepresentations - 1 <
+                RepresentationBits::kMax);
+  class StoreModeBits
+      : public BitField<StoreMode, RepresentationBits::kNext, 1> {};
 
-  DEFINE_CALL_INTERFACE_DESCRIPTOR(VectorStoreTransition);
-  DEFINE_HANDLER_CODE_STUB(StoreTransition, HandlerStub);
+  DEFINE_CALL_INTERFACE_DESCRIPTOR(StoreNamedTransition);
+  DEFINE_TURBOFAN_CODE_STUB(StoreTransition, TurboFanCodeStub);
 };
 
-
-class StoreGlobalStub : public HandlerStub {
+class StoreGlobalStub : public TurboFanCodeStub {
  public:
   StoreGlobalStub(Isolate* isolate, PropertyCellType type,
                   Maybe<PropertyCellConstantType> constant_type,
                   bool check_global)
-      : HandlerStub(isolate) {
+      : TurboFanCodeStub(isolate) {
     PropertyCellConstantType encoded_constant_type =
         constant_type.FromMaybe(PropertyCellConstantType::kSmi);
-    set_sub_minor_key(CellTypeBits::encode(type) |
-                      ConstantTypeBits::encode(encoded_constant_type) |
-                      CheckGlobalBits::encode(check_global));
+    minor_key_ = CellTypeBits::encode(type) |
+                 ConstantTypeBits::encode(encoded_constant_type) |
+                 CheckGlobalBits::encode(check_global);
   }
 
+  Code::Kind GetCodeKind() const override { return Code::HANDLER; }
+  ExtraICState GetExtraICState() const override { return Code::STORE_IC; }
+
   static Handle<HeapObject> property_cell_placeholder(Isolate* isolate) {
     return isolate->factory()->uninitialized_value();
   }
@@ -1812,37 +1705,25 @@
     return CodeStub::GetCodeCopy(pattern);
   }
 
-  Code::Kind kind() const override { return Code::STORE_IC; }
-
   PropertyCellType cell_type() const {
-    return CellTypeBits::decode(sub_minor_key());
+    return CellTypeBits::decode(minor_key_);
   }
 
   PropertyCellConstantType constant_type() const {
     DCHECK(PropertyCellType::kConstantType == cell_type());
-    return ConstantTypeBits::decode(sub_minor_key());
+    return ConstantTypeBits::decode(minor_key_);
   }
 
-  bool check_global() const { return CheckGlobalBits::decode(sub_minor_key()); }
-
-  Representation representation() {
-    return Representation::FromKind(
-        RepresentationBits::decode(sub_minor_key()));
-  }
-
-  void set_representation(Representation r) {
-    set_sub_minor_key(RepresentationBits::update(sub_minor_key(), r.kind()));
-  }
+  bool check_global() const { return CheckGlobalBits::decode(minor_key_); }
 
  private:
   class CellTypeBits : public BitField<PropertyCellType, 0, 2> {};
-  class ConstantTypeBits : public BitField<PropertyCellConstantType, 2, 2> {};
-  class RepresentationBits : public BitField<Representation::Kind, 4, 8> {};
-  class CheckGlobalBits : public BitField<bool, 12, 1> {};
+  class ConstantTypeBits
+      : public BitField<PropertyCellConstantType, CellTypeBits::kNext, 2> {};
+  class CheckGlobalBits : public BitField<bool, ConstantTypeBits::kNext, 1> {};
 
-  // TODO(ishell): The stub uses only kValue parameter.
   DEFINE_CALL_INTERFACE_DESCRIPTOR(StoreWithVector);
-  DEFINE_HANDLER_CODE_STUB(StoreGlobal, HandlerStub);
+  DEFINE_TURBOFAN_CODE_STUB(StoreGlobal, TurboFanCodeStub);
 };
 
 // TODO(ishell): remove, once StoreGlobalIC is implemented.
@@ -1889,10 +1770,6 @@
       : CallApiCallbackStub(isolate, argc, false, call_data_undefined,
                             is_lazy) {}
 
-  CallInterfaceDescriptor GetCallInterfaceDescriptor() const override {
-    return ApiCallbackDescriptorBase::ForArgs(isolate(), argc());
-  }
-
  private:
   CallApiCallbackStub(Isolate* isolate, int argc, bool is_store,
                       bool call_data_undefined, bool is_lazy)
@@ -1916,6 +1793,7 @@
   class ArgumentBits : public BitField<int, 2, kArgBits> {};
   class IsLazyAccessorBits : public BitField<bool, 3 + kArgBits, 1> {};
 
+  DEFINE_CALL_INTERFACE_DESCRIPTOR(ApiCallback);
   DEFINE_PLATFORM_CODE_STUB(CallApiCallback, PlatformCodeStub);
 };
 
@@ -2195,11 +2073,11 @@
  public:
   explicit RegExpExecStub(Isolate* isolate) : PlatformCodeStub(isolate) { }
 
-  DEFINE_ON_STACK_CALL_INTERFACE_DESCRIPTOR(4);
+  DEFINE_CALL_INTERFACE_DESCRIPTOR(RegExpExec);
   DEFINE_PLATFORM_CODE_STUB(RegExpExec, PlatformCodeStub);
 };
 
-
+// TODO(jgruber): Remove this once all uses in regexp.js have been removed.
 class RegExpConstructResultStub final : public HydrogenCodeStub {
  public:
   explicit RegExpConstructResultStub(Isolate* isolate)
@@ -2490,15 +2368,34 @@
   }
 
  protected:
-  StoreICState state() const {
-    return StoreICState(static_cast<ExtraICState>(minor_key_));
-  }
+  StoreICState state() const { return StoreICState(GetExtraICState()); }
 
  private:
   DEFINE_CALL_INTERFACE_DESCRIPTOR(Store);
   DEFINE_PLATFORM_CODE_STUB(StoreICTrampoline, PlatformCodeStub);
 };
 
+class StoreICTrampolineTFStub : public TurboFanCodeStub {
+ public:
+  StoreICTrampolineTFStub(Isolate* isolate, const StoreICState& state)
+      : TurboFanCodeStub(isolate) {
+    minor_key_ = state.GetExtraICState();
+  }
+
+  void GenerateAssembly(CodeStubAssembler* assembler) const override;
+
+  Code::Kind GetCodeKind() const override { return Code::STORE_IC; }
+  ExtraICState GetExtraICState() const final {
+    return static_cast<ExtraICState>(minor_key_);
+  }
+
+ protected:
+  StoreICState state() const { return StoreICState(GetExtraICState()); }
+
+  DEFINE_CALL_INTERFACE_DESCRIPTOR(Store);
+  DEFINE_CODE_STUB(StoreICTrampolineTF, TurboFanCodeStub);
+};
+
 class KeyedStoreICTrampolineStub : public StoreICTrampolineStub {
  public:
   KeyedStoreICTrampolineStub(Isolate* isolate, const StoreICState& state)
@@ -2627,6 +2524,24 @@
   void GenerateImpl(MacroAssembler* masm, bool in_frame);
 };
 
+class StoreICTFStub : public TurboFanCodeStub {
+ public:
+  StoreICTFStub(Isolate* isolate, const StoreICState& state)
+      : TurboFanCodeStub(isolate) {
+    minor_key_ = state.GetExtraICState();
+  }
+
+  void GenerateAssembly(CodeStubAssembler* assembler) const override;
+
+  Code::Kind GetCodeKind() const override { return Code::STORE_IC; }
+  ExtraICState GetExtraICState() const final {
+    return static_cast<ExtraICState>(minor_key_);
+  }
+
+  DEFINE_CALL_INTERFACE_DESCRIPTOR(StoreWithVector);
+  DEFINE_CODE_STUB(StoreICTF, TurboFanCodeStub);
+};
+
 class KeyedStoreICStub : public PlatformCodeStub {
  public:
   KeyedStoreICStub(Isolate* isolate, const StoreICState& state)
@@ -2696,23 +2611,21 @@
   DEFINE_PLATFORM_CODE_STUB(DoubleToI, PlatformCodeStub);
 };
 
-
-class ScriptContextFieldStub : public HandlerStub {
+class ScriptContextFieldStub : public TurboFanCodeStub {
  public:
   ScriptContextFieldStub(Isolate* isolate,
                          const ScriptContextTable::LookupResult* lookup_result)
-      : HandlerStub(isolate) {
+      : TurboFanCodeStub(isolate) {
     DCHECK(Accepted(lookup_result));
-    STATIC_ASSERT(kContextIndexBits + kSlotIndexBits <= kSubMinorKeyBits);
-    set_sub_minor_key(ContextIndexBits::encode(lookup_result->context_index) |
-                      SlotIndexBits::encode(lookup_result->slot_index));
+    minor_key_ = ContextIndexBits::encode(lookup_result->context_index) |
+                 SlotIndexBits::encode(lookup_result->slot_index);
   }
 
-  int context_index() const {
-    return ContextIndexBits::decode(sub_minor_key());
-  }
+  Code::Kind GetCodeKind() const override { return Code::HANDLER; }
 
-  int slot_index() const { return SlotIndexBits::decode(sub_minor_key()); }
+  int context_index() const { return ContextIndexBits::decode(minor_key_); }
+
+  int slot_index() const { return SlotIndexBits::decode(minor_key_); }
 
   static bool Accepted(const ScriptContextTable::LookupResult* lookup_result) {
     return ContextIndexBits::is_valid(lookup_result->context_index) &&
@@ -2726,7 +2639,7 @@
   class SlotIndexBits
       : public BitField<int, kContextIndexBits, kSlotIndexBits> {};
 
-  DEFINE_CODE_STUB_BASE(ScriptContextFieldStub, HandlerStub);
+  DEFINE_CODE_STUB_BASE(ScriptContextFieldStub, TurboFanCodeStub);
 };
 
 
@@ -2736,11 +2649,11 @@
       Isolate* isolate, const ScriptContextTable::LookupResult* lookup_result)
       : ScriptContextFieldStub(isolate, lookup_result) {}
 
- private:
-  Code::Kind kind() const override { return Code::LOAD_IC; }
+  ExtraICState GetExtraICState() const override { return Code::LOAD_IC; }
 
+ private:
   DEFINE_CALL_INTERFACE_DESCRIPTOR(LoadWithVector);
-  DEFINE_HANDLER_CODE_STUB(LoadScriptContextField, ScriptContextFieldStub);
+  DEFINE_TURBOFAN_CODE_STUB(LoadScriptContextField, ScriptContextFieldStub);
 };
 
 
@@ -2750,11 +2663,11 @@
       Isolate* isolate, const ScriptContextTable::LookupResult* lookup_result)
       : ScriptContextFieldStub(isolate, lookup_result) {}
 
- private:
-  Code::Kind kind() const override { return Code::STORE_IC; }
+  ExtraICState GetExtraICState() const override { return Code::STORE_IC; }
 
+ private:
   DEFINE_CALL_INTERFACE_DESCRIPTOR(StoreWithVector);
-  DEFINE_HANDLER_CODE_STUB(StoreScriptContextField, ScriptContextFieldStub);
+  DEFINE_TURBOFAN_CODE_STUB(StoreScriptContextField, ScriptContextFieldStub);
 };
 
 
@@ -2790,38 +2703,38 @@
   DEFINE_HANDLER_CODE_STUB(LoadFastElement, HandlerStub);
 };
 
-
-class StoreFastElementStub : public HydrogenCodeStub {
+class StoreFastElementStub : public TurboFanCodeStub {
  public:
   StoreFastElementStub(Isolate* isolate, bool is_js_array,
                        ElementsKind elements_kind, KeyedAccessStoreMode mode)
-      : HydrogenCodeStub(isolate) {
-    set_sub_minor_key(CommonStoreModeBits::encode(mode) |
-                      ElementsKindBits::encode(elements_kind) |
-                      IsJSArrayBits::encode(is_js_array));
+      : TurboFanCodeStub(isolate) {
+    minor_key_ = CommonStoreModeBits::encode(mode) |
+                 ElementsKindBits::encode(elements_kind) |
+                 IsJSArrayBits::encode(is_js_array);
   }
 
   static void GenerateAheadOfTime(Isolate* isolate);
 
-  bool is_js_array() const { return IsJSArrayBits::decode(sub_minor_key()); }
+  bool is_js_array() const { return IsJSArrayBits::decode(minor_key_); }
 
   ElementsKind elements_kind() const {
-    return ElementsKindBits::decode(sub_minor_key());
+    return ElementsKindBits::decode(minor_key_);
   }
 
   KeyedAccessStoreMode store_mode() const {
-    return CommonStoreModeBits::decode(sub_minor_key());
+    return CommonStoreModeBits::decode(minor_key_);
   }
 
   Code::Kind GetCodeKind() const override { return Code::HANDLER; }
   ExtraICState GetExtraICState() const override { return Code::KEYED_STORE_IC; }
 
  private:
-  class ElementsKindBits : public BitField<ElementsKind, 3, 8> {};
-  class IsJSArrayBits : public BitField<bool, 11, 1> {};
+  class ElementsKindBits
+      : public BitField<ElementsKind, CommonStoreModeBits::kNext, 8> {};
+  class IsJSArrayBits : public BitField<bool, ElementsKindBits::kNext, 1> {};
 
   DEFINE_CALL_INTERFACE_DESCRIPTOR(StoreWithVector);
-  DEFINE_HYDROGEN_CODE_STUB(StoreFastElement, HydrogenCodeStub);
+  DEFINE_TURBOFAN_CODE_STUB(StoreFastElement, TurboFanCodeStub);
 };
 
 
@@ -3008,10 +2921,6 @@
                  CommonStoreModeBits::encode(mode);
   }
 
-  CallInterfaceDescriptor GetCallInterfaceDescriptor() const override {
-    return StoreWithVectorDescriptor(isolate());
-  }
-
   Code::Kind GetCodeKind() const override { return Code::HANDLER; }
   ExtraICState GetExtraICState() const override { return Code::KEYED_STORE_IC; }
 
@@ -3020,8 +2929,10 @@
     return ElementsKindBits::decode(minor_key_);
   }
 
-  class ElementsKindBits : public BitField<ElementsKind, 3, 8> {};
+  class ElementsKindBits
+      : public BitField<ElementsKind, CommonStoreModeBits::kNext, 8> {};
 
+  DEFINE_CALL_INTERFACE_DESCRIPTOR(StoreWithVector);
   DEFINE_PLATFORM_CODE_STUB(StoreElement, PlatformCodeStub);
 };
 
@@ -3098,34 +3009,35 @@
 
 std::ostream& operator<<(std::ostream& os, const ToBooleanICStub::Types& t);
 
-class ElementsTransitionAndStoreStub : public HydrogenCodeStub {
+class ElementsTransitionAndStoreStub : public TurboFanCodeStub {
  public:
   ElementsTransitionAndStoreStub(Isolate* isolate, ElementsKind from_kind,
                                  ElementsKind to_kind, bool is_jsarray,
                                  KeyedAccessStoreMode store_mode)
-      : HydrogenCodeStub(isolate) {
-    set_sub_minor_key(CommonStoreModeBits::encode(store_mode) |
-                      FromBits::encode(from_kind) | ToBits::encode(to_kind) |
-                      IsJSArrayBits::encode(is_jsarray));
+      : TurboFanCodeStub(isolate) {
+    minor_key_ = CommonStoreModeBits::encode(store_mode) |
+                 FromBits::encode(from_kind) | ToBits::encode(to_kind) |
+                 IsJSArrayBits::encode(is_jsarray);
   }
 
-  ElementsKind from_kind() const { return FromBits::decode(sub_minor_key()); }
-  ElementsKind to_kind() const { return ToBits::decode(sub_minor_key()); }
-  bool is_jsarray() const { return IsJSArrayBits::decode(sub_minor_key()); }
+  ElementsKind from_kind() const { return FromBits::decode(minor_key_); }
+  ElementsKind to_kind() const { return ToBits::decode(minor_key_); }
+  bool is_jsarray() const { return IsJSArrayBits::decode(minor_key_); }
   KeyedAccessStoreMode store_mode() const {
-    return CommonStoreModeBits::decode(sub_minor_key());
+    return CommonStoreModeBits::decode(minor_key_);
   }
 
   Code::Kind GetCodeKind() const override { return Code::HANDLER; }
   ExtraICState GetExtraICState() const override { return Code::KEYED_STORE_IC; }
 
  private:
-  class FromBits : public BitField<ElementsKind, 3, 8> {};
+  class FromBits
+      : public BitField<ElementsKind, CommonStoreModeBits::kNext, 8> {};
   class ToBits : public BitField<ElementsKind, 11, 8> {};
   class IsJSArrayBits : public BitField<bool, 19, 1> {};
 
-  DEFINE_CALL_INTERFACE_DESCRIPTOR(VectorStoreTransition);
-  DEFINE_HYDROGEN_CODE_STUB(ElementsTransitionAndStore, HydrogenCodeStub);
+  DEFINE_CALL_INTERFACE_DESCRIPTOR(StoreTransition);
+  DEFINE_TURBOFAN_CODE_STUB(ElementsTransitionAndStore, TurboFanCodeStub);
 };
 
 
@@ -3191,29 +3103,24 @@
   DEFINE_PLATFORM_CODE_STUB(StoreBufferOverflow, PlatformCodeStub);
 };
 
-
-class SubStringStub : public PlatformCodeStub {
+class SubStringStub : public TurboFanCodeStub {
  public:
-  explicit SubStringStub(Isolate* isolate) : PlatformCodeStub(isolate) {}
+  explicit SubStringStub(Isolate* isolate) : TurboFanCodeStub(isolate) {}
 
-  DEFINE_ON_STACK_CALL_INTERFACE_DESCRIPTOR(3);
-  DEFINE_PLATFORM_CODE_STUB(SubString, PlatformCodeStub);
-};
+  static compiler::Node* Generate(CodeStubAssembler* assembler,
+                                  compiler::Node* string, compiler::Node* from,
+                                  compiler::Node* to, compiler::Node* context);
 
-class ToStringStub final : public PlatformCodeStub {
- public:
-  explicit ToStringStub(Isolate* isolate) : PlatformCodeStub(isolate) {}
+  void GenerateAssembly(CodeStubAssembler* assembler) const override {
+    assembler->Return(Generate(assembler,
+                               assembler->Parameter(Descriptor::kString),
+                               assembler->Parameter(Descriptor::kFrom),
+                               assembler->Parameter(Descriptor::kTo),
+                               assembler->Parameter(Descriptor::kContext)));
+  }
 
-  DEFINE_CALL_INTERFACE_DESCRIPTOR(TypeConversion);
-  DEFINE_PLATFORM_CODE_STUB(ToString, PlatformCodeStub);
-};
-
-class ToNameStub final : public PlatformCodeStub {
- public:
-  explicit ToNameStub(Isolate* isolate) : PlatformCodeStub(isolate) {}
-
-  DEFINE_CALL_INTERFACE_DESCRIPTOR(TypeConversion);
-  DEFINE_PLATFORM_CODE_STUB(ToName, PlatformCodeStub);
+  DEFINE_CALL_INTERFACE_DESCRIPTOR(SubString);
+  DEFINE_CODE_STUB(SubString, TurboFanCodeStub);
 };
 
 class ToObjectStub final : public TurboFanCodeStub {
@@ -3231,7 +3138,7 @@
 #undef DEFINE_CODE_STUB
 #undef DEFINE_CODE_STUB_BASE
 
-extern Representation RepresentationFromType(Type* type);
+extern Representation RepresentationFromMachineType(MachineType type);
 
 }  // namespace internal
 }  // namespace v8
diff --git a/src/codegen.cc b/src/codegen.cc
index e47db10..afd8a6f 100644
--- a/src/codegen.cc
+++ b/src/codegen.cc
@@ -12,10 +12,9 @@
 
 #include "src/ast/prettyprinter.h"
 #include "src/bootstrapper.h"
-#include "src/compiler.h"
+#include "src/compilation-info.h"
 #include "src/debug/debug.h"
 #include "src/eh-frame.h"
-#include "src/parsing/parser.h"
 #include "src/runtime/runtime.h"
 
 namespace v8 {
@@ -147,7 +146,8 @@
       isolate->bootstrapper()->IsActive()
           ? FLAG_print_builtin_code
           : (FLAG_print_code || (info->IsStub() && FLAG_print_code_stubs) ||
-             (info->IsOptimizing() && FLAG_print_opt_code));
+             (info->IsOptimizing() && FLAG_print_opt_code &&
+              info->shared_info()->PassesFilter(FLAG_print_opt_code_filter)));
   if (print_code) {
     std::unique_ptr<char[]> debug_name = info->GetDebugName();
     CodeTracer::Scope tracing_scope(info->isolate()->GetCodeTracer());
diff --git a/src/collector.h b/src/collector.h
index 8454aae..abb2fbb 100644
--- a/src/collector.h
+++ b/src/collector.h
@@ -6,7 +6,7 @@
 #define V8_COLLECTOR_H_
 
 #include "src/checks.h"
-#include "src/list.h"
+#include "src/list-inl.h"
 #include "src/vector.h"
 
 namespace v8 {
diff --git a/src/compilation-dependencies.cc b/src/compilation-dependencies.cc
index 96b3859..dfd7cfe 100644
--- a/src/compilation-dependencies.cc
+++ b/src/compilation-dependencies.cc
@@ -8,7 +8,7 @@
 #include "src/handles-inl.h"
 #include "src/isolate.h"
 #include "src/objects-inl.h"
-#include "src/zone.h"
+#include "src/zone/zone.h"
 
 namespace v8 {
 namespace internal {
diff --git a/src/compilation-info.cc b/src/compilation-info.cc
new file mode 100644
index 0000000..2e0934a
--- /dev/null
+++ b/src/compilation-info.cc
@@ -0,0 +1,214 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compilation-info.h"
+
+#include "src/api.h"
+#include "src/ast/ast.h"
+#include "src/ast/scopes.h"
+#include "src/isolate.h"
+#include "src/parsing/parse-info.h"
+
+namespace v8 {
+namespace internal {
+
+#define PARSE_INFO_GETTER(type, name)  \
+  type CompilationInfo::name() const { \
+    CHECK(parse_info());               \
+    return parse_info()->name();       \
+  }
+
+#define PARSE_INFO_GETTER_WITH_DEFAULT(type, name, def) \
+  type CompilationInfo::name() const {                  \
+    return parse_info() ? parse_info()->name() : def;   \
+  }
+
+PARSE_INFO_GETTER(Handle<Script>, script)
+PARSE_INFO_GETTER(FunctionLiteral*, literal)
+PARSE_INFO_GETTER_WITH_DEFAULT(DeclarationScope*, scope, nullptr)
+PARSE_INFO_GETTER(Handle<SharedFunctionInfo>, shared_info)
+
+#undef PARSE_INFO_GETTER
+#undef PARSE_INFO_GETTER_WITH_DEFAULT
+
+bool CompilationInfo::has_shared_info() const {
+  return parse_info_ && !parse_info_->shared_info().is_null();
+}
+
+CompilationInfo::CompilationInfo(ParseInfo* parse_info,
+                                 Handle<JSFunction> closure)
+    : CompilationInfo(parse_info, {}, Code::ComputeFlags(Code::FUNCTION), BASE,
+                      parse_info->isolate(), parse_info->zone()) {
+  closure_ = closure;
+
+  // Compiling for the snapshot typically results in different code than
+  // compiling later on. This means that code recompiled with deoptimization
+  // support won't be "equivalent" (as defined by SharedFunctionInfo::
+  // EnableDeoptimizationSupport), so it will replace the old code and all
+  // its type feedback. To avoid this, always compile functions in the snapshot
+  // with deoptimization support.
+  if (isolate_->serializer_enabled()) EnableDeoptimizationSupport();
+
+  if (FLAG_function_context_specialization) MarkAsFunctionContextSpecializing();
+  if (FLAG_turbo_source_positions) MarkAsSourcePositionsEnabled();
+  if (FLAG_turbo_splitting) MarkAsSplittingEnabled();
+}
+
+CompilationInfo::CompilationInfo(Vector<const char> debug_name,
+                                 Isolate* isolate, Zone* zone,
+                                 Code::Flags code_flags)
+    : CompilationInfo(nullptr, debug_name, code_flags, STUB, isolate, zone) {}
+
+CompilationInfo::CompilationInfo(ParseInfo* parse_info,
+                                 Vector<const char> debug_name,
+                                 Code::Flags code_flags, Mode mode,
+                                 Isolate* isolate, Zone* zone)
+    : parse_info_(parse_info),
+      isolate_(isolate),
+      flags_(0),
+      code_flags_(code_flags),
+      mode_(mode),
+      osr_ast_id_(BailoutId::None()),
+      zone_(zone),
+      deferred_handles_(nullptr),
+      dependencies_(isolate, zone),
+      bailout_reason_(kNoReason),
+      prologue_offset_(Code::kPrologueOffsetNotSet),
+      parameter_count_(0),
+      optimization_id_(-1),
+      osr_expr_stack_height_(-1),
+      debug_name_(debug_name) {}
+
+CompilationInfo::~CompilationInfo() {
+  if (GetFlag(kDisableFutureOptimization) && has_shared_info()) {
+    shared_info()->DisableOptimization(bailout_reason());
+  }
+  dependencies()->Rollback();
+  delete deferred_handles_;
+}
+
+int CompilationInfo::num_parameters() const {
+  return !IsStub() ? scope()->num_parameters() : parameter_count_;
+}
+
+int CompilationInfo::num_parameters_including_this() const {
+  return num_parameters() + (is_this_defined() ? 1 : 0);
+}
+
+bool CompilationInfo::is_this_defined() const { return !IsStub(); }
+
+// Primitive functions are unlikely to be picked up by the stack-walking
+// profiler, so they trigger their own optimization when they're called
+// for the SharedFunctionInfo::kCallsUntilPrimitiveOptimization-th time.
+bool CompilationInfo::ShouldSelfOptimize() {
+  return FLAG_crankshaft &&
+         !(literal()->flags() & AstProperties::kDontSelfOptimize) &&
+         !literal()->dont_optimize() &&
+         literal()->scope()->AllowsLazyCompilation() &&
+         !shared_info()->optimization_disabled();
+}
+
+void CompilationInfo::ReopenHandlesInNewHandleScope() {
+  closure_ = Handle<JSFunction>(*closure_);
+}
+
+bool CompilationInfo::has_simple_parameters() {
+  return scope()->has_simple_parameters();
+}
+
+std::unique_ptr<char[]> CompilationInfo::GetDebugName() const {
+  if (parse_info() && parse_info()->literal()) {
+    AllowHandleDereference allow_deref;
+    return parse_info()->literal()->debug_name()->ToCString();
+  }
+  if (parse_info() && !parse_info()->shared_info().is_null()) {
+    return parse_info()->shared_info()->DebugName()->ToCString();
+  }
+  Vector<const char> name_vec = debug_name_;
+  if (name_vec.is_empty()) name_vec = ArrayVector("unknown");
+  std::unique_ptr<char[]> name(new char[name_vec.length() + 1]);
+  memcpy(name.get(), name_vec.start(), name_vec.length());
+  name[name_vec.length()] = '\0';
+  return name;
+}
+
+StackFrame::Type CompilationInfo::GetOutputStackFrameType() const {
+  switch (output_code_kind()) {
+    case Code::STUB:
+    case Code::BYTECODE_HANDLER:
+    case Code::HANDLER:
+    case Code::BUILTIN:
+#define CASE_KIND(kind) case Code::kind:
+      IC_KIND_LIST(CASE_KIND)
+#undef CASE_KIND
+      return StackFrame::STUB;
+    case Code::WASM_FUNCTION:
+      return StackFrame::WASM;
+    case Code::JS_TO_WASM_FUNCTION:
+      return StackFrame::JS_TO_WASM;
+    case Code::WASM_TO_JS_FUNCTION:
+      return StackFrame::WASM_TO_JS;
+    default:
+      UNIMPLEMENTED();
+      return StackFrame::NONE;
+  }
+}
+
+int CompilationInfo::GetDeclareGlobalsFlags() const {
+  DCHECK(DeclareGlobalsLanguageMode::is_valid(parse_info()->language_mode()));
+  return DeclareGlobalsEvalFlag::encode(parse_info()->is_eval()) |
+         DeclareGlobalsNativeFlag::encode(parse_info()->is_native()) |
+         DeclareGlobalsLanguageMode::encode(parse_info()->language_mode());
+}
+
+SourcePositionTableBuilder::RecordingMode
+CompilationInfo::SourcePositionRecordingMode() const {
+  return parse_info() && parse_info()->is_native()
+             ? SourcePositionTableBuilder::OMIT_SOURCE_POSITIONS
+             : SourcePositionTableBuilder::RECORD_SOURCE_POSITIONS;
+}
+
+bool CompilationInfo::ExpectsJSReceiverAsReceiver() {
+  return is_sloppy(parse_info()->language_mode()) && !parse_info()->is_native();
+}
+
+bool CompilationInfo::has_context() const { return !closure().is_null(); }
+
+Context* CompilationInfo::context() const {
+  return has_context() ? closure()->context() : nullptr;
+}
+
+bool CompilationInfo::has_native_context() const {
+  return !closure().is_null() && (closure()->native_context() != nullptr);
+}
+
+Context* CompilationInfo::native_context() const {
+  return has_native_context() ? closure()->native_context() : nullptr;
+}
+
+bool CompilationInfo::has_global_object() const { return has_native_context(); }
+
+JSGlobalObject* CompilationInfo::global_object() const {
+  return has_global_object() ? native_context()->global_object() : nullptr;
+}
+
+void CompilationInfo::SetOptimizing() {
+  DCHECK(has_shared_info());
+  SetMode(OPTIMIZE);
+  optimization_id_ = isolate()->NextOptimizationId();
+  code_flags_ = Code::KindField::update(code_flags_, Code::OPTIMIZED_FUNCTION);
+}
+
+void CompilationInfo::AddInlinedFunction(
+    Handle<SharedFunctionInfo> inlined_function) {
+  inlined_functions_.push_back(InlinedFunctionHolder(
+      inlined_function, handle(inlined_function->code())));
+}
+
+Code::Kind CompilationInfo::output_code_kind() const {
+  return Code::ExtractKindFromFlags(code_flags_);
+}
+
+}  // namespace internal
+}  // namespace v8
diff --git a/src/compilation-info.h b/src/compilation-info.h
new file mode 100644
index 0000000..88477ae
--- /dev/null
+++ b/src/compilation-info.h
@@ -0,0 +1,400 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILATION_INFO_H_
+#define V8_COMPILATION_INFO_H_
+
+#include <memory>
+
+#include "src/compilation-dependencies.h"
+#include "src/frames.h"
+#include "src/handles.h"
+#include "src/objects.h"
+#include "src/source-position-table.h"
+#include "src/utils.h"
+#include "src/vector.h"
+
+namespace v8 {
+namespace internal {
+
+class DeclarationScope;
+class DeferredHandles;
+class FunctionLiteral;
+class JavaScriptFrame;
+class ParseInfo;
+class Isolate;
+class Zone;
+
+// CompilationInfo encapsulates some information known at compile time.  It
+// is constructed based on the resources available at compile-time.
+class CompilationInfo final {
+ public:
+  // Various configuration flags for a compilation, as well as some properties
+  // of the compiled code produced by a compilation.
+  enum Flag {
+    kDeferredCalling = 1 << 0,
+    kNonDeferredCalling = 1 << 1,
+    kSavesCallerDoubles = 1 << 2,
+    kRequiresFrame = 1 << 3,
+    kMustNotHaveEagerFrame = 1 << 4,
+    kDeoptimizationSupport = 1 << 5,
+    kDebug = 1 << 6,
+    kSerializing = 1 << 7,
+    kFunctionContextSpecializing = 1 << 8,
+    kFrameSpecializing = 1 << 9,
+    kNativeContextSpecializing = 1 << 10,
+    kInliningEnabled = 1 << 11,
+    kDisableFutureOptimization = 1 << 12,
+    kSplittingEnabled = 1 << 13,
+    kDeoptimizationEnabled = 1 << 14,
+    kSourcePositionsEnabled = 1 << 15,
+    kBailoutOnUninitialized = 1 << 16,
+    kOptimizeFromBytecode = 1 << 17,
+    kTypeFeedbackEnabled = 1 << 18,
+    kAccessorInliningEnabled = 1 << 19,
+  };
+
+  CompilationInfo(ParseInfo* parse_info, Handle<JSFunction> closure);
+  CompilationInfo(Vector<const char> debug_name, Isolate* isolate, Zone* zone,
+                  Code::Flags code_flags);
+  ~CompilationInfo();
+
+  ParseInfo* parse_info() const { return parse_info_; }
+
+  // -----------------------------------------------------------
+  // TODO(titzer): inline and delete accessors of ParseInfo
+  // -----------------------------------------------------------
+  Handle<Script> script() const;
+  FunctionLiteral* literal() const;
+  DeclarationScope* scope() const;
+  Handle<SharedFunctionInfo> shared_info() const;
+  bool has_shared_info() const;
+  // -----------------------------------------------------------
+
+  Isolate* isolate() const { return isolate_; }
+  Zone* zone() { return zone_; }
+  bool is_osr() const { return !osr_ast_id_.IsNone(); }
+  Handle<JSFunction> closure() const { return closure_; }
+  Handle<Code> code() const { return code_; }
+  Code::Flags code_flags() const { return code_flags_; }
+  BailoutId osr_ast_id() const { return osr_ast_id_; }
+  JavaScriptFrame* osr_frame() const { return osr_frame_; }
+  int num_parameters() const;
+  int num_parameters_including_this() const;
+  bool is_this_defined() const;
+
+  void set_parameter_count(int parameter_count) {
+    DCHECK(IsStub());
+    parameter_count_ = parameter_count;
+  }
+
+  bool has_bytecode_array() const { return !bytecode_array_.is_null(); }
+  Handle<BytecodeArray> bytecode_array() const { return bytecode_array_; }
+
+  bool is_calling() const {
+    return GetFlag(kDeferredCalling) || GetFlag(kNonDeferredCalling);
+  }
+
+  void MarkAsDeferredCalling() { SetFlag(kDeferredCalling); }
+
+  bool is_deferred_calling() const { return GetFlag(kDeferredCalling); }
+
+  void MarkAsNonDeferredCalling() { SetFlag(kNonDeferredCalling); }
+
+  bool is_non_deferred_calling() const { return GetFlag(kNonDeferredCalling); }
+
+  void MarkAsSavesCallerDoubles() { SetFlag(kSavesCallerDoubles); }
+
+  bool saves_caller_doubles() const { return GetFlag(kSavesCallerDoubles); }
+
+  void MarkAsRequiresFrame() { SetFlag(kRequiresFrame); }
+
+  bool requires_frame() const { return GetFlag(kRequiresFrame); }
+
+  void MarkMustNotHaveEagerFrame() { SetFlag(kMustNotHaveEagerFrame); }
+
+  bool GetMustNotHaveEagerFrame() const {
+    return GetFlag(kMustNotHaveEagerFrame);
+  }
+
+  // Compilations marked as debug produce unoptimized code with debug break
+  // Inner functions that cannot be compiled w/o context are compiled eagerly.
+  // Always include deoptimization support to avoid having to recompile again.
+  void MarkAsDebug() {
+    SetFlag(kDebug);
+    SetFlag(kDeoptimizationSupport);
+  }
+
+  bool is_debug() const { return GetFlag(kDebug); }
+
+  void PrepareForSerializing() { SetFlag(kSerializing); }
+
+  bool will_serialize() const { return GetFlag(kSerializing); }
+
+  void MarkAsFunctionContextSpecializing() {
+    SetFlag(kFunctionContextSpecializing);
+  }
+
+  bool is_function_context_specializing() const {
+    return GetFlag(kFunctionContextSpecializing);
+  }
+
+  void MarkAsFrameSpecializing() { SetFlag(kFrameSpecializing); }
+
+  bool is_frame_specializing() const { return GetFlag(kFrameSpecializing); }
+
+  void MarkAsNativeContextSpecializing() {
+    SetFlag(kNativeContextSpecializing);
+  }
+
+  bool is_native_context_specializing() const {
+    return GetFlag(kNativeContextSpecializing);
+  }
+
+  void MarkAsDeoptimizationEnabled() { SetFlag(kDeoptimizationEnabled); }
+
+  bool is_deoptimization_enabled() const {
+    return GetFlag(kDeoptimizationEnabled);
+  }
+
+  void MarkAsTypeFeedbackEnabled() { SetFlag(kTypeFeedbackEnabled); }
+
+  bool is_type_feedback_enabled() const {
+    return GetFlag(kTypeFeedbackEnabled);
+  }
+
+  void MarkAsAccessorInliningEnabled() { SetFlag(kAccessorInliningEnabled); }
+
+  bool is_accessor_inlining_enabled() const {
+    return GetFlag(kAccessorInliningEnabled);
+  }
+
+  void MarkAsSourcePositionsEnabled() { SetFlag(kSourcePositionsEnabled); }
+
+  bool is_source_positions_enabled() const {
+    return GetFlag(kSourcePositionsEnabled);
+  }
+
+  void MarkAsInliningEnabled() { SetFlag(kInliningEnabled); }
+
+  bool is_inlining_enabled() const { return GetFlag(kInliningEnabled); }
+
+  void MarkAsSplittingEnabled() { SetFlag(kSplittingEnabled); }
+
+  bool is_splitting_enabled() const { return GetFlag(kSplittingEnabled); }
+
+  void MarkAsBailoutOnUninitialized() { SetFlag(kBailoutOnUninitialized); }
+
+  bool is_bailout_on_uninitialized() const {
+    return GetFlag(kBailoutOnUninitialized);
+  }
+
+  void MarkAsOptimizeFromBytecode() { SetFlag(kOptimizeFromBytecode); }
+
+  bool is_optimizing_from_bytecode() const {
+    return GetFlag(kOptimizeFromBytecode);
+  }
+
+  bool GeneratePreagedPrologue() const {
+    // Generate a pre-aged prologue if we are optimizing for size, which
+    // will make code flushing more aggressive. Only apply to Code::FUNCTION,
+    // since StaticMarkingVisitor::IsFlushable only flushes proper functions.
+    return FLAG_optimize_for_size && FLAG_age_code && !is_debug() &&
+           output_code_kind() == Code::FUNCTION;
+  }
+
+  void SetCode(Handle<Code> code) { code_ = code; }
+
+  void SetBytecodeArray(Handle<BytecodeArray> bytecode_array) {
+    bytecode_array_ = bytecode_array;
+  }
+
+  bool ShouldTrapOnDeopt() const {
+    return (FLAG_trap_on_deopt && IsOptimizing()) ||
+           (FLAG_trap_on_stub_deopt && IsStub());
+  }
+
+  bool has_context() const;
+  Context* context() const;
+
+  bool has_native_context() const;
+  Context* native_context() const;
+
+  bool has_global_object() const;
+  JSGlobalObject* global_object() const;
+
+  // Accessors for the different compilation modes.
+  bool IsOptimizing() const { return mode_ == OPTIMIZE; }
+  bool IsStub() const { return mode_ == STUB; }
+  void SetOptimizing();
+  void SetOptimizingForOsr(BailoutId osr_ast_id, JavaScriptFrame* osr_frame) {
+    SetOptimizing();
+    osr_ast_id_ = osr_ast_id;
+    osr_frame_ = osr_frame;
+  }
+
+  // Deoptimization support.
+  bool HasDeoptimizationSupport() const {
+    return GetFlag(kDeoptimizationSupport);
+  }
+  void EnableDeoptimizationSupport() {
+    DCHECK_EQ(BASE, mode_);
+    SetFlag(kDeoptimizationSupport);
+  }
+  bool ShouldEnsureSpaceForLazyDeopt() { return !IsStub(); }
+
+  bool ExpectsJSReceiverAsReceiver();
+
+  // Determines whether or not to insert a self-optimization header.
+  bool ShouldSelfOptimize();
+
+  void set_deferred_handles(DeferredHandles* deferred_handles) {
+    DCHECK(deferred_handles_ == NULL);
+    deferred_handles_ = deferred_handles;
+  }
+
+  void ReopenHandlesInNewHandleScope();
+
+  void AbortOptimization(BailoutReason reason) {
+    DCHECK(reason != kNoReason);
+    if (bailout_reason_ == kNoReason) bailout_reason_ = reason;
+    SetFlag(kDisableFutureOptimization);
+  }
+
+  void RetryOptimization(BailoutReason reason) {
+    DCHECK(reason != kNoReason);
+    if (GetFlag(kDisableFutureOptimization)) return;
+    bailout_reason_ = reason;
+  }
+
+  BailoutReason bailout_reason() const { return bailout_reason_; }
+
+  int prologue_offset() const {
+    DCHECK_NE(Code::kPrologueOffsetNotSet, prologue_offset_);
+    return prologue_offset_;
+  }
+
+  void set_prologue_offset(int prologue_offset) {
+    DCHECK_EQ(Code::kPrologueOffsetNotSet, prologue_offset_);
+    prologue_offset_ = prologue_offset;
+  }
+
+  CompilationDependencies* dependencies() { return &dependencies_; }
+
+  int optimization_id() const { return optimization_id_; }
+
+  int osr_expr_stack_height() { return osr_expr_stack_height_; }
+  void set_osr_expr_stack_height(int height) {
+    DCHECK(height >= 0);
+    osr_expr_stack_height_ = height;
+  }
+
+  bool has_simple_parameters();
+
+  struct InlinedFunctionHolder {
+    Handle<SharedFunctionInfo> shared_info;
+
+    // Root that holds the unoptimized code of the inlined function alive
+    // (and out of reach of code flushing) until we finish compilation.
+    // Do not remove.
+    Handle<Code> inlined_code_object_root;
+
+    InlinedFunctionHolder(Handle<SharedFunctionInfo> inlined_shared_info,
+                          Handle<Code> inlined_code_object_root)
+        : shared_info(inlined_shared_info),
+          inlined_code_object_root(inlined_code_object_root) {}
+  };
+
+  typedef std::vector<InlinedFunctionHolder> InlinedFunctionList;
+  InlinedFunctionList const& inlined_functions() const {
+    return inlined_functions_;
+  }
+
+  void AddInlinedFunction(Handle<SharedFunctionInfo> inlined_function);
+
+  std::unique_ptr<char[]> GetDebugName() const;
+
+  Code::Kind output_code_kind() const;
+
+  StackFrame::Type GetOutputStackFrameType() const;
+
+  int GetDeclareGlobalsFlags() const;
+
+  SourcePositionTableBuilder::RecordingMode SourcePositionRecordingMode() const;
+
+ private:
+  // Compilation mode.
+  // BASE is generated by the full codegen, optionally prepared for bailouts.
+  // OPTIMIZE is optimized code generated by the Hydrogen-based backend.
+  enum Mode { BASE, OPTIMIZE, STUB };
+
+  CompilationInfo(ParseInfo* parse_info, Vector<const char> debug_name,
+                  Code::Flags code_flags, Mode mode, Isolate* isolate,
+                  Zone* zone);
+
+  ParseInfo* parse_info_;
+  Isolate* isolate_;
+
+  void SetMode(Mode mode) { mode_ = mode; }
+
+  void SetFlag(Flag flag) { flags_ |= flag; }
+
+  void SetFlag(Flag flag, bool value) {
+    flags_ = value ? flags_ | flag : flags_ & ~flag;
+  }
+
+  bool GetFlag(Flag flag) const { return (flags_ & flag) != 0; }
+
+  unsigned flags_;
+
+  Code::Flags code_flags_;
+
+  Handle<JSFunction> closure_;
+
+  // The compiled code.
+  Handle<Code> code_;
+
+  // Compilation mode flag and whether deoptimization is allowed.
+  Mode mode_;
+  BailoutId osr_ast_id_;
+
+  // Holds the bytecode array generated by the interpreter.
+  // TODO(rmcilroy/mstarzinger): Temporary work-around until compiler.cc is
+  // refactored to avoid us needing to carry the BytcodeArray around.
+  Handle<BytecodeArray> bytecode_array_;
+
+  // The zone from which the compilation pipeline working on this
+  // CompilationInfo allocates.
+  Zone* zone_;
+
+  DeferredHandles* deferred_handles_;
+
+  // Dependencies for this compilation, e.g. stable maps.
+  CompilationDependencies dependencies_;
+
+  BailoutReason bailout_reason_;
+
+  int prologue_offset_;
+
+  InlinedFunctionList inlined_functions_;
+
+  // Number of parameters used for compilation of stubs that require arguments.
+  int parameter_count_;
+
+  int optimization_id_;
+
+  int osr_expr_stack_height_;
+
+  // The current OSR frame for specialization or {nullptr}.
+  JavaScriptFrame* osr_frame_ = nullptr;
+
+  Vector<const char> debug_name_;
+
+  DISALLOW_COPY_AND_ASSIGN(CompilationInfo);
+};
+
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_COMPILATION_INFO_H_
diff --git a/src/compiler-dispatcher/compiler-dispatcher-job.cc b/src/compiler-dispatcher/compiler-dispatcher-job.cc
index 9237936..96956ae 100644
--- a/src/compiler-dispatcher/compiler-dispatcher-job.cc
+++ b/src/compiler-dispatcher/compiler-dispatcher-job.cc
@@ -5,6 +5,8 @@
 #include "src/compiler-dispatcher/compiler-dispatcher-job.h"
 
 #include "src/assert-scope.h"
+#include "src/compilation-info.h"
+#include "src/compiler.h"
 #include "src/global-handles.h"
 #include "src/isolate.h"
 #include "src/objects-inl.h"
@@ -12,21 +14,22 @@
 #include "src/parsing/parser.h"
 #include "src/parsing/scanner-character-streams.h"
 #include "src/unicode-cache.h"
-#include "src/zone.h"
+#include "src/zone/zone.h"
 
 namespace v8 {
 namespace internal {
 
 CompilerDispatcherJob::CompilerDispatcherJob(Isolate* isolate,
-                                             Handle<JSFunction> function,
+                                             Handle<SharedFunctionInfo> shared,
                                              size_t max_stack_size)
     : isolate_(isolate),
-      function_(Handle<JSFunction>::cast(
-          isolate_->global_handles()->Create(*function))),
-      max_stack_size_(max_stack_size) {
+      shared_(Handle<SharedFunctionInfo>::cast(
+          isolate_->global_handles()->Create(*shared))),
+      max_stack_size_(max_stack_size),
+      can_compile_on_background_thread_(false) {
   HandleScope scope(isolate_);
-  Handle<SharedFunctionInfo> shared(function_->shared(), isolate_);
-  Handle<Script> script(Script::cast(shared->script()), isolate_);
+  DCHECK(!shared_->outer_scope_info()->IsTheHole(isolate_));
+  Handle<Script> script(Script::cast(shared_->script()), isolate_);
   Handle<String> source(String::cast(script->source()), isolate_);
   can_parse_on_background_thread_ =
       source->IsExternalTwoByteString() || source->IsExternalOneByteString();
@@ -36,7 +39,7 @@
   DCHECK(ThreadId::Current().Equals(isolate_->thread_id()));
   DCHECK(status_ == CompileJobStatus::kInitial ||
          status_ == CompileJobStatus::kDone);
-  i::GlobalHandles::Destroy(Handle<Object>::cast(function_).location());
+  i::GlobalHandles::Destroy(Handle<Object>::cast(shared_).location());
 }
 
 void CompilerDispatcherJob::PrepareToParseOnMainThread() {
@@ -45,46 +48,42 @@
   HandleScope scope(isolate_);
   unicode_cache_.reset(new UnicodeCache());
   zone_.reset(new Zone(isolate_->allocator()));
-  Handle<SharedFunctionInfo> shared(function_->shared(), isolate_);
-  Handle<Script> script(Script::cast(shared->script()), isolate_);
+  Handle<Script> script(Script::cast(shared_->script()), isolate_);
   DCHECK(script->type() != Script::TYPE_NATIVE);
 
   Handle<String> source(String::cast(script->source()), isolate_);
-  if (source->IsExternalTwoByteString()) {
-    character_stream_.reset(new ExternalTwoByteStringUtf16CharacterStream(
-        Handle<ExternalTwoByteString>::cast(source), shared->start_position(),
-        shared->end_position()));
-  } else if (source->IsExternalOneByteString()) {
-    character_stream_.reset(new ExternalOneByteStringUtf16CharacterStream(
-        Handle<ExternalOneByteString>::cast(source), shared->start_position(),
-        shared->end_position()));
+  if (source->IsExternalTwoByteString() || source->IsExternalOneByteString()) {
+    character_stream_.reset(ScannerStream::For(
+        source, shared_->start_position(), shared_->end_position()));
   } else {
     source = String::Flatten(source);
     // Have to globalize the reference here, so it survives between function
     // calls.
     source_ = Handle<String>::cast(isolate_->global_handles()->Create(*source));
-    character_stream_.reset(new GenericStringUtf16CharacterStream(
-        source_, shared->start_position(), shared->end_position()));
+    character_stream_.reset(ScannerStream::For(
+        source_, shared_->start_position(), shared_->end_position()));
   }
   parse_info_.reset(new ParseInfo(zone_.get()));
   parse_info_->set_isolate(isolate_);
   parse_info_->set_character_stream(character_stream_.get());
   parse_info_->set_lazy();
   parse_info_->set_hash_seed(isolate_->heap()->HashSeed());
-  parse_info_->set_is_named_expression(shared->is_named_expression());
-  parse_info_->set_calls_eval(shared->scope_info()->CallsEval());
-  parse_info_->set_compiler_hints(shared->compiler_hints());
-  parse_info_->set_start_position(shared->start_position());
-  parse_info_->set_end_position(shared->end_position());
+  parse_info_->set_is_named_expression(shared_->is_named_expression());
+  parse_info_->set_compiler_hints(shared_->compiler_hints());
+  parse_info_->set_start_position(shared_->start_position());
+  parse_info_->set_end_position(shared_->end_position());
   parse_info_->set_unicode_cache(unicode_cache_.get());
-  parse_info_->set_language_mode(shared->language_mode());
+  parse_info_->set_language_mode(shared_->language_mode());
 
   parser_.reset(new Parser(parse_info_.get()));
-  parser_->DeserializeScopeChain(
-      parse_info_.get(), handle(function_->context(), isolate_),
-      Scope::DeserializationMode::kDeserializeOffHeap);
+  Handle<ScopeInfo> outer_scope_info(
+      handle(ScopeInfo::cast(shared_->outer_scope_info())));
+  parser_->DeserializeScopeChain(parse_info_.get(),
+                                 outer_scope_info->length() > 0
+                                     ? MaybeHandle<ScopeInfo>(outer_scope_info)
+                                     : MaybeHandle<ScopeInfo>());
 
-  Handle<String> name(String::cast(shared->name()));
+  Handle<String> name(String::cast(shared_->name()));
   parse_info_->set_function_name(
       parse_info_->ast_value_factory()->GetString(name));
   status_ = CompileJobStatus::kReadyToParse;
@@ -108,8 +107,7 @@
   // use it.
   parse_info_->set_isolate(nullptr);
 
-  uintptr_t stack_limit =
-      reinterpret_cast<uintptr_t>(&stack_limit) - max_stack_size_ * KB;
+  uintptr_t stack_limit = GetCurrentStackPosition() - max_stack_size_ * KB;
 
   parser_->set_stack_limit(stack_limit);
   parser_->ParseOnBackground(parse_info_.get());
@@ -131,25 +129,32 @@
   if (parse_info_->literal() == nullptr) {
     status_ = CompileJobStatus::kFailed;
   } else {
-    status_ = CompileJobStatus::kReadyToCompile;
+    status_ = CompileJobStatus::kReadyToAnalyse;
   }
 
   DeferredHandleScope scope(isolate_);
   {
-    // Create a canonical handle scope before internalizing parsed values if
-    // compiling bytecode. This is required for off-thread bytecode generation.
-    std::unique_ptr<CanonicalHandleScope> canonical;
-    if (FLAG_ignition) canonical.reset(new CanonicalHandleScope(isolate_));
-
-    Handle<SharedFunctionInfo> shared(function_->shared(), isolate_);
-    Handle<Script> script(Script::cast(shared->script()), isolate_);
+    Handle<Script> script(Script::cast(shared_->script()), isolate_);
 
     parse_info_->set_script(script);
-    parse_info_->set_context(handle(function_->context(), isolate_));
+    Handle<ScopeInfo> outer_scope_info(
+        handle(ScopeInfo::cast(shared_->outer_scope_info())));
+    if (outer_scope_info->length() > 0) {
+      parse_info_->set_outer_scope_info(outer_scope_info);
+    }
+    parse_info_->set_shared_info(shared_);
 
-    // Do the parsing tasks which need to be done on the main thread. This will
-    // also handle parse errors.
-    parser_->Internalize(isolate_, script, parse_info_->literal() == nullptr);
+    {
+      // Create a canonical handle scope if compiling ignition bytecode. This is
+      // required by the constant array builder to de-duplicate objects without
+      // dereferencing handles.
+      std::unique_ptr<CanonicalHandleScope> canonical;
+      if (FLAG_ignition) canonical.reset(new CanonicalHandleScope(isolate_));
+
+      // Do the parsing tasks which need to be done on the main thread. This
+      // will also handle parse errors.
+      parser_->Internalize(isolate_, script, parse_info_->literal() == nullptr);
+    }
     parser_->HandleSourceURLComments(isolate_, script);
 
     parse_info_->set_character_stream(nullptr);
@@ -163,6 +168,72 @@
   return status_ != CompileJobStatus::kFailed;
 }
 
+bool CompilerDispatcherJob::PrepareToCompileOnMainThread() {
+  DCHECK(ThreadId::Current().Equals(isolate_->thread_id()));
+  DCHECK(status() == CompileJobStatus::kReadyToAnalyse);
+
+  compile_info_.reset(
+      new CompilationInfo(parse_info_.get(), Handle<JSFunction>::null()));
+
+  DeferredHandleScope scope(isolate_);
+  if (Compiler::Analyze(parse_info_.get())) {
+    compile_job_.reset(
+        Compiler::PrepareUnoptimizedCompilationJob(compile_info_.get()));
+  }
+  compile_info_->set_deferred_handles(scope.Detach());
+
+  if (!compile_job_.get()) {
+    if (!isolate_->has_pending_exception()) isolate_->StackOverflow();
+    status_ = CompileJobStatus::kFailed;
+    return false;
+  }
+
+  can_compile_on_background_thread_ =
+      compile_job_->can_execute_on_background_thread();
+  status_ = CompileJobStatus::kReadyToCompile;
+  return true;
+}
+
+void CompilerDispatcherJob::Compile() {
+  DCHECK(status() == CompileJobStatus::kReadyToCompile);
+  DCHECK(can_compile_on_background_thread_ ||
+         ThreadId::Current().Equals(isolate_->thread_id()));
+
+  // Disallowing of handle dereference and heap access dealt with in
+  // CompilationJob::ExecuteJob.
+
+  uintptr_t stack_limit = GetCurrentStackPosition() - max_stack_size_ * KB;
+  compile_job_->set_stack_limit(stack_limit);
+
+  CompilationJob::Status status = compile_job_->ExecuteJob();
+  USE(status);
+
+  // Always transition to kCompiled - errors will be reported by
+  // FinalizeCompilingOnMainThread.
+  status_ = CompileJobStatus::kCompiled;
+}
+
+bool CompilerDispatcherJob::FinalizeCompilingOnMainThread() {
+  DCHECK(ThreadId::Current().Equals(isolate_->thread_id()));
+  DCHECK(status() == CompileJobStatus::kCompiled);
+
+  if (compile_job_->state() == CompilationJob::State::kFailed ||
+      !Compiler::FinalizeCompilationJob(compile_job_.release())) {
+    if (!isolate_->has_pending_exception()) isolate_->StackOverflow();
+    status_ = CompileJobStatus::kFailed;
+    return false;
+  }
+
+  zone_.reset();
+  parse_info_.reset();
+  compile_info_.reset();
+  compile_job_.reset();
+  handles_from_parsing_.reset();
+
+  status_ = CompileJobStatus::kDone;
+  return true;
+}
+
 void CompilerDispatcherJob::ResetOnMainThread() {
   DCHECK(ThreadId::Current().Equals(isolate_->thread_id()));
 
@@ -172,6 +243,8 @@
   parse_info_.reset();
   zone_.reset();
   handles_from_parsing_.reset();
+  compile_info_.reset();
+  compile_job_.reset();
 
   if (!source_.is_null()) {
     i::GlobalHandles::Destroy(Handle<Object>::cast(source_).location());
diff --git a/src/compiler-dispatcher/compiler-dispatcher-job.h b/src/compiler-dispatcher/compiler-dispatcher-job.h
index 50414af..f3aaf93 100644
--- a/src/compiler-dispatcher/compiler-dispatcher-job.h
+++ b/src/compiler-dispatcher/compiler-dispatcher-job.h
@@ -15,10 +15,11 @@
 namespace internal {
 
 class CompilationInfo;
+class CompilationJob;
 class Isolate;
-class JSFunction;
 class ParseInfo;
 class Parser;
+class SharedFunctionInfo;
 class String;
 class UnicodeCache;
 class Utf16CharacterStream;
@@ -28,14 +29,16 @@
   kInitial,
   kReadyToParse,
   kParsed,
+  kReadyToAnalyse,
   kReadyToCompile,
+  kCompiled,
   kFailed,
   kDone,
 };
 
 class CompilerDispatcherJob {
  public:
-  CompilerDispatcherJob(Isolate* isolate, Handle<JSFunction> function,
+  CompilerDispatcherJob(Isolate* isolate, Handle<SharedFunctionInfo> shared,
                         size_t max_stack_size);
   ~CompilerDispatcherJob();
 
@@ -43,6 +46,11 @@
   bool can_parse_on_background_thread() const {
     return can_parse_on_background_thread_;
   }
+  // Should only be called after kReadyToCompile.
+  bool can_compile_on_background_thread() const {
+    DCHECK(compile_job_.get());
+    return can_compile_on_background_thread_;
+  }
 
   // Transition from kInitial to kReadyToParse.
   void PrepareToParseOnMainThread();
@@ -50,10 +58,21 @@
   // Transition from kReadyToParse to kParsed.
   void Parse();
 
-  // Transition from kParsed to kReadyToCompile (or kFailed). Returns false
+  // Transition from kParsed to kReadyToAnalyse (or kFailed). Returns false
   // when transitioning to kFailed. In that case, an exception is pending.
   bool FinalizeParsingOnMainThread();
 
+  // Transition from kReadyToAnalyse to kReadyToCompile (or kFailed). Returns
+  // false when transitioning to kFailed. In that case, an exception is pending.
+  bool PrepareToCompileOnMainThread();
+
+  // Transition from kReadyToCompile to kCompiled.
+  void Compile();
+
+  // Transition from kCompiled to kDone (or kFailed). Returns false when
+  // transitioning to kFailed. In that case, an exception is pending.
+  bool FinalizeCompilingOnMainThread();
+
   // Transition from any state to kInitial and free all resources.
   void ResetOnMainThread();
 
@@ -62,7 +81,7 @@
 
   CompileJobStatus status_ = CompileJobStatus::kInitial;
   Isolate* isolate_;
-  Handle<JSFunction> function_;  // Global handle.
+  Handle<SharedFunctionInfo> shared_;  // Global handle.
   Handle<String> source_;        // Global handle.
   size_t max_stack_size_;
 
@@ -74,7 +93,12 @@
   std::unique_ptr<Parser> parser_;
   std::unique_ptr<DeferredHandles> handles_from_parsing_;
 
+  // Members required for compiling.
+  std::unique_ptr<CompilationInfo> compile_info_;
+  std::unique_ptr<CompilationJob> compile_job_;
+
   bool can_parse_on_background_thread_;
+  bool can_compile_on_background_thread_;
 
   DISALLOW_COPY_AND_ASSIGN(CompilerDispatcherJob);
 };
diff --git a/src/compiler-dispatcher/optimizing-compile-dispatcher.cc b/src/compiler-dispatcher/optimizing-compile-dispatcher.cc
index be81047..75c50ee 100644
--- a/src/compiler-dispatcher/optimizing-compile-dispatcher.cc
+++ b/src/compiler-dispatcher/optimizing-compile-dispatcher.cc
@@ -5,6 +5,8 @@
 #include "src/compiler-dispatcher/optimizing-compile-dispatcher.h"
 
 #include "src/base/atomicops.h"
+#include "src/compilation-info.h"
+#include "src/compiler.h"
 #include "src/full-codegen/full-codegen.h"
 #include "src/isolate.h"
 #include "src/tracing/trace-event.h"
diff --git a/src/compiler.cc b/src/compiler.cc
index 9a5afe9..ec402fa 100644
--- a/src/compiler.cc
+++ b/src/compiler.cc
@@ -40,33 +40,11 @@
 namespace internal {
 
 
-#define PARSE_INFO_GETTER(type, name)  \
-  type CompilationInfo::name() const { \
-    CHECK(parse_info());               \
-    return parse_info()->name();       \
-  }
-
-
-#define PARSE_INFO_GETTER_WITH_DEFAULT(type, name, def) \
-  type CompilationInfo::name() const {                  \
-    return parse_info() ? parse_info()->name() : def;   \
-  }
-
-
-PARSE_INFO_GETTER(Handle<Script>, script)
-PARSE_INFO_GETTER(FunctionLiteral*, literal)
-PARSE_INFO_GETTER_WITH_DEFAULT(DeclarationScope*, scope, nullptr)
-PARSE_INFO_GETTER_WITH_DEFAULT(Handle<Context>, context,
-                               Handle<Context>::null())
-PARSE_INFO_GETTER(Handle<SharedFunctionInfo>, shared_info)
-
-#undef PARSE_INFO_GETTER
-#undef PARSE_INFO_GETTER_WITH_DEFAULT
 
 // A wrapper around a CompilationInfo that detaches the Handles from
 // the underlying DeferredHandleScope and stores them in info_ on
 // destruction.
-class CompilationHandleScope BASE_EMBEDDED {
+class CompilationHandleScope final {
  public:
   explicit CompilationHandleScope(CompilationInfo* info)
       : deferred_(info->isolate()), info_(info) {}
@@ -91,154 +69,6 @@
 };
 
 // ----------------------------------------------------------------------------
-// Implementation of CompilationInfo
-
-bool CompilationInfo::has_shared_info() const {
-  return parse_info_ && !parse_info_->shared_info().is_null();
-}
-
-CompilationInfo::CompilationInfo(ParseInfo* parse_info,
-                                 Handle<JSFunction> closure)
-    : CompilationInfo(parse_info, {}, Code::ComputeFlags(Code::FUNCTION), BASE,
-                      parse_info->isolate(), parse_info->zone()) {
-  closure_ = closure;
-
-  // Compiling for the snapshot typically results in different code than
-  // compiling later on. This means that code recompiled with deoptimization
-  // support won't be "equivalent" (as defined by SharedFunctionInfo::
-  // EnableDeoptimizationSupport), so it will replace the old code and all
-  // its type feedback. To avoid this, always compile functions in the snapshot
-  // with deoptimization support.
-  if (isolate_->serializer_enabled()) EnableDeoptimizationSupport();
-
-  if (FLAG_function_context_specialization) MarkAsFunctionContextSpecializing();
-  if (FLAG_turbo_inlining) MarkAsInliningEnabled();
-  if (FLAG_turbo_source_positions) MarkAsSourcePositionsEnabled();
-  if (FLAG_turbo_splitting) MarkAsSplittingEnabled();
-}
-
-CompilationInfo::CompilationInfo(Vector<const char> debug_name,
-                                 Isolate* isolate, Zone* zone,
-                                 Code::Flags code_flags)
-    : CompilationInfo(nullptr, debug_name, code_flags, STUB, isolate, zone) {}
-
-CompilationInfo::CompilationInfo(ParseInfo* parse_info,
-                                 Vector<const char> debug_name,
-                                 Code::Flags code_flags, Mode mode,
-                                 Isolate* isolate, Zone* zone)
-    : parse_info_(parse_info),
-      isolate_(isolate),
-      flags_(0),
-      code_flags_(code_flags),
-      mode_(mode),
-      osr_ast_id_(BailoutId::None()),
-      zone_(zone),
-      deferred_handles_(nullptr),
-      dependencies_(isolate, zone),
-      bailout_reason_(kNoReason),
-      prologue_offset_(Code::kPrologueOffsetNotSet),
-      track_positions_(FLAG_hydrogen_track_positions ||
-                       isolate->is_profiling()),
-      parameter_count_(0),
-      optimization_id_(-1),
-      osr_expr_stack_height_(0),
-      debug_name_(debug_name) {}
-
-CompilationInfo::~CompilationInfo() {
-  if (GetFlag(kDisableFutureOptimization) && has_shared_info()) {
-    shared_info()->DisableOptimization(bailout_reason());
-  }
-  dependencies()->Rollback();
-  delete deferred_handles_;
-}
-
-
-int CompilationInfo::num_parameters() const {
-  return !IsStub() ? scope()->num_parameters() : parameter_count_;
-}
-
-
-int CompilationInfo::num_parameters_including_this() const {
-  return num_parameters() + (is_this_defined() ? 1 : 0);
-}
-
-
-bool CompilationInfo::is_this_defined() const { return !IsStub(); }
-
-
-// Primitive functions are unlikely to be picked up by the stack-walking
-// profiler, so they trigger their own optimization when they're called
-// for the SharedFunctionInfo::kCallsUntilPrimitiveOptimization-th time.
-bool CompilationInfo::ShouldSelfOptimize() {
-  return FLAG_crankshaft &&
-         !(literal()->flags() & AstProperties::kDontSelfOptimize) &&
-         !literal()->dont_optimize() &&
-         literal()->scope()->AllowsLazyCompilation() &&
-         !shared_info()->optimization_disabled();
-}
-
-
-bool CompilationInfo::has_simple_parameters() {
-  return scope()->has_simple_parameters();
-}
-
-std::unique_ptr<char[]> CompilationInfo::GetDebugName() const {
-  if (parse_info() && parse_info()->literal()) {
-    AllowHandleDereference allow_deref;
-    return parse_info()->literal()->debug_name()->ToCString();
-  }
-  if (parse_info() && !parse_info()->shared_info().is_null()) {
-    return parse_info()->shared_info()->DebugName()->ToCString();
-  }
-  Vector<const char> name_vec = debug_name_;
-  if (name_vec.is_empty()) name_vec = ArrayVector("unknown");
-  std::unique_ptr<char[]> name(new char[name_vec.length() + 1]);
-  memcpy(name.get(), name_vec.start(), name_vec.length());
-  name[name_vec.length()] = '\0';
-  return name;
-}
-
-StackFrame::Type CompilationInfo::GetOutputStackFrameType() const {
-  switch (output_code_kind()) {
-    case Code::STUB:
-    case Code::BYTECODE_HANDLER:
-    case Code::HANDLER:
-    case Code::BUILTIN:
-#define CASE_KIND(kind) case Code::kind:
-      IC_KIND_LIST(CASE_KIND)
-#undef CASE_KIND
-      return StackFrame::STUB;
-    case Code::WASM_FUNCTION:
-      return StackFrame::WASM;
-    case Code::JS_TO_WASM_FUNCTION:
-      return StackFrame::JS_TO_WASM;
-    case Code::WASM_TO_JS_FUNCTION:
-      return StackFrame::WASM_TO_JS;
-    default:
-      UNIMPLEMENTED();
-      return StackFrame::NONE;
-  }
-}
-
-int CompilationInfo::GetDeclareGlobalsFlags() const {
-  DCHECK(DeclareGlobalsLanguageMode::is_valid(parse_info()->language_mode()));
-  return DeclareGlobalsEvalFlag::encode(parse_info()->is_eval()) |
-         DeclareGlobalsNativeFlag::encode(parse_info()->is_native()) |
-         DeclareGlobalsLanguageMode::encode(parse_info()->language_mode());
-}
-
-SourcePositionTableBuilder::RecordingMode
-CompilationInfo::SourcePositionRecordingMode() const {
-  return parse_info() && parse_info()->is_native()
-             ? SourcePositionTableBuilder::OMIT_SOURCE_POSITIONS
-             : SourcePositionTableBuilder::RECORD_SOURCE_POSITIONS;
-}
-
-bool CompilationInfo::ExpectsJSReceiverAsReceiver() {
-  return is_sloppy(parse_info()->language_mode()) && !parse_info()->is_native();
-}
-
-// ----------------------------------------------------------------------------
 // Implementation of CompilationJob
 
 CompilationJob::Status CompilationJob::PrepareJob() {
@@ -260,10 +90,18 @@
 }
 
 CompilationJob::Status CompilationJob::ExecuteJob() {
-  DisallowHeapAllocation no_allocation;
-  DisallowHandleAllocation no_handles;
-  DisallowHandleDereference no_deref;
-  DisallowCodeDependencyChange no_dependency_change;
+  std::unique_ptr<DisallowHeapAllocation> no_allocation;
+  std::unique_ptr<DisallowHandleAllocation> no_handles;
+  std::unique_ptr<DisallowHandleDereference> no_deref;
+  std::unique_ptr<DisallowCodeDependencyChange> no_dependency_change;
+  if (can_execute_on_background_thread()) {
+    no_allocation.reset(new DisallowHeapAllocation());
+    no_handles.reset(new DisallowHandleAllocation());
+    no_deref.reset(new DisallowHandleDereference());
+    no_dependency_change.reset(new DisallowCodeDependencyChange());
+  } else {
+    DCHECK(ThreadId::Current().Equals(info()->isolate()->thread_id()));
+  }
 
   // Delegate to the underlying implementation.
   DCHECK(state() == State::kReadyToExecute);
@@ -283,6 +121,73 @@
   return UpdateState(FinalizeJobImpl(), State::kSucceeded);
 }
 
+CompilationJob::Status CompilationJob::RetryOptimization(BailoutReason reason) {
+  DCHECK(info_->IsOptimizing());
+  info_->RetryOptimization(reason);
+  state_ = State::kFailed;
+  return FAILED;
+}
+
+CompilationJob::Status CompilationJob::AbortOptimization(BailoutReason reason) {
+  DCHECK(info_->IsOptimizing());
+  info_->AbortOptimization(reason);
+  state_ = State::kFailed;
+  return FAILED;
+}
+
+void CompilationJob::RecordUnoptimizedCompilationStats() const {
+  int code_size;
+  if (info()->has_bytecode_array()) {
+    code_size = info()->bytecode_array()->SizeIncludingMetadata();
+  } else {
+    code_size = info()->code()->SizeIncludingMetadata();
+  }
+
+  Counters* counters = isolate()->counters();
+  // TODO(4280): Rename counters from "baseline" to "unoptimized" eventually.
+  counters->total_baseline_code_size()->Increment(code_size);
+  counters->total_baseline_compile_count()->Increment(1);
+
+  // TODO(5203): Add timers for each phase of compilation.
+}
+
+void CompilationJob::RecordOptimizedCompilationStats() const {
+  DCHECK(info()->IsOptimizing());
+  Handle<JSFunction> function = info()->closure();
+  if (!function->IsOptimized()) {
+    // Concurrent recompilation and OSR may race.  Increment only once.
+    int opt_count = function->shared()->opt_count();
+    function->shared()->set_opt_count(opt_count + 1);
+  }
+  double ms_creategraph = time_taken_to_prepare_.InMillisecondsF();
+  double ms_optimize = time_taken_to_execute_.InMillisecondsF();
+  double ms_codegen = time_taken_to_finalize_.InMillisecondsF();
+  if (FLAG_trace_opt) {
+    PrintF("[optimizing ");
+    function->ShortPrint();
+    PrintF(" - took %0.3f, %0.3f, %0.3f ms]\n", ms_creategraph, ms_optimize,
+           ms_codegen);
+  }
+  if (FLAG_trace_opt_stats) {
+    static double compilation_time = 0.0;
+    static int compiled_functions = 0;
+    static int code_size = 0;
+
+    compilation_time += (ms_creategraph + ms_optimize + ms_codegen);
+    compiled_functions++;
+    code_size += function->shared()->SourceSize();
+    PrintF("Compiled: %d functions with %d byte source size in %fms.\n",
+           compiled_functions, code_size, compilation_time);
+  }
+  if (FLAG_hydrogen_stats) {
+    isolate()->GetHStatistics()->IncrementSubtotals(time_taken_to_prepare_,
+                                                    time_taken_to_execute_,
+                                                    time_taken_to_finalize_);
+  }
+}
+
+Isolate* CompilationJob::isolate() const { return info()->isolate(); }
+
 namespace {
 
 void AddWeakObjectToCodeDependency(Isolate* isolate, Handle<HeapObject> object,
@@ -341,41 +246,6 @@
   code->set_can_have_weak_objects(true);
 }
 
-void CompilationJob::RecordOptimizationStats() {
-  DCHECK(info()->IsOptimizing());
-  Handle<JSFunction> function = info()->closure();
-  if (!function->IsOptimized()) {
-    // Concurrent recompilation and OSR may race.  Increment only once.
-    int opt_count = function->shared()->opt_count();
-    function->shared()->set_opt_count(opt_count + 1);
-  }
-  double ms_creategraph = time_taken_to_prepare_.InMillisecondsF();
-  double ms_optimize = time_taken_to_execute_.InMillisecondsF();
-  double ms_codegen = time_taken_to_finalize_.InMillisecondsF();
-  if (FLAG_trace_opt) {
-    PrintF("[optimizing ");
-    function->ShortPrint();
-    PrintF(" - took %0.3f, %0.3f, %0.3f ms]\n", ms_creategraph, ms_optimize,
-           ms_codegen);
-  }
-  if (FLAG_trace_opt_stats) {
-    static double compilation_time = 0.0;
-    static int compiled_functions = 0;
-    static int code_size = 0;
-
-    compilation_time += (ms_creategraph + ms_optimize + ms_codegen);
-    compiled_functions++;
-    code_size += function->shared()->SourceSize();
-    PrintF("Compiled: %d functions with %d byte source size in %fms.\n",
-           compiled_functions, code_size, compilation_time);
-  }
-  if (FLAG_hydrogen_stats) {
-    isolate()->GetHStatistics()->IncrementSubtotals(time_taken_to_prepare_,
-                                                    time_taken_to_execute_,
-                                                    time_taken_to_finalize_);
-  }
-}
-
 // ----------------------------------------------------------------------------
 // Local helper methods that make up the compilation pipeline.
 
@@ -387,6 +257,16 @@
              Script::COMPILATION_TYPE_EVAL;
 }
 
+bool Parse(ParseInfo* info) {
+  // Create a canonical handle scope if compiling ignition bytecode. This is
+  // required by the constant array builder to de-duplicate objects without
+  // dereferencing handles.
+  std::unique_ptr<CanonicalHandleScope> canonical;
+  if (FLAG_ignition) canonical.reset(new CanonicalHandleScope(info->isolate()));
+
+  return Parser::ParseStatic(info);
+}
+
 void RecordFunctionCompilation(CodeEventListener::LogEventsAndTags tag,
                                CompilationInfo* info) {
   // Log the code generation. If source information is available include
@@ -466,18 +346,24 @@
   return info->shared_info()->PassesFilter(FLAG_ignition_filter);
 }
 
-int CodeAndMetadataSize(CompilationInfo* info) {
-  if (info->has_bytecode_array()) {
-    return info->bytecode_array()->SizeIncludingMetadata();
+CompilationJob* GetUnoptimizedCompilationJob(CompilationInfo* info) {
+  // Function should have been parsed and analyzed before creating a compilation
+  // job.
+  DCHECK_NOT_NULL(info->literal());
+  DCHECK_NOT_NULL(info->scope());
+
+  EnsureFeedbackMetadata(info);
+  if (ShouldUseIgnition(info)) {
+    return interpreter::Interpreter::NewCompilationJob(info);
+  } else {
+    return FullCodeGenerator::NewCompilationJob(info);
   }
-  return info->code()->SizeIncludingMetadata();
 }
 
 bool GenerateUnoptimizedCode(CompilationInfo* info) {
-  bool success;
-  EnsureFeedbackMetadata(info);
   if (FLAG_validate_asm && info->scope()->asm_module() &&
       !info->shared_info()->is_asm_wasm_broken()) {
+    EnsureFeedbackMetadata(info);
     MaybeHandle<FixedArray> wasm_data;
     wasm_data = AsmJs::ConvertAsmToWasm(info->parse_info());
     if (!wasm_data.is_null()) {
@@ -486,19 +372,13 @@
       return true;
     }
   }
-  if (ShouldUseIgnition(info)) {
-    success = interpreter::Interpreter::MakeBytecode(info);
-  } else {
-    success = FullCodeGenerator::MakeCode(info);
-  }
-  if (success) {
-    Isolate* isolate = info->isolate();
-    Counters* counters = isolate->counters();
-    // TODO(4280): Rename counters from "baseline" to "unoptimized" eventually.
-    counters->total_baseline_code_size()->Increment(CodeAndMetadataSize(info));
-    counters->total_baseline_compile_count()->Increment(1);
-  }
-  return success;
+
+  std::unique_ptr<CompilationJob> job(GetUnoptimizedCompilationJob(info));
+  if (job->PrepareJob() != CompilationJob::SUCCEEDED) return false;
+  if (job->ExecuteJob() != CompilationJob::SUCCEEDED) return false;
+  if (job->FinalizeJob() != CompilationJob::SUCCEEDED) return false;
+  job->RecordUnoptimizedCompilationStats();
+  return true;
 }
 
 bool CompileUnoptimizedCode(CompilationInfo* info) {
@@ -514,8 +394,12 @@
 
 void InstallSharedScopeInfo(CompilationInfo* info,
                             Handle<SharedFunctionInfo> shared) {
-  Handle<ScopeInfo> scope_info = info->scope()->GetScopeInfo(info->isolate());
+  Handle<ScopeInfo> scope_info = info->scope()->scope_info();
   shared->set_scope_info(*scope_info);
+  Scope* outer_scope = info->scope()->GetOuterScopeWithContext();
+  if (outer_scope) {
+    shared->set_outer_scope_info(*outer_scope->scope_info());
+  }
 }
 
 void InstallSharedCompilationResult(CompilationInfo* info,
@@ -534,22 +418,8 @@
   }
 }
 
-MUST_USE_RESULT MaybeHandle<Code> GetUnoptimizedCode(CompilationInfo* info) {
-  VMState<COMPILER> state(info->isolate());
-  PostponeInterruptsScope postpone(info->isolate());
-
-  // Create a canonical handle scope before internalizing parsed values if
-  // compiling bytecode. This is required for off-thread bytecode generation.
-  std::unique_ptr<CanonicalHandleScope> canonical;
-  if (FLAG_ignition) canonical.reset(new CanonicalHandleScope(info->isolate()));
-
-  // Parse and update CompilationInfo with the results.
-  if (!Parser::ParseStatic(info->parse_info())) return MaybeHandle<Code>();
+void InstallUnoptimizedCode(CompilationInfo* info) {
   Handle<SharedFunctionInfo> shared = info->shared_info();
-  DCHECK_EQ(shared->language_mode(), info->literal()->language_mode());
-
-  // Compile either unoptimized code or bytecode for the interpreter.
-  if (!CompileUnoptimizedCode(info)) return MaybeHandle<Code>();
 
   // Update the shared function info with the scope info.
   InstallSharedScopeInfo(info, shared);
@@ -559,10 +429,35 @@
 
   // Record the function compilation event.
   RecordFunctionCompilation(CodeEventListener::LAZY_COMPILE_TAG, info);
+}
+
+MUST_USE_RESULT MaybeHandle<Code> GetUnoptimizedCode(CompilationInfo* info) {
+  VMState<COMPILER> state(info->isolate());
+  PostponeInterruptsScope postpone(info->isolate());
+
+  // Parse and update CompilationInfo with the results.
+  if (!Parse(info->parse_info())) return MaybeHandle<Code>();
+  DCHECK_EQ(info->shared_info()->language_mode(),
+            info->literal()->language_mode());
+
+  // Compile either unoptimized code or bytecode for the interpreter.
+  if (!CompileUnoptimizedCode(info)) return MaybeHandle<Code>();
+
+  InstallUnoptimizedCode(info);
 
   return info->code();
 }
 
+CompilationJob::Status FinalizeUnoptimizedCompilationJob(CompilationJob* job) {
+  CompilationJob::Status status = job->FinalizeJob();
+  if (status == CompilationJob::SUCCEEDED) {
+    DCHECK(!job->info()->shared_info()->is_compiled());
+    InstallUnoptimizedCode(job->info());
+    job->RecordUnoptimizedCompilationStats();
+  }
+  return status;
+}
+
 MUST_USE_RESULT MaybeHandle<Code> GetCodeFromOptimizedCodeMap(
     Handle<JSFunction> function, BailoutId osr_ast_id) {
   Handle<SharedFunctionInfo> shared(function->shared());
@@ -615,6 +510,14 @@
 }
 
 bool Renumber(ParseInfo* parse_info) {
+  // Create a canonical handle scope if compiling ignition bytecode. This is
+  // required by the constant array builder to de-duplicate objects without
+  // dereferencing handles.
+  std::unique_ptr<CanonicalHandleScope> canonical;
+  if (FLAG_ignition) {
+    canonical.reset(new CanonicalHandleScope(parse_info->isolate()));
+  }
+
   if (!AstNumbering::Renumber(parse_info->isolate(), parse_info->zone(),
                               parse_info->literal())) {
     return false;
@@ -669,8 +572,8 @@
   TimerEventScope<TimerEventRecompileSynchronous> timer(isolate);
   RuntimeCallTimerScope runtimeTimer(isolate,
                                      &RuntimeCallStats::RecompileSynchronous);
-  TRACE_EVENT_RUNTIME_CALL_STATS_TRACING_SCOPED(
-      isolate, &tracing::TraceEventStatsTable::RecompileSynchronous);
+  TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"),
+               "V8.RecompileSynchronous");
 
   if (job->PrepareJob() != CompilationJob::SUCCEEDED ||
       job->ExecuteJob() != CompilationJob::SUCCEEDED ||
@@ -684,7 +587,7 @@
   }
 
   // Success!
-  job->RecordOptimizationStats();
+  job->RecordOptimizedCompilationStats();
   DCHECK(!isolate->has_pending_exception());
   InsertCodeIntoOptimizedCodeMap(info);
   RecordFunctionCompilation(CodeEventListener::LAZY_COMPILE_TAG, info);
@@ -713,10 +616,6 @@
     return false;
   }
 
-  // All handles below this point will be allocated in a deferred handle scope
-  // that is detached and handed off to the background thread when we return.
-  CompilationHandleScope handle_scope(info);
-
   // Parsing is not required when optimizing from existing bytecode.
   if (!info->is_optimizing_from_bytecode()) {
     if (!Compiler::ParseAndAnalyze(info->parse_info())) return false;
@@ -725,15 +624,11 @@
 
   JSFunction::EnsureLiterals(info->closure());
 
-  // Reopen handles in the new CompilationHandleScope.
-  info->ReopenHandlesInNewHandleScope();
-  info->parse_info()->ReopenHandlesInNewHandleScope();
-
   TimerEventScope<TimerEventRecompileSynchronous> timer(info->isolate());
   RuntimeCallTimerScope runtimeTimer(info->isolate(),
                                      &RuntimeCallStats::RecompileSynchronous);
-  TRACE_EVENT_RUNTIME_CALL_STATS_TRACING_SCOPED(
-      isolate, &tracing::TraceEventStatsTable::RecompileSynchronous);
+  TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"),
+               "V8.RecompileSynchronous");
 
   if (job->PrepareJob() != CompilationJob::SUCCEEDED) return false;
   isolate->optimizing_compile_dispatcher()->QueueForOptimization(job);
@@ -808,14 +703,13 @@
     return MaybeHandle<Code>();
   }
 
-  CanonicalHandleScope canonical(isolate);
   TimerEventScope<TimerEventOptimizeCode> optimize_code_timer(isolate);
   RuntimeCallTimerScope runtimeTimer(isolate, &RuntimeCallStats::OptimizeCode);
-  TRACE_EVENT_RUNTIME_CALL_STATS_TRACING_SCOPED(
-      isolate, &tracing::TraceEventStatsTable::OptimizeCode);
+  TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"), "V8.OptimizeCode");
 
   // TurboFan can optimize directly from existing bytecode.
   if (FLAG_turbo_from_bytecode && use_turbofan && ShouldUseIgnition(info)) {
+    if (info->is_osr() && !ignition_osr) return MaybeHandle<Code>();
     if (!Compiler::EnsureBytecode(info)) {
       if (isolate->has_pending_exception()) isolate->clear_pending_exception();
       return MaybeHandle<Code>();
@@ -831,6 +725,32 @@
     parse_info->set_lazy(false);
   }
 
+  // Verify that OSR compilations are delegated to the correct graph builder.
+  // Depending on the underlying frame the semantics of the {BailoutId} differ
+  // and the various graph builders hard-code a certain semantic:
+  //  - Interpreter : The BailoutId represents a bytecode offset.
+  //  - FullCodegen : The BailoutId represents the id of an AST node.
+  DCHECK_IMPLIES(info->is_osr() && ignition_osr,
+                 info->is_optimizing_from_bytecode());
+  DCHECK_IMPLIES(info->is_osr() && !ignition_osr,
+                 !info->is_optimizing_from_bytecode());
+
+  // In case of concurrent recompilation, all handles below this point will be
+  // allocated in a deferred handle scope that is detached and handed off to
+  // the background thread when we return.
+  std::unique_ptr<CompilationHandleScope> compilation;
+  if (mode == Compiler::CONCURRENT) {
+    compilation.reset(new CompilationHandleScope(info));
+  }
+
+  // In case of TurboFan, all handles below will be canonicalized.
+  std::unique_ptr<CanonicalHandleScope> canonical;
+  if (use_turbofan) canonical.reset(new CanonicalHandleScope(info->isolate()));
+
+  // Reopen handles in the new CompilationHandleScope.
+  info->ReopenHandlesInNewHandleScope();
+  parse_info->ReopenHandlesInNewHandleScope();
+
   if (mode == Compiler::CONCURRENT) {
     if (GetOptimizedCodeLater(job.get())) {
       job.release();  // The background recompile job owns this now.
@@ -844,6 +764,60 @@
   return MaybeHandle<Code>();
 }
 
+CompilationJob::Status FinalizeOptimizedCompilationJob(CompilationJob* job) {
+  CompilationInfo* info = job->info();
+  Isolate* isolate = info->isolate();
+
+  TimerEventScope<TimerEventRecompileSynchronous> timer(info->isolate());
+  RuntimeCallTimerScope runtimeTimer(isolate,
+                                     &RuntimeCallStats::RecompileSynchronous);
+  TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"),
+               "V8.RecompileSynchronous");
+
+  Handle<SharedFunctionInfo> shared = info->shared_info();
+  shared->code()->set_profiler_ticks(0);
+
+  DCHECK(!shared->HasDebugInfo());
+
+  // 1) Optimization on the concurrent thread may have failed.
+  // 2) The function may have already been optimized by OSR.  Simply continue.
+  //    Except when OSR already disabled optimization for some reason.
+  // 3) The code may have already been invalidated due to dependency change.
+  // 4) Code generation may have failed.
+  if (job->state() == CompilationJob::State::kReadyToFinalize) {
+    if (shared->optimization_disabled()) {
+      job->RetryOptimization(kOptimizationDisabled);
+    } else if (info->dependencies()->HasAborted()) {
+      job->RetryOptimization(kBailedOutDueToDependencyChange);
+    } else if (job->FinalizeJob() == CompilationJob::SUCCEEDED) {
+      job->RecordOptimizedCompilationStats();
+      RecordFunctionCompilation(CodeEventListener::LAZY_COMPILE_TAG, info);
+      if (shared
+              ->SearchOptimizedCodeMap(info->context()->native_context(),
+                                       info->osr_ast_id())
+              .code == nullptr) {
+        InsertCodeIntoOptimizedCodeMap(info);
+      }
+      if (FLAG_trace_opt) {
+        PrintF("[completed optimizing ");
+        info->closure()->ShortPrint();
+        PrintF("]\n");
+      }
+      info->closure()->ReplaceCode(*info->code());
+      return CompilationJob::SUCCEEDED;
+    }
+  }
+
+  DCHECK(job->state() == CompilationJob::State::kFailed);
+  if (FLAG_trace_opt) {
+    PrintF("[aborted optimizing ");
+    info->closure()->ShortPrint();
+    PrintF(" because: %s]\n", GetBailoutReason(info->bailout_reason()));
+  }
+  info->closure()->ReplaceCode(shared->code());
+  return CompilationJob::FAILED;
+}
+
 class InterpreterActivationsFinder : public ThreadVisitor,
                                      public OptimizedFunctionVisitor {
  public:
@@ -942,7 +916,7 @@
   // baseline code because there might be suspended activations stored in
   // generator objects on the heap. We could eventually go directly to
   // TurboFan in this case.
-  if (function->shared()->is_resumable()) {
+  if (IsResumableFunction(function->shared()->kind())) {
     return MaybeHandle<Code>();
   }
 
@@ -978,7 +952,7 @@
   }
 
   // Parse and update CompilationInfo with the results.
-  if (!Parser::ParseStatic(info.parse_info())) return MaybeHandle<Code>();
+  if (!Parse(info.parse_info())) return MaybeHandle<Code>();
   Handle<SharedFunctionInfo> shared = info.shared_info();
   DCHECK_EQ(shared->language_mode(), info.literal()->language_mode());
 
@@ -1014,22 +988,19 @@
   TimerEventScope<TimerEventCompileCode> compile_timer(isolate);
   RuntimeCallTimerScope runtimeTimer(isolate,
                                      &RuntimeCallStats::CompileCodeLazy);
-  TRACE_EVENT_RUNTIME_CALL_STATS_TRACING_SCOPED(
-      isolate, &tracing::TraceEventStatsTable::CompileCodeLazy);
+  TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"), "V8.CompileCode");
   AggregatedHistogramTimerScope timer(isolate->counters()->compile_lazy());
 
-  if (FLAG_turbo_cache_shared_code) {
-    Handle<Code> cached_code;
-    if (GetCodeFromOptimizedCodeMap(function, BailoutId::None())
-            .ToHandle(&cached_code)) {
-      if (FLAG_trace_opt) {
-        PrintF("[found optimized code for ");
-        function->ShortPrint();
-        PrintF(" during unoptimized compile]\n");
-      }
-      DCHECK(function->shared()->is_compiled());
-      return cached_code;
+  Handle<Code> cached_code;
+  if (GetCodeFromOptimizedCodeMap(function, BailoutId::None())
+          .ToHandle(&cached_code)) {
+    if (FLAG_trace_opt) {
+      PrintF("[found optimized code for ");
+      function->ShortPrint();
+      PrintF(" during unoptimized compile]\n");
     }
+    DCHECK(function->shared()->is_compiled());
+    return cached_code;
   }
 
   if (function->shared()->is_compiled()) {
@@ -1076,18 +1047,12 @@
   Isolate* isolate = info->isolate();
   TimerEventScope<TimerEventCompileCode> timer(isolate);
   RuntimeCallTimerScope runtimeTimer(isolate, &RuntimeCallStats::CompileCode);
-  TRACE_EVENT_RUNTIME_CALL_STATS_TRACING_SCOPED(
-      isolate, &tracing::TraceEventStatsTable::CompileCode);
+  TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"), "V8.CompileCode");
   PostponeInterruptsScope postpone(isolate);
   DCHECK(!isolate->native_context().is_null());
   ParseInfo* parse_info = info->parse_info();
   Handle<Script> script = parse_info->script();
 
-  // Create a canonical handle scope before internalizing parsed values if
-  // compiling bytecode. This is required for off-thread bytecode generation.
-  std::unique_ptr<CanonicalHandleScope> canonical;
-  if (FLAG_ignition) canonical.reset(new CanonicalHandleScope(isolate));
-
   // TODO(svenpanne) Obscure place for this, perhaps move to OnBeforeCompile?
   FixedArray* array = isolate->native_context()->embedder_data();
   script->set_context_data(array->get(v8::Context::kDebugIdIndex));
@@ -1131,7 +1096,7 @@
         parse_info->set_compile_options(ScriptCompiler::kNoCompileOptions);
       }
 
-      if (!Parser::ParseStatic(parse_info)) {
+      if (!Parse(parse_info)) {
         return Handle<SharedFunctionInfo>::null();
       }
     }
@@ -1150,10 +1115,8 @@
                                ? info->isolate()->counters()->compile_eval()
                                : info->isolate()->counters()->compile();
     HistogramTimerScope timer(rate);
-    TRACE_EVENT_RUNTIME_CALL_STATS_TRACING_SCOPED(
-        isolate,
-        (parse_info->is_eval() ? &tracing::TraceEventStatsTable::CompileEval
-                               : &tracing::TraceEventStatsTable::Compile));
+    TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"),
+                 parse_info->is_eval() ? "V8.CompileEval" : "V8.Compile");
 
     // Allocate a shared function info object.
     DCHECK_EQ(kNoSourcePosition, lit->function_token_position());
@@ -1203,14 +1166,14 @@
 bool Compiler::Analyze(ParseInfo* info) {
   DCHECK_NOT_NULL(info->literal());
   if (!Rewriter::Rewrite(info)) return false;
-  Scope::Analyze(info);
+  DeclarationScope::Analyze(info, AnalyzeMode::kRegular);
   if (!Renumber(info)) return false;
   DCHECK_NOT_NULL(info->scope());
   return true;
 }
 
 bool Compiler::ParseAndAnalyze(ParseInfo* info) {
-  if (!Parser::ParseStatic(info)) return false;
+  if (!Parse(info)) return false;
   if (!Compiler::Analyze(info)) return false;
   DCHECK_NOT_NULL(info->literal());
   DCHECK_NOT_NULL(info->scope());
@@ -1390,10 +1353,18 @@
 }
 
 bool Compiler::EnsureBytecode(CompilationInfo* info) {
-  DCHECK(ShouldUseIgnition(info));
+  if (!ShouldUseIgnition(info)) return false;
   if (!info->shared_info()->HasBytecodeArray()) {
-    DCHECK(!info->shared_info()->is_compiled());
+    Handle<Code> original_code(info->shared_info()->code());
     if (GetUnoptimizedCode(info).is_null()) return false;
+    if (info->shared_info()->HasAsmWasmData()) return false;
+    DCHECK(info->shared_info()->is_compiled());
+    if (original_code->kind() == Code::FUNCTION) {
+      // Generating bytecode will install the {InterpreterEntryTrampoline} as
+      // shared code on the function. To avoid an implicit tier down we restore
+      // original baseline code in case it existed beforehand.
+      info->shared_info()->ReplaceCode(*original_code);
+    }
   }
   DCHECK(info->shared_info()->HasBytecodeArray());
   return true;
@@ -1414,7 +1385,7 @@
     // baseline code because there might be suspended activations stored in
     // generator objects on the heap. We could eventually go directly to
     // TurboFan in this case.
-    if (shared->is_resumable()) return false;
+    if (IsResumableFunction(shared->kind())) return false;
 
     // TODO(4280): For now we disable switching to baseline code in the presence
     // of interpreter activations of the given function. The reasons is that the
@@ -1513,7 +1484,9 @@
     if (context->IsNativeContext()) parse_info.set_global();
     parse_info.set_language_mode(language_mode);
     parse_info.set_parse_restriction(restriction);
-    parse_info.set_context(context);
+    if (!context->IsNativeContext()) {
+      parse_info.set_outer_scope_info(handle(context->scope_info()));
+    }
 
     shared_info = CompileToplevel(&info);
 
@@ -1629,8 +1602,8 @@
       HistogramTimerScope timer(isolate->counters()->compile_deserialize());
       RuntimeCallTimerScope runtimeTimer(isolate,
                                          &RuntimeCallStats::CompileDeserialize);
-      TRACE_EVENT_RUNTIME_CALL_STATS_TRACING_SCOPED(
-          isolate, &tracing::TraceEventStatsTable::CompileDeserialize);
+      TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"),
+                   "V8.CompileDeserialize");
       Handle<SharedFunctionInfo> result;
       if (CodeSerializer::Deserialize(isolate, *cached_data, source)
               .ToHandle(&result)) {
@@ -1686,7 +1659,9 @@
     }
     parse_info.set_compile_options(compile_options);
     parse_info.set_extension(extension);
-    parse_info.set_context(context);
+    if (!context->IsNativeContext()) {
+      parse_info.set_outer_scope_info(handle(context->scope_info()));
+    }
     if (FLAG_serialize_toplevel &&
         compile_options == ScriptCompiler::kProduceCodeCache) {
       info.PrepareForSerializing();
@@ -1703,8 +1678,8 @@
             isolate->counters()->compile_serialize());
         RuntimeCallTimerScope runtimeTimer(isolate,
                                            &RuntimeCallStats::CompileSerialize);
-        TRACE_EVENT_RUNTIME_CALL_STATS_TRACING_SCOPED(
-            isolate, &tracing::TraceEventStatsTable::CompileSerialize);
+        TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"),
+                     "V8.CompileSerialize");
         *cached_data = CodeSerializer::Serialize(isolate, result, source);
         if (FLAG_profile_deserialization) {
           PrintF("[Compiling and serializing took %0.3f ms]\n",
@@ -1822,17 +1797,14 @@
   // Generate code
   TimerEventScope<TimerEventCompileCode> timer(isolate);
   RuntimeCallTimerScope runtimeTimer(isolate, &RuntimeCallStats::CompileCode);
-  TRACE_EVENT_RUNTIME_CALL_STATS_TRACING_SCOPED(
-      isolate, &tracing::TraceEventStatsTable::CompileCode);
-
-  // Create a canonical handle scope if compiling ignition bytecode. This is
-  // required by the constant array builder to de-duplicate common objects
-  // without dereferencing handles.
-  std::unique_ptr<CanonicalHandleScope> canonical;
-  if (FLAG_ignition) canonical.reset(new CanonicalHandleScope(info.isolate()));
+  TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"), "V8.CompileCode");
 
   if (lazy) {
     info.SetCode(isolate->builtins()->CompileLazy());
+    Scope* outer_scope = literal->scope()->GetOuterScopeWithContext();
+    if (outer_scope) {
+      result->set_outer_scope_info(*outer_scope->scope_info());
+    }
   } else if (Renumber(info.parse_info()) && GenerateUnoptimizedCode(&info)) {
     // Code generation will ensure that the feedback vector is present and
     // appropriately sized.
@@ -1876,6 +1848,7 @@
   Handle<SharedFunctionInfo> shared = isolate->factory()->NewSharedFunctionInfo(
       name, fun->shared()->num_literals(), FunctionKind::kNormalFunction, code,
       Handle<ScopeInfo>(fun->shared()->scope_info()));
+  shared->set_outer_scope_info(fun->shared()->outer_scope_info());
   shared->SetConstructStub(*construct_stub);
   shared->set_feedback_metadata(fun->shared()->feedback_metadata());
 
@@ -1895,58 +1868,28 @@
   return GetOptimizedCode(function, NOT_CONCURRENT, osr_ast_id, osr_frame);
 }
 
-void Compiler::FinalizeCompilationJob(CompilationJob* raw_job) {
+CompilationJob* Compiler::PrepareUnoptimizedCompilationJob(
+    CompilationInfo* info) {
+  VMState<COMPILER> state(info->isolate());
+  std::unique_ptr<CompilationJob> job(GetUnoptimizedCompilationJob(info));
+  if (job->PrepareJob() != CompilationJob::SUCCEEDED) {
+    return nullptr;
+  }
+  return job.release();
+}
+
+bool Compiler::FinalizeCompilationJob(CompilationJob* raw_job) {
   // Take ownership of compilation job.  Deleting job also tears down the zone.
   std::unique_ptr<CompilationJob> job(raw_job);
-  CompilationInfo* info = job->info();
-  Isolate* isolate = info->isolate();
 
-  VMState<COMPILER> state(isolate);
-  TimerEventScope<TimerEventRecompileSynchronous> timer(info->isolate());
-  RuntimeCallTimerScope runtimeTimer(isolate,
-                                     &RuntimeCallStats::RecompileSynchronous);
-  TRACE_EVENT_RUNTIME_CALL_STATS_TRACING_SCOPED(
-      isolate, &tracing::TraceEventStatsTable::RecompileSynchronous);
-
-  Handle<SharedFunctionInfo> shared = info->shared_info();
-  shared->code()->set_profiler_ticks(0);
-
-  DCHECK(!shared->HasDebugInfo());
-
-  // 1) Optimization on the concurrent thread may have failed.
-  // 2) The function may have already been optimized by OSR.  Simply continue.
-  //    Except when OSR already disabled optimization for some reason.
-  // 3) The code may have already been invalidated due to dependency change.
-  // 4) Code generation may have failed.
-  if (job->state() == CompilationJob::State::kReadyToFinalize) {
-    if (shared->optimization_disabled()) {
-      job->RetryOptimization(kOptimizationDisabled);
-    } else if (info->dependencies()->HasAborted()) {
-      job->RetryOptimization(kBailedOutDueToDependencyChange);
-    } else if (job->FinalizeJob() == CompilationJob::SUCCEEDED) {
-      job->RecordOptimizationStats();
-      RecordFunctionCompilation(CodeEventListener::LAZY_COMPILE_TAG, info);
-      if (shared->SearchOptimizedCodeMap(info->context()->native_context(),
-                                         info->osr_ast_id()).code == nullptr) {
-        InsertCodeIntoOptimizedCodeMap(info);
-      }
-      if (FLAG_trace_opt) {
-        PrintF("[completed optimizing ");
-        info->closure()->ShortPrint();
-        PrintF("]\n");
-      }
-      info->closure()->ReplaceCode(*info->code());
-      return;
-    }
+  VMState<COMPILER> state(job->info()->isolate());
+  if (job->info()->IsOptimizing()) {
+    return FinalizeOptimizedCompilationJob(job.get()) ==
+           CompilationJob::SUCCEEDED;
+  } else {
+    return FinalizeUnoptimizedCompilationJob(job.get()) ==
+           CompilationJob::SUCCEEDED;
   }
-
-  DCHECK(job->state() == CompilationJob::State::kFailed);
-  if (FLAG_trace_opt) {
-    PrintF("[aborted optimizing ");
-    info->closure()->ShortPrint();
-    PrintF(" because: %s]\n", GetBailoutReason(info->bailout_reason()));
-  }
-  info->closure()->ReplaceCode(shared->code());
 }
 
 void Compiler::PostInstantiation(Handle<JSFunction> function,
diff --git a/src/compiler.h b/src/compiler.h
index 5521573..bfeaa8e 100644
--- a/src/compiler.h
+++ b/src/compiler.h
@@ -9,14 +9,9 @@
 
 #include "src/allocation.h"
 #include "src/bailout-reason.h"
-#include "src/compilation-dependencies.h"
 #include "src/contexts.h"
-#include "src/frames.h"
 #include "src/isolate.h"
-#include "src/objects-inl.h"
-#include "src/source-position-table.h"
-#include "src/source-position.h"
-#include "src/zone.h"
+#include "src/zone/zone.h"
 
 namespace v8 {
 namespace internal {
@@ -57,8 +52,12 @@
   static bool CompileDebugCode(Handle<SharedFunctionInfo> shared);
   static MaybeHandle<JSArray> CompileForLiveEdit(Handle<Script> script);
 
+  // Prepare a compilation job for unoptimized code. Requires ParseAndAnalyze.
+  static CompilationJob* PrepareUnoptimizedCompilationJob(
+      CompilationInfo* info);
+
   // Generate and install code from previously queued compilation job.
-  static void FinalizeCompilationJob(CompilationJob* job);
+  static bool FinalizeCompilationJob(CompilationJob* job);
 
   // Give the compiler a chance to perform low-latency initialization tasks of
   // the given {function} on its instantiation. Note that only the runtime will
@@ -138,405 +137,6 @@
       JavaScriptFrame* osr_frame);
 };
 
-
-// CompilationInfo encapsulates some information known at compile time.  It
-// is constructed based on the resources available at compile-time.
-class CompilationInfo final {
- public:
-  // Various configuration flags for a compilation, as well as some properties
-  // of the compiled code produced by a compilation.
-  enum Flag {
-    kDeferredCalling = 1 << 0,
-    kNonDeferredCalling = 1 << 1,
-    kSavesCallerDoubles = 1 << 2,
-    kRequiresFrame = 1 << 3,
-    kMustNotHaveEagerFrame = 1 << 4,
-    kDeoptimizationSupport = 1 << 5,
-    kDebug = 1 << 6,
-    kSerializing = 1 << 7,
-    kFunctionContextSpecializing = 1 << 8,
-    kFrameSpecializing = 1 << 9,
-    kNativeContextSpecializing = 1 << 10,
-    kInliningEnabled = 1 << 11,
-    kDisableFutureOptimization = 1 << 12,
-    kSplittingEnabled = 1 << 13,
-    kDeoptimizationEnabled = 1 << 14,
-    kSourcePositionsEnabled = 1 << 15,
-    kBailoutOnUninitialized = 1 << 16,
-    kOptimizeFromBytecode = 1 << 17,
-    kTypeFeedbackEnabled = 1 << 18,
-    kAccessorInliningEnabled = 1 << 19,
-  };
-
-  CompilationInfo(ParseInfo* parse_info, Handle<JSFunction> closure);
-  CompilationInfo(Vector<const char> debug_name, Isolate* isolate, Zone* zone,
-                  Code::Flags code_flags = Code::ComputeFlags(Code::STUB));
-  ~CompilationInfo();
-
-  ParseInfo* parse_info() const { return parse_info_; }
-
-  // -----------------------------------------------------------
-  // TODO(titzer): inline and delete accessors of ParseInfo
-  // -----------------------------------------------------------
-  Handle<Script> script() const;
-  FunctionLiteral* literal() const;
-  DeclarationScope* scope() const;
-  Handle<Context> context() const;
-  Handle<SharedFunctionInfo> shared_info() const;
-  bool has_shared_info() const;
-  // -----------------------------------------------------------
-
-  Isolate* isolate() const {
-    return isolate_;
-  }
-  Zone* zone() { return zone_; }
-  bool is_osr() const { return !osr_ast_id_.IsNone(); }
-  Handle<JSFunction> closure() const { return closure_; }
-  Handle<Code> code() const { return code_; }
-  Code::Flags code_flags() const { return code_flags_; }
-  BailoutId osr_ast_id() const { return osr_ast_id_; }
-  JavaScriptFrame* osr_frame() const { return osr_frame_; }
-  int num_parameters() const;
-  int num_parameters_including_this() const;
-  bool is_this_defined() const;
-
-  void set_parameter_count(int parameter_count) {
-    DCHECK(IsStub());
-    parameter_count_ = parameter_count;
-  }
-
-  bool has_bytecode_array() const { return !bytecode_array_.is_null(); }
-  Handle<BytecodeArray> bytecode_array() const { return bytecode_array_; }
-
-  bool is_tracking_positions() const { return track_positions_; }
-
-  bool is_calling() const {
-    return GetFlag(kDeferredCalling) || GetFlag(kNonDeferredCalling);
-  }
-
-  void MarkAsDeferredCalling() { SetFlag(kDeferredCalling); }
-
-  bool is_deferred_calling() const { return GetFlag(kDeferredCalling); }
-
-  void MarkAsNonDeferredCalling() { SetFlag(kNonDeferredCalling); }
-
-  bool is_non_deferred_calling() const { return GetFlag(kNonDeferredCalling); }
-
-  void MarkAsSavesCallerDoubles() { SetFlag(kSavesCallerDoubles); }
-
-  bool saves_caller_doubles() const { return GetFlag(kSavesCallerDoubles); }
-
-  void MarkAsRequiresFrame() { SetFlag(kRequiresFrame); }
-
-  bool requires_frame() const { return GetFlag(kRequiresFrame); }
-
-  void MarkMustNotHaveEagerFrame() { SetFlag(kMustNotHaveEagerFrame); }
-
-  bool GetMustNotHaveEagerFrame() const {
-    return GetFlag(kMustNotHaveEagerFrame);
-  }
-
-  // Compiles marked as debug produce unoptimized code with debug break slots.
-  // Inner functions that cannot be compiled w/o context are compiled eagerly.
-  // Always include deoptimization support to avoid having to recompile again.
-  void MarkAsDebug() {
-    SetFlag(kDebug);
-    SetFlag(kDeoptimizationSupport);
-  }
-
-  bool is_debug() const { return GetFlag(kDebug); }
-
-  void PrepareForSerializing() { SetFlag(kSerializing); }
-
-  bool will_serialize() const { return GetFlag(kSerializing); }
-
-  void MarkAsFunctionContextSpecializing() {
-    SetFlag(kFunctionContextSpecializing);
-  }
-
-  bool is_function_context_specializing() const {
-    return GetFlag(kFunctionContextSpecializing);
-  }
-
-  void MarkAsFrameSpecializing() { SetFlag(kFrameSpecializing); }
-
-  bool is_frame_specializing() const { return GetFlag(kFrameSpecializing); }
-
-  void MarkAsNativeContextSpecializing() {
-    SetFlag(kNativeContextSpecializing);
-  }
-
-  bool is_native_context_specializing() const {
-    return GetFlag(kNativeContextSpecializing);
-  }
-
-  void MarkAsDeoptimizationEnabled() { SetFlag(kDeoptimizationEnabled); }
-
-  bool is_deoptimization_enabled() const {
-    return GetFlag(kDeoptimizationEnabled);
-  }
-
-  void MarkAsTypeFeedbackEnabled() { SetFlag(kTypeFeedbackEnabled); }
-
-  bool is_type_feedback_enabled() const {
-    return GetFlag(kTypeFeedbackEnabled);
-  }
-
-  void MarkAsAccessorInliningEnabled() { SetFlag(kAccessorInliningEnabled); }
-
-  bool is_accessor_inlining_enabled() const {
-    return GetFlag(kAccessorInliningEnabled);
-  }
-
-  void MarkAsSourcePositionsEnabled() { SetFlag(kSourcePositionsEnabled); }
-
-  bool is_source_positions_enabled() const {
-    return GetFlag(kSourcePositionsEnabled);
-  }
-
-  void MarkAsInliningEnabled() { SetFlag(kInliningEnabled); }
-
-  bool is_inlining_enabled() const { return GetFlag(kInliningEnabled); }
-
-  void MarkAsSplittingEnabled() { SetFlag(kSplittingEnabled); }
-
-  bool is_splitting_enabled() const { return GetFlag(kSplittingEnabled); }
-
-  void MarkAsBailoutOnUninitialized() { SetFlag(kBailoutOnUninitialized); }
-
-  bool is_bailout_on_uninitialized() const {
-    return GetFlag(kBailoutOnUninitialized);
-  }
-
-  void MarkAsOptimizeFromBytecode() { SetFlag(kOptimizeFromBytecode); }
-
-  bool is_optimizing_from_bytecode() const {
-    return GetFlag(kOptimizeFromBytecode);
-  }
-
-  bool GeneratePreagedPrologue() const {
-    // Generate a pre-aged prologue if we are optimizing for size, which
-    // will make code flushing more aggressive. Only apply to Code::FUNCTION,
-    // since StaticMarkingVisitor::IsFlushable only flushes proper functions.
-    return FLAG_optimize_for_size && FLAG_age_code && !is_debug() &&
-           output_code_kind() == Code::FUNCTION;
-  }
-
-  void SetCode(Handle<Code> code) { code_ = code; }
-
-  void SetBytecodeArray(Handle<BytecodeArray> bytecode_array) {
-    bytecode_array_ = bytecode_array;
-  }
-
-  bool ShouldTrapOnDeopt() const {
-    return (FLAG_trap_on_deopt && IsOptimizing()) ||
-        (FLAG_trap_on_stub_deopt && IsStub());
-  }
-
-  bool has_native_context() const {
-    return !closure().is_null() && (closure()->native_context() != nullptr);
-  }
-
-  Context* native_context() const {
-    return has_native_context() ? closure()->native_context() : nullptr;
-  }
-
-  bool has_global_object() const { return has_native_context(); }
-
-  JSGlobalObject* global_object() const {
-    return has_global_object() ? native_context()->global_object() : nullptr;
-  }
-
-  // Accessors for the different compilation modes.
-  bool IsOptimizing() const { return mode_ == OPTIMIZE; }
-  bool IsStub() const { return mode_ == STUB; }
-  void SetOptimizing() {
-    DCHECK(has_shared_info());
-    SetMode(OPTIMIZE);
-    optimization_id_ = isolate()->NextOptimizationId();
-    code_flags_ =
-        Code::KindField::update(code_flags_, Code::OPTIMIZED_FUNCTION);
-  }
-  void SetOptimizingForOsr(BailoutId osr_ast_id, JavaScriptFrame* osr_frame) {
-    SetOptimizing();
-    osr_ast_id_ = osr_ast_id;
-    osr_frame_ = osr_frame;
-  }
-
-  // Deoptimization support.
-  bool HasDeoptimizationSupport() const {
-    return GetFlag(kDeoptimizationSupport);
-  }
-  void EnableDeoptimizationSupport() {
-    DCHECK_EQ(BASE, mode_);
-    SetFlag(kDeoptimizationSupport);
-  }
-  bool ShouldEnsureSpaceForLazyDeopt() { return !IsStub(); }
-
-  bool ExpectsJSReceiverAsReceiver();
-
-  // Determines whether or not to insert a self-optimization header.
-  bool ShouldSelfOptimize();
-
-  void set_deferred_handles(DeferredHandles* deferred_handles) {
-    DCHECK(deferred_handles_ == NULL);
-    deferred_handles_ = deferred_handles;
-  }
-
-  void ReopenHandlesInNewHandleScope() {
-    closure_ = Handle<JSFunction>(*closure_);
-  }
-
-  void AbortOptimization(BailoutReason reason) {
-    DCHECK(reason != kNoReason);
-    if (bailout_reason_ == kNoReason) bailout_reason_ = reason;
-    SetFlag(kDisableFutureOptimization);
-  }
-
-  void RetryOptimization(BailoutReason reason) {
-    DCHECK(reason != kNoReason);
-    if (GetFlag(kDisableFutureOptimization)) return;
-    bailout_reason_ = reason;
-  }
-
-  BailoutReason bailout_reason() const { return bailout_reason_; }
-
-  int prologue_offset() const {
-    DCHECK_NE(Code::kPrologueOffsetNotSet, prologue_offset_);
-    return prologue_offset_;
-  }
-
-  void set_prologue_offset(int prologue_offset) {
-    DCHECK_EQ(Code::kPrologueOffsetNotSet, prologue_offset_);
-    prologue_offset_ = prologue_offset;
-  }
-
-  CompilationDependencies* dependencies() { return &dependencies_; }
-
-  int optimization_id() const { return optimization_id_; }
-
-  int osr_expr_stack_height() { return osr_expr_stack_height_; }
-  void set_osr_expr_stack_height(int height) {
-    DCHECK(height >= 0);
-    osr_expr_stack_height_ = height;
-  }
-
-  bool has_simple_parameters();
-
-  struct InlinedFunctionHolder {
-    Handle<SharedFunctionInfo> shared_info;
-
-    // Root that holds the unoptimized code of the inlined function alive
-    // (and out of reach of code flushing) until we finish compilation.
-    // Do not remove.
-    Handle<Code> inlined_code_object_root;
-
-    explicit InlinedFunctionHolder(
-        Handle<SharedFunctionInfo> inlined_shared_info)
-        : shared_info(inlined_shared_info),
-          inlined_code_object_root(inlined_shared_info->code()) {}
-  };
-
-  typedef std::vector<InlinedFunctionHolder> InlinedFunctionList;
-  InlinedFunctionList const& inlined_functions() const {
-    return inlined_functions_;
-  }
-
-  void AddInlinedFunction(Handle<SharedFunctionInfo> inlined_function) {
-    inlined_functions_.push_back(InlinedFunctionHolder(inlined_function));
-  }
-
-  std::unique_ptr<char[]> GetDebugName() const;
-
-  Code::Kind output_code_kind() const {
-    return Code::ExtractKindFromFlags(code_flags_);
-  }
-
-  StackFrame::Type GetOutputStackFrameType() const;
-
-  int GetDeclareGlobalsFlags() const;
-
-  SourcePositionTableBuilder::RecordingMode SourcePositionRecordingMode() const;
-
- private:
-  // Compilation mode.
-  // BASE is generated by the full codegen, optionally prepared for bailouts.
-  // OPTIMIZE is optimized code generated by the Hydrogen-based backend.
-  enum Mode {
-    BASE,
-    OPTIMIZE,
-    STUB
-  };
-
-  CompilationInfo(ParseInfo* parse_info, Vector<const char> debug_name,
-                  Code::Flags code_flags, Mode mode, Isolate* isolate,
-                  Zone* zone);
-
-  ParseInfo* parse_info_;
-  Isolate* isolate_;
-
-  void SetMode(Mode mode) {
-    mode_ = mode;
-  }
-
-  void SetFlag(Flag flag) { flags_ |= flag; }
-
-  void SetFlag(Flag flag, bool value) {
-    flags_ = value ? flags_ | flag : flags_ & ~flag;
-  }
-
-  bool GetFlag(Flag flag) const { return (flags_ & flag) != 0; }
-
-  unsigned flags_;
-
-  Code::Flags code_flags_;
-
-  Handle<JSFunction> closure_;
-
-  // The compiled code.
-  Handle<Code> code_;
-
-  // Compilation mode flag and whether deoptimization is allowed.
-  Mode mode_;
-  BailoutId osr_ast_id_;
-
-  // Holds the bytecode array generated by the interpreter.
-  // TODO(rmcilroy/mstarzinger): Temporary work-around until compiler.cc is
-  // refactored to avoid us needing to carry the BytcodeArray around.
-  Handle<BytecodeArray> bytecode_array_;
-
-  // The zone from which the compilation pipeline working on this
-  // CompilationInfo allocates.
-  Zone* zone_;
-
-  DeferredHandles* deferred_handles_;
-
-  // Dependencies for this compilation, e.g. stable maps.
-  CompilationDependencies dependencies_;
-
-  BailoutReason bailout_reason_;
-
-  int prologue_offset_;
-
-  bool track_positions_;
-
-  InlinedFunctionList inlined_functions_;
-
-  // Number of parameters used for compilation of stubs that require arguments.
-  int parameter_count_;
-
-  int optimization_id_;
-
-  int osr_expr_stack_height_;
-
-  // The current OSR frame for specialization or {nullptr}.
-  JavaScriptFrame* osr_frame_ = nullptr;
-
-  Vector<const char> debug_name_;
-
-  DISALLOW_COPY_AND_ASSIGN(CompilationInfo);
-};
-
 // A base class for compilation jobs intended to run concurrent to the main
 // thread. The job is split into three phases which are called in sequence on
 // different threads and with different limitations:
@@ -557,15 +157,20 @@
     kFailed,
   };
 
-  explicit CompilationJob(CompilationInfo* info, const char* compiler_name,
-                          State initial_state = State::kReadyToPrepare)
-      : info_(info), compiler_name_(compiler_name), state_(initial_state) {}
+  CompilationJob(Isolate* isolate, CompilationInfo* info,
+                 const char* compiler_name,
+                 State initial_state = State::kReadyToPrepare)
+      : info_(info),
+        compiler_name_(compiler_name),
+        state_(initial_state),
+        stack_limit_(isolate->stack_guard()->real_climit()) {}
   virtual ~CompilationJob() {}
 
   // Prepare the compile job. Must be called on the main thread.
   MUST_USE_RESULT Status PrepareJob();
 
-  // Executes the compile job. Can be called off the main thread.
+  // Executes the compile job. Can be called on a background thread if
+  // can_execute_on_background_thread() returns true.
   MUST_USE_RESULT Status ExecuteJob();
 
   // Finalizes the compile job. Must be called on the main thread.
@@ -573,27 +178,23 @@
 
   // Report a transient failure, try again next time. Should only be called on
   // optimization compilation jobs.
-  Status RetryOptimization(BailoutReason reason) {
-    DCHECK(info_->IsOptimizing());
-    info_->RetryOptimization(reason);
-    state_ = State::kFailed;
-    return FAILED;
-  }
+  Status RetryOptimization(BailoutReason reason);
 
   // Report a persistent failure, disable future optimization on the function.
   // Should only be called on optimization compilation jobs.
-  Status AbortOptimization(BailoutReason reason) {
-    DCHECK(info_->IsOptimizing());
-    info_->AbortOptimization(reason);
-    state_ = State::kFailed;
-    return FAILED;
-  }
+  Status AbortOptimization(BailoutReason reason);
 
-  void RecordOptimizationStats();
+  void RecordOptimizedCompilationStats() const;
+  void RecordUnoptimizedCompilationStats() const;
+
+  virtual bool can_execute_on_background_thread() const { return true; }
+
+  void set_stack_limit(uintptr_t stack_limit) { stack_limit_ = stack_limit; }
+  uintptr_t stack_limit() const { return stack_limit_; }
 
   State state() const { return state_; }
   CompilationInfo* info() const { return info_; }
-  Isolate* isolate() const { return info()->isolate(); }
+  Isolate* isolate() const;
 
  protected:
   // Overridden by the actual implementation.
@@ -612,6 +213,7 @@
   base::TimeDelta time_taken_to_finalize_;
   const char* compiler_name_;
   State state_;
+  uintptr_t stack_limit_;
 
   MUST_USE_RESULT Status UpdateState(Status status, State next_state) {
     if (status == SUCCEEDED) {
diff --git a/src/compiler/access-builder.cc b/src/compiler/access-builder.cc
index c43a53f..5301434 100644
--- a/src/compiler/access-builder.cc
+++ b/src/compiler/access-builder.cc
@@ -4,21 +4,29 @@
 
 #include "src/compiler/access-builder.h"
 
+#include "src/compiler/type-cache.h"
 #include "src/contexts.h"
 #include "src/frames.h"
 #include "src/handles-inl.h"
 #include "src/heap/heap.h"
-#include "src/type-cache.h"
 
 namespace v8 {
 namespace internal {
 namespace compiler {
 
 // static
+FieldAccess AccessBuilder::ForExternalDoubleValue() {
+  FieldAccess access = {kUntaggedBase,          0,
+                        MaybeHandle<Name>(),    Type::Number(),
+                        MachineType::Float64(), kNoWriteBarrier};
+  return access;
+}
+
+// static
 FieldAccess AccessBuilder::ForMap() {
   FieldAccess access = {
-      kTaggedBase,           HeapObject::kMapOffset,   MaybeHandle<Name>(),
-      Type::OtherInternal(), MachineType::AnyTagged(), kMapWriteBarrier};
+      kTaggedBase,           HeapObject::kMapOffset,       MaybeHandle<Name>(),
+      Type::OtherInternal(), MachineType::TaggedPointer(), kMapWriteBarrier};
   return access;
 }
 
@@ -38,8 +46,8 @@
 // static
 FieldAccess AccessBuilder::ForJSObjectProperties() {
   FieldAccess access = {
-      kTaggedBase,      JSObject::kPropertiesOffset, MaybeHandle<Name>(),
-      Type::Internal(), MachineType::AnyTagged(),    kPointerWriteBarrier};
+      kTaggedBase,      JSObject::kPropertiesOffset,  MaybeHandle<Name>(),
+      Type::Internal(), MachineType::TaggedPointer(), kPointerWriteBarrier};
   return access;
 }
 
@@ -47,8 +55,8 @@
 // static
 FieldAccess AccessBuilder::ForJSObjectElements() {
   FieldAccess access = {
-      kTaggedBase,      JSObject::kElementsOffset, MaybeHandle<Name>(),
-      Type::Internal(), MachineType::AnyTagged(),  kPointerWriteBarrier};
+      kTaggedBase,      JSObject::kElementsOffset,    MaybeHandle<Name>(),
+      Type::Internal(), MachineType::TaggedPointer(), kPointerWriteBarrier};
   return access;
 }
 
@@ -60,7 +68,7 @@
   FieldAccess access = {kTaggedBase,
                         offset,
                         MaybeHandle<Name>(),
-                        Type::Tagged(),
+                        Type::NonInternal(),
                         MachineType::AnyTagged(),
                         kFullWriteBarrier};
   return access;
@@ -93,7 +101,7 @@
                         JSFunction::kSharedFunctionInfoOffset,
                         Handle<Name>(),
                         Type::OtherInternal(),
-                        MachineType::AnyTagged(),
+                        MachineType::TaggedPointer(),
                         kPointerWriteBarrier};
   return access;
 }
@@ -101,19 +109,16 @@
 // static
 FieldAccess AccessBuilder::ForJSFunctionLiterals() {
   FieldAccess access = {
-      kTaggedBase,      JSFunction::kLiteralsOffset, Handle<Name>(),
-      Type::Internal(), MachineType::AnyTagged(),    kPointerWriteBarrier};
+      kTaggedBase,      JSFunction::kLiteralsOffset,  Handle<Name>(),
+      Type::Internal(), MachineType::TaggedPointer(), kPointerWriteBarrier};
   return access;
 }
 
 // static
 FieldAccess AccessBuilder::ForJSFunctionCodeEntry() {
-  FieldAccess access = {kTaggedBase,
-                        JSFunction::kCodeEntryOffset,
-                        Handle<Name>(),
-                        Type::UntaggedPointer(),
-                        MachineType::Pointer(),
-                        kNoWriteBarrier};
+  FieldAccess access = {
+      kTaggedBase,           JSFunction::kCodeEntryOffset, Handle<Name>(),
+      Type::OtherInternal(), MachineType::Pointer(),       kNoWriteBarrier};
   return access;
 }
 
@@ -134,7 +139,7 @@
                         JSGeneratorObject::kContextOffset,
                         Handle<Name>(),
                         Type::Internal(),
-                        MachineType::AnyTagged(),
+                        MachineType::TaggedPointer(),
                         kPointerWriteBarrier};
   return access;
 }
@@ -146,7 +151,7 @@
                         JSGeneratorObject::kContinuationOffset,
                         Handle<Name>(),
                         type_cache.kSmi,
-                        MachineType::AnyTagged(),
+                        MachineType::TaggedSigned(),
                         kNoWriteBarrier};
   return access;
 }
@@ -176,12 +181,9 @@
 // static
 FieldAccess AccessBuilder::ForJSGeneratorObjectResumeMode() {
   TypeCache const& type_cache = TypeCache::Get();
-  FieldAccess access = {kTaggedBase,
-                        JSGeneratorObject::kResumeModeOffset,
-                        Handle<Name>(),
-                        type_cache.kSmi,
-                        MachineType::AnyTagged(),
-                        kNoWriteBarrier};
+  FieldAccess access = {
+      kTaggedBase,     JSGeneratorObject::kResumeModeOffset, Handle<Name>(),
+      type_cache.kSmi, MachineType::TaggedSigned(),          kNoWriteBarrier};
   return access;
 }
 
@@ -192,7 +194,7 @@
                         JSArray::kLengthOffset,
                         Handle<Name>(),
                         type_cache.kJSArrayLengthType,
-                        MachineType::AnyTagged(),
+                        MachineType::TaggedSigned(),
                         kFullWriteBarrier};
   if (IsFastDoubleElementsKind(elements_kind)) {
     access.type = type_cache.kFixedDoubleArrayLengthType;
@@ -210,7 +212,7 @@
   FieldAccess access = {kTaggedBase,
                         JSArrayBuffer::kBackingStoreOffset,
                         MaybeHandle<Name>(),
-                        Type::UntaggedPointer(),
+                        Type::OtherInternal(),
                         MachineType::Pointer(),
                         kNoWriteBarrier};
   return access;
@@ -229,8 +231,8 @@
   FieldAccess access = {kTaggedBase,
                         JSArrayBufferView::kBufferOffset,
                         MaybeHandle<Name>(),
-                        Type::TaggedPointer(),
-                        MachineType::AnyTagged(),
+                        Type::OtherInternal(),
+                        MachineType::TaggedPointer(),
                         kPointerWriteBarrier};
   return access;
 }
@@ -263,12 +265,23 @@
                         JSTypedArray::kLengthOffset,
                         MaybeHandle<Name>(),
                         TypeCache::Get().kJSTypedArrayLengthType,
-                        MachineType::AnyTagged(),
+                        MachineType::TaggedSigned(),
                         kNoWriteBarrier};
   return access;
 }
 
 // static
+FieldAccess AccessBuilder::ForJSDateValue() {
+  FieldAccess access = {kTaggedBase,
+                        JSDate::kValueOffset,
+                        MaybeHandle<Name>(),
+                        TypeCache::Get().kJSDateValueType,
+                        MachineType::AnyTagged(),
+                        kFullWriteBarrier};
+  return access;
+}
+
+// static
 FieldAccess AccessBuilder::ForJSDateField(JSDate::FieldIndex index) {
   FieldAccess access = {kTaggedBase,
                         JSDate::kValueOffset + index * kPointerSize,
@@ -301,8 +314,8 @@
 // static
 FieldAccess AccessBuilder::ForJSRegExpFlags() {
   FieldAccess access = {
-      kTaggedBase,    JSRegExp::kFlagsOffset,   MaybeHandle<Name>(),
-      Type::Tagged(), MachineType::AnyTagged(), kFullWriteBarrier};
+      kTaggedBase,         JSRegExp::kFlagsOffset,   MaybeHandle<Name>(),
+      Type::NonInternal(), MachineType::AnyTagged(), kFullWriteBarrier};
   return access;
 }
 
@@ -310,8 +323,8 @@
 // static
 FieldAccess AccessBuilder::ForJSRegExpSource() {
   FieldAccess access = {
-      kTaggedBase,    JSRegExp::kSourceOffset,  MaybeHandle<Name>(),
-      Type::Tagged(), MachineType::AnyTagged(), kFullWriteBarrier};
+      kTaggedBase,         JSRegExp::kSourceOffset,  MaybeHandle<Name>(),
+      Type::NonInternal(), MachineType::AnyTagged(), kFullWriteBarrier};
   return access;
 }
 
@@ -322,7 +335,7 @@
                         FixedArray::kLengthOffset,
                         MaybeHandle<Name>(),
                         TypeCache::Get().kFixedArrayLengthType,
-                        MachineType::AnyTagged(),
+                        MachineType::TaggedSigned(),
                         kNoWriteBarrier};
   return access;
 }
@@ -332,7 +345,7 @@
   FieldAccess access = {kTaggedBase,
                         FixedTypedArrayBase::kBasePointerOffset,
                         MaybeHandle<Name>(),
-                        Type::Tagged(),
+                        Type::OtherInternal(),
                         MachineType::AnyTagged(),
                         kPointerWriteBarrier};
   return access;
@@ -343,7 +356,7 @@
   FieldAccess access = {kTaggedBase,
                         FixedTypedArrayBase::kExternalPointerOffset,
                         MaybeHandle<Name>(),
-                        Type::UntaggedPointer(),
+                        Type::OtherInternal(),
                         MachineType::Pointer(),
                         kNoWriteBarrier};
   return access;
@@ -354,8 +367,8 @@
   FieldAccess access = {kTaggedBase,
                         DescriptorArray::kEnumCacheOffset,
                         Handle<Name>(),
-                        Type::TaggedPointer(),
-                        MachineType::AnyTagged(),
+                        Type::OtherInternal(),
+                        MachineType::TaggedPointer(),
                         kPointerWriteBarrier};
   return access;
 }
@@ -366,8 +379,8 @@
   FieldAccess access = {kTaggedBase,
                         DescriptorArray::kEnumCacheBridgeCacheOffset,
                         Handle<Name>(),
-                        Type::TaggedPointer(),
-                        MachineType::AnyTagged(),
+                        Type::OtherInternal(),
+                        MachineType::TaggedPointer(),
                         kPointerWriteBarrier};
   return access;
 }
@@ -393,9 +406,12 @@
 
 // static
 FieldAccess AccessBuilder::ForMapDescriptors() {
-  FieldAccess access = {
-      kTaggedBase,           Map::kDescriptorsOffset,  Handle<Name>(),
-      Type::TaggedPointer(), MachineType::AnyTagged(), kPointerWriteBarrier};
+  FieldAccess access = {kTaggedBase,
+                        Map::kDescriptorsOffset,
+                        Handle<Name>(),
+                        Type::OtherInternal(),
+                        MachineType::TaggedPointer(),
+                        kPointerWriteBarrier};
   return access;
 }
 
@@ -412,8 +428,8 @@
 // static
 FieldAccess AccessBuilder::ForMapPrototype() {
   FieldAccess access = {
-      kTaggedBase,           Map::kPrototypeOffset,    Handle<Name>(),
-      Type::TaggedPointer(), MachineType::AnyTagged(), kPointerWriteBarrier};
+      kTaggedBase, Map::kPrototypeOffset,        Handle<Name>(),
+      Type::Any(), MachineType::TaggedPointer(), kPointerWriteBarrier};
   return access;
 }
 
@@ -432,7 +448,7 @@
                         String::kLengthOffset,
                         Handle<Name>(),
                         TypeCache::Get().kStringLengthType,
-                        MachineType::AnyTagged(),
+                        MachineType::TaggedSigned(),
                         kNoWriteBarrier};
   return access;
 }
@@ -440,16 +456,16 @@
 // static
 FieldAccess AccessBuilder::ForConsStringFirst() {
   FieldAccess access = {
-      kTaggedBase,    ConsString::kFirstOffset, Handle<Name>(),
-      Type::String(), MachineType::AnyTagged(), kPointerWriteBarrier};
+      kTaggedBase,    ConsString::kFirstOffset,     Handle<Name>(),
+      Type::String(), MachineType::TaggedPointer(), kPointerWriteBarrier};
   return access;
 }
 
 // static
 FieldAccess AccessBuilder::ForConsStringSecond() {
   FieldAccess access = {
-      kTaggedBase,    ConsString::kSecondOffset, Handle<Name>(),
-      Type::String(), MachineType::AnyTagged(),  kPointerWriteBarrier};
+      kTaggedBase,    ConsString::kSecondOffset,    Handle<Name>(),
+      Type::String(), MachineType::TaggedPointer(), kPointerWriteBarrier};
   return access;
 }
 
@@ -457,15 +473,15 @@
 FieldAccess AccessBuilder::ForSlicedStringOffset() {
   FieldAccess access = {
       kTaggedBase,         SlicedString::kOffsetOffset, Handle<Name>(),
-      Type::SignedSmall(), MachineType::AnyTagged(),    kNoWriteBarrier};
+      Type::SignedSmall(), MachineType::TaggedSigned(), kNoWriteBarrier};
   return access;
 }
 
 // static
 FieldAccess AccessBuilder::ForSlicedStringParent() {
   FieldAccess access = {
-      kTaggedBase,    SlicedString::kParentOffset, Handle<Name>(),
-      Type::String(), MachineType::AnyTagged(),    kPointerWriteBarrier};
+      kTaggedBase,    SlicedString::kParentOffset,  Handle<Name>(),
+      Type::String(), MachineType::TaggedPointer(), kPointerWriteBarrier};
   return access;
 }
 
@@ -474,7 +490,7 @@
   FieldAccess access = {kTaggedBase,
                         ExternalString::kResourceDataOffset,
                         Handle<Name>(),
-                        Type::UntaggedPointer(),
+                        Type::OtherInternal(),
                         MachineType::Pointer(),
                         kNoWriteBarrier};
   return access;
@@ -516,7 +532,7 @@
                         JSGlobalObject::kGlobalProxyOffset,
                         Handle<Name>(),
                         Type::Receiver(),
-                        MachineType::AnyTagged(),
+                        MachineType::TaggedPointer(),
                         kPointerWriteBarrier};
   return access;
 }
@@ -527,11 +543,29 @@
                         JSGlobalObject::kNativeContextOffset,
                         Handle<Name>(),
                         Type::Internal(),
-                        MachineType::AnyTagged(),
+                        MachineType::TaggedPointer(),
                         kPointerWriteBarrier};
   return access;
 }
 
+// static
+FieldAccess AccessBuilder::ForJSStringIteratorString() {
+  FieldAccess access = {
+      kTaggedBase,    JSStringIterator::kStringOffset, Handle<Name>(),
+      Type::String(), MachineType::TaggedPointer(),    kPointerWriteBarrier};
+  return access;
+}
+
+// static
+FieldAccess AccessBuilder::ForJSStringIteratorIndex() {
+  FieldAccess access = {kTaggedBase,
+                        JSStringIterator::kNextIndexOffset,
+                        Handle<Name>(),
+                        TypeCache::Get().kStringLengthType,
+                        MachineType::TaggedSigned(),
+                        kNoWriteBarrier};
+  return access;
+}
 
 // static
 FieldAccess AccessBuilder::ForValue() {
@@ -590,24 +624,28 @@
   return access;
 }
 
-
 // static
-FieldAccess AccessBuilder::ForPropertyCellValue() {
-  return ForPropertyCellValue(Type::Tagged());
+FieldAccess AccessBuilder::ForContextExtensionScopeInfo() {
+  FieldAccess access = {kTaggedBase,
+                        ContextExtension::kScopeInfoOffset,
+                        Handle<Name>(),
+                        Type::OtherInternal(),
+                        MachineType::AnyTagged(),
+                        kFullWriteBarrier};
+  return access;
 }
 
-
 // static
-FieldAccess AccessBuilder::ForPropertyCellValue(Type* type) {
+FieldAccess AccessBuilder::ForContextExtensionExtension() {
   FieldAccess access = {
-      kTaggedBase, PropertyCell::kValueOffset, Handle<Name>(),
-      type,        MachineType::AnyTagged(),   kFullWriteBarrier};
+      kTaggedBase, ContextExtension::kExtensionOffset, Handle<Name>(),
+      Type::Any(), MachineType::AnyTagged(),           kFullWriteBarrier};
   return access;
 }
 
 // static
 ElementAccess AccessBuilder::ForFixedArrayElement() {
-  ElementAccess access = {kTaggedBase, FixedArray::kHeaderSize, Type::Tagged(),
+  ElementAccess access = {kTaggedBase, FixedArray::kHeaderSize, Type::Any(),
                           MachineType::AnyTagged(), kFullWriteBarrier};
   return access;
 }
@@ -619,6 +657,7 @@
   switch (kind) {
     case FAST_SMI_ELEMENTS:
       access.type = TypeCache::Get().kSmi;
+      access.machine_type = MachineType::TaggedSigned();
       access.write_barrier_kind = kNoWriteBarrier;
       break;
     case FAST_HOLEY_SMI_ELEMENTS:
diff --git a/src/compiler/access-builder.h b/src/compiler/access-builder.h
index caaf8f8..96f3200 100644
--- a/src/compiler/access-builder.h
+++ b/src/compiler/access-builder.h
@@ -18,6 +18,12 @@
 class AccessBuilder final : public AllStatic {
  public:
   // ===========================================================================
+  // Access to external values (based on external references).
+
+  // Provides access to a double field identified by an external reference.
+  static FieldAccess ForExternalDoubleValue();
+
+  // ===========================================================================
   // Access to heap object fields and elements (based on tagged pointer).
 
   // Provides access to HeapObject::map() field.
@@ -89,6 +95,9 @@
   // Provides access to JSTypedArray::length() field.
   static FieldAccess ForJSTypedArrayLength();
 
+  // Provides access to JSDate::value() field.
+  static FieldAccess ForJSDateValue();
+
   // Provides access to JSDate fields.
   static FieldAccess ForJSDateField(JSDate::FieldIndex index);
 
@@ -173,6 +182,12 @@
   // Provides access to JSGlobalObject::native_context() field.
   static FieldAccess ForJSGlobalObjectNativeContext();
 
+  // Provides access to JSStringIterator::string() field.
+  static FieldAccess ForJSStringIteratorString();
+
+  // Provides access to JSStringIterator::index() field.
+  static FieldAccess ForJSStringIteratorIndex();
+
   // Provides access to JSValue::value() field.
   static FieldAccess ForValue();
 
@@ -186,9 +201,9 @@
   // Provides access to Context slots.
   static FieldAccess ForContextSlot(size_t index);
 
-  // Provides access to PropertyCell::value() field.
-  static FieldAccess ForPropertyCellValue();
-  static FieldAccess ForPropertyCellValue(Type* type);
+  // Provides access to ContextExtension fields.
+  static FieldAccess ForContextExtensionScopeInfo();
+  static FieldAccess ForContextExtensionExtension();
 
   // Provides access to FixedArray elements.
   static ElementAccess ForFixedArrayElement();
diff --git a/src/compiler/access-info.cc b/src/compiler/access-info.cc
index 97de25b..329cb93 100644
--- a/src/compiler/access-info.cc
+++ b/src/compiler/access-info.cc
@@ -7,10 +7,10 @@
 #include "src/accessors.h"
 #include "src/compilation-dependencies.h"
 #include "src/compiler/access-info.h"
+#include "src/compiler/type-cache.h"
 #include "src/field-index-inl.h"
 #include "src/field-type.h"
 #include "src/objects-inl.h"
-#include "src/type-cache.h"
 
 namespace v8 {
 namespace internal {
@@ -79,9 +79,12 @@
 
 // static
 PropertyAccessInfo PropertyAccessInfo::DataField(
-    MapList const& receiver_maps, FieldIndex field_index, Type* field_type,
-    MaybeHandle<JSObject> holder, MaybeHandle<Map> transition_map) {
-  return PropertyAccessInfo(holder, transition_map, field_index, field_type,
+    MapList const& receiver_maps, FieldIndex field_index,
+    MachineRepresentation field_representation, Type* field_type,
+    MaybeHandle<Map> field_map, MaybeHandle<JSObject> holder,
+    MaybeHandle<Map> transition_map) {
+  return PropertyAccessInfo(holder, transition_map, field_index,
+                            field_representation, field_type, field_map,
                             receiver_maps);
 }
 
@@ -93,13 +96,16 @@
 }
 
 PropertyAccessInfo::PropertyAccessInfo()
-    : kind_(kInvalid), field_type_(Type::None()) {}
+    : kind_(kInvalid),
+      field_representation_(MachineRepresentation::kNone),
+      field_type_(Type::None()) {}
 
 PropertyAccessInfo::PropertyAccessInfo(MaybeHandle<JSObject> holder,
                                        MapList const& receiver_maps)
     : kind_(kNotFound),
       receiver_maps_(receiver_maps),
       holder_(holder),
+      field_representation_(MachineRepresentation::kNone),
       field_type_(Type::None()) {}
 
 PropertyAccessInfo::PropertyAccessInfo(Kind kind, MaybeHandle<JSObject> holder,
@@ -109,18 +115,21 @@
       receiver_maps_(receiver_maps),
       constant_(constant),
       holder_(holder),
+      field_representation_(MachineRepresentation::kNone),
       field_type_(Type::Any()) {}
 
-PropertyAccessInfo::PropertyAccessInfo(MaybeHandle<JSObject> holder,
-                                       MaybeHandle<Map> transition_map,
-                                       FieldIndex field_index, Type* field_type,
-                                       MapList const& receiver_maps)
+PropertyAccessInfo::PropertyAccessInfo(
+    MaybeHandle<JSObject> holder, MaybeHandle<Map> transition_map,
+    FieldIndex field_index, MachineRepresentation field_representation,
+    Type* field_type, MaybeHandle<Map> field_map, MapList const& receiver_maps)
     : kind_(kDataField),
       receiver_maps_(receiver_maps),
       transition_map_(transition_map),
       holder_(holder),
       field_index_(field_index),
-      field_type_(field_type) {}
+      field_representation_(field_representation),
+      field_type_(field_type),
+      field_map_(field_map) {}
 
 bool PropertyAccessInfo::Merge(PropertyAccessInfo const* that) {
   if (this->kind_ != that->kind_) return false;
@@ -138,7 +147,8 @@
       if (this->transition_map_.address() == that->transition_map_.address() &&
           this->field_index_ == that->field_index_ &&
           this->field_type_->Is(that->field_type_) &&
-          that->field_type_->Is(this->field_type_)) {
+          that->field_type_->Is(this->field_type_) &&
+          this->field_representation_ == that->field_representation_) {
         this->receiver_maps_.insert(this->receiver_maps_.end(),
                                     that->receiver_maps_.begin(),
                                     that->receiver_maps_.end());
@@ -283,41 +293,45 @@
         }
         case DATA: {
           int index = descriptors->GetFieldIndex(number);
-          Representation field_representation = details.representation();
+          Representation details_representation = details.representation();
           FieldIndex field_index = FieldIndex::ForPropertyIndex(
-              *map, index, field_representation.IsDouble());
-          Type* field_type = Type::Tagged();
-          if (field_representation.IsSmi()) {
+              *map, index, details_representation.IsDouble());
+          Type* field_type = Type::NonInternal();
+          MachineRepresentation field_representation =
+              MachineRepresentation::kTagged;
+          MaybeHandle<Map> field_map;
+          if (details_representation.IsSmi()) {
             field_type = type_cache_.kSmi;
-          } else if (field_representation.IsDouble()) {
+            field_representation = MachineRepresentation::kTaggedSigned;
+          } else if (details_representation.IsDouble()) {
             field_type = type_cache_.kFloat64;
-          } else if (field_representation.IsHeapObject()) {
+            field_representation = MachineRepresentation::kFloat64;
+          } else if (details_representation.IsHeapObject()) {
             // Extract the field type from the property details (make sure its
             // representation is TaggedPointer to reflect the heap object case).
-            field_type = Type::Intersect(
-                descriptors->GetFieldType(number)->Convert(zone()),
-                Type::TaggedPointer(), zone());
-            if (field_type->Is(Type::None())) {
+            field_representation = MachineRepresentation::kTaggedPointer;
+            Handle<FieldType> descriptors_field_type(
+                descriptors->GetFieldType(number), isolate());
+            if (descriptors_field_type->IsNone()) {
               // Store is not safe if the field type was cleared.
               if (access_mode == AccessMode::kStore) return false;
 
               // The field type was cleared by the GC, so we don't know anything
               // about the contents now.
-              // TODO(bmeurer): It would be awesome to make this saner in the
-              // runtime/GC interaction.
-              field_type = Type::TaggedPointer();
-            } else if (!Type::Any()->Is(field_type)) {
+            } else if (descriptors_field_type->IsClass()) {
               // Add proper code dependencies in case of stable field map(s).
               Handle<Map> field_owner_map(map->FindFieldOwner(number),
                                           isolate());
               dependencies()->AssumeFieldType(field_owner_map);
-            }
-            if (access_mode == AccessMode::kLoad) {
-              field_type = Type::Any();
+
+              // Remember the field map, and try to infer a useful type.
+              field_type = Type::For(descriptors_field_type->AsClass());
+              field_map = descriptors_field_type->AsClass();
             }
           }
           *access_info = PropertyAccessInfo::DataField(
-              MapList{receiver_map}, field_index, field_type, holder);
+              MapList{receiver_map}, field_index, field_representation,
+              field_type, field_map, holder);
           return true;
         }
         case ACCESSOR_CONSTANT: {
@@ -423,12 +437,14 @@
   int offset;
   if (Accessors::IsJSObjectFieldAccessor(map, name, &offset)) {
     FieldIndex field_index = FieldIndex::ForInObjectOffset(offset);
-    Type* field_type = Type::Tagged();
+    Type* field_type = Type::NonInternal();
+    MachineRepresentation field_representation = MachineRepresentation::kTagged;
     if (map->IsStringMap()) {
       DCHECK(Name::Equals(factory()->length_string(), name));
       // The String::length property is always a smi in the range
       // [0, String::kMaxLength].
       field_type = type_cache_.kStringLengthType;
+      field_representation = MachineRepresentation::kTaggedSigned;
     } else if (map->IsJSArrayMap()) {
       DCHECK(Name::Equals(factory()->length_string(), name));
       // The JSArray::length property is a smi in the range
@@ -438,14 +454,16 @@
       // case of other arrays.
       if (IsFastDoubleElementsKind(map->elements_kind())) {
         field_type = type_cache_.kFixedDoubleArrayLengthType;
+        field_representation = MachineRepresentation::kTaggedSigned;
       } else if (IsFastElementsKind(map->elements_kind())) {
         field_type = type_cache_.kFixedArrayLengthType;
+        field_representation = MachineRepresentation::kTaggedSigned;
       } else {
         field_type = type_cache_.kJSArrayLengthType;
       }
     }
-    *access_info =
-        PropertyAccessInfo::DataField(MapList{map}, field_index, field_type);
+    *access_info = PropertyAccessInfo::DataField(
+        MapList{map}, field_index, field_representation, field_type);
     return true;
   }
   return false;
@@ -468,35 +486,43 @@
     // TODO(bmeurer): Handle transition to data constant?
     if (details.type() != DATA) return false;
     int const index = details.field_index();
-    Representation field_representation = details.representation();
+    Representation details_representation = details.representation();
     FieldIndex field_index = FieldIndex::ForPropertyIndex(
-        *transition_map, index, field_representation.IsDouble());
-    Type* field_type = Type::Tagged();
-    if (field_representation.IsSmi()) {
+        *transition_map, index, details_representation.IsDouble());
+    Type* field_type = Type::NonInternal();
+    MaybeHandle<Map> field_map;
+    MachineRepresentation field_representation = MachineRepresentation::kTagged;
+    if (details_representation.IsSmi()) {
       field_type = type_cache_.kSmi;
-    } else if (field_representation.IsDouble()) {
+      field_representation = MachineRepresentation::kTaggedSigned;
+    } else if (details_representation.IsDouble()) {
       field_type = type_cache_.kFloat64;
-    } else if (field_representation.IsHeapObject()) {
+      field_representation = MachineRepresentation::kFloat64;
+    } else if (details_representation.IsHeapObject()) {
       // Extract the field type from the property details (make sure its
       // representation is TaggedPointer to reflect the heap object case).
-      field_type = Type::Intersect(
-          transition_map->instance_descriptors()->GetFieldType(number)->Convert(
-              zone()),
-          Type::TaggedPointer(), zone());
-      if (field_type->Is(Type::None())) {
+      field_representation = MachineRepresentation::kTaggedPointer;
+      Handle<FieldType> descriptors_field_type(
+          transition_map->instance_descriptors()->GetFieldType(number),
+          isolate());
+      if (descriptors_field_type->IsNone()) {
         // Store is not safe if the field type was cleared.
         return false;
-      } else if (!Type::Any()->Is(field_type)) {
+      } else if (descriptors_field_type->IsClass()) {
         // Add proper code dependencies in case of stable field map(s).
         Handle<Map> field_owner_map(transition_map->FindFieldOwner(number),
                                     isolate());
         dependencies()->AssumeFieldType(field_owner_map);
+
+        // Remember the field map, and try to infer a useful type.
+        field_type = Type::For(descriptors_field_type->AsClass());
+        field_map = descriptors_field_type->AsClass();
       }
-      DCHECK(field_type->Is(Type::TaggedPointer()));
     }
     dependencies()->AssumeMapNotDeprecated(transition_map);
     *access_info = PropertyAccessInfo::DataField(
-        MapList{map}, field_index, field_type, holder, transition_map);
+        MapList{map}, field_index, field_representation, field_type, field_map,
+        holder, transition_map);
     return true;
   }
   return false;
diff --git a/src/compiler/access-info.h b/src/compiler/access-info.h
index daa8722..ac186fb 100644
--- a/src/compiler/access-info.h
+++ b/src/compiler/access-info.h
@@ -8,8 +8,9 @@
 #include <iosfwd>
 
 #include "src/field-index.h"
+#include "src/machine-type.h"
 #include "src/objects.h"
-#include "src/zone-containers.h"
+#include "src/zone/zone-containers.h"
 
 namespace v8 {
 namespace internal {
@@ -17,10 +18,13 @@
 // Forward declarations.
 class CompilationDependencies;
 class Factory;
-class TypeCache;
 
 namespace compiler {
 
+// Forward declarations.
+class Type;
+class TypeCache;
+
 // Whether we are loading a property or storing to a property.
 enum class AccessMode { kLoad, kStore };
 
@@ -66,7 +70,9 @@
                                          Handle<Object> constant,
                                          MaybeHandle<JSObject> holder);
   static PropertyAccessInfo DataField(
-      MapList const& receiver_maps, FieldIndex field_index, Type* field_type,
+      MapList const& receiver_maps, FieldIndex field_index,
+      MachineRepresentation field_representation, Type* field_type,
+      MaybeHandle<Map> field_map = MaybeHandle<Map>(),
       MaybeHandle<JSObject> holder = MaybeHandle<JSObject>(),
       MaybeHandle<Map> transition_map = MaybeHandle<Map>());
   static PropertyAccessInfo AccessorConstant(MapList const& receiver_maps,
@@ -90,6 +96,10 @@
   Handle<Object> constant() const { return constant_; }
   FieldIndex field_index() const { return field_index_; }
   Type* field_type() const { return field_type_; }
+  MachineRepresentation field_representation() const {
+    return field_representation_;
+  }
+  MaybeHandle<Map> field_map() const { return field_map_; }
   MapList const& receiver_maps() const { return receiver_maps_; }
 
  private:
@@ -99,7 +109,9 @@
                      Handle<Object> constant, MapList const& receiver_maps);
   PropertyAccessInfo(MaybeHandle<JSObject> holder,
                      MaybeHandle<Map> transition_map, FieldIndex field_index,
-                     Type* field_type, MapList const& receiver_maps);
+                     MachineRepresentation field_representation,
+                     Type* field_type, MaybeHandle<Map> field_map,
+                     MapList const& receiver_maps);
 
   Kind kind_;
   MapList receiver_maps_;
@@ -107,7 +119,9 @@
   MaybeHandle<Map> transition_map_;
   MaybeHandle<JSObject> holder_;
   FieldIndex field_index_;
+  MachineRepresentation field_representation_;
   Type* field_type_;
+  MaybeHandle<Map> field_map_;
 };
 
 
diff --git a/src/compiler/all-nodes.cc b/src/compiler/all-nodes.cc
index 8040897..eada0cf 100644
--- a/src/compiler/all-nodes.cc
+++ b/src/compiler/all-nodes.cc
@@ -14,13 +14,26 @@
     : reachable(local_zone),
       is_reachable_(graph->NodeCount(), false, local_zone),
       only_inputs_(only_inputs) {
-  Node* end = graph->end();
+  Mark(local_zone, graph->end(), graph);
+}
+
+AllNodes::AllNodes(Zone* local_zone, Node* end, const Graph* graph,
+                   bool only_inputs)
+    : reachable(local_zone),
+      is_reachable_(graph->NodeCount(), false, local_zone),
+      only_inputs_(only_inputs) {
+  Mark(local_zone, end, graph);
+}
+
+void AllNodes::Mark(Zone* local_zone, Node* end, const Graph* graph) {
+  DCHECK_LT(end->id(), graph->NodeCount());
   is_reachable_[end->id()] = true;
   reachable.push_back(end);
-  // Find all nodes reachable from end.
+  // Find all nodes reachable from {end}.
   for (size_t i = 0; i < reachable.size(); i++) {
-    for (Node* input : reachable[i]->inputs()) {
-      if (input == nullptr || input->id() >= graph->NodeCount()) {
+    for (Node* const input : reachable[i]->inputs()) {
+      if (input == nullptr) {
+        // TODO(titzer): print a warning.
         continue;
       }
       if (!is_reachable_[input->id()]) {
@@ -28,7 +41,7 @@
         reachable.push_back(input);
       }
     }
-    if (!only_inputs) {
+    if (!only_inputs_) {
       for (Node* use : reachable[i]->uses()) {
         if (use == nullptr || use->id() >= graph->NodeCount()) {
           continue;
diff --git a/src/compiler/all-nodes.h b/src/compiler/all-nodes.h
index 36f02e9..7c70bf7 100644
--- a/src/compiler/all-nodes.h
+++ b/src/compiler/all-nodes.h
@@ -6,7 +6,7 @@
 #define V8_COMPILER_ALL_NODES_H_
 
 #include "src/compiler/node.h"
-#include "src/zone-containers.h"
+#include "src/zone/zone-containers.h"
 
 namespace v8 {
 namespace internal {
@@ -16,9 +16,13 @@
 // from end.
 class AllNodes {
  public:
-  // Constructor. Traverses the graph and builds the {reachable} sets. When
-  // {only_inputs} is true, find the nodes reachable through input edges;
-  // these are all live nodes.
+  // Constructor. Traverses the graph and builds the {reachable} set of nodes
+  // reachable from {end}. When {only_inputs} is true, find the nodes
+  // reachable through input edges; these are all live nodes.
+  AllNodes(Zone* local_zone, Node* end, const Graph* graph,
+           bool only_inputs = true);
+  // Constructor. Traverses the graph and builds the {reachable} set of nodes
+  // reachable from the End node.
   AllNodes(Zone* local_zone, const Graph* graph, bool only_inputs = true);
 
   bool IsLive(Node* node) {
@@ -35,6 +39,8 @@
   NodeVector reachable;  // Nodes reachable from end.
 
  private:
+  void Mark(Zone* local_zone, Node* end, const Graph* graph);
+
   BoolVector is_reachable_;
   const bool only_inputs_;
 };
diff --git a/src/compiler/arm/code-generator-arm.cc b/src/compiler/arm/code-generator-arm.cc
index 4ae282a..dbe1828 100644
--- a/src/compiler/arm/code-generator-arm.cc
+++ b/src/compiler/arm/code-generator-arm.cc
@@ -5,7 +5,7 @@
 #include "src/compiler/code-generator.h"
 
 #include "src/arm/macro-assembler-arm.h"
-#include "src/ast/scopes.h"
+#include "src/compilation-info.h"
 #include "src/compiler/code-generator-impl.h"
 #include "src/compiler/gap-resolver.h"
 #include "src/compiler/node-matchers.h"
@@ -271,6 +271,37 @@
   UnwindingInfoWriter* const unwinding_info_writer_;
 };
 
+template <typename T>
+class OutOfLineFloatMin final : public OutOfLineCode {
+ public:
+  OutOfLineFloatMin(CodeGenerator* gen, T result, T left, T right)
+      : OutOfLineCode(gen), result_(result), left_(left), right_(right) {}
+
+  void Generate() final { __ FloatMinOutOfLine(result_, left_, right_); }
+
+ private:
+  T const result_;
+  T const left_;
+  T const right_;
+};
+typedef OutOfLineFloatMin<SwVfpRegister> OutOfLineFloat32Min;
+typedef OutOfLineFloatMin<DwVfpRegister> OutOfLineFloat64Min;
+
+template <typename T>
+class OutOfLineFloatMax final : public OutOfLineCode {
+ public:
+  OutOfLineFloatMax(CodeGenerator* gen, T result, T left, T right)
+      : OutOfLineCode(gen), result_(result), left_(left), right_(right) {}
+
+  void Generate() final { __ FloatMaxOutOfLine(result_, left_, right_); }
+
+ private:
+  T const result_;
+  T const left_;
+  T const right_;
+};
+typedef OutOfLineFloatMax<SwVfpRegister> OutOfLineFloat32Max;
+typedef OutOfLineFloatMax<DwVfpRegister> OutOfLineFloat64Max;
 
 Condition FlagsConditionToCondition(FlagsCondition condition) {
   switch (condition) {
@@ -707,9 +738,6 @@
     case kArchDebugBreak:
       __ stop("kArchDebugBreak");
       break;
-    case kArchImpossible:
-      __ Abort(kConversionFromImpossibleValue);
-      break;
     case kArchComment: {
       Address comment_string = i.InputExternalReference(0).address();
       __ RecordComment(reinterpret_cast<const char*>(comment_string));
@@ -725,8 +753,8 @@
           BuildTranslation(instr, -1, 0, OutputFrameStateCombine::Ignore());
       Deoptimizer::BailoutType bailout_type =
           Deoptimizer::BailoutType(MiscField::decode(instr->opcode()));
-      CodeGenResult result =
-          AssembleDeoptimizerCall(deopt_state_id, bailout_type);
+      CodeGenResult result = AssembleDeoptimizerCall(
+          deopt_state_id, bailout_type, current_source_position_);
       if (result != kSuccess) return result;
       break;
     }
@@ -1199,33 +1227,51 @@
     case kArmVnegF64:
       __ vneg(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
       break;
-    case kArmVrintmF32:
+    case kArmVrintmF32: {
+      CpuFeatureScope scope(masm(), ARMv8);
       __ vrintm(i.OutputFloat32Register(), i.InputFloat32Register(0));
       break;
-    case kArmVrintmF64:
+    }
+    case kArmVrintmF64: {
+      CpuFeatureScope scope(masm(), ARMv8);
       __ vrintm(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
       break;
-    case kArmVrintpF32:
+    }
+    case kArmVrintpF32: {
+      CpuFeatureScope scope(masm(), ARMv8);
       __ vrintp(i.OutputFloat32Register(), i.InputFloat32Register(0));
       break;
-    case kArmVrintpF64:
+    }
+    case kArmVrintpF64: {
+      CpuFeatureScope scope(masm(), ARMv8);
       __ vrintp(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
       break;
-    case kArmVrintzF32:
+    }
+    case kArmVrintzF32: {
+      CpuFeatureScope scope(masm(), ARMv8);
       __ vrintz(i.OutputFloat32Register(), i.InputFloat32Register(0));
       break;
-    case kArmVrintzF64:
+    }
+    case kArmVrintzF64: {
+      CpuFeatureScope scope(masm(), ARMv8);
       __ vrintz(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
       break;
-    case kArmVrintaF64:
+    }
+    case kArmVrintaF64: {
+      CpuFeatureScope scope(masm(), ARMv8);
       __ vrinta(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
       break;
-    case kArmVrintnF32:
+    }
+    case kArmVrintnF32: {
+      CpuFeatureScope scope(masm(), ARMv8);
       __ vrintn(i.OutputFloat32Register(), i.InputFloat32Register(0));
       break;
-    case kArmVrintnF64:
+    }
+    case kArmVrintnF64: {
+      CpuFeatureScope scope(masm(), ARMv8);
       __ vrintn(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
       break;
+    }
     case kArmVcvtF32F64: {
       __ vcvt_f32_f64(i.OutputFloat32Register(), i.InputDoubleRegister(0));
       DCHECK_EQ(LeaveCC, i.OutputSBit());
@@ -1380,145 +1426,59 @@
       DCHECK_EQ(LeaveCC, i.OutputSBit());
       break;
     case kArmFloat32Max: {
-      FloatRegister left_reg = i.InputFloat32Register(0);
-      FloatRegister right_reg = i.InputFloat32Register(1);
-      FloatRegister result_reg = i.OutputFloat32Register();
-      Label result_is_nan, return_left, return_right, check_zero, done;
-      __ VFPCompareAndSetFlags(left_reg, right_reg);
-      __ b(mi, &return_right);
-      __ b(gt, &return_left);
-      __ b(vs, &result_is_nan);
-      // Left equals right => check for -0.
-      __ VFPCompareAndSetFlags(left_reg, 0.0);
-      if (left_reg.is(result_reg) || right_reg.is(result_reg)) {
-        __ b(ne, &done);  // left == right != 0.
+      SwVfpRegister result = i.OutputFloat32Register();
+      SwVfpRegister left = i.InputFloat32Register(0);
+      SwVfpRegister right = i.InputFloat32Register(1);
+      if (left.is(right)) {
+        __ Move(result, left);
       } else {
-        __ b(ne, &return_left);  // left == right != 0.
+        auto ool = new (zone()) OutOfLineFloat32Max(this, result, left, right);
+        __ FloatMax(result, left, right, ool->entry());
+        __ bind(ool->exit());
       }
-      // At this point, both left and right are either 0 or -0.
-      // Since we operate on +0 and/or -0, vadd and vand have the same effect;
-      // the decision for vadd is easy because vand is a NEON instruction.
-      __ vadd(result_reg, left_reg, right_reg);
-      __ b(&done);
-      __ bind(&result_is_nan);
-      __ vadd(result_reg, left_reg, right_reg);
-      __ b(&done);
-      __ bind(&return_right);
-      __ Move(result_reg, right_reg);
-      if (!left_reg.is(result_reg)) __ b(&done);
-      __ bind(&return_left);
-      __ Move(result_reg, left_reg);
-      __ bind(&done);
+      DCHECK_EQ(LeaveCC, i.OutputSBit());
       break;
     }
     case kArmFloat64Max: {
-      DwVfpRegister left_reg = i.InputDoubleRegister(0);
-      DwVfpRegister right_reg = i.InputDoubleRegister(1);
-      DwVfpRegister result_reg = i.OutputDoubleRegister();
-      Label result_is_nan, return_left, return_right, check_zero, done;
-      __ VFPCompareAndSetFlags(left_reg, right_reg);
-      __ b(mi, &return_right);
-      __ b(gt, &return_left);
-      __ b(vs, &result_is_nan);
-      // Left equals right => check for -0.
-      __ VFPCompareAndSetFlags(left_reg, 0.0);
-      if (left_reg.is(result_reg) || right_reg.is(result_reg)) {
-        __ b(ne, &done);  // left == right != 0.
+      DwVfpRegister result = i.OutputDoubleRegister();
+      DwVfpRegister left = i.InputDoubleRegister(0);
+      DwVfpRegister right = i.InputDoubleRegister(1);
+      if (left.is(right)) {
+        __ Move(result, left);
       } else {
-        __ b(ne, &return_left);  // left == right != 0.
+        auto ool = new (zone()) OutOfLineFloat64Max(this, result, left, right);
+        __ FloatMax(result, left, right, ool->entry());
+        __ bind(ool->exit());
       }
-      // At this point, both left and right are either 0 or -0.
-      // Since we operate on +0 and/or -0, vadd and vand have the same effect;
-      // the decision for vadd is easy because vand is a NEON instruction.
-      __ vadd(result_reg, left_reg, right_reg);
-      __ b(&done);
-      __ bind(&result_is_nan);
-      __ vadd(result_reg, left_reg, right_reg);
-      __ b(&done);
-      __ bind(&return_right);
-      __ Move(result_reg, right_reg);
-      if (!left_reg.is(result_reg)) __ b(&done);
-      __ bind(&return_left);
-      __ Move(result_reg, left_reg);
-      __ bind(&done);
+      DCHECK_EQ(LeaveCC, i.OutputSBit());
       break;
     }
     case kArmFloat32Min: {
-      FloatRegister left_reg = i.InputFloat32Register(0);
-      FloatRegister right_reg = i.InputFloat32Register(1);
-      FloatRegister result_reg = i.OutputFloat32Register();
-      Label result_is_nan, return_left, return_right, check_zero, done;
-      __ VFPCompareAndSetFlags(left_reg, right_reg);
-      __ b(mi, &return_left);
-      __ b(gt, &return_right);
-      __ b(vs, &result_is_nan);
-      // Left equals right => check for -0.
-      __ VFPCompareAndSetFlags(left_reg, 0.0);
-      if (left_reg.is(result_reg) || right_reg.is(result_reg)) {
-        __ b(ne, &done);  // left == right != 0.
+      SwVfpRegister result = i.OutputFloat32Register();
+      SwVfpRegister left = i.InputFloat32Register(0);
+      SwVfpRegister right = i.InputFloat32Register(1);
+      if (left.is(right)) {
+        __ Move(result, left);
       } else {
-        __ b(ne, &return_left);  // left == right != 0.
+        auto ool = new (zone()) OutOfLineFloat32Min(this, result, left, right);
+        __ FloatMin(result, left, right, ool->entry());
+        __ bind(ool->exit());
       }
-      // At this point, both left and right are either 0 or -0.
-      // We could use a single 'vorr' instruction here if we had NEON support.
-      // The algorithm is: -((-L) + (-R)), which in case of L and R being
-      // different registers is most efficiently expressed as -((-L) - R).
-      __ vneg(left_reg, left_reg);
-      if (left_reg.is(right_reg)) {
-        __ vadd(result_reg, left_reg, right_reg);
-      } else {
-        __ vsub(result_reg, left_reg, right_reg);
-      }
-      __ vneg(result_reg, result_reg);
-      __ b(&done);
-      __ bind(&result_is_nan);
-      __ vadd(result_reg, left_reg, right_reg);
-      __ b(&done);
-      __ bind(&return_right);
-      __ Move(result_reg, right_reg);
-      if (!left_reg.is(result_reg)) __ b(&done);
-      __ bind(&return_left);
-      __ Move(result_reg, left_reg);
-      __ bind(&done);
+      DCHECK_EQ(LeaveCC, i.OutputSBit());
       break;
     }
     case kArmFloat64Min: {
-      DwVfpRegister left_reg = i.InputDoubleRegister(0);
-      DwVfpRegister right_reg = i.InputDoubleRegister(1);
-      DwVfpRegister result_reg = i.OutputDoubleRegister();
-      Label result_is_nan, return_left, return_right, check_zero, done;
-      __ VFPCompareAndSetFlags(left_reg, right_reg);
-      __ b(mi, &return_left);
-      __ b(gt, &return_right);
-      __ b(vs, &result_is_nan);
-      // Left equals right => check for -0.
-      __ VFPCompareAndSetFlags(left_reg, 0.0);
-      if (left_reg.is(result_reg) || right_reg.is(result_reg)) {
-        __ b(ne, &done);  // left == right != 0.
+      DwVfpRegister result = i.OutputDoubleRegister();
+      DwVfpRegister left = i.InputDoubleRegister(0);
+      DwVfpRegister right = i.InputDoubleRegister(1);
+      if (left.is(right)) {
+        __ Move(result, left);
       } else {
-        __ b(ne, &return_left);  // left == right != 0.
+        auto ool = new (zone()) OutOfLineFloat64Min(this, result, left, right);
+        __ FloatMin(result, left, right, ool->entry());
+        __ bind(ool->exit());
       }
-      // At this point, both left and right are either 0 or -0.
-      // We could use a single 'vorr' instruction here if we had NEON support.
-      // The algorithm is: -((-L) + (-R)), which in case of L and R being
-      // different registers is most efficiently expressed as -((-L) - R).
-      __ vneg(left_reg, left_reg);
-      if (left_reg.is(right_reg)) {
-        __ vadd(result_reg, left_reg, right_reg);
-      } else {
-        __ vsub(result_reg, left_reg, right_reg);
-      }
-      __ vneg(result_reg, result_reg);
-      __ b(&done);
-      __ bind(&result_is_nan);
-      __ vadd(result_reg, left_reg, right_reg);
-      __ b(&done);
-      __ bind(&return_right);
-      __ Move(result_reg, right_reg);
-      if (!left_reg.is(result_reg)) __ b(&done);
-      __ bind(&return_left);
-      __ Move(result_reg, left_reg);
-      __ bind(&done);
+      DCHECK_EQ(LeaveCC, i.OutputSBit());
       break;
     }
     case kArmFloat64SilenceNaN: {
@@ -1679,7 +1639,8 @@
 }
 
 CodeGenerator::CodeGenResult CodeGenerator::AssembleDeoptimizerCall(
-    int deoptimization_id, Deoptimizer::BailoutType bailout_type) {
+    int deoptimization_id, Deoptimizer::BailoutType bailout_type,
+    SourcePosition pos) {
   Address deopt_entry = Deoptimizer::GetDeoptimizationEntry(
       isolate(), deoptimization_id, bailout_type);
   // TODO(turbofan): We should be able to generate better code by sharing the
@@ -1688,7 +1649,7 @@
   if (deopt_entry == nullptr) return kTooManyDeoptimizationBailouts;
   DeoptimizeReason deoptimization_reason =
       GetDeoptimizationReason(deoptimization_id);
-  __ RecordDeoptReason(deoptimization_reason, 0, deoptimization_id);
+  __ RecordDeoptReason(deoptimization_reason, pos.raw(), deoptimization_id);
   __ Call(deopt_entry, RelocInfo::RUNTIME_ENTRY);
   __ CheckConstPool(false, false);
   return kSuccess;
@@ -1967,33 +1928,31 @@
     __ vstr(temp_1, src);
   } else if (source->IsFPRegister()) {
     LowDwVfpRegister temp = kScratchDoubleReg;
-      DwVfpRegister src = g.ToDoubleRegister(source);
-      if (destination->IsFPRegister()) {
-        DwVfpRegister dst = g.ToDoubleRegister(destination);
-        __ Move(temp, src);
-        __ Move(src, dst);
-        __ Move(dst, temp);
-      } else {
-        DCHECK(destination->IsFPStackSlot());
-        MemOperand dst = g.ToMemOperand(destination);
-        __ Move(temp, src);
-        __ vldr(src, dst);
-        __ vstr(temp, dst);
-      }
+    DwVfpRegister src = g.ToDoubleRegister(source);
+    if (destination->IsFPRegister()) {
+      DwVfpRegister dst = g.ToDoubleRegister(destination);
+      __ vswp(src, dst);
+    } else {
+      DCHECK(destination->IsFPStackSlot());
+      MemOperand dst = g.ToMemOperand(destination);
+      __ Move(temp, src);
+      __ vldr(src, dst);
+      __ vstr(temp, dst);
+    }
   } else if (source->IsFPStackSlot()) {
     DCHECK(destination->IsFPStackSlot());
     Register temp_0 = kScratchReg;
     LowDwVfpRegister temp_1 = kScratchDoubleReg;
     MemOperand src0 = g.ToMemOperand(source);
     MemOperand dst0 = g.ToMemOperand(destination);
-      MemOperand src1(src0.rn(), src0.offset() + kPointerSize);
-      MemOperand dst1(dst0.rn(), dst0.offset() + kPointerSize);
-      __ vldr(temp_1, dst0);  // Save destination in temp_1.
-      __ ldr(temp_0, src0);   // Then use temp_0 to copy source to destination.
-      __ str(temp_0, dst0);
-      __ ldr(temp_0, src1);
-      __ str(temp_0, dst1);
-      __ vstr(temp_1, src0);
+    MemOperand src1(src0.rn(), src0.offset() + kPointerSize);
+    MemOperand dst1(dst0.rn(), dst0.offset() + kPointerSize);
+    __ vldr(temp_1, dst0);  // Save destination in temp_1.
+    __ ldr(temp_0, src0);   // Then use temp_0 to copy source to destination.
+    __ str(temp_0, dst0);
+    __ ldr(temp_0, src1);
+    __ str(temp_0, dst1);
+    __ vstr(temp_1, src0);
   } else {
     // No other combinations are possible.
     UNREACHABLE();
diff --git a/src/compiler/arm/instruction-selector-arm.cc b/src/compiler/arm/instruction-selector-arm.cc
index 4b0b6af..ceb5b25 100644
--- a/src/compiler/arm/instruction-selector-arm.cc
+++ b/src/compiler/arm/instruction-selector-arm.cc
@@ -252,14 +252,7 @@
     inputs[input_count++] = g.Label(cont->false_block());
   }
 
-  if (cont->IsDeoptimize()) {
-    // If we can deoptimize as a result of the binop, we need to make sure that
-    // the deopt inputs are not overwritten by the binop result. One way
-    // to achieve that is to declare the output register as same-as-first.
-    outputs[output_count++] = g.DefineSameAsFirst(node);
-  } else {
-    outputs[output_count++] = g.DefineAsRegister(node);
-  }
+  outputs[output_count++] = g.DefineAsRegister(node);
   if (cont->IsSet()) {
     outputs[output_count++] = g.DefineAsRegister(cont->result());
   }
@@ -419,6 +412,10 @@
   EmitLoad(this, opcode, &output, base, index);
 }
 
+void InstructionSelector::VisitProtectedLoad(Node* node) {
+  // TODO(eholk)
+  UNIMPLEMENTED();
+}
 
 void InstructionSelector::VisitStore(Node* node) {
   ArmOperandGenerator g(this);
@@ -431,7 +428,7 @@
   MachineRepresentation rep = store_rep.representation();
 
   if (write_barrier_kind != kNoWriteBarrier) {
-    DCHECK_EQ(MachineRepresentation::kTagged, rep);
+    DCHECK(CanBeTaggedPointer(rep));
     AddressingMode addressing_mode;
     InstructionOperand inputs[3];
     size_t input_count = 0;
@@ -1516,46 +1513,55 @@
 
 
 void InstructionSelector::VisitFloat32RoundDown(Node* node) {
+  DCHECK(CpuFeatures::IsSupported(ARMv8));
   VisitRR(this, kArmVrintmF32, node);
 }
 
 
 void InstructionSelector::VisitFloat64RoundDown(Node* node) {
+  DCHECK(CpuFeatures::IsSupported(ARMv8));
   VisitRR(this, kArmVrintmF64, node);
 }
 
 
 void InstructionSelector::VisitFloat32RoundUp(Node* node) {
+  DCHECK(CpuFeatures::IsSupported(ARMv8));
   VisitRR(this, kArmVrintpF32, node);
 }
 
 
 void InstructionSelector::VisitFloat64RoundUp(Node* node) {
+  DCHECK(CpuFeatures::IsSupported(ARMv8));
   VisitRR(this, kArmVrintpF64, node);
 }
 
 
 void InstructionSelector::VisitFloat32RoundTruncate(Node* node) {
+  DCHECK(CpuFeatures::IsSupported(ARMv8));
   VisitRR(this, kArmVrintzF32, node);
 }
 
 
 void InstructionSelector::VisitFloat64RoundTruncate(Node* node) {
+  DCHECK(CpuFeatures::IsSupported(ARMv8));
   VisitRR(this, kArmVrintzF64, node);
 }
 
 
 void InstructionSelector::VisitFloat64RoundTiesAway(Node* node) {
+  DCHECK(CpuFeatures::IsSupported(ARMv8));
   VisitRR(this, kArmVrintaF64, node);
 }
 
 
 void InstructionSelector::VisitFloat32RoundTiesEven(Node* node) {
+  DCHECK(CpuFeatures::IsSupported(ARMv8));
   VisitRR(this, kArmVrintnF32, node);
 }
 
 
 void InstructionSelector::VisitFloat64RoundTiesEven(Node* node) {
+  DCHECK(CpuFeatures::IsSupported(ARMv8));
   VisitRR(this, kArmVrintnF64, node);
 }
 
@@ -1965,6 +1971,10 @@
     break;
   }
 
+  if (user->opcode() == IrOpcode::kWord32Equal) {
+    return VisitWordCompare(selector, user, cont);
+  }
+
   // Continuation could not be combined with a compare, emit compare against 0.
   ArmOperandGenerator g(selector);
   InstructionCode const opcode =
diff --git a/src/compiler/arm64/code-generator-arm64.cc b/src/compiler/arm64/code-generator-arm64.cc
index 35f7e43..f543b18 100644
--- a/src/compiler/arm64/code-generator-arm64.cc
+++ b/src/compiler/arm64/code-generator-arm64.cc
@@ -6,7 +6,7 @@
 
 #include "src/arm64/frames-arm64.h"
 #include "src/arm64/macro-assembler-arm64.h"
-#include "src/ast/scopes.h"
+#include "src/compilation-info.h"
 #include "src/compiler/code-generator-impl.h"
 #include "src/compiler/gap-resolver.h"
 #include "src/compiler/node-matchers.h"
@@ -766,9 +766,6 @@
     case kArchDebugBreak:
       __ Debug("kArchDebugBreak", 0, BREAK);
       break;
-    case kArchImpossible:
-      __ Abort(kConversionFromImpossibleValue);
-      break;
     case kArchComment: {
       Address comment_string = i.InputExternalReference(0).address();
       __ RecordComment(reinterpret_cast<const char*>(comment_string));
@@ -783,8 +780,8 @@
           BuildTranslation(instr, -1, 0, OutputFrameStateCombine::Ignore());
       Deoptimizer::BailoutType bailout_type =
           Deoptimizer::BailoutType(MiscField::decode(instr->opcode()));
-      CodeGenResult result =
-          AssembleDeoptimizerCall(deopt_state_id, bailout_type);
+      CodeGenResult result = AssembleDeoptimizerCall(
+          deopt_state_id, bailout_type, current_source_position_);
       if (result != kSuccess) return result;
       break;
     }
@@ -1755,13 +1752,14 @@
 }
 
 CodeGenerator::CodeGenResult CodeGenerator::AssembleDeoptimizerCall(
-    int deoptimization_id, Deoptimizer::BailoutType bailout_type) {
+    int deoptimization_id, Deoptimizer::BailoutType bailout_type,
+    SourcePosition pos) {
   Address deopt_entry = Deoptimizer::GetDeoptimizationEntry(
       isolate(), deoptimization_id, bailout_type);
   if (deopt_entry == nullptr) return kTooManyDeoptimizationBailouts;
   DeoptimizeReason deoptimization_reason =
       GetDeoptimizationReason(deoptimization_id);
-  __ RecordDeoptReason(deoptimization_reason, 0, deoptimization_id);
+  __ RecordDeoptReason(deoptimization_reason, pos.raw(), deoptimization_id);
   __ Call(deopt_entry, RelocInfo::RUNTIME_ENTRY);
   return kSuccess;
 }
@@ -1956,10 +1954,14 @@
         __ Fmov(dst, src.ToFloat32());
       } else {
         DCHECK(destination->IsFPStackSlot());
-        UseScratchRegisterScope scope(masm());
-        FPRegister temp = scope.AcquireS();
-        __ Fmov(temp, src.ToFloat32());
-        __ Str(temp, g.ToMemOperand(destination, masm()));
+        if (bit_cast<int32_t>(src.ToFloat32()) == 0) {
+          __ Str(wzr, g.ToMemOperand(destination, masm()));
+        } else {
+          UseScratchRegisterScope scope(masm());
+          FPRegister temp = scope.AcquireS();
+          __ Fmov(temp, src.ToFloat32());
+          __ Str(temp, g.ToMemOperand(destination, masm()));
+        }
       }
     } else {
       DCHECK_EQ(Constant::kFloat64, src.type());
@@ -1968,10 +1970,14 @@
         __ Fmov(dst, src.ToFloat64());
       } else {
         DCHECK(destination->IsFPStackSlot());
-        UseScratchRegisterScope scope(masm());
-        FPRegister temp = scope.AcquireD();
-        __ Fmov(temp, src.ToFloat64());
-        __ Str(temp, g.ToMemOperand(destination, masm()));
+        if (bit_cast<int64_t>(src.ToFloat64()) == 0) {
+          __ Str(xzr, g.ToMemOperand(destination, masm()));
+        } else {
+          UseScratchRegisterScope scope(masm());
+          FPRegister temp = scope.AcquireD();
+          __ Fmov(temp, src.ToFloat64());
+          __ Str(temp, g.ToMemOperand(destination, masm()));
+        }
       }
     }
   } else if (source->IsFPRegister()) {
diff --git a/src/compiler/arm64/instruction-selector-arm64.cc b/src/compiler/arm64/instruction-selector-arm64.cc
index 9bc5385..da27be8 100644
--- a/src/compiler/arm64/instruction-selector-arm64.cc
+++ b/src/compiler/arm64/instruction-selector-arm64.cc
@@ -434,24 +434,18 @@
   } else if (TryMatchAnyShift(selector, node, right_node, &opcode,
                               !is_add_sub)) {
     Matcher m_shift(right_node);
-    inputs[input_count++] = cont->IsDeoptimize()
-                                ? g.UseRegister(left_node)
-                                : g.UseRegisterOrImmediateZero(left_node);
+    inputs[input_count++] = g.UseRegisterOrImmediateZero(left_node);
     inputs[input_count++] = g.UseRegister(m_shift.left().node());
     inputs[input_count++] = g.UseImmediate(m_shift.right().node());
   } else if (can_commute && TryMatchAnyShift(selector, node, left_node, &opcode,
                                              !is_add_sub)) {
     if (must_commute_cond) cont->Commute();
     Matcher m_shift(left_node);
-    inputs[input_count++] = cont->IsDeoptimize()
-                                ? g.UseRegister(right_node)
-                                : g.UseRegisterOrImmediateZero(right_node);
+    inputs[input_count++] = g.UseRegisterOrImmediateZero(right_node);
     inputs[input_count++] = g.UseRegister(m_shift.left().node());
     inputs[input_count++] = g.UseImmediate(m_shift.right().node());
   } else {
-    inputs[input_count++] = cont->IsDeoptimize()
-                                ? g.UseRegister(left_node)
-                                : g.UseRegisterOrImmediateZero(left_node);
+    inputs[input_count++] = g.UseRegisterOrImmediateZero(left_node);
     inputs[input_count++] = g.UseRegister(right_node);
   }
 
@@ -461,14 +455,7 @@
   }
 
   if (!IsComparisonField::decode(properties)) {
-    if (cont->IsDeoptimize()) {
-      // If we can deoptimize as a result of the binop, we need to make sure
-      // that the deopt inputs are not overwritten by the binop result. One way
-      // to achieve that is to declare the output register as same-as-first.
-      outputs[output_count++] = g.DefineSameAsFirst(node);
-    } else {
-      outputs[output_count++] = g.DefineAsRegister(node);
-    }
+    outputs[output_count++] = g.DefineAsRegister(node);
   }
 
   if (cont->IsSet()) {
@@ -606,6 +593,10 @@
   EmitLoad(this, node, opcode, immediate_mode, rep);
 }
 
+void InstructionSelector::VisitProtectedLoad(Node* node) {
+  // TODO(eholk)
+  UNIMPLEMENTED();
+}
 
 void InstructionSelector::VisitStore(Node* node) {
   Arm64OperandGenerator g(this);
@@ -619,7 +610,7 @@
 
   // TODO(arm64): I guess this could be done in a better way.
   if (write_barrier_kind != kNoWriteBarrier) {
-    DCHECK_EQ(MachineRepresentation::kTagged, rep);
+    DCHECK(CanBeTaggedPointer(rep));
     AddressingMode addressing_mode;
     InstructionOperand inputs[3];
     size_t input_count = 0;
@@ -2128,11 +2119,101 @@
   }
 }
 
+// Map {cond} to kEqual or kNotEqual, so that we can select
+// either TBZ or TBNZ when generating code for:
+// (x cmp 0), b.{cond}
+FlagsCondition MapForTbz(FlagsCondition cond) {
+  switch (cond) {
+    case kSignedLessThan:  // generate TBNZ
+      return kNotEqual;
+    case kSignedGreaterThanOrEqual:  // generate TBZ
+      return kEqual;
+    default:
+      UNREACHABLE();
+      return cond;
+  }
+}
+
+// Map {cond} to kEqual or kNotEqual, so that we can select
+// either CBZ or CBNZ when generating code for:
+// (x cmp 0), b.{cond}
+FlagsCondition MapForCbz(FlagsCondition cond) {
+  switch (cond) {
+    case kEqual:     // generate CBZ
+    case kNotEqual:  // generate CBNZ
+      return cond;
+    case kUnsignedLessThanOrEqual:  // generate CBZ
+      return kEqual;
+    case kUnsignedGreaterThan:  // generate CBNZ
+      return kNotEqual;
+    default:
+      UNREACHABLE();
+      return cond;
+  }
+}
+
+// Try to emit TBZ, TBNZ, CBZ or CBNZ for certain comparisons of {node}
+// against zero, depending on the condition.
+bool TryEmitCbzOrTbz(InstructionSelector* selector, Node* node, Node* user,
+                     FlagsCondition cond, FlagsContinuation* cont) {
+  Int32BinopMatcher m_user(user);
+  USE(m_user);
+  DCHECK(m_user.right().Is(0) || m_user.left().Is(0));
+
+  // Only handle branches.
+  if (!cont->IsBranch()) return false;
+
+  switch (cond) {
+    case kSignedLessThan:
+    case kSignedGreaterThanOrEqual: {
+      Arm64OperandGenerator g(selector);
+      cont->Overwrite(MapForTbz(cond));
+      Int32Matcher m(node);
+      if (m.IsFloat64ExtractHighWord32() && selector->CanCover(user, node)) {
+        // SignedLessThan(Float64ExtractHighWord32(x), 0) and
+        // SignedGreaterThanOrEqual(Float64ExtractHighWord32(x), 0) essentially
+        // check the sign bit of a 64-bit floating point value.
+        InstructionOperand temp = g.TempRegister();
+        selector->Emit(kArm64U64MoveFloat64, temp,
+                       g.UseRegister(node->InputAt(0)));
+        selector->Emit(cont->Encode(kArm64TestAndBranch), g.NoOutput(), temp,
+                       g.TempImmediate(63), g.Label(cont->true_block()),
+                       g.Label(cont->false_block()));
+        return true;
+      }
+      selector->Emit(cont->Encode(kArm64TestAndBranch32), g.NoOutput(),
+                     g.UseRegister(node), g.TempImmediate(31),
+                     g.Label(cont->true_block()), g.Label(cont->false_block()));
+      return true;
+    }
+    case kEqual:
+    case kNotEqual:
+    case kUnsignedLessThanOrEqual:
+    case kUnsignedGreaterThan: {
+      Arm64OperandGenerator g(selector);
+      cont->Overwrite(MapForCbz(cond));
+      selector->Emit(cont->Encode(kArm64CompareAndBranch32), g.NoOutput(),
+                     g.UseRegister(node), g.Label(cont->true_block()),
+                     g.Label(cont->false_block()));
+      return true;
+    }
+    default:
+      return false;
+  }
+}
+
 void VisitWord32Compare(InstructionSelector* selector, Node* node,
                         FlagsContinuation* cont) {
   Int32BinopMatcher m(node);
   ArchOpcode opcode = kArm64Cmp32;
   FlagsCondition cond = cont->condition();
+  if (m.right().Is(0)) {
+    if (TryEmitCbzOrTbz(selector, m.left().node(), node, cond, cont)) return;
+  } else if (m.left().Is(0)) {
+    FlagsCondition commuted_cond = CommuteFlagsCondition(cond);
+    if (TryEmitCbzOrTbz(selector, m.right().node(), node, commuted_cond, cont))
+      return;
+  }
   ImmediateMode immediate_mode = kArithmeticImm;
   if (m.right().Is(0) && (m.left().IsInt32Add() || m.left().IsWord32And())) {
     // Emit flag setting add/and instructions for comparisons against zero.
@@ -2145,14 +2226,18 @@
              (m.right().IsInt32Add() || m.right().IsWord32And())) {
     // Same as above, but we need to commute the condition before we
     // continue with the rest of the checks.
-    cond = CommuteFlagsCondition(cond);
-    if (CanUseFlagSettingBinop(cond)) {
+    FlagsCondition commuted_cond = CommuteFlagsCondition(cond);
+    if (CanUseFlagSettingBinop(commuted_cond)) {
       Node* binop = m.right().node();
       MaybeReplaceCmpZeroWithFlagSettingBinop(selector, &node, binop, &opcode,
-                                              cond, cont, &immediate_mode);
+                                              commuted_cond, cont,
+                                              &immediate_mode);
     }
-  } else if (m.right().IsInt32Sub()) {
+  } else if (m.right().IsInt32Sub() && (cond == kEqual || cond == kNotEqual)) {
     // Select negated compare for comparisons with negated right input.
+    // Only do this for kEqual and kNotEqual, which do not depend on the
+    // C and V flags, as those flags will be different with CMN when the
+    // right-hand side of the original subtraction is INT_MIN.
     Node* sub = m.right().node();
     Int32BinopMatcher msub(sub);
     if (msub.left().Is(0)) {
diff --git a/src/compiler/ast-graph-builder.cc b/src/compiler/ast-graph-builder.cc
index 0f1fb29..b292a2e 100644
--- a/src/compiler/ast-graph-builder.cc
+++ b/src/compiler/ast-graph-builder.cc
@@ -4,7 +4,9 @@
 
 #include "src/compiler/ast-graph-builder.h"
 
+#include "src/ast/compile-time-value.h"
 #include "src/ast/scopes.h"
+#include "src/compilation-info.h"
 #include "src/compiler.h"
 #include "src/compiler/ast-loop-assignment-analyzer.h"
 #include "src/compiler/control-builders.h"
@@ -16,7 +18,6 @@
 #include "src/compiler/operator-properties.h"
 #include "src/compiler/state-values-utils.h"
 #include "src/compiler/type-hint-analyzer.h"
-#include "src/parsing/parser.h"
 
 namespace v8 {
 namespace internal {
@@ -410,14 +411,15 @@
   TryFinallyBuilder* control_;
 };
 
-
 AstGraphBuilder::AstGraphBuilder(Zone* local_zone, CompilationInfo* info,
-                                 JSGraph* jsgraph, LoopAssignmentAnalysis* loop,
+                                 JSGraph* jsgraph, float invocation_frequency,
+                                 LoopAssignmentAnalysis* loop,
                                  TypeHintAnalysis* type_hint_analysis)
     : isolate_(info->isolate()),
       local_zone_(local_zone),
       info_(info),
       jsgraph_(jsgraph),
+      invocation_frequency_(invocation_frequency),
       environment_(nullptr),
       ast_context_(nullptr),
       globals_(0, local_zone),
@@ -535,12 +537,11 @@
   // TODO(mstarzinger): For now we cannot assume that the {this} parameter is
   // not {the_hole}, because for derived classes {this} has a TDZ and the
   // JSConstructStubForDerived magically passes {the_hole} as a receiver.
-  if (scope->has_this_declaration() && scope->receiver()->is_const_mode()) {
+  if (scope->has_this_declaration() && scope->receiver()->mode() == CONST) {
     env.RawParameterBind(0, jsgraph()->TheHoleConstant());
   }
 
-  // Build local context only if there are context allocated variables.
-  if (scope->num_heap_slots() > 0) {
+  if (scope->NeedsContext()) {
     // Push a new inner context scope for the current activation.
     Node* inner_context = BuildLocalActivationContext(GetFunctionContext());
     ContextScope top_context(this, scope, inner_context);
@@ -573,9 +574,8 @@
   BuildArgumentsObject(scope->arguments());
 
   // Build rest arguments array if it is used.
-  int rest_index;
-  Variable* rest_parameter = scope->rest_parameter(&rest_index);
-  BuildRestArgumentsArray(rest_parameter, rest_index);
+  Variable* rest_parameter = scope->rest_parameter();
+  BuildRestArgumentsArray(rest_parameter);
 
   // Build assignment to {.this_function} variable if it is used.
   BuildThisFunctionVariable(scope->this_function_var());
@@ -629,8 +629,7 @@
 // Gets the bailout id just before reading a variable proxy, but only for
 // unallocated variables.
 static BailoutId BeforeId(VariableProxy* proxy) {
-  return proxy->var()->IsUnallocatedOrGlobalSlot() ? proxy->BeforeId()
-                                                   : BailoutId::None();
+  return proxy->var()->IsUnallocated() ? proxy->BeforeId() : BailoutId::None();
 }
 
 static const char* GetDebugParameterName(Zone* zone, DeclarationScope* scope,
@@ -788,6 +787,10 @@
   return env;
 }
 
+AstGraphBuilder::Environment* AstGraphBuilder::Environment::CopyForOsrEntry() {
+  return new (zone())
+      Environment(this, builder_->liveness_analyzer()->NewBlock());
+}
 
 AstGraphBuilder::Environment*
 AstGraphBuilder::Environment::CopyAndShareLiveness() {
@@ -802,8 +805,15 @@
 
 AstGraphBuilder::Environment* AstGraphBuilder::Environment::CopyForLoop(
     BitVector* assigned, bool is_osr) {
-  PrepareForLoop(assigned, is_osr);
-  return CopyAndShareLiveness();
+  PrepareForLoop(assigned);
+  Environment* loop = CopyAndShareLiveness();
+  if (is_osr) {
+    // Create and merge the OSR entry if necessary.
+    Environment* osr_env = CopyForOsrEntry();
+    osr_env->PrepareForOsrEntry();
+    loop->Merge(osr_env);
+  }
+  return loop;
 }
 
 
@@ -1085,7 +1095,6 @@
 void AstGraphBuilder::VisitVariableDeclaration(VariableDeclaration* decl) {
   Variable* variable = decl->proxy()->var();
   switch (variable->location()) {
-    case VariableLocation::GLOBAL:
     case VariableLocation::UNALLOCATED: {
       DCHECK(!variable->binding_needs_init());
       FeedbackVectorSlot slot = decl->proxy()->VariableFeedbackSlot();
@@ -1125,7 +1134,6 @@
 void AstGraphBuilder::VisitFunctionDeclaration(FunctionDeclaration* decl) {
   Variable* variable = decl->proxy()->var();
   switch (variable->location()) {
-    case VariableLocation::GLOBAL:
     case VariableLocation::UNALLOCATED: {
       Handle<SharedFunctionInfo> function = Compiler::GetSharedFunctionInfo(
           decl->fun(), info()->script(), info());
@@ -1240,7 +1248,8 @@
   VisitForValue(stmt->expression());
   Node* value = environment()->Pop();
   Node* object = BuildToObject(value, stmt->ToObjectId());
-  const Operator* op = javascript()->CreateWithContext();
+  Handle<ScopeInfo> scope_info = stmt->scope()->scope_info();
+  const Operator* op = javascript()->CreateWithContext(scope_info);
   Node* context = NewNode(op, object, GetFunctionClosureForContext());
   PrepareFrameState(context, stmt->EntryId());
   VisitInScope(stmt->statement(), stmt->scope(), context);
@@ -1394,9 +1403,14 @@
       Node* cache_type = environment()->Peek(3);
       Node* object = environment()->Peek(4);
 
-      // Check loop termination condition.
-      Node* exit_cond = NewNode(javascript()->ForInDone(), index, cache_length);
-      for_loop.BreakWhen(exit_cond);
+      // Check loop termination condition (we know that the {index} is always
+      // in Smi range, so we can just set the hint on the comparison below).
+      PrepareEagerCheckpoint(stmt->EntryId());
+      Node* exit_cond =
+          NewNode(javascript()->LessThan(CompareOperationHint::kSignedSmall),
+                  index, cache_length);
+      PrepareFrameState(exit_cond, BailoutId::None());
+      for_loop.BreakUnless(exit_cond);
 
       // Compute the next enumerated value.
       Node* value = NewNode(javascript()->ForInNext(), object, cache_array,
@@ -1424,9 +1438,13 @@
       test_value.End();
       for_loop.EndBody();
 
-      // Increment counter and continue.
+      // Increment counter and continue (we know that the {index} is always
+      // in Smi range, so we can just set the hint on the increment below).
       index = environment()->Peek(0);
-      index = NewNode(javascript()->ForInStep(), index);
+      PrepareEagerCheckpoint(stmt->IncrementId());
+      index = NewNode(javascript()->Add(BinaryOperationHint::kSignedSmall),
+                      index, jsgraph()->OneConstant());
+      PrepareFrameState(index, BailoutId::None());
       environment()->Poke(0, index);
     }
     for_loop.EndLoop();
@@ -1475,7 +1493,8 @@
   // Create a catch scope that binds the exception.
   Node* exception = try_control.GetExceptionNode();
   Handle<String> name = stmt->variable()->name();
-  const Operator* op = javascript()->CreateCatchContext(name);
+  Handle<ScopeInfo> scope_info = stmt->scope()->scope_info();
+  const Operator* op = javascript()->CreateCatchContext(name, scope_info);
   Node* context = NewNode(op, exception, GetFunctionClosureForContext());
 
   // Evaluate the catch-block.
@@ -1595,7 +1614,7 @@
 
   // Create nodes to store method values into the literal.
   for (int i = 0; i < expr->properties()->length(); i++) {
-    ObjectLiteral::Property* property = expr->properties()->at(i);
+    ClassLiteral::Property* property = expr->properties()->at(i);
     environment()->Push(environment()->Peek(property->is_static() ? 1 : 0));
 
     VisitForValue(property->key());
@@ -1620,11 +1639,7 @@
     BuildSetHomeObject(value, receiver, property);
 
     switch (property->kind()) {
-      case ObjectLiteral::Property::CONSTANT:
-      case ObjectLiteral::Property::MATERIALIZED_LITERAL:
-      case ObjectLiteral::Property::PROTOTYPE:
-        UNREACHABLE();
-      case ObjectLiteral::Property::COMPUTED: {
+      case ClassLiteral::Property::METHOD: {
         Node* attr = jsgraph()->Constant(DONT_ENUM);
         Node* set_function_name =
             jsgraph()->Constant(property->NeedsSetFunctionName());
@@ -1634,20 +1649,24 @@
         PrepareFrameState(call, BailoutId::None());
         break;
       }
-      case ObjectLiteral::Property::GETTER: {
+      case ClassLiteral::Property::GETTER: {
         Node* attr = jsgraph()->Constant(DONT_ENUM);
         const Operator* op = javascript()->CallRuntime(
             Runtime::kDefineGetterPropertyUnchecked, 4);
         NewNode(op, receiver, key, value, attr);
         break;
       }
-      case ObjectLiteral::Property::SETTER: {
+      case ClassLiteral::Property::SETTER: {
         Node* attr = jsgraph()->Constant(DONT_ENUM);
         const Operator* op = javascript()->CallRuntime(
             Runtime::kDefineSetterPropertyUnchecked, 4);
         NewNode(op, receiver, key, value, attr);
         break;
       }
+      case ClassLiteral::Property::FIELD: {
+        UNREACHABLE();
+        break;
+      }
     }
   }
 
@@ -1945,8 +1964,8 @@
 
   // Create nodes to evaluate all the non-constant subexpressions and to store
   // them into the newly cloned array.
-  int array_index = 0;
-  for (; array_index < expr->values()->length(); array_index++) {
+  for (int array_index = 0; array_index < expr->values()->length();
+       array_index++) {
     Expression* subexpr = expr->values()->at(array_index);
     DCHECK(!subexpr->IsSpread());
     if (CompileTimeValue::IsCompileTimeValue(subexpr)) continue;
@@ -1961,26 +1980,6 @@
                       OutputFrameStateCombine::Ignore());
   }
 
-  // In case the array literal contains spread expressions it has two parts. The
-  // first part is  the "static" array which has a literal index is handled
-  // above. The second part is the part after the first spread expression
-  // (inclusive) and these elements gets appended to the array. Note that the
-  // number elements an iterable produces is unknown ahead of time.
-  for (; array_index < expr->values()->length(); array_index++) {
-    Expression* subexpr = expr->values()->at(array_index);
-    DCHECK(!subexpr->IsSpread());
-
-    VisitForValue(subexpr);
-    {
-      Node* value = environment()->Pop();
-      Node* array = environment()->Pop();
-      const Operator* op = javascript()->CallRuntime(Runtime::kAppendElement);
-      Node* result = NewNode(op, array, value);
-      PrepareFrameState(result, expr->GetIdForElement(array_index));
-      environment()->Push(result);
-    }
-  }
-
   ast_context()->ProduceValue(expr, environment()->Pop());
 }
 
@@ -2447,12 +2446,17 @@
   }
 
   // Create node to perform the function call.
+  float const frequency = ComputeCallFrequency(expr->CallFeedbackICSlot());
   VectorSlotPair feedback = CreateVectorSlotPair(expr->CallFeedbackICSlot());
-  const Operator* call = javascript()->CallFunction(
-      args->length() + 2, feedback, receiver_hint, expr->tail_call_mode());
+  const Operator* call =
+      javascript()->CallFunction(args->length() + 2, frequency, feedback,
+                                 receiver_hint, expr->tail_call_mode());
   PrepareEagerCheckpoint(possibly_eval ? expr->EvalId() : expr->CallId());
   Node* value = ProcessArguments(call, args->length() + 2);
-  environment()->Push(value->InputAt(0));  // The callee passed to the call.
+  // The callee passed to the call, we just need to push something here to
+  // satisfy the bailout location contract. The fullcodegen code will not
+  // ever look at this value, so we just push optimized_out here.
+  environment()->Push(jsgraph()->OptimizedOutConstant());
   PrepareFrameState(value, expr->ReturnId(), OutputFrameStateCombine::Push());
   environment()->Drop(1);
   ast_context()->ProduceValue(expr, value);
@@ -2480,7 +2484,7 @@
 
   // Create node to perform the super call.
   const Operator* call =
-      javascript()->CallConstruct(args->length() + 2, VectorSlotPair());
+      javascript()->CallConstruct(args->length() + 2, 0.0f, VectorSlotPair());
   Node* value = ProcessArguments(call, args->length() + 2);
   PrepareFrameState(value, expr->ReturnId(), OutputFrameStateCombine::Push());
   ast_context()->ProduceValue(expr, value);
@@ -2498,9 +2502,10 @@
   environment()->Push(environment()->Peek(args->length()));
 
   // Create node to perform the construct call.
+  float const frequency = ComputeCallFrequency(expr->CallNewFeedbackSlot());
   VectorSlotPair feedback = CreateVectorSlotPair(expr->CallNewFeedbackSlot());
   const Operator* call =
-      javascript()->CallConstruct(args->length() + 2, feedback);
+      javascript()->CallConstruct(args->length() + 2, frequency, feedback);
   Node* value = ProcessArguments(call, args->length() + 2);
   PrepareFrameState(value, expr->ReturnId(), OutputFrameStateCombine::Push());
   ast_context()->ProduceValue(expr, value);
@@ -3086,7 +3091,7 @@
   DCHECK_EQ(DYNAMIC_GLOBAL, variable->mode());
   uint32_t check_depths = 0;
   for (Scope* s = current_scope(); s != nullptr; s = s->outer_scope()) {
-    if (s->num_heap_slots() <= 0) continue;
+    if (!s->NeedsContext()) continue;
     if (!s->calls_sloppy_eval()) continue;
     int depth = current_scope()->ContextChainLength(s);
     if (depth > kMaxCheckDepth) return kFullCheckRequired;
@@ -3100,7 +3105,7 @@
   DCHECK_EQ(DYNAMIC_LOCAL, variable->mode());
   uint32_t check_depths = 0;
   for (Scope* s = current_scope(); s != nullptr; s = s->outer_scope()) {
-    if (s->num_heap_slots() <= 0) continue;
+    if (!s->NeedsContext()) continue;
     if (!s->calls_sloppy_eval() && s != variable->scope()) continue;
     int depth = current_scope()->ContextChainLength(s);
     if (depth > kMaxCheckDepth) return kFullCheckRequired;
@@ -3110,6 +3115,13 @@
   return check_depths;
 }
 
+float AstGraphBuilder::ComputeCallFrequency(FeedbackVectorSlot slot) const {
+  if (slot.IsInvalid()) return 0.0f;
+  Handle<TypeFeedbackVector> feedback_vector(
+      info()->closure()->feedback_vector(), isolate());
+  CallICNexus nexus(feedback_vector, slot);
+  return nexus.ComputeCallFrequency() * invocation_frequency_;
+}
 
 Node* AstGraphBuilder::ProcessArguments(const Operator* op, int arity) {
   DCHECK(environment()->stack_height() >= arity);
@@ -3171,7 +3183,7 @@
   DCHECK(scope->is_script_scope());
 
   // Allocate a new local context.
-  Handle<ScopeInfo> scope_info = scope->GetScopeInfo(isolate());
+  Handle<ScopeInfo> scope_info = scope->scope_info();
   const Operator* op = javascript()->CreateScriptContext(scope_info);
   Node* local_context = NewNode(op, GetFunctionClosure());
   PrepareFrameState(local_context, BailoutId::ScriptContext(),
@@ -3185,7 +3197,7 @@
   DCHECK(scope->is_block_scope());
 
   // Allocate a new local context.
-  Handle<ScopeInfo> scope_info = scope->GetScopeInfo(isolate());
+  Handle<ScopeInfo> scope_info = scope->scope_info();
   const Operator* op = javascript()->CreateBlockContext(scope_info);
   Node* local_context = NewNode(op, GetFunctionClosureForContext());
 
@@ -3213,8 +3225,7 @@
   return object;
 }
 
-
-Node* AstGraphBuilder::BuildRestArgumentsArray(Variable* rest, int index) {
+Node* AstGraphBuilder::BuildRestArgumentsArray(Variable* rest) {
   if (rest == nullptr) return nullptr;
 
   // Allocate and initialize a new arguments object.
@@ -3321,7 +3332,6 @@
                                          TypeofMode typeof_mode) {
   Node* the_hole = jsgraph()->TheHoleConstant();
   switch (variable->location()) {
-    case VariableLocation::GLOBAL:
     case VariableLocation::UNALLOCATED: {
       // Global var, const, or let variable.
       Handle<Name> name = variable->name();
@@ -3383,7 +3393,6 @@
                                            BailoutId bailout_id,
                                            OutputFrameStateCombine combine) {
   switch (variable->location()) {
-    case VariableLocation::GLOBAL:
     case VariableLocation::UNALLOCATED: {
       // Global var, const, or let variable.
       Node* global = BuildLoadGlobalObject();
@@ -3422,7 +3431,6 @@
   Node* the_hole = jsgraph()->TheHoleConstant();
   VariableMode mode = variable->mode();
   switch (variable->location()) {
-    case VariableLocation::GLOBAL:
     case VariableLocation::UNALLOCATED: {
       // Global var, const, or let variable.
       Handle<Name> name = variable->name();
@@ -3433,15 +3441,7 @@
     case VariableLocation::PARAMETER:
     case VariableLocation::LOCAL:
       // Local var, const, or let variable.
-      if (mode == CONST_LEGACY && op != Token::INIT) {
-        // Non-initializing assignment to legacy const is
-        // - exception in strict mode.
-        // - ignored in sloppy mode.
-        if (is_strict(language_mode())) {
-          return BuildThrowConstAssignError(bailout_id);
-        }
-        return value;
-      } else if (mode == LET && op == Token::INIT) {
+      if (mode == LET && op == Token::INIT) {
         // No initialization check needed because scoping guarantees it. Note
         // that we still perform a lookup to keep the variable live, because
         // baseline code might contain debug code that inspects the variable.
@@ -3464,6 +3464,16 @@
         if (current->op() != the_hole->op() && variable->is_this()) {
           value = BuildHoleCheckElseThrow(current, variable, value, bailout_id);
         }
+      } else if (mode == CONST && op != Token::INIT &&
+                 variable->is_sloppy_function_name()) {
+        // Non-initializing assignment to sloppy function names is
+        // - exception in strict mode.
+        // - ignored in sloppy mode.
+        DCHECK(!variable->binding_needs_init());
+        if (variable->throw_on_const_assignment(language_mode())) {
+          return BuildThrowConstAssignError(bailout_id);
+        }
+        return value;
       } else if (mode == CONST && op != Token::INIT) {
         if (variable->binding_needs_init()) {
           Node* current = environment()->Lookup(variable);
@@ -3481,16 +3491,7 @@
     case VariableLocation::CONTEXT: {
       // Context variable (potentially up the context chain).
       int depth = current_scope()->ContextChainLength(variable->scope());
-      if (mode == CONST_LEGACY && op != Token::INIT) {
-        // Non-initializing assignment to legacy const is
-        // - exception in strict mode.
-        // - ignored in sloppy mode.
-        if (is_strict(language_mode())) {
-          return BuildThrowConstAssignError(bailout_id);
-        }
-        return value;
-      } else if (mode == LET && op != Token::INIT &&
-                 variable->binding_needs_init()) {
+      if (mode == LET && op != Token::INIT && variable->binding_needs_init()) {
         // Perform an initialization check for let declared variables.
         const Operator* op =
             javascript()->LoadContext(depth, variable->index(), false);
@@ -3506,6 +3507,16 @@
           Node* current = NewNode(op, current_context());
           value = BuildHoleCheckElseThrow(current, variable, value, bailout_id);
         }
+      } else if (mode == CONST && op != Token::INIT &&
+                 variable->is_sloppy_function_name()) {
+        // Non-initializing assignment to sloppy function names is
+        // - exception in strict mode.
+        // - ignored in sloppy mode.
+        DCHECK(!variable->binding_needs_init());
+        if (variable->throw_on_const_assignment(language_mode())) {
+          return BuildThrowConstAssignError(bailout_id);
+        }
+        return value;
       } else if (mode == CONST && op != Token::INIT) {
         if (variable->binding_needs_init()) {
           const Operator* op =
@@ -3688,9 +3699,8 @@
   return object;
 }
 
-
 Node* AstGraphBuilder::BuildSetHomeObject(Node* value, Node* home_object,
-                                          ObjectLiteralProperty* property,
+                                          LiteralProperty* property,
                                           int slot_number) {
   Expression* expr = property->value();
   if (!FunctionLiteral::NeedsHomeObject(expr)) return value;
@@ -3989,8 +3999,8 @@
 
 bool AstGraphBuilder::CheckOsrEntry(IterationStatement* stmt) {
   if (info()->osr_ast_id() == stmt->OsrEntryId()) {
-    info()->set_osr_expr_stack_height(std::max(
-        environment()->stack_height(), info()->osr_expr_stack_height()));
+    DCHECK_EQ(-1, info()->osr_expr_stack_height());
+    info()->set_osr_expr_stack_height(environment()->stack_height());
     return true;
   }
   return false;
@@ -4183,9 +4193,39 @@
   }
 }
 
+void AstGraphBuilder::Environment::PrepareForOsrEntry() {
+  int size = static_cast<int>(values()->size());
+  Graph* graph = builder_->graph();
 
-void AstGraphBuilder::Environment::PrepareForLoop(BitVector* assigned,
-                                                  bool is_osr) {
+  // Set the control and effect to the OSR loop entry.
+  Node* osr_loop_entry = graph->NewNode(builder_->common()->OsrLoopEntry(),
+                                        graph->start(), graph->start());
+  UpdateControlDependency(osr_loop_entry);
+  UpdateEffectDependency(osr_loop_entry);
+  // Set OSR values.
+  for (int i = 0; i < size; ++i) {
+    values()->at(i) =
+        graph->NewNode(builder_->common()->OsrValue(i), osr_loop_entry);
+  }
+
+  // Set the contexts.
+  // The innermost context is the OSR value, and the outer contexts are
+  // reconstructed by dynamically walking up the context chain.
+  Node* osr_context = nullptr;
+  const Operator* op =
+      builder_->javascript()->LoadContext(0, Context::PREVIOUS_INDEX, true);
+  const Operator* op_inner =
+      builder_->common()->OsrValue(Linkage::kOsrContextSpillSlotIndex);
+  int last = static_cast<int>(contexts()->size() - 1);
+  for (int i = last; i >= 0; i--) {
+    osr_context = (i == last) ? graph->NewNode(op_inner, osr_loop_entry)
+                              : graph->NewNode(op, osr_context, osr_context,
+                                               osr_loop_entry);
+    contexts()->at(i) = osr_context;
+  }
+}
+
+void AstGraphBuilder::Environment::PrepareForLoop(BitVector* assigned) {
   int size = static_cast<int>(values()->size());
 
   Node* control = builder_->NewLoop();
@@ -4220,40 +4260,6 @@
       contexts()->at(i) = builder_->NewPhi(1, context, control);
     }
   }
-
-  if (is_osr) {
-    // Merge OSR values as inputs to the phis of the loop.
-    Graph* graph = builder_->graph();
-    Node* osr_loop_entry = builder_->graph()->NewNode(
-        builder_->common()->OsrLoopEntry(), graph->start(), graph->start());
-
-    builder_->MergeControl(control, osr_loop_entry);
-    builder_->MergeEffect(effect, osr_loop_entry, control);
-
-    for (int i = 0; i < size; ++i) {
-      Node* value = values()->at(i);
-      Node* osr_value =
-          graph->NewNode(builder_->common()->OsrValue(i), osr_loop_entry);
-      values()->at(i) = builder_->MergeValue(value, osr_value, control);
-    }
-
-    // Rename all the contexts in the environment.
-    // The innermost context is the OSR value, and the outer contexts are
-    // reconstructed by dynamically walking up the context chain.
-    Node* osr_context = nullptr;
-    const Operator* op =
-        builder_->javascript()->LoadContext(0, Context::PREVIOUS_INDEX, true);
-    const Operator* op_inner =
-        builder_->common()->OsrValue(Linkage::kOsrContextSpillSlotIndex);
-    int last = static_cast<int>(contexts()->size() - 1);
-    for (int i = last; i >= 0; i--) {
-      Node* context = contexts()->at(i);
-      osr_context = (i == last) ? graph->NewNode(op_inner, osr_loop_entry)
-                                : graph->NewNode(op, osr_context, osr_context,
-                                                 osr_loop_entry);
-      contexts()->at(i) = builder_->MergeValue(context, osr_context, control);
-    }
-  }
 }
 
 
diff --git a/src/compiler/ast-graph-builder.h b/src/compiler/ast-graph-builder.h
index bd307ba..27f2c9b 100644
--- a/src/compiler/ast-graph-builder.h
+++ b/src/compiler/ast-graph-builder.h
@@ -37,6 +37,7 @@
 class AstGraphBuilder : public AstVisitor<AstGraphBuilder> {
  public:
   AstGraphBuilder(Zone* local_zone, CompilationInfo* info, JSGraph* jsgraph,
+                  float invocation_frequency,
                   LoopAssignmentAnalysis* loop_assignment = nullptr,
                   TypeHintAnalysis* type_hint_analysis = nullptr);
   virtual ~AstGraphBuilder() {}
@@ -80,6 +81,7 @@
   Zone* local_zone_;
   CompilationInfo* info_;
   JSGraph* jsgraph_;
+  float const invocation_frequency_;
   Environment* environment_;
   AstContext* ast_context_;
 
@@ -264,6 +266,9 @@
   uint32_t ComputeBitsetForDynamicGlobal(Variable* variable);
   uint32_t ComputeBitsetForDynamicContext(Variable* variable);
 
+  // Computes the frequency for JSCallFunction and JSCallConstruct nodes.
+  float ComputeCallFrequency(FeedbackVectorSlot slot) const;
+
   // ===========================================================================
   // The following build methods all generate graph fragments and return one
   // resulting node. The operand stack height remains the same, variables and
@@ -278,8 +283,8 @@
   // Builder to create an arguments object if it is used.
   Node* BuildArgumentsObject(Variable* arguments);
 
-  // Builder to create an array of rest parameters if used
-  Node* BuildRestArgumentsArray(Variable* rest, int index);
+  // Builder to create an array of rest parameters if used.
+  Node* BuildRestArgumentsArray(Variable* rest);
 
   // Builder that assigns to the {.this_function} internal variable if needed.
   Node* BuildThisFunctionVariable(Variable* this_function_var);
@@ -342,8 +347,7 @@
   // Builder for adding the [[HomeObject]] to a value if the value came from a
   // function literal and needs a home object. Do nothing otherwise.
   Node* BuildSetHomeObject(Node* value, Node* home_object,
-                           ObjectLiteralProperty* property,
-                           int slot_number = 0);
+                           LiteralProperty* property, int slot_number = 0);
 
   // Builders for error reporting at runtime.
   Node* BuildThrowError(Node* exception, BailoutId bailout_id);
@@ -575,6 +579,11 @@
   // Copies this environment at a loop header control-flow point.
   Environment* CopyForLoop(BitVector* assigned, bool is_osr = false);
 
+  // Copies this environment for Osr entry. This only produces environment
+  // of the right shape, the caller is responsible for filling in the right
+  // values and dependencies.
+  Environment* CopyForOsrEntry();
+
  private:
   AstGraphBuilder* builder_;
   int parameters_count_;
@@ -604,7 +613,8 @@
   bool IsLivenessBlockConsistent();
 
   // Prepare environment to be used as loop header.
-  void PrepareForLoop(BitVector* assigned, bool is_osr = false);
+  void PrepareForLoop(BitVector* assigned);
+  void PrepareForOsrEntry();
 };
 
 }  // namespace compiler
diff --git a/src/compiler/ast-loop-assignment-analyzer.cc b/src/compiler/ast-loop-assignment-analyzer.cc
index f1469f7..82eaeb2 100644
--- a/src/compiler/ast-loop-assignment-analyzer.cc
+++ b/src/compiler/ast-loop-assignment-analyzer.cc
@@ -3,8 +3,8 @@
 // found in the LICENSE file.
 
 #include "src/compiler/ast-loop-assignment-analyzer.h"
-#include "src/compiler.h"
-#include "src/parsing/parser.h"
+#include "src/ast/scopes.h"
+#include "src/compilation-info.h"
 
 namespace v8 {
 namespace internal {
@@ -122,7 +122,7 @@
 void ALAA::VisitClassLiteral(ClassLiteral* e) {
   VisitIfNotNull(e->extends());
   VisitIfNotNull(e->constructor());
-  ZoneList<ObjectLiteralProperty*>* properties = e->properties();
+  ZoneList<ClassLiteralProperty*>* properties = e->properties();
   for (int i = 0; i < properties->length(); i++) {
     Visit(properties->at(i)->key());
     Visit(properties->at(i)->value());
diff --git a/src/compiler/ast-loop-assignment-analyzer.h b/src/compiler/ast-loop-assignment-analyzer.h
index 0893fd1..44ad7be 100644
--- a/src/compiler/ast-loop-assignment-analyzer.h
+++ b/src/compiler/ast-loop-assignment-analyzer.h
@@ -7,7 +7,7 @@
 
 #include "src/ast/ast.h"
 #include "src/bit-vector.h"
-#include "src/zone-containers.h"
+#include "src/zone/zone-containers.h"
 
 namespace v8 {
 namespace internal {
diff --git a/src/compiler/basic-block-instrumentor.cc b/src/compiler/basic-block-instrumentor.cc
index a966a5b..40f0a29 100644
--- a/src/compiler/basic-block-instrumentor.cc
+++ b/src/compiler/basic-block-instrumentor.cc
@@ -6,13 +6,14 @@
 
 #include <sstream>
 
-#include "src/compiler.h"
+#include "src/compilation-info.h"
 #include "src/compiler/common-operator.h"
 #include "src/compiler/graph.h"
 #include "src/compiler/machine-operator.h"
 #include "src/compiler/node.h"
 #include "src/compiler/operator-properties.h"
 #include "src/compiler/schedule.h"
+#include "src/objects-inl.h"
 
 namespace v8 {
 namespace internal {
diff --git a/src/compiler/bytecode-graph-builder.cc b/src/compiler/bytecode-graph-builder.cc
index a17947a..d26ff93 100644
--- a/src/compiler/bytecode-graph-builder.cc
+++ b/src/compiler/bytecode-graph-builder.cc
@@ -4,10 +4,14 @@
 
 #include "src/compiler/bytecode-graph-builder.h"
 
+#include "src/ast/ast.h"
+#include "src/ast/scopes.h"
+#include "src/compilation-info.h"
 #include "src/compiler/bytecode-branch-analysis.h"
 #include "src/compiler/linkage.h"
 #include "src/compiler/operator-properties.h"
 #include "src/interpreter/bytecodes.h"
+#include "src/objects-inl.h"
 
 namespace v8 {
 namespace internal {
@@ -26,6 +30,7 @@
 
   Node* LookupAccumulator() const;
   Node* LookupRegister(interpreter::Register the_register) const;
+  void MarkAllRegistersLive();
 
   void BindAccumulator(Node* node, FrameStateBeforeAndAfter* states = nullptr);
   void BindRegister(interpreter::Register the_register, Node* node,
@@ -42,7 +47,8 @@
 
   // Preserve a checkpoint of the environment for the IR graph. Any
   // further mutation of the environment will not affect checkpoints.
-  Node* Checkpoint(BailoutId bytecode_offset, OutputFrameStateCombine combine);
+  Node* Checkpoint(BailoutId bytecode_offset, OutputFrameStateCombine combine,
+                   bool owner_has_exception);
 
   // Returns true if the state values are up to date with the current
   // environment.
@@ -57,27 +63,36 @@
   Node* Context() const { return context_; }
   void SetContext(Node* new_context) { context_ = new_context; }
 
-  Environment* CopyForConditional() const;
+  Environment* CopyForConditional();
   Environment* CopyForLoop();
+  Environment* CopyForOsrEntry();
   void Merge(Environment* other);
-  void PrepareForOsr();
+  void PrepareForOsrEntry();
 
   void PrepareForLoopExit(Node* loop);
 
  private:
-  explicit Environment(const Environment* copy);
+  Environment(const Environment* copy, LivenessAnalyzerBlock* liveness_block);
   void PrepareForLoop();
+
+  enum { kNotCached, kCached };
+
   bool StateValuesAreUpToDate(Node** state_values, int offset, int count,
-                              int output_poke_start, int output_poke_end);
+                              int output_poke_start, int output_poke_end,
+                              int cached = kNotCached);
   bool StateValuesRequireUpdate(Node** state_values, int offset, int count);
   void UpdateStateValues(Node** state_values, int offset, int count);
+  void UpdateStateValuesWithCache(Node** state_values, int offset, int count);
 
   int RegisterToValuesIndex(interpreter::Register the_register) const;
 
+  bool IsLivenessBlockConsistent() const;
+
   Zone* zone() const { return builder_->local_zone(); }
   Graph* graph() const { return builder_->graph(); }
   CommonOperatorBuilder* common() const { return builder_->common(); }
   BytecodeGraphBuilder* builder() const { return builder_; }
+  LivenessAnalyzerBlock* liveness_block() const { return liveness_block_; }
   const NodeVector* values() const { return &values_; }
   NodeVector* values() { return &values_; }
   int register_base() const { return register_base_; }
@@ -86,6 +101,7 @@
   BytecodeGraphBuilder* builder_;
   int register_count_;
   int parameter_count_;
+  LivenessAnalyzerBlock* liveness_block_;
   Node* context_;
   Node* control_dependency_;
   Node* effect_dependency_;
@@ -109,7 +125,7 @@
         output_poke_count_(0) {
     BailoutId id_before(builder->bytecode_iterator().current_offset());
     frame_state_before_ = builder_->environment()->Checkpoint(
-        id_before, OutputFrameStateCombine::Ignore());
+        id_before, OutputFrameStateCombine::Ignore(), false);
     id_after_ = BailoutId(id_before.ToInt() +
                           builder->bytecode_iterator().current_bytecode_size());
     // Create an explicit checkpoint node for before the operation.
@@ -136,8 +152,9 @@
       // Add the frame state for after the operation.
       DCHECK_EQ(IrOpcode::kDead,
                 NodeProperties::GetFrameStateInput(node)->opcode());
-      Node* frame_state_after =
-          builder_->environment()->Checkpoint(id_after_, combine);
+      bool has_exception = NodeProperties::IsExceptionalCall(node);
+      Node* frame_state_after = builder_->environment()->Checkpoint(
+          id_after_, combine, has_exception);
       NodeProperties::ReplaceFrameStateInput(node, frame_state_after);
     }
 
@@ -171,6 +188,9 @@
     : builder_(builder),
       register_count_(register_count),
       parameter_count_(parameter_count),
+      liveness_block_(builder->is_liveness_analysis_enabled_
+                          ? builder_->liveness_analyzer()->NewBlock()
+                          : nullptr),
       context_(context),
       control_dependency_(control_dependency),
       effect_dependency_(control_dependency),
@@ -204,12 +224,13 @@
   values()->push_back(undefined_constant);
 }
 
-
 BytecodeGraphBuilder::Environment::Environment(
-    const BytecodeGraphBuilder::Environment* other)
+    const BytecodeGraphBuilder::Environment* other,
+    LivenessAnalyzerBlock* liveness_block)
     : builder_(other->builder_),
       register_count_(other->register_count_),
       parameter_count_(other->parameter_count_),
+      liveness_block_(liveness_block),
       context_(other->context_),
       control_dependency_(other->control_dependency_),
       effect_dependency_(other->effect_dependency_),
@@ -232,6 +253,10 @@
   }
 }
 
+bool BytecodeGraphBuilder::Environment::IsLivenessBlockConsistent() const {
+  return !builder_->IsLivenessAnalysisEnabled() ==
+         (liveness_block() == nullptr);
+}
 
 Node* BytecodeGraphBuilder::Environment::LookupAccumulator() const {
   return values()->at(accumulator_base_);
@@ -248,10 +273,22 @@
     return builder()->GetNewTarget();
   } else {
     int values_index = RegisterToValuesIndex(the_register);
+    if (liveness_block() != nullptr && !the_register.is_parameter()) {
+      DCHECK(IsLivenessBlockConsistent());
+      liveness_block()->Lookup(the_register.index());
+    }
     return values()->at(values_index);
   }
 }
 
+void BytecodeGraphBuilder::Environment::MarkAllRegistersLive() {
+  DCHECK(IsLivenessBlockConsistent());
+  if (liveness_block() != nullptr) {
+    for (int i = 0; i < register_count(); ++i) {
+      liveness_block()->Lookup(i);
+    }
+  }
+}
 
 void BytecodeGraphBuilder::Environment::BindAccumulator(
     Node* node, FrameStateBeforeAndAfter* states) {
@@ -271,6 +308,10 @@
                                                             values_index));
   }
   values()->at(values_index) = node;
+  if (liveness_block() != nullptr && !the_register.is_parameter()) {
+    DCHECK(IsLivenessBlockConsistent());
+    liveness_block()->Bind(the_register.index());
+  }
 }
 
 
@@ -298,18 +339,41 @@
 BytecodeGraphBuilder::Environment*
 BytecodeGraphBuilder::Environment::CopyForLoop() {
   PrepareForLoop();
-  return new (zone()) Environment(this);
+  if (liveness_block() != nullptr) {
+    // Finish the current block before copying.
+    liveness_block_ = builder_->liveness_analyzer()->NewBlock(liveness_block());
+  }
+  return new (zone()) Environment(this, liveness_block());
 }
 
+BytecodeGraphBuilder::Environment*
+BytecodeGraphBuilder::Environment::CopyForOsrEntry() {
+  return new (zone())
+      Environment(this, builder_->liveness_analyzer()->NewBlock());
+}
 
 BytecodeGraphBuilder::Environment*
-BytecodeGraphBuilder::Environment::CopyForConditional() const {
-  return new (zone()) Environment(this);
+BytecodeGraphBuilder::Environment::CopyForConditional() {
+  LivenessAnalyzerBlock* copy_liveness_block = nullptr;
+  if (liveness_block() != nullptr) {
+    copy_liveness_block =
+        builder_->liveness_analyzer()->NewBlock(liveness_block());
+    liveness_block_ = builder_->liveness_analyzer()->NewBlock(liveness_block());
+  }
+  return new (zone()) Environment(this, copy_liveness_block);
 }
 
 
 void BytecodeGraphBuilder::Environment::Merge(
     BytecodeGraphBuilder::Environment* other) {
+  if (builder_->is_liveness_analysis_enabled_) {
+    if (GetControlDependency()->opcode() != IrOpcode::kLoop) {
+      liveness_block_ =
+          builder()->liveness_analyzer()->NewBlock(liveness_block());
+    }
+    liveness_block()->AddPredecessor(other->liveness_block());
+  }
+
   // Create a merge of the control dependencies of both environments and update
   // the current environment's control dependency accordingly.
   Node* control = builder()->MergeControl(GetControlDependency(),
@@ -352,34 +416,27 @@
   builder()->exit_controls_.push_back(terminate);
 }
 
-void BytecodeGraphBuilder::Environment::PrepareForOsr() {
+void BytecodeGraphBuilder::Environment::PrepareForOsrEntry() {
   DCHECK_EQ(IrOpcode::kLoop, GetControlDependency()->opcode());
   DCHECK_EQ(1, GetControlDependency()->InputCount());
+
   Node* start = graph()->start();
 
-  // Create a control node for the OSR entry point and merge it into the loop
-  // header. Update the current environment's control dependency accordingly.
+  // Create a control node for the OSR entry point and update the current
+  // environment's dependencies accordingly.
   Node* entry = graph()->NewNode(common()->OsrLoopEntry(), start, start);
-  Node* control = builder()->MergeControl(GetControlDependency(), entry);
-  UpdateControlDependency(control);
+  UpdateControlDependency(entry);
+  UpdateEffectDependency(entry);
 
-  // Create a merge of the effect from the OSR entry and the existing effect
-  // dependency. Update the current environment's effect dependency accordingly.
-  Node* effect = builder()->MergeEffect(GetEffectDependency(), entry, control);
-  UpdateEffectDependency(effect);
-
-  // Rename all values in the environment which will extend or introduce Phi
-  // nodes to contain the OSR values available at the entry point.
-  Node* osr_context = graph()->NewNode(
-      common()->OsrValue(Linkage::kOsrContextSpillSlotIndex), entry);
-  context_ = builder()->MergeValue(context_, osr_context, control);
+  // Create OSR values for each environment value.
+  SetContext(graph()->NewNode(
+      common()->OsrValue(Linkage::kOsrContextSpillSlotIndex), entry));
   int size = static_cast<int>(values()->size());
   for (int i = 0; i < size; i++) {
     int idx = i;  // Indexing scheme follows {StandardFrame}, adapt accordingly.
     if (i >= register_base()) idx += InterpreterFrameConstants::kExtraSlotCount;
     if (i >= accumulator_base()) idx = Linkage::kOsrAccumulatorRegisterIndex;
-    Node* osr_value = graph()->NewNode(common()->OsrValue(idx), entry);
-    values_[i] = builder()->MergeValue(values_[i], osr_value, control);
+    values()->at(i) = graph()->NewNode(common()->OsrValue(idx), entry);
   }
 }
 
@@ -434,13 +491,19 @@
   }
 }
 
+void BytecodeGraphBuilder::Environment::UpdateStateValuesWithCache(
+    Node** state_values, int offset, int count) {
+  Node** env_values = (count == 0) ? nullptr : &values()->at(offset);
+  *state_values = builder_->state_values_cache_.GetNodeForValues(
+      env_values, static_cast<size_t>(count));
+}
 
 Node* BytecodeGraphBuilder::Environment::Checkpoint(
-    BailoutId bailout_id, OutputFrameStateCombine combine) {
-  // TODO(rmcilroy): Consider using StateValuesCache for some state values.
+    BailoutId bailout_id, OutputFrameStateCombine combine,
+    bool owner_has_exception) {
   UpdateStateValues(&parameters_state_values_, 0, parameter_count());
-  UpdateStateValues(&registers_state_values_, register_base(),
-                    register_count());
+  UpdateStateValuesWithCache(&registers_state_values_, register_base(),
+                             register_count());
   UpdateStateValues(&accumulator_state_values_, accumulator_base(), 1);
 
   const Operator* op = common()->FrameState(
@@ -450,20 +513,43 @@
       accumulator_state_values_, Context(), builder()->GetFunctionClosure(),
       builder()->graph()->start());
 
+  if (liveness_block() != nullptr) {
+    // If the owning node has an exception, register the checkpoint to the
+    // predecessor so that the checkpoint is used for both the normal and the
+    // exceptional paths. Yes, this is a terrible hack and we might want
+    // to use an explicit frame state for the exceptional path.
+    if (owner_has_exception) {
+      liveness_block()->GetPredecessor()->Checkpoint(result);
+    } else {
+      liveness_block()->Checkpoint(result);
+    }
+  }
+
   return result;
 }
 
-
 bool BytecodeGraphBuilder::Environment::StateValuesAreUpToDate(
     Node** state_values, int offset, int count, int output_poke_start,
-    int output_poke_end) {
+    int output_poke_end, int cached) {
   DCHECK_LE(static_cast<size_t>(offset + count), values()->size());
-  for (int i = 0; i < count; i++, offset++) {
-    if (offset < output_poke_start || offset >= output_poke_end) {
-      if ((*state_values)->InputAt(i) != values()->at(offset)) {
-        return false;
+  if (cached == kNotCached) {
+    for (int i = 0; i < count; i++, offset++) {
+      if (offset < output_poke_start || offset >= output_poke_end) {
+        if ((*state_values)->InputAt(i) != values()->at(offset)) {
+          return false;
+        }
       }
     }
+  } else {
+    for (StateValuesAccess::TypedNode state_value :
+         StateValuesAccess(*state_values)) {
+      if (offset < output_poke_start || offset >= output_poke_end) {
+        if (state_value.node != values()->at(offset)) {
+          return false;
+        }
+      }
+      ++offset;
+    }
   }
   return true;
 }
@@ -478,16 +564,18 @@
                                 output_poke_start, output_poke_end) &&
          StateValuesAreUpToDate(&registers_state_values_, register_base(),
                                 register_count(), output_poke_start,
-                                output_poke_end) &&
+                                output_poke_end, kCached) &&
          StateValuesAreUpToDate(&accumulator_state_values_, accumulator_base(),
                                 1, output_poke_start, output_poke_end);
 }
 
 BytecodeGraphBuilder::BytecodeGraphBuilder(Zone* local_zone,
                                            CompilationInfo* info,
-                                           JSGraph* jsgraph)
+                                           JSGraph* jsgraph,
+                                           float invocation_frequency)
     : local_zone_(local_zone),
       jsgraph_(jsgraph),
+      invocation_frequency_(invocation_frequency),
       bytecode_array_(handle(info->shared_info()->bytecode_array())),
       exception_handler_table_(
           handle(HandlerTable::cast(bytecode_array()->handler_table()))),
@@ -502,7 +590,13 @@
       current_exception_handler_(0),
       input_buffer_size_(0),
       input_buffer_(nullptr),
-      exit_controls_(local_zone) {}
+      exit_controls_(local_zone),
+      is_liveness_analysis_enabled_(FLAG_analyze_environment_liveness &&
+                                    info->is_deoptimization_enabled()),
+      state_values_cache_(jsgraph),
+      liveness_analyzer_(
+          static_cast<size_t>(bytecode_array()->register_count()), local_zone) {
+}
 
 Node* BytecodeGraphBuilder::GetNewTarget() {
   if (!new_target_.is_set()) {
@@ -556,10 +650,6 @@
 }
 
 bool BytecodeGraphBuilder::CreateGraph() {
-  // Set up the basic structure of the graph. Outputs for {Start} are
-  // the formal parameters (including the receiver) plus context and
-  // closure.
-
   // Set up the basic structure of the graph. Outputs for {Start} are the formal
   // parameters (including the receiver) plus new target, number of arguments,
   // context and closure.
@@ -571,10 +661,6 @@
                   GetFunctionContext());
   set_environment(&env);
 
-  // For OSR add an {OsrNormalEntry} as the start of the top-level environment.
-  // It will be replaced with {Dead} after typing and optimizations.
-  if (!osr_ast_id_.IsNone()) NewNode(common()->OsrNormalEntry());
-
   VisitBytecodes();
 
   // Finish the basic structure of the graph.
@@ -584,9 +670,25 @@
   Node* end = graph()->NewNode(common()->End(input_count), input_count, inputs);
   graph()->SetEnd(end);
 
+  ClearNonLiveSlotsInFrameStates();
+
   return true;
 }
 
+void BytecodeGraphBuilder::ClearNonLiveSlotsInFrameStates() {
+  if (!IsLivenessAnalysisEnabled()) {
+    return;
+  }
+  NonLiveFrameStateSlotReplacer replacer(
+      &state_values_cache_, jsgraph()->OptimizedOutConstant(),
+      liveness_analyzer()->local_count(), local_zone());
+  liveness_analyzer()->Run(&replacer);
+  if (FLAG_trace_environment_liveness) {
+    OFStream os(stdout);
+    liveness_analyzer()->Print(os);
+  }
+}
+
 void BytecodeGraphBuilder::VisitBytecodes() {
   BytecodeBranchAnalysis analysis(bytecode_array(), local_zone());
   BytecodeLoopAnalysis loop_analysis(bytecode_array(), &analysis, local_zone());
@@ -596,12 +698,14 @@
   set_loop_analysis(&loop_analysis);
   interpreter::BytecodeArrayIterator iterator(bytecode_array());
   set_bytecode_iterator(&iterator);
+  BuildOSRNormalEntryPoint();
   while (!iterator.done()) {
     int current_offset = iterator.current_offset();
     EnterAndExitExceptionHandlers(current_offset);
     SwitchToMergeEnvironment(current_offset);
     if (environment() != nullptr) {
       BuildLoopHeaderEnvironment(current_offset);
+      BuildOSRLoopEntryPoint(current_offset);
 
       switch (iterator.current_bytecode()) {
 #define BYTECODE_CASE(name, ...)       \
@@ -682,9 +786,9 @@
   environment()->BindRegister(bytecode_iterator().GetRegisterOperand(1), value);
 }
 
-Node* BytecodeGraphBuilder::BuildLoadGlobal(TypeofMode typeof_mode) {
-  VectorSlotPair feedback =
-      CreateVectorSlotPair(bytecode_iterator().GetIndexOperand(0));
+Node* BytecodeGraphBuilder::BuildLoadGlobal(uint32_t feedback_slot_index,
+                                            TypeofMode typeof_mode) {
+  VectorSlotPair feedback = CreateVectorSlotPair(feedback_slot_index);
   DCHECK_EQ(FeedbackVectorSlotKind::LOAD_GLOBAL_IC,
             feedback_vector()->GetKind(feedback.slot()));
   Handle<Name> name(feedback_vector()->GetName(feedback.slot()));
@@ -694,20 +798,23 @@
 
 void BytecodeGraphBuilder::VisitLdaGlobal() {
   FrameStateBeforeAndAfter states(this);
-  Node* node = BuildLoadGlobal(TypeofMode::NOT_INSIDE_TYPEOF);
+  Node* node = BuildLoadGlobal(bytecode_iterator().GetIndexOperand(0),
+                               TypeofMode::NOT_INSIDE_TYPEOF);
   environment()->BindAccumulator(node, &states);
 }
 
 void BytecodeGraphBuilder::VisitLdrGlobal() {
   FrameStateBeforeAndAfter states(this);
-  Node* node = BuildLoadGlobal(TypeofMode::NOT_INSIDE_TYPEOF);
+  Node* node = BuildLoadGlobal(bytecode_iterator().GetIndexOperand(0),
+                               TypeofMode::NOT_INSIDE_TYPEOF);
   environment()->BindRegister(bytecode_iterator().GetRegisterOperand(1), node,
                               &states);
 }
 
 void BytecodeGraphBuilder::VisitLdaGlobalInsideTypeof() {
   FrameStateBeforeAndAfter states(this);
-  Node* node = BuildLoadGlobal(TypeofMode::INSIDE_TYPEOF);
+  Node* node = BuildLoadGlobal(bytecode_iterator().GetIndexOperand(0),
+                               TypeofMode::INSIDE_TYPEOF);
   environment()->BindAccumulator(node, &states);
 }
 
@@ -733,14 +840,12 @@
 }
 
 Node* BytecodeGraphBuilder::BuildLoadContextSlot() {
-  // TODO(mythria): LoadContextSlots are unrolled by the required depth when
-  // generating bytecode. Hence the value of depth is always 0. Update this
-  // code, when the implementation changes.
   // TODO(mythria): immutable flag is also set to false. This information is not
   // available in bytecode array. update this code when the implementation
   // changes.
   const Operator* op = javascript()->LoadContext(
-      0, bytecode_iterator().GetIndexOperand(1), false);
+      bytecode_iterator().GetUnsignedImmediateOperand(2),
+      bytecode_iterator().GetIndexOperand(1), false);
   Node* context =
       environment()->LookupRegister(bytecode_iterator().GetRegisterOperand(0));
   return NewNode(op, context);
@@ -753,15 +858,13 @@
 
 void BytecodeGraphBuilder::VisitLdrContextSlot() {
   Node* node = BuildLoadContextSlot();
-  environment()->BindRegister(bytecode_iterator().GetRegisterOperand(2), node);
+  environment()->BindRegister(bytecode_iterator().GetRegisterOperand(3), node);
 }
 
 void BytecodeGraphBuilder::VisitStaContextSlot() {
-  // TODO(mythria): LoadContextSlots are unrolled by the required depth when
-  // generating bytecode. Hence the value of depth is always 0. Update this
-  // code, when the implementation changes.
-  const Operator* op =
-      javascript()->StoreContext(0, bytecode_iterator().GetIndexOperand(1));
+  const Operator* op = javascript()->StoreContext(
+      bytecode_iterator().GetUnsignedImmediateOperand(2),
+      bytecode_iterator().GetIndexOperand(1));
   Node* context =
       environment()->LookupRegister(bytecode_iterator().GetRegisterOperand(0));
   Node* value = environment()->LookupAccumulator();
@@ -788,6 +891,150 @@
   BuildLdaLookupSlot(TypeofMode::INSIDE_TYPEOF);
 }
 
+BytecodeGraphBuilder::Environment* BytecodeGraphBuilder::CheckContextExtensions(
+    uint32_t depth) {
+  // Output environment where the context has an extension
+  Environment* slow_environment = nullptr;
+
+  // We only need to check up to the last-but-one depth, because an eval in
+  // the same scope as the variable itself has no way of shadowing it.
+  for (uint32_t d = 0; d < depth; d++) {
+    Node* extension_slot =
+        NewNode(javascript()->LoadContext(d, Context::EXTENSION_INDEX, false),
+                environment()->Context());
+
+    Node* check_no_extension =
+        NewNode(javascript()->StrictEqual(CompareOperationHint::kAny),
+                extension_slot, jsgraph()->TheHoleConstant());
+
+    NewBranch(check_no_extension);
+    Environment* true_environment = environment()->CopyForConditional();
+
+    {
+      NewIfFalse();
+      // If there is an extension, merge into the slow path.
+      if (slow_environment == nullptr) {
+        slow_environment = environment();
+        NewMerge();
+      } else {
+        slow_environment->Merge(environment());
+      }
+    }
+
+    {
+      set_environment(true_environment);
+      NewIfTrue();
+      // Do nothing if there is no extension, eventually falling through to
+      // the fast path.
+    }
+  }
+
+  // The depth can be zero, in which case no slow-path checks are built, and the
+  // slow path environment can be null.
+  DCHECK(depth == 0 || slow_environment != nullptr);
+
+  return slow_environment;
+}
+
+void BytecodeGraphBuilder::BuildLdaLookupContextSlot(TypeofMode typeof_mode) {
+  uint32_t depth = bytecode_iterator().GetUnsignedImmediateOperand(2);
+
+  // Check if any context in the depth has an extension.
+  Environment* slow_environment = CheckContextExtensions(depth);
+
+  // Fast path, do a context load.
+  {
+    uint32_t slot_index = bytecode_iterator().GetIndexOperand(1);
+
+    const Operator* op = javascript()->LoadContext(depth, slot_index, false);
+    Node* context = environment()->Context();
+    environment()->BindAccumulator(NewNode(op, context));
+  }
+
+  // Only build the slow path if there were any slow-path checks.
+  if (slow_environment != nullptr) {
+    // Add a merge to the fast environment.
+    NewMerge();
+    Environment* fast_environment = environment();
+
+    // Slow path, do a runtime load lookup.
+    set_environment(slow_environment);
+    {
+      FrameStateBeforeAndAfter states(this);
+
+      Node* name = jsgraph()->Constant(
+          bytecode_iterator().GetConstantForIndexOperand(0));
+
+      const Operator* op =
+          javascript()->CallRuntime(typeof_mode == TypeofMode::NOT_INSIDE_TYPEOF
+                                        ? Runtime::kLoadLookupSlot
+                                        : Runtime::kLoadLookupSlotInsideTypeof);
+      Node* value = NewNode(op, name);
+      environment()->BindAccumulator(value, &states);
+    }
+
+    fast_environment->Merge(environment());
+    set_environment(fast_environment);
+  }
+}
+
+void BytecodeGraphBuilder::VisitLdaLookupContextSlot() {
+  BuildLdaLookupContextSlot(TypeofMode::NOT_INSIDE_TYPEOF);
+}
+
+void BytecodeGraphBuilder::VisitLdaLookupContextSlotInsideTypeof() {
+  BuildLdaLookupContextSlot(TypeofMode::INSIDE_TYPEOF);
+}
+
+void BytecodeGraphBuilder::BuildLdaLookupGlobalSlot(TypeofMode typeof_mode) {
+  uint32_t depth = bytecode_iterator().GetUnsignedImmediateOperand(2);
+
+  // Check if any context in the depth has an extension.
+  Environment* slow_environment = CheckContextExtensions(depth);
+
+  // Fast path, do a global load.
+  {
+    FrameStateBeforeAndAfter states(this);
+    Node* node =
+        BuildLoadGlobal(bytecode_iterator().GetIndexOperand(1), typeof_mode);
+    environment()->BindAccumulator(node, &states);
+  }
+
+  // Only build the slow path if there were any slow-path checks.
+  if (slow_environment != nullptr) {
+    // Add a merge to the fast environment.
+    NewMerge();
+    Environment* fast_environment = environment();
+
+    // Slow path, do a runtime load lookup.
+    set_environment(slow_environment);
+    {
+      FrameStateBeforeAndAfter states(this);
+
+      Node* name = jsgraph()->Constant(
+          bytecode_iterator().GetConstantForIndexOperand(0));
+
+      const Operator* op =
+          javascript()->CallRuntime(typeof_mode == TypeofMode::NOT_INSIDE_TYPEOF
+                                        ? Runtime::kLoadLookupSlot
+                                        : Runtime::kLoadLookupSlotInsideTypeof);
+      Node* value = NewNode(op, name);
+      environment()->BindAccumulator(value, &states);
+    }
+
+    fast_environment->Merge(environment());
+    set_environment(fast_environment);
+  }
+}
+
+void BytecodeGraphBuilder::VisitLdaLookupGlobalSlot() {
+  BuildLdaLookupGlobalSlot(TypeofMode::NOT_INSIDE_TYPEOF);
+}
+
+void BytecodeGraphBuilder::VisitLdaLookupGlobalSlotInsideTypeof() {
+  BuildLdaLookupGlobalSlot(TypeofMode::INSIDE_TYPEOF);
+}
+
 void BytecodeGraphBuilder::BuildStaLookupSlot(LanguageMode language_mode) {
   FrameStateBeforeAndAfter states(this);
   Node* value = environment()->LookupAccumulator();
@@ -920,7 +1167,10 @@
   Handle<SharedFunctionInfo> shared_info = Handle<SharedFunctionInfo>::cast(
       bytecode_iterator().GetConstantForIndexOperand(0));
   PretenureFlag tenured =
-      bytecode_iterator().GetFlagOperand(1) ? TENURED : NOT_TENURED;
+      interpreter::CreateClosureFlags::PretenuredBit::decode(
+          bytecode_iterator().GetFlagOperand(1))
+          ? TENURED
+          : NOT_TENURED;
   const Operator* op = javascript()->CreateClosure(shared_info, tenured);
   Node* closure = NewNode(op);
   environment()->BindAccumulator(closure);
@@ -936,7 +1186,7 @@
 }
 
 void BytecodeGraphBuilder::VisitCreateFunctionContext() {
-  uint32_t slots = bytecode_iterator().GetIndexOperand(0);
+  uint32_t slots = bytecode_iterator().GetUnsignedImmediateOperand(0);
   const Operator* op = javascript()->CreateFunctionContext(slots);
   Node* context = NewNode(op, GetFunctionClosure());
   environment()->BindAccumulator(context);
@@ -947,9 +1197,11 @@
   Node* exception = environment()->LookupRegister(reg);
   Handle<String> name =
       Handle<String>::cast(bytecode_iterator().GetConstantForIndexOperand(1));
+  Handle<ScopeInfo> scope_info = Handle<ScopeInfo>::cast(
+      bytecode_iterator().GetConstantForIndexOperand(2));
   Node* closure = environment()->LookupAccumulator();
 
-  const Operator* op = javascript()->CreateCatchContext(name);
+  const Operator* op = javascript()->CreateCatchContext(name, scope_info);
   Node* context = NewNode(op, exception, closure);
   environment()->BindAccumulator(context);
 }
@@ -957,8 +1209,10 @@
 void BytecodeGraphBuilder::VisitCreateWithContext() {
   Node* object =
       environment()->LookupRegister(bytecode_iterator().GetRegisterOperand(0));
+  Handle<ScopeInfo> scope_info = Handle<ScopeInfo>::cast(
+      bytecode_iterator().GetConstantForIndexOperand(1));
 
-  const Operator* op = javascript()->CreateWithContext();
+  const Operator* op = javascript()->CreateWithContext(scope_info);
   Node* context = NewNode(op, object, environment()->LookupAccumulator());
   environment()->BindAccumulator(context);
 }
@@ -1003,6 +1257,11 @@
       bytecode_iterator().GetConstantForIndexOperand(0));
   int literal_index = bytecode_iterator().GetIndexOperand(1);
   int literal_flags = bytecode_iterator().GetFlagOperand(2);
+  // Disable allocation site mementos. Only unoptimized code will collect
+  // feedback about allocation site. Once the code is optimized we expect the
+  // data to converge. So, we disable allocation site mementos in optimized
+  // code. We can revisit this when we have data to the contrary.
+  literal_flags |= ArrayLiteral::kDisableMementos;
   int number_of_elements = constant_elements->length();
   const Operator* op = javascript()->CreateLiteralArray(
       constant_elements, literal_flags, literal_index, number_of_elements);
@@ -1054,11 +1313,12 @@
   // Slot index of 0 is used indicate no feedback slot is available. Assert
   // the assumption that slot index 0 is never a valid feedback slot.
   STATIC_ASSERT(TypeFeedbackVector::kReservedIndexCount > 0);
-  VectorSlotPair feedback =
-      CreateVectorSlotPair(bytecode_iterator().GetIndexOperand(3));
+  int const slot_id = bytecode_iterator().GetIndexOperand(3);
+  VectorSlotPair feedback = CreateVectorSlotPair(slot_id);
 
+  float const frequency = ComputeCallFrequency(slot_id);
   const Operator* call = javascript()->CallFunction(
-      arg_count + 1, feedback, receiver_hint, tail_call_mode);
+      arg_count + 1, frequency, feedback, receiver_hint, tail_call_mode);
   Node* value = ProcessCallArguments(call, callee, receiver, arg_count + 1);
   environment()->BindAccumulator(value, &states);
 }
@@ -1142,13 +1402,13 @@
     const Operator* call_new_op, Node* callee, Node* new_target,
     interpreter::Register first_arg, size_t arity) {
   Node** all = local_zone()->NewArray<Node*>(arity);
-  all[0] = new_target;
+  all[0] = callee;
   int first_arg_index = first_arg.index();
   for (int i = 1; i < static_cast<int>(arity) - 1; ++i) {
     all[i] = environment()->LookupRegister(
         interpreter::Register(first_arg_index + i - 1));
   }
-  all[arity - 1] = callee;
+  all[arity - 1] = new_target;
   Node* value = MakeNode(call_new_op, static_cast<int>(arity), all, false);
   return value;
 }
@@ -1158,12 +1418,18 @@
   interpreter::Register callee_reg = bytecode_iterator().GetRegisterOperand(0);
   interpreter::Register first_arg = bytecode_iterator().GetRegisterOperand(1);
   size_t arg_count = bytecode_iterator().GetRegisterCountOperand(2);
+  // Slot index of 0 is used to indicate no feedback slot is available. Assert
+  // the assumption that slot index 0 is never a valid feedback slot.
+  STATIC_ASSERT(TypeFeedbackVector::kReservedIndexCount > 0);
+  int const slot_id = bytecode_iterator().GetIndexOperand(3);
+  VectorSlotPair feedback = CreateVectorSlotPair(slot_id);
 
   Node* new_target = environment()->LookupAccumulator();
   Node* callee = environment()->LookupRegister(callee_reg);
-  // TODO(turbofan): Pass the feedback here.
+
+  float const frequency = ComputeCallFrequency(slot_id);
   const Operator* call = javascript()->CallConstruct(
-      static_cast<int>(arg_count) + 2, VectorSlotPair());
+      static_cast<int>(arg_count) + 2, frequency, feedback);
   Node* value = ProcessCallNewArguments(call, callee, new_target, first_arg,
                                         arg_count + 2);
   environment()->BindAccumulator(value, &states);
@@ -1207,13 +1473,33 @@
     int operand_index) {
   FeedbackVectorSlot slot = feedback_vector()->ToSlot(
       bytecode_iterator().GetIndexOperand(operand_index));
-  DCHECK_EQ(FeedbackVectorSlotKind::GENERAL, feedback_vector()->GetKind(slot));
-  Object* feedback = feedback_vector()->Get(slot);
-  BinaryOperationHint hint = BinaryOperationHint::kAny;
-  if (feedback->IsSmi()) {
-    hint = BinaryOperationHintFromFeedback((Smi::cast(feedback))->value());
+  DCHECK_EQ(FeedbackVectorSlotKind::INTERPRETER_BINARYOP_IC,
+            feedback_vector()->GetKind(slot));
+  BinaryOpICNexus nexus(feedback_vector(), slot);
+  return nexus.GetBinaryOperationFeedback();
+}
+
+// Helper function to create compare operation hint from the recorded type
+// feedback.
+CompareOperationHint BytecodeGraphBuilder::GetCompareOperationHint() {
+  int slot_index = bytecode_iterator().GetIndexOperand(1);
+  if (slot_index == 0) {
+    return CompareOperationHint::kAny;
   }
-  return hint;
+  FeedbackVectorSlot slot =
+      feedback_vector()->ToSlot(bytecode_iterator().GetIndexOperand(1));
+  DCHECK_EQ(FeedbackVectorSlotKind::INTERPRETER_COMPARE_IC,
+            feedback_vector()->GetKind(slot));
+  CompareICNexus nexus(feedback_vector(), slot);
+  return nexus.GetCompareOperationFeedback();
+}
+
+float BytecodeGraphBuilder::ComputeCallFrequency(int slot_id) const {
+  if (slot_id >= TypeFeedbackVector::kReservedIndexCount) {
+    CallICNexus nexus(feedback_vector(), feedback_vector()->ToSlot(slot_id));
+    return nexus.ComputeCallFrequency() * invocation_frequency_;
+  }
+  return 0.0f;
 }
 
 void BytecodeGraphBuilder::VisitAdd() {
@@ -1379,38 +1665,31 @@
 }
 
 void BytecodeGraphBuilder::VisitTestEqual() {
-  CompareOperationHint hint = CompareOperationHint::kAny;
-  BuildCompareOp(javascript()->Equal(hint));
+  BuildCompareOp(javascript()->Equal(GetCompareOperationHint()));
 }
 
 void BytecodeGraphBuilder::VisitTestNotEqual() {
-  CompareOperationHint hint = CompareOperationHint::kAny;
-  BuildCompareOp(javascript()->NotEqual(hint));
+  BuildCompareOp(javascript()->NotEqual(GetCompareOperationHint()));
 }
 
 void BytecodeGraphBuilder::VisitTestEqualStrict() {
-  CompareOperationHint hint = CompareOperationHint::kAny;
-  BuildCompareOp(javascript()->StrictEqual(hint));
+  BuildCompareOp(javascript()->StrictEqual(GetCompareOperationHint()));
 }
 
 void BytecodeGraphBuilder::VisitTestLessThan() {
-  CompareOperationHint hint = CompareOperationHint::kAny;
-  BuildCompareOp(javascript()->LessThan(hint));
+  BuildCompareOp(javascript()->LessThan(GetCompareOperationHint()));
 }
 
 void BytecodeGraphBuilder::VisitTestGreaterThan() {
-  CompareOperationHint hint = CompareOperationHint::kAny;
-  BuildCompareOp(javascript()->GreaterThan(hint));
+  BuildCompareOp(javascript()->GreaterThan(GetCompareOperationHint()));
 }
 
 void BytecodeGraphBuilder::VisitTestLessThanOrEqual() {
-  CompareOperationHint hint = CompareOperationHint::kAny;
-  BuildCompareOp(javascript()->LessThanOrEqual(hint));
+  BuildCompareOp(javascript()->LessThanOrEqual(GetCompareOperationHint()));
 }
 
 void BytecodeGraphBuilder::VisitTestGreaterThanOrEqual() {
-  CompareOperationHint hint = CompareOperationHint::kAny;
-  BuildCompareOp(javascript()->GreaterThanOrEqual(hint));
+  BuildCompareOp(javascript()->GreaterThanOrEqual(GetCompareOperationHint()));
 }
 
 void BytecodeGraphBuilder::VisitTestIn() {
@@ -1444,37 +1723,28 @@
 
 void BytecodeGraphBuilder::VisitJumpConstant() { BuildJump(); }
 
+void BytecodeGraphBuilder::VisitJumpIfTrue() { BuildJumpIfTrue(); }
 
-void BytecodeGraphBuilder::VisitJumpIfTrue() {
-  BuildJumpIfEqual(jsgraph()->TrueConstant());
-}
+void BytecodeGraphBuilder::VisitJumpIfTrueConstant() { BuildJumpIfTrue(); }
 
-void BytecodeGraphBuilder::VisitJumpIfTrueConstant() {
-  BuildJumpIfEqual(jsgraph()->TrueConstant());
-}
+void BytecodeGraphBuilder::VisitJumpIfFalse() { BuildJumpIfFalse(); }
 
-void BytecodeGraphBuilder::VisitJumpIfFalse() {
-  BuildJumpIfEqual(jsgraph()->FalseConstant());
-}
-
-void BytecodeGraphBuilder::VisitJumpIfFalseConstant() {
-  BuildJumpIfEqual(jsgraph()->FalseConstant());
-}
+void BytecodeGraphBuilder::VisitJumpIfFalseConstant() { BuildJumpIfFalse(); }
 
 void BytecodeGraphBuilder::VisitJumpIfToBooleanTrue() {
-  BuildJumpIfToBooleanEqual(jsgraph()->TrueConstant());
+  BuildJumpIfToBooleanTrue();
 }
 
 void BytecodeGraphBuilder::VisitJumpIfToBooleanTrueConstant() {
-  BuildJumpIfToBooleanEqual(jsgraph()->TrueConstant());
+  BuildJumpIfToBooleanTrue();
 }
 
 void BytecodeGraphBuilder::VisitJumpIfToBooleanFalse() {
-  BuildJumpIfToBooleanEqual(jsgraph()->FalseConstant());
+  BuildJumpIfToBooleanFalse();
 }
 
 void BytecodeGraphBuilder::VisitJumpIfToBooleanFalseConstant() {
-  BuildJumpIfToBooleanEqual(jsgraph()->FalseConstant());
+  BuildJumpIfToBooleanFalse();
 }
 
 void BytecodeGraphBuilder::VisitJumpIfNotHole() { BuildJumpIfNotHole(); }
@@ -1499,21 +1769,14 @@
   BuildJumpIfEqual(jsgraph()->UndefinedConstant());
 }
 
+void BytecodeGraphBuilder::VisitJumpLoop() { BuildJump(); }
+
 void BytecodeGraphBuilder::VisitStackCheck() {
   FrameStateBeforeAndAfter states(this);
   Node* node = NewNode(javascript()->StackCheck());
   environment()->RecordAfterState(node, &states);
 }
 
-void BytecodeGraphBuilder::VisitOsrPoll() {
-  // TODO(4764): This should be moved into the {VisitBytecodes} once we merge
-  // the polling with existing bytecode. This will also guarantee that we are
-  // not missing the OSR entry point, which we wouldn't catch right now.
-  if (osr_ast_id_.ToInt() == bytecode_iterator().current_offset()) {
-    environment()->PrepareForOsr();
-  }
-}
-
 void BytecodeGraphBuilder::VisitReturn() {
   BuildLoopExitsForFunctionExit();
   Node* control =
@@ -1526,6 +1789,7 @@
   Node* call =
       NewNode(javascript()->CallRuntime(Runtime::kHandleDebuggerStatement));
   environment()->BindAccumulator(call, &states);
+  environment()->MarkAllRegistersLive();
 }
 
 // We cannot create a graph from the debugger copy of the bytecode array.
@@ -1545,13 +1809,15 @@
 
 void BytecodeGraphBuilder::VisitForInPrepare() { BuildForInPrepare(); }
 
-void BytecodeGraphBuilder::VisitForInDone() {
+void BytecodeGraphBuilder::VisitForInContinue() {
   FrameStateBeforeAndAfter states(this);
   Node* index =
       environment()->LookupRegister(bytecode_iterator().GetRegisterOperand(0));
   Node* cache_length =
       environment()->LookupRegister(bytecode_iterator().GetRegisterOperand(1));
-  Node* exit_cond = NewNode(javascript()->ForInDone(), index, cache_length);
+  Node* exit_cond =
+      NewNode(javascript()->LessThan(CompareOperationHint::kSignedSmall), index,
+              cache_length);
   environment()->BindAccumulator(exit_cond, &states);
 }
 
@@ -1578,7 +1844,8 @@
   FrameStateBeforeAndAfter states(this);
   Node* index =
       environment()->LookupRegister(bytecode_iterator().GetRegisterOperand(0));
-  index = NewNode(javascript()->ForInStep(), index);
+  index = NewNode(javascript()->Add(BinaryOperationHint::kSignedSmall), index,
+                  jsgraph()->OneConstant());
   environment()->BindAccumulator(index, &states);
 }
 
@@ -1681,6 +1948,28 @@
   set_environment(nullptr);
 }
 
+void BytecodeGraphBuilder::BuildOSRLoopEntryPoint(int current_offset) {
+  if (!osr_ast_id_.IsNone() && osr_ast_id_.ToInt() == current_offset) {
+    // For OSR add a special {OsrLoopEntry} node into the current loop header.
+    // It will be turned into a usable entry by the OSR deconstruction.
+    Environment* loop_env = merge_environments_[current_offset];
+    Environment* osr_env = loop_env->CopyForOsrEntry();
+    osr_env->PrepareForOsrEntry();
+    loop_env->Merge(osr_env);
+  }
+}
+
+void BytecodeGraphBuilder::BuildOSRNormalEntryPoint() {
+  if (!osr_ast_id_.IsNone()) {
+    // For OSR add an {OsrNormalEntry} as the top-level environment start.
+    // It will be replaced with {Dead} by the OSR deconstruction.
+    NewNode(common()->OsrNormalEntry());
+    // Note that the requested OSR entry point must be the target of a backward
+    // branch, otherwise there will not be a proper loop header available.
+    DCHECK(branch_analysis()->backward_branches_target(osr_ast_id_.ToInt()));
+  }
+}
+
 void BytecodeGraphBuilder::BuildLoopExitsForBranch(int target_offset) {
   int origin_offset = bytecode_iterator().current_offset();
   // Only build loop exits for forward edges.
@@ -1707,8 +1996,7 @@
   MergeIntoSuccessorEnvironment(bytecode_iterator().GetJumpTargetOffset());
 }
 
-
-void BytecodeGraphBuilder::BuildConditionalJump(Node* condition) {
+void BytecodeGraphBuilder::BuildJumpIf(Node* condition) {
   NewBranch(condition);
   Environment* if_false_environment = environment()->CopyForConditional();
   NewIfTrue();
@@ -1717,24 +2005,43 @@
   NewIfFalse();
 }
 
+void BytecodeGraphBuilder::BuildJumpIfNot(Node* condition) {
+  NewBranch(condition);
+  Environment* if_true_environment = environment()->CopyForConditional();
+  NewIfFalse();
+  MergeIntoSuccessorEnvironment(bytecode_iterator().GetJumpTargetOffset());
+  set_environment(if_true_environment);
+  NewIfTrue();
+}
 
 void BytecodeGraphBuilder::BuildJumpIfEqual(Node* comperand) {
   Node* accumulator = environment()->LookupAccumulator();
   Node* condition =
       NewNode(javascript()->StrictEqual(CompareOperationHint::kAny),
               accumulator, comperand);
-  BuildConditionalJump(condition);
+  BuildJumpIf(condition);
 }
 
+void BytecodeGraphBuilder::BuildJumpIfFalse() {
+  BuildJumpIfNot(environment()->LookupAccumulator());
+}
 
-void BytecodeGraphBuilder::BuildJumpIfToBooleanEqual(Node* comperand) {
+void BytecodeGraphBuilder::BuildJumpIfTrue() {
+  BuildJumpIf(environment()->LookupAccumulator());
+}
+
+void BytecodeGraphBuilder::BuildJumpIfToBooleanTrue() {
   Node* accumulator = environment()->LookupAccumulator();
-  Node* to_boolean =
-      NewNode(javascript()->ToBoolean(ToBooleanHint::kAny), accumulator);
   Node* condition =
-      NewNode(javascript()->StrictEqual(CompareOperationHint::kAny), to_boolean,
-              comperand);
-  BuildConditionalJump(condition);
+      NewNode(javascript()->ToBoolean(ToBooleanHint::kAny), accumulator);
+  BuildJumpIf(condition);
+}
+
+void BytecodeGraphBuilder::BuildJumpIfToBooleanFalse() {
+  Node* accumulator = environment()->LookupAccumulator();
+  Node* condition =
+      NewNode(javascript()->ToBoolean(ToBooleanHint::kAny), accumulator);
+  BuildJumpIfNot(condition);
 }
 
 void BytecodeGraphBuilder::BuildJumpIfNotHole() {
@@ -1742,10 +2049,7 @@
   Node* condition =
       NewNode(javascript()->StrictEqual(CompareOperationHint::kAny),
               accumulator, jsgraph()->TheHoleConstant());
-  Node* node =
-      NewNode(common()->Select(MachineRepresentation::kTagged), condition,
-              jsgraph()->FalseConstant(), jsgraph()->TrueConstant());
-  BuildConditionalJump(node);
+  BuildJumpIfNot(condition);
 }
 
 Node** BytecodeGraphBuilder::EnsureInputBufferSize(int size) {
diff --git a/src/compiler/bytecode-graph-builder.h b/src/compiler/bytecode-graph-builder.h
index 2f3acc1..53582f7 100644
--- a/src/compiler/bytecode-graph-builder.h
+++ b/src/compiler/bytecode-graph-builder.h
@@ -5,10 +5,11 @@
 #ifndef V8_COMPILER_BYTECODE_GRAPH_BUILDER_H_
 #define V8_COMPILER_BYTECODE_GRAPH_BUILDER_H_
 
-#include "src/compiler.h"
 #include "src/compiler/bytecode-branch-analysis.h"
 #include "src/compiler/bytecode-loop-analysis.h"
 #include "src/compiler/js-graph.h"
+#include "src/compiler/liveness-analyzer.h"
+#include "src/compiler/state-values-utils.h"
 #include "src/compiler/type-hint-analyzer.h"
 #include "src/interpreter/bytecode-array-iterator.h"
 #include "src/interpreter/bytecode-flags.h"
@@ -16,6 +17,9 @@
 
 namespace v8 {
 namespace internal {
+
+class CompilationInfo;
+
 namespace compiler {
 
 // The BytecodeGraphBuilder produces a high-level IR graph based on
@@ -23,7 +27,7 @@
 class BytecodeGraphBuilder {
  public:
   BytecodeGraphBuilder(Zone* local_zone, CompilationInfo* info,
-                       JSGraph* jsgraph);
+                       JSGraph* jsgraph, float invocation_frequency);
 
   // Creates a graph by visiting bytecodes.
   bool CreateGraph();
@@ -113,16 +117,22 @@
                                     interpreter::Register first_arg,
                                     size_t arity);
 
+  // Computes register liveness and replaces dead ones in frame states with the
+  // undefined values.
+  void ClearNonLiveSlotsInFrameStates();
+
   void BuildCreateLiteral(const Operator* op);
   void BuildCreateArguments(CreateArgumentsType type);
   Node* BuildLoadContextSlot();
-  Node* BuildLoadGlobal(TypeofMode typeof_mode);
+  Node* BuildLoadGlobal(uint32_t feedback_slot_index, TypeofMode typeof_mode);
   void BuildStoreGlobal(LanguageMode language_mode);
   Node* BuildNamedLoad();
   void BuildNamedStore(LanguageMode language_mode);
   Node* BuildKeyedLoad();
   void BuildKeyedStore(LanguageMode language_mode);
   void BuildLdaLookupSlot(TypeofMode typeof_mode);
+  void BuildLdaLookupContextSlot(TypeofMode typeof_mode);
+  void BuildLdaLookupGlobalSlot(TypeofMode typeof_mode);
   void BuildStaLookupSlot(LanguageMode language_mode);
   void BuildCall(TailCallMode tail_call_mode);
   void BuildThrow();
@@ -135,15 +145,30 @@
   void BuildForInNext();
   void BuildInvokeIntrinsic();
 
+  // Check the context chain for extensions, for lookup fast paths.
+  Environment* CheckContextExtensions(uint32_t depth);
+
   // Helper function to create binary operation hint from the recorded
   // type feedback.
   BinaryOperationHint GetBinaryOperationHint(int operand_index);
 
+  // Helper function to create compare operation hint from the recorded
+  // type feedback.
+  CompareOperationHint GetCompareOperationHint();
+
+  // Helper function to compute call frequency from the recorded type
+  // feedback.
+  float ComputeCallFrequency(int slot_id) const;
+
   // Control flow plumbing.
   void BuildJump();
-  void BuildConditionalJump(Node* condition);
+  void BuildJumpIf(Node* condition);
+  void BuildJumpIfNot(Node* condition);
   void BuildJumpIfEqual(Node* comperand);
-  void BuildJumpIfToBooleanEqual(Node* boolean_comperand);
+  void BuildJumpIfTrue();
+  void BuildJumpIfFalse();
+  void BuildJumpIfToBooleanTrue();
+  void BuildJumpIfToBooleanFalse();
   void BuildJumpIfNotHole();
 
   // Simulates control flow by forward-propagating environments.
@@ -154,6 +179,10 @@
   // Simulates control flow that exits the function body.
   void MergeControlToLeaveFunction(Node* exit);
 
+  // Builds entry points that are used by OSR deconstruction.
+  void BuildOSRLoopEntryPoint(int current_offset);
+  void BuildOSRNormalEntryPoint();
+
   // Builds loop exit nodes for every exited loop between the current bytecode
   // offset and {target_offset}.
   void BuildLoopExitsForBranch(int target_offset);
@@ -221,12 +250,19 @@
     loop_analysis_ = loop_analysis;
   }
 
+  LivenessAnalyzer* liveness_analyzer() { return &liveness_analyzer_; }
+
+  bool IsLivenessAnalysisEnabled() const {
+    return this->is_liveness_analysis_enabled_;
+  }
+
 #define DECLARE_VISIT_BYTECODE(name, ...) void Visit##name();
   BYTECODE_LIST(DECLARE_VISIT_BYTECODE)
 #undef DECLARE_VISIT_BYTECODE
 
   Zone* local_zone_;
   JSGraph* jsgraph_;
+  float const invocation_frequency_;
   Handle<BytecodeArray> bytecode_array_;
   Handle<HandlerTable> exception_handler_table_;
   Handle<TypeFeedbackVector> feedback_vector_;
@@ -258,6 +294,13 @@
   // Control nodes that exit the function body.
   ZoneVector<Node*> exit_controls_;
 
+  bool const is_liveness_analysis_enabled_;
+
+  StateValuesCache state_values_cache_;
+
+  // Analyzer of register liveness.
+  LivenessAnalyzer liveness_analyzer_;
+
   static int const kBinaryOperationHintIndex = 1;
   static int const kCountOperationHintIndex = 0;
   static int const kBinaryOperationSmiHintIndex = 2;
diff --git a/src/compiler/bytecode-loop-analysis.h b/src/compiler/bytecode-loop-analysis.h
index 59fabce..1a86d7b 100644
--- a/src/compiler/bytecode-loop-analysis.h
+++ b/src/compiler/bytecode-loop-analysis.h
@@ -6,7 +6,7 @@
 #define V8_COMPILER_BYTECODE_LOOP_ANALYSIS_H_
 
 #include "src/handles.h"
-#include "src/zone-containers.h"
+#include "src/zone/zone-containers.h"
 
 namespace v8 {
 namespace internal {
diff --git a/src/compiler/c-linkage.cc b/src/compiler/c-linkage.cc
index f79497a..690a52b 100644
--- a/src/compiler/c-linkage.cc
+++ b/src/compiler/c-linkage.cc
@@ -7,7 +7,7 @@
 
 #include "src/compiler/linkage.h"
 
-#include "src/zone.h"
+#include "src/zone/zone.h"
 
 namespace v8 {
 namespace internal {
diff --git a/src/compiler/code-assembler.cc b/src/compiler/code-assembler.cc
index 4dd7e79..ff7ef31 100644
--- a/src/compiler/code-assembler.cc
+++ b/src/compiler/code-assembler.cc
@@ -20,7 +20,7 @@
 #include "src/machine-type.h"
 #include "src/macro-assembler.h"
 #include "src/utils.h"
-#include "src/zone.h"
+#include "src/zone/zone.h"
 
 namespace v8 {
 namespace internal {
@@ -234,6 +234,13 @@
   return value;
 }
 
+Node* CodeAssembler::RoundIntPtrToFloat64(Node* value) {
+  if (raw_assembler_->machine()->Is64()) {
+    return raw_assembler_->RoundInt64ToFloat64(value);
+  }
+  return raw_assembler_->ChangeInt32ToFloat64(value);
+}
+
 #define DEFINE_CODE_ASSEMBLER_UNARY_OP(name) \
   Node* CodeAssembler::name(Node* a) { return raw_assembler_->name(a); }
 CODE_ASSEMBLER_UNARY_OP_LIST(DEFINE_CODE_ASSEMBLER_UNARY_OP)
@@ -299,6 +306,10 @@
                              IntPtrConstant(root_index * kPointerSize), value);
 }
 
+Node* CodeAssembler::Retain(Node* value) {
+  return raw_assembler_->Retain(value);
+}
+
 Node* CodeAssembler::Projection(int index, Node* value) {
   return raw_assembler_->Projection(index, value);
 }
@@ -425,6 +436,14 @@
                                           arg5, context);
 }
 
+Node* CodeAssembler::TailCallRuntime(Runtime::FunctionId function_id,
+                                     Node* context, Node* arg1, Node* arg2,
+                                     Node* arg3, Node* arg4, Node* arg5,
+                                     Node* arg6) {
+  return raw_assembler_->TailCallRuntime6(function_id, arg1, arg2, arg3, arg4,
+                                          arg5, arg6, context);
+}
+
 Node* CodeAssembler::CallStub(Callable const& callable, Node* context,
                               Node* arg1, size_t result_size) {
   Node* target = HeapConstant(callable.code());
@@ -446,6 +465,14 @@
                   result_size);
 }
 
+Node* CodeAssembler::CallStub(Callable const& callable, Node* context,
+                              Node* arg1, Node* arg2, Node* arg3, Node* arg4,
+                              size_t result_size) {
+  Node* target = HeapConstant(callable.code());
+  return CallStub(callable.descriptor(), target, context, arg1, arg2, arg3,
+                  arg4, result_size);
+}
+
 Node* CodeAssembler::CallStubN(Callable const& callable, Node** args,
                                size_t result_size) {
   Node* target = HeapConstant(callable.code());
@@ -638,9 +665,11 @@
 }
 
 Node* CodeAssembler::CallStubN(const CallInterfaceDescriptor& descriptor,
-                               Node* target, Node** args, size_t result_size) {
+                               int js_parameter_count, Node* target,
+                               Node** args, size_t result_size) {
   CallDescriptor* call_descriptor = Linkage::GetStubCallDescriptor(
-      isolate(), zone(), descriptor, descriptor.GetStackParameterCount(),
+      isolate(), zone(), descriptor,
+      descriptor.GetStackParameterCount() + js_parameter_count,
       CallDescriptor::kNoFlags, Operator::kNoProperties,
       MachineType::AnyTagged(), result_size);
 
@@ -745,6 +774,26 @@
 }
 
 Node* CodeAssembler::TailCallStub(const CallInterfaceDescriptor& descriptor,
+                                  Node* target, Node* context, Node* arg1,
+                                  Node* arg2, Node* arg3, Node* arg4,
+                                  Node* arg5, size_t result_size) {
+  CallDescriptor* call_descriptor = Linkage::GetStubCallDescriptor(
+      isolate(), zone(), descriptor, descriptor.GetStackParameterCount(),
+      CallDescriptor::kSupportsTailCalls, Operator::kNoProperties,
+      MachineType::AnyTagged(), result_size);
+
+  Node** args = zone()->NewArray<Node*>(6);
+  args[0] = arg1;
+  args[1] = arg2;
+  args[2] = arg3;
+  args[3] = arg4;
+  args[4] = arg5;
+  args[5] = context;
+
+  return raw_assembler_->TailCallN(call_descriptor, target, args);
+}
+
+Node* CodeAssembler::TailCallStub(const CallInterfaceDescriptor& descriptor,
                                   Node* target, Node* context, const Arg& arg1,
                                   const Arg& arg2, const Arg& arg3,
                                   const Arg& arg4, size_t result_size) {
@@ -803,10 +852,6 @@
                             Node* function, Node* receiver,
                             size_t result_size) {
   const int argc = 0;
-  CallDescriptor* call_descriptor = Linkage::GetStubCallDescriptor(
-      isolate(), zone(), callable.descriptor(), argc + 1,
-      CallDescriptor::kNoFlags, Operator::kNoProperties,
-      MachineType::AnyTagged(), result_size);
   Node* target = HeapConstant(callable.code());
 
   Node** args = zone()->NewArray<Node*>(argc + 4);
@@ -815,17 +860,13 @@
   args[2] = receiver;
   args[3] = context;
 
-  return CallN(call_descriptor, target, args);
+  return CallStubN(callable.descriptor(), argc + 1, target, args, result_size);
 }
 
 Node* CodeAssembler::CallJS(Callable const& callable, Node* context,
                             Node* function, Node* receiver, Node* arg1,
                             size_t result_size) {
   const int argc = 1;
-  CallDescriptor* call_descriptor = Linkage::GetStubCallDescriptor(
-      isolate(), zone(), callable.descriptor(), argc + 1,
-      CallDescriptor::kNoFlags, Operator::kNoProperties,
-      MachineType::AnyTagged(), result_size);
   Node* target = HeapConstant(callable.code());
 
   Node** args = zone()->NewArray<Node*>(argc + 4);
@@ -835,17 +876,13 @@
   args[3] = arg1;
   args[4] = context;
 
-  return CallN(call_descriptor, target, args);
+  return CallStubN(callable.descriptor(), argc + 1, target, args, result_size);
 }
 
 Node* CodeAssembler::CallJS(Callable const& callable, Node* context,
                             Node* function, Node* receiver, Node* arg1,
                             Node* arg2, size_t result_size) {
   const int argc = 2;
-  CallDescriptor* call_descriptor = Linkage::GetStubCallDescriptor(
-      isolate(), zone(), callable.descriptor(), argc + 1,
-      CallDescriptor::kNoFlags, Operator::kNoProperties,
-      MachineType::AnyTagged(), result_size);
   Node* target = HeapConstant(callable.code());
 
   Node** args = zone()->NewArray<Node*>(argc + 4);
@@ -856,7 +893,15 @@
   args[4] = arg2;
   args[5] = context;
 
-  return CallN(call_descriptor, target, args);
+  return CallStubN(callable.descriptor(), argc + 1, target, args, result_size);
+}
+
+Node* CodeAssembler::CallCFunction2(MachineType return_type,
+                                    MachineType arg0_type,
+                                    MachineType arg1_type, Node* function,
+                                    Node* arg0, Node* arg1) {
+  return raw_assembler_->CallCFunction2(return_type, arg0_type, arg1_type,
+                                        function, arg0, arg1);
 }
 
 void CodeAssembler::Goto(CodeAssembler::Label* label) {
diff --git a/src/compiler/code-assembler.h b/src/compiler/code-assembler.h
index bea999b..8372334 100644
--- a/src/compiler/code-assembler.h
+++ b/src/compiler/code-assembler.h
@@ -15,7 +15,7 @@
 #include "src/heap/heap.h"
 #include "src/machine-type.h"
 #include "src/runtime/runtime.h"
-#include "src/zone-containers.h"
+#include "src/zone/zone-containers.h"
 
 namespace v8 {
 namespace internal {
@@ -54,8 +54,10 @@
   V(IntPtrGreaterThanOrEqual)                    \
   V(IntPtrEqual)                                 \
   V(Uint32LessThan)                              \
+  V(Uint32LessThanOrEqual)                       \
   V(Uint32GreaterThanOrEqual)                    \
   V(UintPtrLessThan)                             \
+  V(UintPtrGreaterThan)                          \
   V(UintPtrGreaterThanOrEqual)                   \
   V(WordEqual)                                   \
   V(WordNotEqual)                                \
@@ -133,7 +135,9 @@
   V(Float64Tanh)                        \
   V(Float64ExtractLowWord32)            \
   V(Float64ExtractHighWord32)           \
+  V(BitcastTaggedToWord)                \
   V(BitcastWordToTagged)                \
+  V(BitcastWordToTaggedSigned)          \
   V(TruncateFloat64ToFloat32)           \
   V(TruncateFloat64ToWord32)            \
   V(TruncateInt64ToInt32)               \
@@ -144,10 +148,12 @@
   V(ChangeUint32ToFloat64)              \
   V(ChangeUint32ToUint64)               \
   V(RoundFloat64ToInt32)                \
+  V(Float64SilenceNaN)                  \
   V(Float64RoundDown)                   \
   V(Float64RoundUp)                     \
   V(Float64RoundTruncate)               \
-  V(Word32Clz)
+  V(Word32Clz)                          \
+  V(Word32BinaryNot)
 
 // A "public" interface used by components outside of compiler directory to
 // create code objects with TurboFan's backend. This class is mostly a thin shim
@@ -283,11 +289,19 @@
   CODE_ASSEMBLER_UNARY_OP_LIST(DECLARE_CODE_ASSEMBLER_UNARY_OP)
 #undef DECLARE_CODE_ASSEMBLER_UNARY_OP
 
+  // Changes an intptr_t to a double, e.g. for storing an element index
+  // outside Smi range in a HeapNumber. Lossless on 32-bit,
+  // rounds on 64-bit (which doesn't affect valid element indices).
+  Node* RoundIntPtrToFloat64(Node* value);
   // No-op on 32-bit, otherwise zero extend.
   Node* ChangeUint32ToWord(Node* value);
   // No-op on 32-bit, otherwise sign extend.
   Node* ChangeInt32ToIntPtr(Node* value);
 
+  // No-op that guarantees that the value is kept alive till this point even
+  // if GC happens.
+  Node* Retain(Node* value);
+
   // Projections
   Node* Projection(int index, Node* value);
 
@@ -315,6 +329,9 @@
   Node* TailCallRuntime(Runtime::FunctionId function_id, Node* context,
                         Node* arg1, Node* arg2, Node* arg3, Node* arg4,
                         Node* arg5);
+  Node* TailCallRuntime(Runtime::FunctionId function_id, Node* context,
+                        Node* arg1, Node* arg2, Node* arg3, Node* arg4,
+                        Node* arg5, Node* arg6);
 
   // A pair of a zero-based argument index and a value.
   // It helps writing arguments order independent code.
@@ -331,6 +348,8 @@
                  Node* arg2, size_t result_size = 1);
   Node* CallStub(Callable const& callable, Node* context, Node* arg1,
                  Node* arg2, Node* arg3, size_t result_size = 1);
+  Node* CallStub(Callable const& callable, Node* context, Node* arg1,
+                 Node* arg2, Node* arg3, Node* arg4, size_t result_size = 1);
   Node* CallStubN(Callable const& callable, Node** args,
                   size_t result_size = 1);
 
@@ -364,8 +383,13 @@
                  const Arg& arg3, const Arg& arg4, const Arg& arg5,
                  size_t result_size = 1);
 
+  Node* CallStubN(const CallInterfaceDescriptor& descriptor,
+                  int js_parameter_count, Node* target, Node** args,
+                  size_t result_size = 1);
   Node* CallStubN(const CallInterfaceDescriptor& descriptor, Node* target,
-                  Node** args, size_t result_size = 1);
+                  Node** args, size_t result_size = 1) {
+    return CallStubN(descriptor, 0, target, args, result_size);
+  }
 
   Node* TailCallStub(Callable const& callable, Node* context, Node* arg1,
                      size_t result_size = 1);
@@ -387,6 +411,9 @@
   Node* TailCallStub(const CallInterfaceDescriptor& descriptor, Node* target,
                      Node* context, Node* arg1, Node* arg2, Node* arg3,
                      Node* arg4, size_t result_size = 1);
+  Node* TailCallStub(const CallInterfaceDescriptor& descriptor, Node* target,
+                     Node* context, Node* arg1, Node* arg2, Node* arg3,
+                     Node* arg4, Node* arg5, size_t result_size = 1);
 
   Node* TailCallStub(const CallInterfaceDescriptor& descriptor, Node* target,
                      Node* context, const Arg& arg1, const Arg& arg2,
@@ -406,6 +433,11 @@
   Node* CallJS(Callable const& callable, Node* context, Node* function,
                Node* receiver, Node* arg1, Node* arg2, size_t result_size = 1);
 
+  // Call to a C function with two arguments.
+  Node* CallCFunction2(MachineType return_type, MachineType arg0_type,
+                       MachineType arg1_type, Node* function, Node* arg0,
+                       Node* arg1);
+
   // Exception handling support.
   void GotoIfException(Node* node, Label* if_exception,
                        Variable* exception_var = nullptr);
diff --git a/src/compiler/code-generator-impl.h b/src/compiler/code-generator-impl.h
index 4dccdc9..8bf3a9e 100644
--- a/src/compiler/code-generator-impl.h
+++ b/src/compiler/code-generator-impl.h
@@ -170,15 +170,17 @@
 // Eager deoptimization exit.
 class DeoptimizationExit : public ZoneObject {
  public:
-  explicit DeoptimizationExit(int deoptimization_id)
-      : deoptimization_id_(deoptimization_id) {}
+  explicit DeoptimizationExit(int deoptimization_id, SourcePosition pos)
+      : deoptimization_id_(deoptimization_id), pos_(pos) {}
 
   int deoptimization_id() const { return deoptimization_id_; }
   Label* label() { return &label_; }
+  SourcePosition pos() const { return pos_; }
 
  private:
   int const deoptimization_id_;
   Label label_;
+  SourcePosition const pos_;
 };
 
 // Generator for out-of-line code that is emitted after the main code is done.
diff --git a/src/compiler/code-generator.cc b/src/compiler/code-generator.cc
index 03136a7..043582b 100644
--- a/src/compiler/code-generator.cc
+++ b/src/compiler/code-generator.cc
@@ -6,6 +6,7 @@
 
 #include "src/address-map.h"
 #include "src/base/adapters.h"
+#include "src/compilation-info.h"
 #include "src/compiler/code-generator-impl.h"
 #include "src/compiler/linkage.h"
 #include "src/compiler/pipeline.h"
@@ -63,6 +64,8 @@
   CreateFrameAccessState(frame);
 }
 
+Isolate* CodeGenerator::isolate() const { return info_->isolate(); }
+
 void CodeGenerator::CreateFrameAccessState(Frame* frame) {
   FinishFrame(frame);
   frame_access_state_ = new (code()->zone()) FrameAccessState(frame);
@@ -185,7 +188,8 @@
   // Assemble all eager deoptimization exits.
   for (DeoptimizationExit* exit : deoptimization_exits_) {
     masm()->bind(exit->label());
-    AssembleDeoptimizerCall(exit->deoptimization_id(), Deoptimizer::EAGER);
+    AssembleDeoptimizerCall(exit->deoptimization_id(), Deoptimizer::EAGER,
+                            exit->pos());
   }
 
   // Ensure there is space for lazy deoptimization in the code.
@@ -805,7 +809,7 @@
     } else if (type == MachineType::Uint8() || type == MachineType::Uint16() ||
                type == MachineType::Uint32()) {
       translation->StoreUint32StackSlot(LocationOperand::cast(op)->index());
-    } else if (type.representation() == MachineRepresentation::kTagged) {
+    } else if (IsAnyTagged(type.representation())) {
       translation->StoreStackSlot(LocationOperand::cast(op)->index());
     } else {
       CHECK(false);
@@ -827,7 +831,7 @@
     } else if (type == MachineType::Uint8() || type == MachineType::Uint16() ||
                type == MachineType::Uint32()) {
       translation->StoreUint32Register(converter.ToRegister(op));
-    } else if (type.representation() == MachineRepresentation::kTagged) {
+    } else if (IsAnyTagged(type.representation())) {
       translation->StoreRegister(converter.ToRegister(op));
     } else {
       CHECK(false);
@@ -846,7 +850,8 @@
     Handle<Object> constant_object;
     switch (constant.type()) {
       case Constant::kInt32:
-        if (type.representation() == MachineRepresentation::kTagged) {
+        if (type.representation() == MachineRepresentation::kTagged ||
+            type.representation() == MachineRepresentation::kTaggedSigned) {
           // When pointers are 4 bytes, we can use int32 constants to represent
           // Smis.
           DCHECK_EQ(4, kPointerSize);
@@ -868,24 +873,33 @@
       case Constant::kInt64:
         // When pointers are 8 bytes, we can use int64 constants to represent
         // Smis.
-        DCHECK_EQ(type.representation(), MachineRepresentation::kTagged);
+        DCHECK(type.representation() == MachineRepresentation::kTagged ||
+               type.representation() == MachineRepresentation::kTaggedSigned);
         DCHECK_EQ(8, kPointerSize);
         constant_object =
             handle(reinterpret_cast<Smi*>(constant.ToInt64()), isolate());
         DCHECK(constant_object->IsSmi());
         break;
       case Constant::kFloat32:
-        DCHECK(type.representation() == MachineRepresentation::kFloat32 ||
-               type.representation() == MachineRepresentation::kTagged);
+        if (type.representation() == MachineRepresentation::kTaggedSigned) {
+          DCHECK(IsSmiDouble(constant.ToFloat32()));
+        } else {
+          DCHECK(type.representation() == MachineRepresentation::kFloat32 ||
+                 CanBeTaggedPointer(type.representation()));
+        }
         constant_object = isolate()->factory()->NewNumber(constant.ToFloat32());
         break;
       case Constant::kFloat64:
-        DCHECK(type.representation() == MachineRepresentation::kFloat64 ||
-               type.representation() == MachineRepresentation::kTagged);
+        if (type.representation() == MachineRepresentation::kTaggedSigned) {
+          DCHECK(IsSmiDouble(constant.ToFloat64()));
+        } else {
+          DCHECK(type.representation() == MachineRepresentation::kFloat64 ||
+                 CanBeTaggedPointer(type.representation()));
+        }
         constant_object = isolate()->factory()->NewNumber(constant.ToFloat64());
         break;
       case Constant::kHeapObject:
-        DCHECK(type.representation() == MachineRepresentation::kTagged);
+        DCHECK(CanBeTaggedPointer(type.representation()));
         constant_object = constant.ToHeapObject();
         break;
       default:
@@ -911,8 +925,8 @@
     Instruction* instr, size_t frame_state_offset) {
   int const deoptimization_id = BuildTranslation(
       instr, -1, frame_state_offset, OutputFrameStateCombine::Ignore());
-  DeoptimizationExit* const exit =
-      new (zone()) DeoptimizationExit(deoptimization_id);
+  DeoptimizationExit* const exit = new (zone())
+      DeoptimizationExit(deoptimization_id, current_source_position_);
   deoptimization_exits_.push_back(exit);
   return exit;
 }
diff --git a/src/compiler/code-generator.h b/src/compiler/code-generator.h
index 21c13f8..3032163 100644
--- a/src/compiler/code-generator.h
+++ b/src/compiler/code-generator.h
@@ -5,7 +5,6 @@
 #ifndef V8_COMPILER_CODE_GENERATOR_H_
 #define V8_COMPILER_CODE_GENERATOR_H_
 
-#include "src/compiler.h"
 #include "src/compiler/gap-resolver.h"
 #include "src/compiler/instruction.h"
 #include "src/compiler/unwinding-info-writer.h"
@@ -16,6 +15,9 @@
 
 namespace v8 {
 namespace internal {
+
+class CompilationInfo;
+
 namespace compiler {
 
 // Forward declarations.
@@ -58,7 +60,7 @@
   InstructionSequence* code() const { return code_; }
   FrameAccessState* frame_access_state() const { return frame_access_state_; }
   const Frame* frame() const { return frame_access_state_->frame(); }
-  Isolate* isolate() const { return info_->isolate(); }
+  Isolate* isolate() const;
   Linkage* linkage() const { return linkage_; }
 
   Label* GetLabel(RpoNumber rpo) { return &labels_[rpo.ToSize()]; }
@@ -118,7 +120,8 @@
   void AssembleArchTableSwitch(Instruction* instr);
 
   CodeGenResult AssembleDeoptimizerCall(int deoptimization_id,
-                                        Deoptimizer::BailoutType bailout_type);
+                                        Deoptimizer::BailoutType bailout_type,
+                                        SourcePosition pos);
 
   // Generates an architecture-specific, descriptor-specific prologue
   // to set up a stack frame.
diff --git a/src/compiler/common-operator-reducer.cc b/src/compiler/common-operator-reducer.cc
index 9527c75..c5ced20 100644
--- a/src/compiler/common-operator-reducer.cc
+++ b/src/compiler/common-operator-reducer.cc
@@ -77,8 +77,12 @@
   // Swap IfTrue/IfFalse on {branch} if {cond} is a BooleanNot and use the input
   // to BooleanNot as new condition for {branch}. Note we assume that {cond} was
   // already properly optimized before we get here (as guaranteed by the graph
-  // reduction logic).
-  if (cond->opcode() == IrOpcode::kBooleanNot) {
+  // reduction logic). The same applies if {cond} is a Select acting as boolean
+  // not (i.e. true being returned in the false case and vice versa).
+  if (cond->opcode() == IrOpcode::kBooleanNot ||
+      (cond->opcode() == IrOpcode::kSelect &&
+       DecideCondition(cond->InputAt(1)) == Decision::kFalse &&
+       DecideCondition(cond->InputAt(2)) == Decision::kTrue)) {
     for (Node* const use : node->uses()) {
       switch (use->opcode()) {
         case IrOpcode::kIfTrue:
diff --git a/src/compiler/common-operator.cc b/src/compiler/common-operator.cc
index f732375..e57160a 100644
--- a/src/compiler/common-operator.cc
+++ b/src/compiler/common-operator.cc
@@ -10,7 +10,7 @@
 #include "src/compiler/opcodes.h"
 #include "src/compiler/operator.h"
 #include "src/handles-inl.h"
-#include "src/zone.h"
+#include "src/zone/zone.h"
 
 namespace v8 {
 namespace internal {
diff --git a/src/compiler/common-operator.h b/src/compiler/common-operator.h
index 9e4d259..2db0bfa 100644
--- a/src/compiler/common-operator.h
+++ b/src/compiler/common-operator.h
@@ -9,7 +9,7 @@
 #include "src/compiler/frame-states.h"
 #include "src/deoptimize-reason.h"
 #include "src/machine-type.h"
-#include "src/zone-containers.h"
+#include "src/zone/zone-containers.h"
 
 namespace v8 {
 namespace internal {
@@ -19,7 +19,7 @@
 class CallDescriptor;
 struct CommonOperatorGlobalCache;
 class Operator;
-
+class Type;
 
 // Prediction hint for branches.
 enum class BranchHint : uint8_t { kNone, kTrue, kFalse };
diff --git a/src/compiler/control-equivalence.h b/src/compiler/control-equivalence.h
index 478e48b..4fb9c27 100644
--- a/src/compiler/control-equivalence.h
+++ b/src/compiler/control-equivalence.h
@@ -7,7 +7,7 @@
 
 #include "src/compiler/graph.h"
 #include "src/compiler/node.h"
-#include "src/zone-containers.h"
+#include "src/zone/zone-containers.h"
 
 namespace v8 {
 namespace internal {
diff --git a/src/compiler/control-flow-optimizer.h b/src/compiler/control-flow-optimizer.h
index f72fa58..61785a0 100644
--- a/src/compiler/control-flow-optimizer.h
+++ b/src/compiler/control-flow-optimizer.h
@@ -6,7 +6,7 @@
 #define V8_COMPILER_CONTROL_FLOW_OPTIMIZER_H_
 
 #include "src/compiler/node-marker.h"
-#include "src/zone-containers.h"
+#include "src/zone/zone-containers.h"
 
 namespace v8 {
 namespace internal {
diff --git a/src/compiler/effect-control-linearizer.cc b/src/compiler/effect-control-linearizer.cc
index 9cc6ddc..4e53e5d 100644
--- a/src/compiler/effect-control-linearizer.cc
+++ b/src/compiler/effect-control-linearizer.cc
@@ -265,7 +265,6 @@
     Node* phi_false = graph->NewNode(phi->op(), input_count + 1, inputs);
     if (phi->UseCount() == 0) {
       DCHECK_EQ(phi->opcode(), IrOpcode::kEffectPhi);
-      DCHECK_EQ(input_count, block->SuccessorCount());
     } else {
       for (Edge edge : phi->use_edges()) {
         Node* control = NodeProperties::GetControlInput(edge.from());
@@ -616,6 +615,9 @@
     case IrOpcode::kChangeTaggedToFloat64:
       state = LowerChangeTaggedToFloat64(node, *effect, *control);
       break;
+    case IrOpcode::kTruncateTaggedToBit:
+      state = LowerTruncateTaggedToBit(node, *effect, *control);
+      break;
     case IrOpcode::kTruncateTaggedToFloat64:
       state = LowerTruncateTaggedToFloat64(node, *effect, *control);
       break;
@@ -634,11 +636,8 @@
     case IrOpcode::kCheckIf:
       state = LowerCheckIf(node, frame_state, *effect, *control);
       break;
-    case IrOpcode::kCheckTaggedPointer:
-      state = LowerCheckTaggedPointer(node, frame_state, *effect, *control);
-      break;
-    case IrOpcode::kCheckTaggedSigned:
-      state = LowerCheckTaggedSigned(node, frame_state, *effect, *control);
+    case IrOpcode::kCheckHeapObject:
+      state = LowerCheckHeapObject(node, frame_state, *effect, *control);
       break;
     case IrOpcode::kCheckedInt32Add:
       state = LowerCheckedInt32Add(node, frame_state, *effect, *control);
@@ -661,9 +660,17 @@
     case IrOpcode::kCheckedInt32Mul:
       state = LowerCheckedInt32Mul(node, frame_state, *effect, *control);
       break;
+    case IrOpcode::kCheckedInt32ToTaggedSigned:
+      state =
+          LowerCheckedInt32ToTaggedSigned(node, frame_state, *effect, *control);
+      break;
     case IrOpcode::kCheckedUint32ToInt32:
       state = LowerCheckedUint32ToInt32(node, frame_state, *effect, *control);
       break;
+    case IrOpcode::kCheckedUint32ToTaggedSigned:
+      state = LowerCheckedUint32ToTaggedSigned(node, frame_state, *effect,
+                                               *control);
+      break;
     case IrOpcode::kCheckedFloat64ToInt32:
       state = LowerCheckedFloat64ToInt32(node, frame_state, *effect, *control);
       break;
@@ -677,6 +684,10 @@
     case IrOpcode::kCheckedTaggedToFloat64:
       state = LowerCheckedTaggedToFloat64(node, frame_state, *effect, *control);
       break;
+    case IrOpcode::kCheckedTaggedToTaggedSigned:
+      state = LowerCheckedTaggedToTaggedSigned(node, frame_state, *effect,
+                                               *control);
+      break;
     case IrOpcode::kTruncateTaggedToWord32:
       state = LowerTruncateTaggedToWord32(node, *effect, *control);
       break;
@@ -702,12 +713,27 @@
     case IrOpcode::kObjectIsUndetectable:
       state = LowerObjectIsUndetectable(node, *effect, *control);
       break;
+    case IrOpcode::kArrayBufferWasNeutered:
+      state = LowerArrayBufferWasNeutered(node, *effect, *control);
+      break;
     case IrOpcode::kStringFromCharCode:
       state = LowerStringFromCharCode(node, *effect, *control);
       break;
+    case IrOpcode::kStringFromCodePoint:
+      state = LowerStringFromCodePoint(node, *effect, *control);
+      break;
     case IrOpcode::kStringCharCodeAt:
       state = LowerStringCharCodeAt(node, *effect, *control);
       break;
+    case IrOpcode::kStringEqual:
+      state = LowerStringEqual(node, *effect, *control);
+      break;
+    case IrOpcode::kStringLessThan:
+      state = LowerStringLessThan(node, *effect, *control);
+      break;
+    case IrOpcode::kStringLessThanOrEqual:
+      state = LowerStringLessThanOrEqual(node, *effect, *control);
+      break;
     case IrOpcode::kCheckFloat64Hole:
       state = LowerCheckFloat64Hole(node, frame_state, *effect, *control);
       break;
@@ -762,75 +788,8 @@
 EffectControlLinearizer::ValueEffectControl
 EffectControlLinearizer::LowerChangeFloat64ToTagged(Node* node, Node* effect,
                                                     Node* control) {
-  CheckForMinusZeroMode mode = CheckMinusZeroModeOf(node->op());
   Node* value = node->InputAt(0);
-
-  Node* value32 = graph()->NewNode(machine()->RoundFloat64ToInt32(), value);
-  Node* check_same = graph()->NewNode(
-      machine()->Float64Equal(), value,
-      graph()->NewNode(machine()->ChangeInt32ToFloat64(), value32));
-  Node* branch_same = graph()->NewNode(common()->Branch(), check_same, control);
-
-  Node* if_smi = graph()->NewNode(common()->IfTrue(), branch_same);
-  Node* vsmi;
-  Node* if_box = graph()->NewNode(common()->IfFalse(), branch_same);
-
-  if (mode == CheckForMinusZeroMode::kCheckForMinusZero) {
-    // Check if {value} is -0.
-    Node* check_zero = graph()->NewNode(machine()->Word32Equal(), value32,
-                                        jsgraph()->Int32Constant(0));
-    Node* branch_zero = graph()->NewNode(common()->Branch(BranchHint::kFalse),
-                                         check_zero, if_smi);
-
-    Node* if_zero = graph()->NewNode(common()->IfTrue(), branch_zero);
-    Node* if_notzero = graph()->NewNode(common()->IfFalse(), branch_zero);
-
-    // In case of 0, we need to check the high bits for the IEEE -0 pattern.
-    Node* check_negative = graph()->NewNode(
-        machine()->Int32LessThan(),
-        graph()->NewNode(machine()->Float64ExtractHighWord32(), value),
-        jsgraph()->Int32Constant(0));
-    Node* branch_negative = graph()->NewNode(
-        common()->Branch(BranchHint::kFalse), check_negative, if_zero);
-
-    Node* if_negative = graph()->NewNode(common()->IfTrue(), branch_negative);
-    Node* if_notnegative =
-        graph()->NewNode(common()->IfFalse(), branch_negative);
-
-    // We need to create a box for negative 0.
-    if_smi = graph()->NewNode(common()->Merge(2), if_notzero, if_notnegative);
-    if_box = graph()->NewNode(common()->Merge(2), if_box, if_negative);
-  }
-
-  // On 64-bit machines we can just wrap the 32-bit integer in a smi, for 32-bit
-  // machines we need to deal with potential overflow and fallback to boxing.
-  if (machine()->Is64()) {
-    vsmi = ChangeInt32ToSmi(value32);
-  } else {
-    Node* smi_tag = graph()->NewNode(machine()->Int32AddWithOverflow(), value32,
-                                     value32, if_smi);
-
-    Node* check_ovf =
-        graph()->NewNode(common()->Projection(1), smi_tag, if_smi);
-    Node* branch_ovf = graph()->NewNode(common()->Branch(BranchHint::kFalse),
-                                        check_ovf, if_smi);
-
-    Node* if_ovf = graph()->NewNode(common()->IfTrue(), branch_ovf);
-    if_box = graph()->NewNode(common()->Merge(2), if_ovf, if_box);
-
-    if_smi = graph()->NewNode(common()->IfFalse(), branch_ovf);
-    vsmi = graph()->NewNode(common()->Projection(0), smi_tag, if_smi);
-  }
-
-  // Allocate the box for the {value}.
-  ValueEffectControl box = AllocateHeapNumberWithValue(value, effect, if_box);
-
-  control = graph()->NewNode(common()->Merge(2), if_smi, box.control);
-  value = graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
-                           vsmi, box.value, control);
-  effect =
-      graph()->NewNode(common()->EffectPhi(2), effect, box.effect, control);
-  return ValueEffectControl(value, effect, control);
+  return AllocateHeapNumberWithValue(value, effect, control);
 }
 
 EffectControlLinearizer::ValueEffectControl
@@ -939,6 +898,157 @@
 }
 
 EffectControlLinearizer::ValueEffectControl
+EffectControlLinearizer::LowerTruncateTaggedToBit(Node* node, Node* effect,
+                                                  Node* control) {
+  Node* value = node->InputAt(0);
+  Node* one = jsgraph()->Int32Constant(1);
+  Node* zero = jsgraph()->Int32Constant(0);
+  Node* fzero = jsgraph()->Float64Constant(0.0);
+
+  // Collect effect/control/value triples.
+  int count = 0;
+  Node* values[7];
+  Node* effects[7];
+  Node* controls[6];
+
+  // Check if {value} is a Smi.
+  Node* check_smi = ObjectIsSmi(value);
+  Node* branch_smi = graph()->NewNode(common()->Branch(BranchHint::kFalse),
+                                      check_smi, control);
+
+  // If {value} is a Smi, then we only need to check that it's not zero.
+  Node* if_smi = graph()->NewNode(common()->IfTrue(), branch_smi);
+  Node* esmi = effect;
+  {
+    controls[count] = if_smi;
+    effects[count] = esmi;
+    values[count] =
+        graph()->NewNode(machine()->Word32Equal(),
+                         graph()->NewNode(machine()->WordEqual(), value,
+                                          jsgraph()->ZeroConstant()),
+                         zero);
+    count++;
+  }
+  control = graph()->NewNode(common()->IfFalse(), branch_smi);
+
+  // Load the map instance type of {value}.
+  Node* value_map = effect = graph()->NewNode(
+      simplified()->LoadField(AccessBuilder::ForMap()), value, effect, control);
+  Node* value_instance_type = effect = graph()->NewNode(
+      simplified()->LoadField(AccessBuilder::ForMapInstanceType()), value_map,
+      effect, control);
+
+  // Check if {value} is an Oddball.
+  Node* check_oddball =
+      graph()->NewNode(machine()->Word32Equal(), value_instance_type,
+                       jsgraph()->Int32Constant(ODDBALL_TYPE));
+  Node* branch_oddball = graph()->NewNode(common()->Branch(BranchHint::kTrue),
+                                          check_oddball, control);
+
+  // The only Oddball {value} that is trueish is true itself.
+  Node* if_oddball = graph()->NewNode(common()->IfTrue(), branch_oddball);
+  Node* eoddball = effect;
+  {
+    controls[count] = if_oddball;
+    effects[count] = eoddball;
+    values[count] = graph()->NewNode(machine()->WordEqual(), value,
+                                     jsgraph()->TrueConstant());
+    count++;
+  }
+  control = graph()->NewNode(common()->IfFalse(), branch_oddball);
+
+  // Check if {value} is a String.
+  Node* check_string =
+      graph()->NewNode(machine()->Int32LessThan(), value_instance_type,
+                       jsgraph()->Int32Constant(FIRST_NONSTRING_TYPE));
+  Node* branch_string =
+      graph()->NewNode(common()->Branch(), check_string, control);
+
+  // For String {value}, we need to check that the length is not zero.
+  Node* if_string = graph()->NewNode(common()->IfTrue(), branch_string);
+  Node* estring = effect;
+  {
+    // Load the {value} length.
+    Node* value_length = estring = graph()->NewNode(
+        simplified()->LoadField(AccessBuilder::ForStringLength()), value,
+        estring, if_string);
+
+    controls[count] = if_string;
+    effects[count] = estring;
+    values[count] =
+        graph()->NewNode(machine()->Word32Equal(),
+                         graph()->NewNode(machine()->WordEqual(), value_length,
+                                          jsgraph()->ZeroConstant()),
+                         zero);
+    count++;
+  }
+  control = graph()->NewNode(common()->IfFalse(), branch_string);
+
+  // Check if {value} is a HeapNumber.
+  Node* check_heapnumber =
+      graph()->NewNode(machine()->Word32Equal(), value_instance_type,
+                       jsgraph()->Int32Constant(HEAP_NUMBER_TYPE));
+  Node* branch_heapnumber =
+      graph()->NewNode(common()->Branch(), check_heapnumber, control);
+
+  // For HeapNumber {value}, just check that its value is not 0.0, -0.0 or NaN.
+  Node* if_heapnumber = graph()->NewNode(common()->IfTrue(), branch_heapnumber);
+  Node* eheapnumber = effect;
+  {
+    // Load the raw value of {value}.
+    Node* value_value = eheapnumber = graph()->NewNode(
+        simplified()->LoadField(AccessBuilder::ForHeapNumberValue()), value,
+        eheapnumber, if_heapnumber);
+
+    // Check if {value} is either less than 0.0 or greater than 0.0.
+    Node* check =
+        graph()->NewNode(machine()->Float64LessThan(), fzero, value_value);
+    Node* branch = graph()->NewNode(common()->Branch(), check, if_heapnumber);
+
+    controls[count] = graph()->NewNode(common()->IfTrue(), branch);
+    effects[count] = eheapnumber;
+    values[count] = one;
+    count++;
+
+    controls[count] = graph()->NewNode(common()->IfFalse(), branch);
+    effects[count] = eheapnumber;
+    values[count] =
+        graph()->NewNode(machine()->Float64LessThan(), value_value, fzero);
+    count++;
+  }
+  control = graph()->NewNode(common()->IfFalse(), branch_heapnumber);
+
+  // The {value} is either a JSReceiver, a Symbol or some Simd128Value. In
+  // those cases we can just check the undetectable bit on the map, which will
+  // only be set for certain JSReceivers, i.e. document.all.
+  {
+    // Load the {value} map bit field.
+    Node* value_map_bitfield = effect = graph()->NewNode(
+        simplified()->LoadField(AccessBuilder::ForMapBitField()), value_map,
+        effect, control);
+
+    controls[count] = control;
+    effects[count] = effect;
+    values[count] = graph()->NewNode(
+        machine()->Word32Equal(),
+        graph()->NewNode(machine()->Word32And(), value_map_bitfield,
+                         jsgraph()->Int32Constant(1 << Map::kIsUndetectable)),
+        zero);
+    count++;
+  }
+
+  // Merge the different controls.
+  control = graph()->NewNode(common()->Merge(count), count, controls);
+  effects[count] = control;
+  effect = graph()->NewNode(common()->EffectPhi(count), count + 1, effects);
+  values[count] = control;
+  value = graph()->NewNode(common()->Phi(MachineRepresentation::kBit, count),
+                           count + 1, values);
+
+  return ValueEffectControl(value, effect, control);
+}
+
+EffectControlLinearizer::ValueEffectControl
 EffectControlLinearizer::LowerChangeTaggedToInt32(Node* node, Node* effect,
                                                   Node* control) {
   Node* value = node->InputAt(0);
@@ -1164,8 +1274,8 @@
 }
 
 EffectControlLinearizer::ValueEffectControl
-EffectControlLinearizer::LowerCheckTaggedPointer(Node* node, Node* frame_state,
-                                                 Node* effect, Node* control) {
+EffectControlLinearizer::LowerCheckHeapObject(Node* node, Node* frame_state,
+                                              Node* effect, Node* control) {
   Node* value = node->InputAt(0);
 
   Node* check = ObjectIsSmi(value);
@@ -1177,19 +1287,6 @@
 }
 
 EffectControlLinearizer::ValueEffectControl
-EffectControlLinearizer::LowerCheckTaggedSigned(Node* node, Node* frame_state,
-                                                Node* effect, Node* control) {
-  Node* value = node->InputAt(0);
-
-  Node* check = ObjectIsSmi(value);
-  control = effect =
-      graph()->NewNode(common()->DeoptimizeUnless(DeoptimizeReason::kNotASmi),
-                       check, frame_state, effect, control);
-
-  return ValueEffectControl(value, effect, control);
-}
-
-EffectControlLinearizer::ValueEffectControl
 EffectControlLinearizer::LowerCheckedInt32Add(Node* node, Node* frame_state,
                                               Node* effect, Node* control) {
   Node* lhs = node->InputAt(0);
@@ -1515,6 +1612,27 @@
 }
 
 EffectControlLinearizer::ValueEffectControl
+EffectControlLinearizer::LowerCheckedInt32ToTaggedSigned(Node* node,
+                                                         Node* frame_state,
+                                                         Node* effect,
+                                                         Node* control) {
+  DCHECK(SmiValuesAre31Bits());
+  Node* value = node->InputAt(0);
+
+  Node* add = graph()->NewNode(machine()->Int32AddWithOverflow(), value, value,
+                               control);
+
+  Node* check = graph()->NewNode(common()->Projection(1), add, control);
+  control = effect =
+      graph()->NewNode(common()->DeoptimizeIf(DeoptimizeReason::kOverflow),
+                       check, frame_state, effect, control);
+
+  value = graph()->NewNode(common()->Projection(0), add, control);
+
+  return ValueEffectControl(value, effect, control);
+}
+
+EffectControlLinearizer::ValueEffectControl
 EffectControlLinearizer::LowerCheckedUint32ToInt32(Node* node,
                                                    Node* frame_state,
                                                    Node* effect,
@@ -1531,6 +1649,22 @@
 }
 
 EffectControlLinearizer::ValueEffectControl
+EffectControlLinearizer::LowerCheckedUint32ToTaggedSigned(Node* node,
+                                                          Node* frame_state,
+                                                          Node* effect,
+                                                          Node* control) {
+  Node* value = node->InputAt(0);
+  Node* check = graph()->NewNode(machine()->Uint32LessThanOrEqual(), value,
+                                 SmiMaxValueConstant());
+  control = effect = graph()->NewNode(
+      common()->DeoptimizeUnless(DeoptimizeReason::kLostPrecision), check,
+      frame_state, effect, control);
+  value = ChangeUint32ToSmi(value);
+
+  return ValueEffectControl(value, effect, control);
+}
+
+EffectControlLinearizer::ValueEffectControl
 EffectControlLinearizer::BuildCheckedFloat64ToInt32(CheckForMinusZeroMode mode,
                                                     Node* value,
                                                     Node* frame_state,
@@ -1667,8 +1801,8 @@
       break;
     }
     case CheckTaggedInputMode::kNumberOrOddball: {
-      Node* branch = graph()->NewNode(common()->Branch(BranchHint::kTrue),
-                                      check_number, control);
+      Node* branch =
+          graph()->NewNode(common()->Branch(), check_number, control);
 
       Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
       Node* etrue = effect;
@@ -1710,8 +1844,7 @@
   Node* value = node->InputAt(0);
 
   Node* check = ObjectIsSmi(value);
-  Node* branch =
-      graph()->NewNode(common()->Branch(BranchHint::kTrue), check, control);
+  Node* branch = graph()->NewNode(common()->Branch(), check, control);
 
   // In the Smi case, just convert to int32 and then float64.
   Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
@@ -1736,6 +1869,21 @@
 }
 
 EffectControlLinearizer::ValueEffectControl
+EffectControlLinearizer::LowerCheckedTaggedToTaggedSigned(Node* node,
+                                                          Node* frame_state,
+                                                          Node* effect,
+                                                          Node* control) {
+  Node* value = node->InputAt(0);
+
+  Node* check = ObjectIsSmi(value);
+  control = effect =
+      graph()->NewNode(common()->DeoptimizeUnless(DeoptimizeReason::kNotASmi),
+                       check, frame_state, effect, control);
+
+  return ValueEffectControl(value, effect, control);
+}
+
+EffectControlLinearizer::ValueEffectControl
 EffectControlLinearizer::LowerTruncateTaggedToWord32(Node* node, Node* effect,
                                                      Node* control) {
   Node* value = node->InputAt(0);
@@ -1996,6 +2144,26 @@
 }
 
 EffectControlLinearizer::ValueEffectControl
+EffectControlLinearizer::LowerArrayBufferWasNeutered(Node* node, Node* effect,
+                                                     Node* control) {
+  Node* value = node->InputAt(0);
+
+  Node* value_bit_field = effect = graph()->NewNode(
+      simplified()->LoadField(AccessBuilder::ForJSArrayBufferBitField()), value,
+      effect, control);
+  value = graph()->NewNode(
+      machine()->Word32Equal(),
+      graph()->NewNode(machine()->Word32Equal(),
+                       graph()->NewNode(machine()->Word32And(), value_bit_field,
+                                        jsgraph()->Int32Constant(
+                                            JSArrayBuffer::WasNeutered::kMask)),
+                       jsgraph()->Int32Constant(0)),
+      jsgraph()->Int32Constant(0));
+
+  return ValueEffectControl(value, effect, control);
+}
+
+EffectControlLinearizer::ValueEffectControl
 EffectControlLinearizer::LowerStringCharCodeAt(Node* node, Node* effect,
                                                Node* control) {
   Node* subject = node->InputAt(0);
@@ -2382,6 +2550,236 @@
 }
 
 EffectControlLinearizer::ValueEffectControl
+EffectControlLinearizer::LowerStringFromCodePoint(Node* node, Node* effect,
+                                                  Node* control) {
+  Node* value = node->InputAt(0);
+  Node* code = value;
+
+  Node* etrue0 = effect;
+  Node* vtrue0;
+
+  // Check if the {code} is a single code unit
+  Node* check0 = graph()->NewNode(machine()->Uint32LessThanOrEqual(), code,
+                                  jsgraph()->Uint32Constant(0xFFFF));
+  Node* branch0 =
+      graph()->NewNode(common()->Branch(BranchHint::kTrue), check0, control);
+
+  Node* if_true0 = graph()->NewNode(common()->IfTrue(), branch0);
+  {
+    // Check if the {code} is a one byte character
+    Node* check1 = graph()->NewNode(
+        machine()->Uint32LessThanOrEqual(), code,
+        jsgraph()->Uint32Constant(String::kMaxOneByteCharCode));
+    Node* branch1 =
+        graph()->NewNode(common()->Branch(BranchHint::kTrue), check1, if_true0);
+
+    Node* if_true1 = graph()->NewNode(common()->IfTrue(), branch1);
+    Node* etrue1 = etrue0;
+    Node* vtrue1;
+    {
+      // Load the isolate wide single character string cache.
+      Node* cache =
+          jsgraph()->HeapConstant(factory()->single_character_string_cache());
+
+      // Compute the {cache} index for {code}.
+      Node* index =
+          machine()->Is32()
+              ? code
+              : graph()->NewNode(machine()->ChangeUint32ToUint64(), code);
+
+      // Check if we have an entry for the {code} in the single character string
+      // cache already.
+      Node* entry = etrue1 = graph()->NewNode(
+          simplified()->LoadElement(AccessBuilder::ForFixedArrayElement()),
+          cache, index, etrue1, if_true1);
+
+      Node* check2 = graph()->NewNode(machine()->WordEqual(), entry,
+                                      jsgraph()->UndefinedConstant());
+      Node* branch2 = graph()->NewNode(common()->Branch(BranchHint::kFalse),
+                                       check2, if_true1);
+
+      Node* if_true2 = graph()->NewNode(common()->IfTrue(), branch2);
+      Node* etrue2 = etrue1;
+      Node* vtrue2;
+      {
+        // Allocate a new SeqOneByteString for {code}.
+        vtrue2 = etrue2 = graph()->NewNode(
+            simplified()->Allocate(NOT_TENURED),
+            jsgraph()->Int32Constant(SeqOneByteString::SizeFor(1)), etrue2,
+            if_true2);
+        etrue2 = graph()->NewNode(
+            simplified()->StoreField(AccessBuilder::ForMap()), vtrue2,
+            jsgraph()->HeapConstant(factory()->one_byte_string_map()), etrue2,
+            if_true2);
+        etrue2 = graph()->NewNode(
+            simplified()->StoreField(AccessBuilder::ForNameHashField()), vtrue2,
+            jsgraph()->IntPtrConstant(Name::kEmptyHashField), etrue2, if_true2);
+        etrue2 = graph()->NewNode(
+            simplified()->StoreField(AccessBuilder::ForStringLength()), vtrue2,
+            jsgraph()->SmiConstant(1), etrue2, if_true2);
+        etrue2 = graph()->NewNode(
+            machine()->Store(StoreRepresentation(MachineRepresentation::kWord8,
+                                                 kNoWriteBarrier)),
+            vtrue2, jsgraph()->IntPtrConstant(SeqOneByteString::kHeaderSize -
+                                              kHeapObjectTag),
+            code, etrue2, if_true2);
+
+        // Remember it in the {cache}.
+        etrue2 = graph()->NewNode(
+            simplified()->StoreElement(AccessBuilder::ForFixedArrayElement()),
+            cache, index, vtrue2, etrue2, if_true2);
+      }
+
+      // Use the {entry} from the {cache}.
+      Node* if_false2 = graph()->NewNode(common()->IfFalse(), branch2);
+      Node* efalse2 = etrue0;
+      Node* vfalse2 = entry;
+
+      if_true1 = graph()->NewNode(common()->Merge(2), if_true2, if_false2);
+      etrue1 =
+          graph()->NewNode(common()->EffectPhi(2), etrue2, efalse2, if_true1);
+      vtrue1 =
+          graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
+                           vtrue2, vfalse2, if_true1);
+    }
+
+    Node* if_false1 = graph()->NewNode(common()->IfFalse(), branch1);
+    Node* efalse1 = effect;
+    Node* vfalse1;
+    {
+      // Allocate a new SeqTwoByteString for {code}.
+      vfalse1 = efalse1 = graph()->NewNode(
+          simplified()->Allocate(NOT_TENURED),
+          jsgraph()->Int32Constant(SeqTwoByteString::SizeFor(1)), efalse1,
+          if_false1);
+      efalse1 = graph()->NewNode(
+          simplified()->StoreField(AccessBuilder::ForMap()), vfalse1,
+          jsgraph()->HeapConstant(factory()->string_map()), efalse1, if_false1);
+      efalse1 = graph()->NewNode(
+          simplified()->StoreField(AccessBuilder::ForNameHashField()), vfalse1,
+          jsgraph()->IntPtrConstant(Name::kEmptyHashField), efalse1, if_false1);
+      efalse1 = graph()->NewNode(
+          simplified()->StoreField(AccessBuilder::ForStringLength()), vfalse1,
+          jsgraph()->SmiConstant(1), efalse1, if_false1);
+      efalse1 = graph()->NewNode(
+          machine()->Store(StoreRepresentation(MachineRepresentation::kWord16,
+                                               kNoWriteBarrier)),
+          vfalse1, jsgraph()->IntPtrConstant(SeqTwoByteString::kHeaderSize -
+                                             kHeapObjectTag),
+          code, efalse1, if_false1);
+    }
+
+    if_true0 = graph()->NewNode(common()->Merge(2), if_true1, if_false1);
+    etrue0 =
+        graph()->NewNode(common()->EffectPhi(2), etrue1, efalse1, if_true0);
+    vtrue0 = graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
+                              vtrue1, vfalse1, if_true0);
+  }
+
+  // Generate surrogate pair string
+  Node* if_false0 = graph()->NewNode(common()->IfFalse(), branch0);
+  Node* efalse0 = effect;
+  Node* vfalse0;
+  {
+    switch (UnicodeEncodingOf(node->op())) {
+      case UnicodeEncoding::UTF16:
+        break;
+
+      case UnicodeEncoding::UTF32: {
+        // Convert UTF32 to UTF16 code units, and store as a 32 bit word.
+        Node* lead_offset = jsgraph()->Int32Constant(0xD800 - (0x10000 >> 10));
+
+        // lead = (codepoint >> 10) + LEAD_OFFSET
+        Node* lead =
+            graph()->NewNode(machine()->Int32Add(),
+                             graph()->NewNode(machine()->Word32Shr(), code,
+                                              jsgraph()->Int32Constant(10)),
+                             lead_offset);
+
+        // trail = (codepoint & 0x3FF) + 0xDC00;
+        Node* trail =
+            graph()->NewNode(machine()->Int32Add(),
+                             graph()->NewNode(machine()->Word32And(), code,
+                                              jsgraph()->Int32Constant(0x3FF)),
+                             jsgraph()->Int32Constant(0xDC00));
+
+        // codepoint = (trail << 16) | lead;
+        code = graph()->NewNode(machine()->Word32Or(),
+                                graph()->NewNode(machine()->Word32Shl(), trail,
+                                                 jsgraph()->Int32Constant(16)),
+                                lead);
+        break;
+      }
+    }
+
+    // Allocate a new SeqTwoByteString for {code}.
+    vfalse0 = efalse0 =
+        graph()->NewNode(simplified()->Allocate(NOT_TENURED),
+                         jsgraph()->Int32Constant(SeqTwoByteString::SizeFor(2)),
+                         efalse0, if_false0);
+    efalse0 = graph()->NewNode(
+        simplified()->StoreField(AccessBuilder::ForMap()), vfalse0,
+        jsgraph()->HeapConstant(factory()->string_map()), efalse0, if_false0);
+    efalse0 = graph()->NewNode(
+        simplified()->StoreField(AccessBuilder::ForNameHashField()), vfalse0,
+        jsgraph()->IntPtrConstant(Name::kEmptyHashField), efalse0, if_false0);
+    efalse0 = graph()->NewNode(
+        simplified()->StoreField(AccessBuilder::ForStringLength()), vfalse0,
+        jsgraph()->SmiConstant(2), efalse0, if_false0);
+    efalse0 = graph()->NewNode(
+        machine()->Store(StoreRepresentation(MachineRepresentation::kWord32,
+                                             kNoWriteBarrier)),
+        vfalse0, jsgraph()->IntPtrConstant(SeqTwoByteString::kHeaderSize -
+                                           kHeapObjectTag),
+        code, efalse0, if_false0);
+  }
+
+  control = graph()->NewNode(common()->Merge(2), if_true0, if_false0);
+  effect = graph()->NewNode(common()->EffectPhi(2), etrue0, efalse0, control);
+  value = graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
+                           vtrue0, vfalse0, control);
+
+  return ValueEffectControl(value, effect, control);
+}
+
+EffectControlLinearizer::ValueEffectControl
+EffectControlLinearizer::LowerStringComparison(Callable const& callable,
+                                               Node* node, Node* effect,
+                                               Node* control) {
+  Operator::Properties properties = Operator::kEliminatable;
+  CallDescriptor::Flags flags = CallDescriptor::kNoFlags;
+  CallDescriptor* desc = Linkage::GetStubCallDescriptor(
+      isolate(), graph()->zone(), callable.descriptor(), 0, flags, properties);
+  node->InsertInput(graph()->zone(), 0,
+                    jsgraph()->HeapConstant(callable.code()));
+  node->AppendInput(graph()->zone(), jsgraph()->NoContextConstant());
+  node->AppendInput(graph()->zone(), effect);
+  NodeProperties::ChangeOp(node, common()->Call(desc));
+  return ValueEffectControl(node, node, control);
+}
+
+EffectControlLinearizer::ValueEffectControl
+EffectControlLinearizer::LowerStringEqual(Node* node, Node* effect,
+                                          Node* control) {
+  return LowerStringComparison(CodeFactory::StringEqual(isolate()), node,
+                               effect, control);
+}
+
+EffectControlLinearizer::ValueEffectControl
+EffectControlLinearizer::LowerStringLessThan(Node* node, Node* effect,
+                                             Node* control) {
+  return LowerStringComparison(CodeFactory::StringLessThan(isolate()), node,
+                               effect, control);
+}
+
+EffectControlLinearizer::ValueEffectControl
+EffectControlLinearizer::LowerStringLessThanOrEqual(Node* node, Node* effect,
+                                                    Node* control) {
+  return LowerStringComparison(CodeFactory::StringLessThanOrEqual(isolate()),
+                               node, effect, control);
+}
+
+EffectControlLinearizer::ValueEffectControl
 EffectControlLinearizer::LowerCheckFloat64Hole(Node* node, Node* frame_state,
                                                Node* effect, Node* control) {
   // If we reach this point w/o eliminating the {node} that's marked
diff --git a/src/compiler/effect-control-linearizer.h b/src/compiler/effect-control-linearizer.h
index 98f08c7..0199fd0 100644
--- a/src/compiler/effect-control-linearizer.h
+++ b/src/compiler/effect-control-linearizer.h
@@ -12,6 +12,8 @@
 namespace v8 {
 namespace internal {
 
+// Forward declarations.
+class Callable;
 class Zone;
 
 namespace compiler {
@@ -71,10 +73,8 @@
                                       Node* effect, Node* control);
   ValueEffectControl LowerCheckIf(Node* node, Node* frame_state, Node* effect,
                                   Node* control);
-  ValueEffectControl LowerCheckTaggedPointer(Node* node, Node* frame_state,
-                                             Node* effect, Node* control);
-  ValueEffectControl LowerCheckTaggedSigned(Node* node, Node* frame_state,
-                                            Node* effect, Node* control);
+  ValueEffectControl LowerCheckHeapObject(Node* node, Node* frame_state,
+                                          Node* effect, Node* control);
   ValueEffectControl LowerCheckedInt32Add(Node* node, Node* frame_state,
                                           Node* effect, Node* control);
   ValueEffectControl LowerCheckedInt32Sub(Node* node, Node* frame_state,
@@ -89,8 +89,16 @@
                                            Node* effect, Node* control);
   ValueEffectControl LowerCheckedInt32Mul(Node* node, Node* frame_state,
                                           Node* effect, Node* control);
+  ValueEffectControl LowerCheckedInt32ToTaggedSigned(Node* node,
+                                                     Node* frame_state,
+                                                     Node* effect,
+                                                     Node* control);
   ValueEffectControl LowerCheckedUint32ToInt32(Node* node, Node* frame_state,
                                                Node* effect, Node* control);
+  ValueEffectControl LowerCheckedUint32ToTaggedSigned(Node* node,
+                                                      Node* frame_state,
+                                                      Node* effect,
+                                                      Node* control);
   ValueEffectControl LowerCheckedFloat64ToInt32(Node* node, Node* frame_state,
                                                 Node* effect, Node* control);
   ValueEffectControl LowerCheckedTaggedSignedToInt32(Node* node,
@@ -101,8 +109,14 @@
                                                Node* effect, Node* control);
   ValueEffectControl LowerCheckedTaggedToFloat64(Node* node, Node* frame_state,
                                                  Node* effect, Node* control);
+  ValueEffectControl LowerCheckedTaggedToTaggedSigned(Node* node,
+                                                      Node* frame_state,
+                                                      Node* effect,
+                                                      Node* control);
   ValueEffectControl LowerChangeTaggedToFloat64(Node* node, Node* effect,
                                                 Node* control);
+  ValueEffectControl LowerTruncateTaggedToBit(Node* node, Node* effect,
+                                              Node* control);
   ValueEffectControl LowerTruncateTaggedToFloat64(Node* node, Node* effect,
                                                   Node* control);
   ValueEffectControl LowerTruncateTaggedToWord32(Node* node, Node* effect,
@@ -122,10 +136,19 @@
                                          Node* control);
   ValueEffectControl LowerObjectIsUndetectable(Node* node, Node* effect,
                                                Node* control);
+  ValueEffectControl LowerArrayBufferWasNeutered(Node* node, Node* effect,
+                                                 Node* control);
   ValueEffectControl LowerStringCharCodeAt(Node* node, Node* effect,
                                            Node* control);
   ValueEffectControl LowerStringFromCharCode(Node* node, Node* effect,
                                              Node* control);
+  ValueEffectControl LowerStringFromCodePoint(Node* node, Node* effect,
+                                              Node* control);
+  ValueEffectControl LowerStringEqual(Node* node, Node* effect, Node* control);
+  ValueEffectControl LowerStringLessThan(Node* node, Node* effect,
+                                         Node* control);
+  ValueEffectControl LowerStringLessThanOrEqual(Node* node, Node* effect,
+                                                Node* control);
   ValueEffectControl LowerCheckFloat64Hole(Node* node, Node* frame_state,
                                            Node* effect, Node* control);
   ValueEffectControl LowerCheckTaggedHole(Node* node, Node* frame_state,
@@ -165,6 +188,8 @@
   ValueEffectControl BuildCheckedHeapNumberOrOddballToFloat64(
       CheckTaggedInputMode mode, Node* value, Node* frame_state, Node* effect,
       Node* control);
+  ValueEffectControl LowerStringComparison(Callable const& callable, Node* node,
+                                           Node* effect, Node* control);
 
   Node* ChangeInt32ToSmi(Node* value);
   Node* ChangeUint32ToSmi(Node* value);
diff --git a/src/compiler/escape-analysis-reducer.cc b/src/compiler/escape-analysis-reducer.cc
index c69b86c..d997813 100644
--- a/src/compiler/escape-analysis-reducer.cc
+++ b/src/compiler/escape-analysis-reducer.cc
@@ -97,6 +97,22 @@
   return NoChange();
 }
 
+namespace {
+
+Node* MaybeGuard(JSGraph* jsgraph, Node* original, Node* replacement) {
+  // We might need to guard the replacement if the type of the {replacement}
+  // node is not in a sub-type relation to the type of the {original} node.
+  Type* const replacement_type = NodeProperties::GetType(replacement);
+  Type* const original_type = NodeProperties::GetType(original);
+  if (!replacement_type->Is(original_type)) {
+    Node* const control = NodeProperties::GetControlInput(original);
+    replacement = jsgraph->graph()->NewNode(
+        jsgraph->common()->TypeGuard(original_type), replacement, control);
+  }
+  return replacement;
+}
+
+}  // namespace
 
 Reduction EscapeAnalysisReducer::ReduceLoad(Node* node) {
   DCHECK(node->opcode() == IrOpcode::kLoadField ||
@@ -104,12 +120,15 @@
   if (node->id() < static_cast<NodeId>(fully_reduced_.length())) {
     fully_reduced_.Add(node->id());
   }
-  if (Node* rep = escape_analysis()->GetReplacement(node)) {
-    isolate()->counters()->turbo_escape_loads_replaced()->Increment();
-    TRACE("Replaced #%d (%s) with #%d (%s)\n", node->id(),
-          node->op()->mnemonic(), rep->id(), rep->op()->mnemonic());
-    ReplaceWithValue(node, rep);
-    return Replace(rep);
+  if (escape_analysis()->IsVirtual(NodeProperties::GetValueInput(node, 0))) {
+    if (Node* rep = escape_analysis()->GetReplacement(node)) {
+      isolate()->counters()->turbo_escape_loads_replaced()->Increment();
+      TRACE("Replaced #%d (%s) with #%d (%s)\n", node->id(),
+            node->op()->mnemonic(), rep->id(), rep->op()->mnemonic());
+      rep = MaybeGuard(jsgraph(), node, rep);
+      ReplaceWithValue(node, rep);
+      return Replace(rep);
+    }
   }
   return NoChange();
 }
@@ -305,6 +324,11 @@
   if (input->opcode() == IrOpcode::kFinishRegion ||
       input->opcode() == IrOpcode::kAllocate) {
     if (escape_analysis()->IsVirtual(input)) {
+      if (escape_analysis()->IsCyclicObjectState(effect, input)) {
+        // TODO(mstarzinger): Represent cyclic object states differently to
+        // ensure the scheduler can properly handle such object states.
+        FATAL("Cyclic object state detected by escape analysis.");
+      }
       if (Node* object_state =
               escape_analysis()->GetOrCreateObjectState(effect, input)) {
         if (node_multiused || (multiple_users && !already_cloned)) {
diff --git a/src/compiler/escape-analysis.cc b/src/compiler/escape-analysis.cc
index 437c01f..3f889cc 100644
--- a/src/compiler/escape-analysis.cc
+++ b/src/compiler/escape-analysis.cc
@@ -12,13 +12,13 @@
 #include "src/compiler/common-operator.h"
 #include "src/compiler/graph-reducer.h"
 #include "src/compiler/js-operator.h"
-#include "src/compiler/node.h"
 #include "src/compiler/node-matchers.h"
 #include "src/compiler/node-properties.h"
+#include "src/compiler/node.h"
 #include "src/compiler/operator-properties.h"
 #include "src/compiler/simplified-operator.h"
+#include "src/compiler/type-cache.h"
 #include "src/objects-inl.h"
-#include "src/type-cache.h"
 
 namespace v8 {
 namespace internal {
@@ -795,8 +795,16 @@
       case IrOpcode::kSelect:
       // TODO(mstarzinger): The following list of operators will eventually be
       // handled by the EscapeAnalysisReducer (similar to ObjectIsSmi).
+      case IrOpcode::kStringEqual:
+      case IrOpcode::kStringLessThan:
+      case IrOpcode::kStringLessThanOrEqual:
+      case IrOpcode::kPlainPrimitiveToNumber:
+      case IrOpcode::kPlainPrimitiveToWord32:
+      case IrOpcode::kPlainPrimitiveToFloat64:
+      case IrOpcode::kStringCharCodeAt:
       case IrOpcode::kObjectIsCallable:
       case IrOpcode::kObjectIsNumber:
+      case IrOpcode::kObjectIsReceiver:
       case IrOpcode::kObjectIsString:
       case IrOpcode::kObjectIsUndetectable:
         if (SetEscaped(rep)) {
@@ -853,6 +861,7 @@
       status_analysis_(new (zone) EscapeStatusAnalysis(this, graph, zone)),
       virtual_states_(zone),
       replacements_(zone),
+      cycle_detection_(zone),
       cache_(nullptr) {}
 
 EscapeAnalysis::~EscapeAnalysis() {}
@@ -1456,13 +1465,13 @@
     int offset = OffsetForFieldAccess(node);
     if (static_cast<size_t>(offset) >= object->field_count()) return;
     Node* val = ResolveReplacement(NodeProperties::GetValueInput(node, 1));
-    // TODO(mstarzinger): The following is a workaround to not track the code
-    // entry field in virtual JSFunction objects. We only ever store the inner
-    // pointer into the compile lazy stub in this field and the deoptimizer has
-    // this assumption hard-coded in {TranslatedState::MaterializeAt} as well.
+    // TODO(mstarzinger): The following is a workaround to not track some well
+    // known raw fields. We only ever store default initial values into these
+    // fields which are hard-coded in {TranslatedState::MaterializeAt} as well.
     if (val->opcode() == IrOpcode::kInt32Constant ||
         val->opcode() == IrOpcode::kInt64Constant) {
-      DCHECK_EQ(JSFunction::kCodeEntryOffset, FieldAccessOf(node->op()).offset);
+      DCHECK(FieldAccessOf(node->op()).offset == JSFunction::kCodeEntryOffset ||
+             FieldAccessOf(node->op()).offset == Name::kHashFieldOffset);
       val = slot_not_analyzed_;
     }
     if (object->GetField(offset) != val) {
@@ -1557,6 +1566,27 @@
   return nullptr;
 }
 
+bool EscapeAnalysis::IsCyclicObjectState(Node* effect, Node* node) {
+  if ((node->opcode() == IrOpcode::kFinishRegion ||
+       node->opcode() == IrOpcode::kAllocate) &&
+      IsVirtual(node)) {
+    if (VirtualObject* vobj = GetVirtualObject(virtual_states_[effect->id()],
+                                               ResolveReplacement(node))) {
+      if (cycle_detection_.find(vobj) != cycle_detection_.end()) return true;
+      cycle_detection_.insert(vobj);
+      bool cycle_detected = false;
+      for (size_t i = 0; i < vobj->field_count(); ++i) {
+        if (Node* field = vobj->GetField(i)) {
+          if (IsCyclicObjectState(effect, field)) cycle_detected = true;
+        }
+      }
+      cycle_detection_.erase(vobj);
+      return cycle_detected;
+    }
+  }
+  return false;
+}
+
 void EscapeAnalysis::DebugPrintState(VirtualState* state) {
   PrintF("Dumping virtual state %p\n", static_cast<void*>(state));
   for (Alias alias = 0; alias < status_analysis_->AliasCount(); ++alias) {
diff --git a/src/compiler/escape-analysis.h b/src/compiler/escape-analysis.h
index 839e54c..ec5154e 100644
--- a/src/compiler/escape-analysis.h
+++ b/src/compiler/escape-analysis.h
@@ -32,6 +32,7 @@
   bool IsEscaped(Node* node);
   bool CompareVirtualObjects(Node* left, Node* right);
   Node* GetOrCreateObjectState(Node* effect, Node* node);
+  bool IsCyclicObjectState(Node* effect, Node* node);
   bool ExistsVirtualAllocate();
 
  private:
@@ -75,6 +76,7 @@
   EscapeStatusAnalysis* status_analysis_;
   ZoneVector<VirtualState*> virtual_states_;
   ZoneVector<Node*> replacements_;
+  ZoneSet<VirtualObject*> cycle_detection_;
   MergeCache* cache_;
 
   DISALLOW_COPY_AND_ASSIGN(EscapeAnalysis);
diff --git a/src/compiler/graph-reducer.h b/src/compiler/graph-reducer.h
index 2ac60a6..a089c12 100644
--- a/src/compiler/graph-reducer.h
+++ b/src/compiler/graph-reducer.h
@@ -6,7 +6,7 @@
 #define V8_COMPILER_GRAPH_REDUCER_H_
 
 #include "src/compiler/node-marker.h"
-#include "src/zone-containers.h"
+#include "src/zone/zone-containers.h"
 
 namespace v8 {
 namespace internal {
diff --git a/src/compiler/graph-visualizer.cc b/src/compiler/graph-visualizer.cc
index 9fd80ea..d810c37 100644
--- a/src/compiler/graph-visualizer.cc
+++ b/src/compiler/graph-visualizer.cc
@@ -9,7 +9,7 @@
 #include <string>
 
 #include "src/code-stubs.h"
-#include "src/compiler.h"
+#include "src/compilation-info.h"
 #include "src/compiler/all-nodes.h"
 #include "src/compiler/graph.h"
 #include "src/compiler/node-properties.h"
@@ -239,7 +239,7 @@
 
 
 std::ostream& operator<<(std::ostream& os, const AsJSON& ad) {
-  base::AccountingAllocator allocator;
+  AccountingAllocator allocator;
   Zone tmp_zone(&allocator);
   os << "{\n\"nodes\":[";
   JSONGraphNodeWriter(os, &tmp_zone, &ad.graph, ad.positions).Print();
@@ -629,7 +629,7 @@
 
 
 std::ostream& operator<<(std::ostream& os, const AsC1VCompilation& ac) {
-  base::AccountingAllocator allocator;
+  AccountingAllocator allocator;
   Zone tmp_zone(&allocator);
   GraphC1Visualizer(os, &tmp_zone).PrintCompilation(ac.info_);
   return os;
@@ -637,7 +637,7 @@
 
 
 std::ostream& operator<<(std::ostream& os, const AsC1V& ac) {
-  base::AccountingAllocator allocator;
+  AccountingAllocator allocator;
   Zone tmp_zone(&allocator);
   GraphC1Visualizer(os, &tmp_zone)
       .PrintSchedule(ac.phase_, ac.schedule_, ac.positions_, ac.instructions_);
@@ -647,7 +647,7 @@
 
 std::ostream& operator<<(std::ostream& os,
                          const AsC1VRegisterAllocationData& ac) {
-  base::AccountingAllocator allocator;
+  AccountingAllocator allocator;
   Zone tmp_zone(&allocator);
   GraphC1Visualizer(os, &tmp_zone).PrintLiveRanges(ac.phase_, ac.data_);
   return os;
@@ -658,7 +658,7 @@
 const int kVisited = 2;
 
 std::ostream& operator<<(std::ostream& os, const AsRPO& ar) {
-  base::AccountingAllocator allocator;
+  AccountingAllocator allocator;
   Zone local_zone(&allocator);
 
   // Do a post-order depth-first search on the RPO graph. For every node,
diff --git a/src/compiler/graph.h b/src/compiler/graph.h
index a694a0b..1d9e85e 100644
--- a/src/compiler/graph.h
+++ b/src/compiler/graph.h
@@ -5,8 +5,8 @@
 #ifndef V8_COMPILER_GRAPH_H_
 #define V8_COMPILER_GRAPH_H_
 
-#include "src/zone.h"
-#include "src/zone-containers.h"
+#include "src/zone/zone-containers.h"
+#include "src/zone/zone.h"
 
 namespace v8 {
 namespace internal {
diff --git a/src/compiler/ia32/code-generator-ia32.cc b/src/compiler/ia32/code-generator-ia32.cc
index ad1a992..428570a 100644
--- a/src/compiler/ia32/code-generator-ia32.cc
+++ b/src/compiler/ia32/code-generator-ia32.cc
@@ -4,7 +4,7 @@
 
 #include "src/compiler/code-generator.h"
 
-#include "src/ast/scopes.h"
+#include "src/compilation-info.h"
 #include "src/compiler/code-generator-impl.h"
 #include "src/compiler/gap-resolver.h"
 #include "src/compiler/node-matchers.h"
@@ -637,9 +637,6 @@
     case kArchDebugBreak:
       __ int3();
       break;
-    case kArchImpossible:
-      __ Abort(kConversionFromImpossibleValue);
-      break;
     case kArchNop:
     case kArchThrowTerminator:
       // don't emit code for nops.
@@ -649,8 +646,8 @@
           BuildTranslation(instr, -1, 0, OutputFrameStateCombine::Ignore());
       Deoptimizer::BailoutType bailout_type =
           Deoptimizer::BailoutType(MiscField::decode(instr->opcode()));
-      CodeGenResult result =
-          AssembleDeoptimizerCall(deopt_state_id, bailout_type);
+      CodeGenResult result = AssembleDeoptimizerCall(
+          deopt_state_id, bailout_type, current_source_position_);
       if (result != kSuccess) return result;
       break;
     }
@@ -1786,13 +1783,14 @@
 }
 
 CodeGenerator::CodeGenResult CodeGenerator::AssembleDeoptimizerCall(
-    int deoptimization_id, Deoptimizer::BailoutType bailout_type) {
+    int deoptimization_id, Deoptimizer::BailoutType bailout_type,
+    SourcePosition pos) {
   Address deopt_entry = Deoptimizer::GetDeoptimizationEntry(
       isolate(), deoptimization_id, bailout_type);
   if (deopt_entry == nullptr) return kTooManyDeoptimizationBailouts;
   DeoptimizeReason deoptimization_reason =
       GetDeoptimizationReason(deoptimization_id);
-  __ RecordDeoptReason(deoptimization_reason, 0, deoptimization_id);
+  __ RecordDeoptReason(deoptimization_reason, pos.raw(), deoptimization_id);
   __ call(deopt_entry, RelocInfo::RUNTIME_ENTRY);
   return kSuccess;
 }
diff --git a/src/compiler/ia32/instruction-scheduler-ia32.cc b/src/compiler/ia32/instruction-scheduler-ia32.cc
index 1c62de5..ad7535c 100644
--- a/src/compiler/ia32/instruction-scheduler-ia32.cc
+++ b/src/compiler/ia32/instruction-scheduler-ia32.cc
@@ -28,8 +28,6 @@
     case kIA32Imul:
     case kIA32ImulHigh:
     case kIA32UmulHigh:
-    case kIA32Idiv:
-    case kIA32Udiv:
     case kIA32Not:
     case kIA32Neg:
     case kIA32Shl:
@@ -103,6 +101,12 @@
           ? kNoOpcodeFlags
           : kIsLoadOperation | kHasSideEffect;
 
+    case kIA32Idiv:
+    case kIA32Udiv:
+      return (instr->addressing_mode() == kMode_None)
+                 ? kMayNeedDeoptCheck
+                 : kMayNeedDeoptCheck | kIsLoadOperation | kHasSideEffect;
+
     case kIA32Movsxbl:
     case kIA32Movzxbl:
     case kIA32Movb:
diff --git a/src/compiler/ia32/instruction-selector-ia32.cc b/src/compiler/ia32/instruction-selector-ia32.cc
index 4a1e19b..7e98023 100644
--- a/src/compiler/ia32/instruction-selector-ia32.cc
+++ b/src/compiler/ia32/instruction-selector-ia32.cc
@@ -250,6 +250,10 @@
   Emit(code, 1, outputs, input_count, inputs);
 }
 
+void InstructionSelector::VisitProtectedLoad(Node* node) {
+  // TODO(eholk)
+  UNIMPLEMENTED();
+}
 
 void InstructionSelector::VisitStore(Node* node) {
   IA32OperandGenerator g(this);
@@ -262,7 +266,7 @@
   MachineRepresentation rep = store_rep.representation();
 
   if (write_barrier_kind != kNoWriteBarrier) {
-    DCHECK_EQ(MachineRepresentation::kTagged, rep);
+    DCHECK(CanBeTaggedPointer(rep));
     AddressingMode addressing_mode;
     InstructionOperand inputs[3];
     size_t input_count = 0;
diff --git a/src/compiler/instruction-codes.h b/src/compiler/instruction-codes.h
index c6689d8..22279fe 100644
--- a/src/compiler/instruction-codes.h
+++ b/src/compiler/instruction-codes.h
@@ -57,7 +57,6 @@
   V(ArchTableSwitch)                      \
   V(ArchNop)                              \
   V(ArchDebugBreak)                       \
-  V(ArchImpossible)                       \
   V(ArchComment)                          \
   V(ArchThrowTerminator)                  \
   V(ArchDeoptimize)                       \
diff --git a/src/compiler/instruction-scheduler.cc b/src/compiler/instruction-scheduler.cc
index 2e10794..c7fd1cc 100644
--- a/src/compiler/instruction-scheduler.cc
+++ b/src/compiler/instruction-scheduler.cc
@@ -11,11 +11,16 @@
 namespace internal {
 namespace compiler {
 
-// Compare the two nodes and return true if node1 is a better candidate than
-// node2 (i.e. node1 should be scheduled before node2).
-bool InstructionScheduler::CriticalPathFirstQueue::CompareNodes(
-    ScheduleGraphNode *node1, ScheduleGraphNode *node2) const {
-  return node1->total_latency() > node2->total_latency();
+void InstructionScheduler::SchedulingQueueBase::AddNode(
+    ScheduleGraphNode* node) {
+  // We keep the ready list sorted by total latency so that we can quickly find
+  // the next best candidate to schedule.
+  auto it = nodes_.begin();
+  while ((it != nodes_.end()) &&
+         ((*it)->total_latency() >= node->total_latency())) {
+    ++it;
+  }
+  nodes_.insert(it, node);
 }
 
 
@@ -24,12 +29,10 @@
   DCHECK(!IsEmpty());
   auto candidate = nodes_.end();
   for (auto iterator = nodes_.begin(); iterator != nodes_.end(); ++iterator) {
-    // We only consider instructions that have all their operands ready and
-    // we try to schedule the critical path first.
+    // We only consider instructions that have all their operands ready.
     if (cycle >= (*iterator)->start_cycle()) {
-      if ((candidate == nodes_.end()) || CompareNodes(*iterator, *candidate)) {
-        candidate = iterator;
-      }
+      candidate = iterator;
+      break;
     }
   }
 
@@ -133,9 +136,9 @@
       last_live_in_reg_marker_->AddSuccessor(new_node);
     }
 
-    // Make sure that new instructions are not scheduled before the last
-    // deoptimization point.
-    if (last_deopt_ != nullptr) {
+    // Make sure that instructions are not scheduled before the last
+    // deoptimization point when they depend on it.
+    if ((last_deopt_ != nullptr) && DependsOnDeoptimization(instr)) {
       last_deopt_->AddSuccessor(new_node);
     }
 
@@ -242,7 +245,6 @@
     case kArchTruncateDoubleToI:
     case kArchStackSlot:
     case kArchDebugBreak:
-    case kArchImpossible:
     case kArchComment:
     case kIeee754Float64Acos:
     case kIeee754Float64Acosh:
diff --git a/src/compiler/instruction-scheduler.h b/src/compiler/instruction-scheduler.h
index 271aa0d..7660520 100644
--- a/src/compiler/instruction-scheduler.h
+++ b/src/compiler/instruction-scheduler.h
@@ -6,7 +6,7 @@
 #define V8_COMPILER_INSTRUCTION_SCHEDULER_H_
 
 #include "src/compiler/instruction.h"
-#include "src/zone-containers.h"
+#include "src/zone/zone-containers.h"
 
 namespace v8 {
 namespace internal {
@@ -21,9 +21,12 @@
   kHasSideEffect = 2,      // The instruction has some side effects (memory
                            // store, function call...)
   kIsLoadOperation = 4,    // The instruction is a memory load.
+  kMayNeedDeoptCheck = 8,  // The instruction might be associated with a deopt
+                           // check. This is the case for instructions that can
+                           // blow up with particular inputs (e.g.: division by
+                           // zero on Intel platforms).
 };
 
-
 class InstructionScheduler final : public ZoneObject {
  public:
   InstructionScheduler(Zone* zone, InstructionSequence* sequence);
@@ -101,9 +104,7 @@
         nodes_(scheduler->zone()) {
     }
 
-    void AddNode(ScheduleGraphNode* node) {
-      nodes_.push_back(node);
-    }
+    void AddNode(ScheduleGraphNode* node);
 
     bool IsEmpty() const {
       return nodes_.empty();
@@ -125,11 +126,6 @@
     // Look for the best candidate to schedule, remove it from the queue and
     // return it.
     ScheduleGraphNode* PopBestCandidate(int cycle);
-
-   private:
-    // Compare the two nodes and return true if node1 is a better candidate than
-    // node2 (i.e. node1 should be scheduled before node2).
-    bool CompareNodes(ScheduleGraphNode *node1, ScheduleGraphNode *node2) const;
   };
 
   // A queue which pop a random node from the queue to perform stress tests on
@@ -162,12 +158,25 @@
   // Check whether the given instruction has side effects (e.g. function call,
   // memory store).
   bool HasSideEffect(const Instruction* instr) const {
-    return GetInstructionFlags(instr) & kHasSideEffect;
+    return (GetInstructionFlags(instr) & kHasSideEffect) != 0;
   }
 
   // Return true if the instruction is a memory load.
   bool IsLoadOperation(const Instruction* instr) const {
-    return GetInstructionFlags(instr) & kIsLoadOperation;
+    return (GetInstructionFlags(instr) & kIsLoadOperation) != 0;
+  }
+
+  // Return true if this instruction is usually associated with a deopt check
+  // to validate its input.
+  bool MayNeedDeoptCheck(const Instruction* instr) const {
+    return (GetInstructionFlags(instr) & kMayNeedDeoptCheck) != 0;
+  }
+
+  // Return true if the instruction cannot be moved before the last deopt
+  // point we encountered.
+  bool DependsOnDeoptimization(const Instruction* instr) const {
+    return MayNeedDeoptCheck(instr) || instr->IsDeoptimizeCall() ||
+           HasSideEffect(instr) || IsLoadOperation(instr);
   }
 
   // Identify nops used as a definition point for live-in registers at
diff --git a/src/compiler/instruction-selector-impl.h b/src/compiler/instruction-selector-impl.h
index 25d8a99..673d1b0 100644
--- a/src/compiler/instruction-selector-impl.h
+++ b/src/compiler/instruction-selector-impl.h
@@ -90,6 +90,12 @@
                                         GetVReg(node)));
   }
 
+  InstructionOperand UseAnyAtEnd(Node* node) {
+    return Use(node, UnallocatedOperand(UnallocatedOperand::ANY,
+                                        UnallocatedOperand::USED_AT_END,
+                                        GetVReg(node)));
+  }
+
   InstructionOperand UseAny(Node* node) {
     return Use(node, UnallocatedOperand(UnallocatedOperand::ANY,
                                         UnallocatedOperand::USED_AT_START,
diff --git a/src/compiler/instruction-selector.cc b/src/compiler/instruction-selector.cc
index ac8e64a..b150725 100644
--- a/src/compiler/instruction-selector.cc
+++ b/src/compiler/instruction-selector.cc
@@ -22,7 +22,9 @@
     Zone* zone, size_t node_count, Linkage* linkage,
     InstructionSequence* sequence, Schedule* schedule,
     SourcePositionTable* source_positions, Frame* frame,
-    SourcePositionMode source_position_mode, Features features)
+    SourcePositionMode source_position_mode, Features features,
+    EnableScheduling enable_scheduling,
+    EnableSerialization enable_serialization)
     : zone_(zone),
       linkage_(linkage),
       sequence_(sequence),
@@ -37,13 +39,16 @@
       effect_level_(node_count, 0, zone),
       virtual_registers_(node_count,
                          InstructionOperand::kInvalidVirtualRegister, zone),
+      virtual_register_rename_(zone),
       scheduler_(nullptr),
-      frame_(frame) {
+      enable_scheduling_(enable_scheduling),
+      enable_serialization_(enable_serialization),
+      frame_(frame),
+      instruction_selection_failed_(false) {
   instructions_.reserve(node_count);
 }
 
-
-void InstructionSelector::SelectInstructions() {
+bool InstructionSelector::SelectInstructions() {
   // Mark the inputs of all phis in loop headers as used.
   BasicBlockVector* blocks = schedule()->rpo_order();
   for (auto const block : *blocks) {
@@ -62,22 +67,26 @@
   // Visit each basic block in post order.
   for (auto i = blocks->rbegin(); i != blocks->rend(); ++i) {
     VisitBlock(*i);
+    if (instruction_selection_failed()) return false;
   }
 
   // Schedule the selected instructions.
-  if (FLAG_turbo_instruction_scheduling &&
-      InstructionScheduler::SchedulerSupported()) {
+  if (UseInstructionScheduling()) {
     scheduler_ = new (zone()) InstructionScheduler(zone(), sequence());
   }
 
   for (auto const block : *blocks) {
     InstructionBlock* instruction_block =
         sequence()->InstructionBlockAt(RpoNumber::FromInt(block->rpo_number()));
+    for (size_t i = 0; i < instruction_block->phis().size(); i++) {
+      UpdateRenamesInPhi(instruction_block->PhiAt(i));
+    }
     size_t end = instruction_block->code_end();
     size_t start = instruction_block->code_start();
     DCHECK_LE(end, start);
     StartBlock(RpoNumber::FromInt(block->rpo_number()));
     while (start-- > end) {
+      UpdateRenames(instructions_[start]);
       AddInstruction(instructions_[start]);
     }
     EndBlock(RpoNumber::FromInt(block->rpo_number()));
@@ -85,11 +94,11 @@
 #if DEBUG
   sequence()->ValidateSSA();
 #endif
+  return true;
 }
 
 void InstructionSelector::StartBlock(RpoNumber rpo) {
-  if (FLAG_turbo_instruction_scheduling &&
-      InstructionScheduler::SchedulerSupported()) {
+  if (UseInstructionScheduling()) {
     DCHECK_NOT_NULL(scheduler_);
     scheduler_->StartBlock(rpo);
   } else {
@@ -99,8 +108,7 @@
 
 
 void InstructionSelector::EndBlock(RpoNumber rpo) {
-  if (FLAG_turbo_instruction_scheduling &&
-      InstructionScheduler::SchedulerSupported()) {
+  if (UseInstructionScheduling()) {
     DCHECK_NOT_NULL(scheduler_);
     scheduler_->EndBlock(rpo);
   } else {
@@ -110,8 +118,7 @@
 
 
 void InstructionSelector::AddInstruction(Instruction* instr) {
-  if (FLAG_turbo_instruction_scheduling &&
-      InstructionScheduler::SchedulerSupported()) {
+  if (UseInstructionScheduling()) {
     DCHECK_NOT_NULL(scheduler_);
     scheduler_->AddInstruction(instr);
   } else {
@@ -206,6 +213,13 @@
     InstructionCode opcode, size_t output_count, InstructionOperand* outputs,
     size_t input_count, InstructionOperand* inputs, size_t temp_count,
     InstructionOperand* temps) {
+  if (output_count >= Instruction::kMaxOutputCount ||
+      input_count >= Instruction::kMaxInputCount ||
+      temp_count >= Instruction::kMaxTempCount) {
+    set_instruction_selection_failed();
+    return nullptr;
+  }
+
   Instruction* instr =
       Instruction::New(instruction_zone(), opcode, output_count, outputs,
                        input_count, inputs, temp_count, temps);
@@ -255,6 +269,53 @@
   return true;
 }
 
+void InstructionSelector::UpdateRenames(Instruction* instruction) {
+  for (size_t i = 0; i < instruction->InputCount(); i++) {
+    TryRename(instruction->InputAt(i));
+  }
+}
+
+void InstructionSelector::UpdateRenamesInPhi(PhiInstruction* phi) {
+  for (size_t i = 0; i < phi->operands().size(); i++) {
+    int vreg = phi->operands()[i];
+    int renamed = GetRename(vreg);
+    if (vreg != renamed) {
+      phi->RenameInput(i, renamed);
+    }
+  }
+}
+
+int InstructionSelector::GetRename(int virtual_register) {
+  int rename = virtual_register;
+  while (true) {
+    if (static_cast<size_t>(rename) >= virtual_register_rename_.size()) break;
+    int next = virtual_register_rename_[rename];
+    if (next == InstructionOperand::kInvalidVirtualRegister) {
+      break;
+    }
+    rename = next;
+  }
+  return rename;
+}
+
+void InstructionSelector::TryRename(InstructionOperand* op) {
+  if (!op->IsUnallocated()) return;
+  int vreg = UnallocatedOperand::cast(op)->virtual_register();
+  int rename = GetRename(vreg);
+  if (rename != vreg) {
+    UnallocatedOperand::cast(op)->set_virtual_register(rename);
+  }
+}
+
+void InstructionSelector::SetRename(const Node* node, const Node* rename) {
+  int vreg = GetVirtualRegister(node);
+  if (static_cast<size_t>(vreg) >= virtual_register_rename_.size()) {
+    int invalid = InstructionOperand::kInvalidVirtualRegister;
+    virtual_register_rename_.resize(vreg + 1, invalid);
+  }
+  virtual_register_rename_[vreg] = GetVirtualRegister(rename);
+}
+
 int InstructionSelector::GetVirtualRegister(const Node* node) {
   DCHECK_NOT_NULL(node);
   size_t const id = node->id();
@@ -330,6 +391,12 @@
   effect_level_[id] = effect_level;
 }
 
+bool InstructionSelector::CanAddressRelativeToRootsRegister() const {
+  return (enable_serialization_ == kDisableSerialization &&
+          (linkage()->GetIncomingDescriptor()->flags() &
+           CallDescriptor::kCanUseRoots));
+}
+
 void InstructionSelector::MarkAsRepresentation(MachineRepresentation rep,
                                                const InstructionOperand& op) {
   UnallocatedOperand unalloc = UnallocatedOperand::cast(op);
@@ -350,6 +417,10 @@
 InstructionOperand OperandForDeopt(OperandGenerator* g, Node* input,
                                    FrameStateInputKind kind,
                                    MachineRepresentation rep) {
+  if (rep == MachineRepresentation::kNone) {
+    return g->TempImmediate(FrameStateDescriptor::kImpossibleValue);
+  }
+
   switch (input->opcode()) {
     case IrOpcode::kInt32Constant:
     case IrOpcode::kInt64Constant:
@@ -362,15 +433,13 @@
       UNREACHABLE();
       break;
     default:
-      if (rep == MachineRepresentation::kNone) {
-        return g->TempImmediate(FrameStateDescriptor::kImpossibleValue);
-      } else {
-        switch (kind) {
-          case FrameStateInputKind::kStackSlot:
-            return g->UseUniqueSlot(input);
-          case FrameStateInputKind::kAny:
-            return g->UseAny(input);
-        }
+      switch (kind) {
+        case FrameStateInputKind::kStackSlot:
+          return g->UseUniqueSlot(input);
+        case FrameStateInputKind::kAny:
+          // Currently deopts "wrap" other operations, so the deopt's inputs
+          // are potentially needed until the end of the deoptimising code.
+          return g->UseAnyAtEnd(input);
       }
   }
   UNREACHABLE();
@@ -716,7 +785,6 @@
   }
 }
 
-
 void InstructionSelector::VisitBlock(BasicBlock* block) {
   DCHECK(!current_block_);
   current_block_ = block;
@@ -753,6 +821,7 @@
     // up".
     size_t current_node_end = instructions_.size();
     VisitNode(node);
+    if (instruction_selection_failed()) return;
     std::reverse(instructions_.begin() + current_node_end, instructions_.end());
     if (instructions_.size() == current_node_end) continue;
     // Mark source position on first instruction emitted.
@@ -1053,8 +1122,14 @@
       return VisitUint64LessThanOrEqual(node);
     case IrOpcode::kUint64Mod:
       return MarkAsWord64(node), VisitUint64Mod(node);
+    case IrOpcode::kBitcastTaggedToWord:
+      return MarkAsRepresentation(MachineType::PointerRepresentation(), node),
+             VisitBitcastTaggedToWord(node);
     case IrOpcode::kBitcastWordToTagged:
       return MarkAsReference(node), VisitBitcastWordToTagged(node);
+    case IrOpcode::kBitcastWordToTaggedSigned:
+      return MarkAsRepresentation(MachineRepresentation::kTaggedSigned, node),
+             EmitIdentity(node);
     case IrOpcode::kChangeFloat32ToFloat64:
       return MarkAsFloat64(node), VisitChangeFloat32ToFloat64(node);
     case IrOpcode::kChangeInt32ToFloat64:
@@ -1065,19 +1140,6 @@
       return MarkAsWord32(node), VisitChangeFloat64ToInt32(node);
     case IrOpcode::kChangeFloat64ToUint32:
       return MarkAsWord32(node), VisitChangeFloat64ToUint32(node);
-    case IrOpcode::kImpossibleToWord32:
-      return MarkAsWord32(node), VisitImpossibleToWord32(node);
-    case IrOpcode::kImpossibleToWord64:
-      return MarkAsWord64(node), VisitImpossibleToWord64(node);
-    case IrOpcode::kImpossibleToFloat32:
-      return MarkAsFloat32(node), VisitImpossibleToFloat32(node);
-    case IrOpcode::kImpossibleToFloat64:
-      return MarkAsFloat64(node), VisitImpossibleToFloat64(node);
-    case IrOpcode::kImpossibleToTagged:
-      MarkAsRepresentation(MachineType::PointerRepresentation(), node);
-      return VisitImpossibleToTagged(node);
-    case IrOpcode::kImpossibleToBit:
-      return MarkAsWord32(node), VisitImpossibleToBit(node);
     case IrOpcode::kFloat64SilenceNaN:
       MarkAsFloat64(node);
       if (CanProduceSignalingNaN(node->InputAt(0))) {
@@ -1304,9 +1366,15 @@
     }
     case IrOpcode::kAtomicStore:
       return VisitAtomicStore(node);
+    case IrOpcode::kProtectedLoad:
+      return VisitProtectedLoad(node);
     case IrOpcode::kUnsafePointerAdd:
       MarkAsRepresentation(MachineType::PointerRepresentation(), node);
       return VisitUnsafePointerAdd(node);
+    case IrOpcode::kCreateInt32x4:
+      return MarkAsSimd128(node), VisitCreateInt32x4(node);
+    case IrOpcode::kInt32x4ExtractLane:
+      return MarkAsWord32(node), VisitInt32x4ExtractLane(node);
     default:
       V8_Fatal(__FILE__, __LINE__, "Unexpected operator #%d:%s @ node #%d",
                node->opcode(), node->op()->mnemonic(), node->id());
@@ -1314,42 +1382,6 @@
   }
 }
 
-void InstructionSelector::VisitImpossibleToWord32(Node* node) {
-  OperandGenerator g(this);
-  Emit(kArchImpossible, g.DefineAsConstant(node, Constant(0)));
-}
-
-void InstructionSelector::VisitImpossibleToWord64(Node* node) {
-  OperandGenerator g(this);
-  Emit(kArchImpossible,
-       g.DefineAsConstant(node, Constant(static_cast<int64_t>(0))));
-}
-
-void InstructionSelector::VisitImpossibleToFloat32(Node* node) {
-  OperandGenerator g(this);
-  Emit(kArchImpossible, g.DefineAsConstant(node, Constant(0.0f)));
-}
-
-void InstructionSelector::VisitImpossibleToFloat64(Node* node) {
-  OperandGenerator g(this);
-  Emit(kArchImpossible, g.DefineAsConstant(node, Constant(0.0)));
-}
-
-void InstructionSelector::VisitImpossibleToBit(Node* node) {
-  OperandGenerator g(this);
-  Emit(kArchImpossible, g.DefineAsConstant(node, Constant(0)));
-}
-
-void InstructionSelector::VisitImpossibleToTagged(Node* node) {
-  OperandGenerator g(this);
-#if V8_TARGET_ARCH_64_BIT
-  Emit(kArchImpossible,
-       g.DefineAsConstant(node, Constant(static_cast<int64_t>(0))));
-#else   // V8_TARGET_ARCH_64_BIT
-  Emit(kArchImpossible, g.DefineAsConstant(node, Constant(0)));
-#endif  // V8_TARGET_ARCH_64_BIT
-}
-
 void InstructionSelector::VisitLoadStackPointer(Node* node) {
   OperandGenerator g(this);
   Emit(kArchStackPointer, g.DefineAsRegister(node));
@@ -1493,8 +1525,14 @@
        sequence()->AddImmediate(Constant(slot)), 0, nullptr);
 }
 
+void InstructionSelector::VisitBitcastTaggedToWord(Node* node) {
+  OperandGenerator g(this);
+  Emit(kArchNop, g.DefineSameAsFirst(node), g.Use(node->InputAt(0)));
+}
+
 void InstructionSelector::VisitBitcastWordToTagged(Node* node) {
-  EmitIdentity(node);
+  OperandGenerator g(this);
+  Emit(kArchNop, g.DefineSameAsFirst(node), g.Use(node->InputAt(0)));
 }
 
 // 32 bit targets do not implement the following instructions.
@@ -1647,7 +1685,6 @@
 void InstructionSelector::VisitBitcastInt64ToFloat64(Node* node) {
   UNIMPLEMENTED();
 }
-
 #endif  // V8_TARGET_ARCH_32_BIT
 
 // 64 bit targets do not implement the following instructions.
@@ -1665,6 +1702,14 @@
 void InstructionSelector::VisitWord32PairSar(Node* node) { UNIMPLEMENTED(); }
 #endif  // V8_TARGET_ARCH_64_BIT
 
+#if !V8_TARGET_ARCH_X64
+void InstructionSelector::VisitCreateInt32x4(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitInt32x4ExtractLane(Node* node) {
+  UNIMPLEMENTED();
+}
+#endif  // !V8_TARGET_ARCH_X64
+
 void InstructionSelector::VisitFinishRegion(Node* node) { EmitIdentity(node); }
 
 void InstructionSelector::VisitParameter(Node* node) {
@@ -1680,13 +1725,17 @@
   Emit(kArchNop, op);
 }
 
+namespace {
+LinkageLocation ExceptionLocation() {
+  return LinkageLocation::ForRegister(kReturnRegister0.code(),
+                                      MachineType::IntPtr());
+}
+}
 
 void InstructionSelector::VisitIfException(Node* node) {
   OperandGenerator g(this);
-  Node* call = node->InputAt(1);
-  DCHECK_EQ(IrOpcode::kCall, call->opcode());
-  const CallDescriptor* descriptor = CallDescriptorOf(call->op());
-  Emit(kArchNop, g.DefineAsLocation(node, descriptor->GetReturnLocation(0)));
+  DCHECK_EQ(IrOpcode::kCall, node->InputAt(1)->opcode());
+  Emit(kArchNop, g.DefineAsLocation(node, ExceptionLocation()));
 }
 
 
@@ -1812,9 +1861,11 @@
   // Emit the call instruction.
   size_t const output_count = buffer.outputs.size();
   auto* outputs = output_count ? &buffer.outputs.front() : nullptr;
-  Emit(opcode, output_count, outputs, buffer.instruction_args.size(),
-       &buffer.instruction_args.front())
-      ->MarkAsCall();
+  Instruction* call_instr =
+      Emit(opcode, output_count, outputs, buffer.instruction_args.size(),
+           &buffer.instruction_args.front());
+  if (instruction_selection_failed()) return;
+  call_instr->MarkAsCall();
 }
 
 
@@ -1920,9 +1971,11 @@
     // Emit the call instruction.
     size_t output_count = buffer.outputs.size();
     auto* outputs = &buffer.outputs.front();
-    Emit(opcode, output_count, outputs, buffer.instruction_args.size(),
-         &buffer.instruction_args.front())
-        ->MarkAsCall();
+    Instruction* call_instr =
+        Emit(opcode, output_count, outputs, buffer.instruction_args.size(),
+             &buffer.instruction_args.front());
+    if (instruction_selection_failed()) return;
+    call_instr->MarkAsCall();
     Emit(kArchRet, 0, nullptr, output_count, outputs);
   }
 }
@@ -1984,8 +2037,8 @@
 
 void InstructionSelector::EmitIdentity(Node* node) {
   OperandGenerator g(this);
-  Node* value = node->InputAt(0);
-  Emit(kArchNop, g.DefineSameAsFirst(node), g.Use(value));
+  MarkAsUsed(node->InputAt(0));
+  SetRename(node, node->InputAt(0));
 }
 
 void InstructionSelector::VisitDeoptimize(DeoptimizeKind kind,
diff --git a/src/compiler/instruction-selector.h b/src/compiler/instruction-selector.h
index f9f43e9..2981f90 100644
--- a/src/compiler/instruction-selector.h
+++ b/src/compiler/instruction-selector.h
@@ -8,11 +8,11 @@
 #include <map>
 
 #include "src/compiler/common-operator.h"
-#include "src/compiler/instruction.h"
 #include "src/compiler/instruction-scheduler.h"
+#include "src/compiler/instruction.h"
 #include "src/compiler/machine-operator.h"
 #include "src/compiler/node.h"
-#include "src/zone-containers.h"
+#include "src/zone/zone-containers.h"
 
 namespace v8 {
 namespace internal {
@@ -48,16 +48,22 @@
   class Features;
 
   enum SourcePositionMode { kCallSourcePositions, kAllSourcePositions };
+  enum EnableScheduling { kDisableScheduling, kEnableScheduling };
+  enum EnableSerialization { kDisableSerialization, kEnableSerialization };
 
   InstructionSelector(
       Zone* zone, size_t node_count, Linkage* linkage,
       InstructionSequence* sequence, Schedule* schedule,
       SourcePositionTable* source_positions, Frame* frame,
       SourcePositionMode source_position_mode = kCallSourcePositions,
-      Features features = SupportedFeatures());
+      Features features = SupportedFeatures(),
+      EnableScheduling enable_scheduling = FLAG_turbo_instruction_scheduling
+                                               ? kEnableScheduling
+                                               : kDisableScheduling,
+      EnableSerialization enable_serialization = kDisableSerialization);
 
   // Visit code for the entire graph with the included schedule.
-  void SelectInstructions();
+  bool SelectInstructions();
 
   void StartBlock(RpoNumber rpo);
   void EndBlock(RpoNumber rpo);
@@ -194,15 +200,31 @@
   int GetVirtualRegister(const Node* node);
   const std::map<NodeId, int> GetVirtualRegistersForTesting() const;
 
+  // Check if we can generate loads and stores of ExternalConstants relative
+  // to the roots register, i.e. if both a root register is available for this
+  // compilation unit and the serializer is disabled.
+  bool CanAddressRelativeToRootsRegister() const;
+
   Isolate* isolate() const { return sequence()->isolate(); }
 
  private:
   friend class OperandGenerator;
 
+  bool UseInstructionScheduling() const {
+    return (enable_scheduling_ == kEnableScheduling) &&
+           InstructionScheduler::SchedulerSupported();
+  }
+
   void EmitTableSwitch(const SwitchInfo& sw, InstructionOperand& index_operand);
   void EmitLookupSwitch(const SwitchInfo& sw,
                         InstructionOperand& value_operand);
 
+  void TryRename(InstructionOperand* op);
+  int GetRename(int virtual_register);
+  void SetRename(const Node* node, const Node* rename);
+  void UpdateRenames(Instruction* instruction);
+  void UpdateRenamesInPhi(PhiInstruction* phi);
+
   // Inform the instruction selection that {node} was just defined.
   void MarkAsDefined(Node* node);
 
@@ -228,6 +250,9 @@
   void MarkAsFloat64(Node* node) {
     MarkAsRepresentation(MachineRepresentation::kFloat64, node);
   }
+  void MarkAsSimd128(Node* node) {
+    MarkAsRepresentation(MachineRepresentation::kSimd128, node);
+  }
   void MarkAsReference(Node* node) {
     MarkAsRepresentation(MachineRepresentation::kTagged, node);
   }
@@ -276,6 +301,8 @@
 
 #define DECLARE_GENERATOR(x) void Visit##x(Node* node);
   MACHINE_OP_LIST(DECLARE_GENERATOR)
+  MACHINE_SIMD_RETURN_NUM_OP_LIST(DECLARE_GENERATOR)
+  MACHINE_SIMD_RETURN_SIMD_OP_LIST(DECLARE_GENERATOR)
 #undef DECLARE_GENERATOR
 
   void VisitFinishRegion(Node* node);
@@ -312,6 +339,11 @@
   Zone* instruction_zone() const { return sequence()->zone(); }
   Zone* zone() const { return zone_; }
 
+  void set_instruction_selection_failed() {
+    instruction_selection_failed_ = true;
+  }
+  bool instruction_selection_failed() { return instruction_selection_failed_; }
+
   // ===========================================================================
 
   Zone* const zone_;
@@ -327,8 +359,12 @@
   BoolVector used_;
   IntVector effect_level_;
   IntVector virtual_registers_;
+  IntVector virtual_register_rename_;
   InstructionScheduler* scheduler_;
+  EnableScheduling enable_scheduling_;
+  EnableSerialization enable_serialization_;
   Frame* frame_;
+  bool instruction_selection_failed_;
 };
 
 }  // namespace compiler
diff --git a/src/compiler/instruction.cc b/src/compiler/instruction.cc
index 615b644..0df7ca0 100644
--- a/src/compiler/instruction.cc
+++ b/src/compiler/instruction.cc
@@ -314,7 +314,6 @@
   return true;
 }
 
-
 void Instruction::Print(const RegisterConfiguration* config) const {
   OFStream os(stdout);
   PrintableInstruction wrapper;
@@ -569,6 +568,10 @@
   operands_[offset] = virtual_register;
 }
 
+void PhiInstruction::RenameInput(size_t offset, int virtual_register) {
+  DCHECK_NE(InstructionOperand::kInvalidVirtualRegister, operands_[offset]);
+  operands_[offset] = virtual_register;
+}
 
 InstructionBlock::InstructionBlock(Zone* zone, RpoNumber rpo_number,
                                    RpoNumber loop_header, RpoNumber loop_end,
@@ -631,6 +634,58 @@
   return instr_block;
 }
 
+std::ostream& operator<<(std::ostream& os,
+                         PrintableInstructionBlock& printable_block) {
+  const InstructionBlock* block = printable_block.block_;
+  const RegisterConfiguration* config = printable_block.register_configuration_;
+  const InstructionSequence* code = printable_block.code_;
+
+  os << "B" << block->rpo_number();
+  os << ": AO#" << block->ao_number();
+  if (block->IsDeferred()) os << " (deferred)";
+  if (!block->needs_frame()) os << " (no frame)";
+  if (block->must_construct_frame()) os << " (construct frame)";
+  if (block->must_deconstruct_frame()) os << " (deconstruct frame)";
+  if (block->IsLoopHeader()) {
+    os << " loop blocks: [" << block->rpo_number() << ", " << block->loop_end()
+       << ")";
+  }
+  os << "  instructions: [" << block->code_start() << ", " << block->code_end()
+     << ")" << std::endl
+     << " predecessors:";
+
+  for (RpoNumber pred : block->predecessors()) {
+    os << " B" << pred.ToInt();
+  }
+  os << std::endl;
+
+  for (const PhiInstruction* phi : block->phis()) {
+    PrintableInstructionOperand printable_op = {config, phi->output()};
+    os << "     phi: " << printable_op << " =";
+    for (int input : phi->operands()) {
+      os << " v" << input;
+    }
+    os << std::endl;
+  }
+
+  ScopedVector<char> buf(32);
+  PrintableInstruction printable_instr;
+  printable_instr.register_configuration_ = config;
+  for (int j = block->first_instruction_index();
+       j <= block->last_instruction_index(); j++) {
+    // TODO(svenpanne) Add some basic formatting to our streams.
+    SNPrintF(buf, "%5d", j);
+    printable_instr.instr_ = code->InstructionAt(j);
+    os << "   " << buf.start() << ": " << printable_instr << std::endl;
+  }
+
+  for (RpoNumber succ : block->successors()) {
+    os << " B" << succ.ToInt();
+  }
+  os << std::endl;
+  return os;
+}
+
 InstructionBlocks* InstructionSequence::InstructionBlocksFor(
     Zone* zone, const Schedule* schedule) {
   InstructionBlocks* blocks = zone->NewArray<InstructionBlocks>(1);
@@ -874,7 +929,6 @@
   source_positions_.insert(std::make_pair(instr, value));
 }
 
-
 void InstructionSequence::Print(const RegisterConfiguration* config) const {
   OFStream os(stdout);
   PrintableInstructionSequence wrapper;
@@ -891,49 +945,8 @@
   RpoNumber rpo = RpoNumber::FromInt(block_id);
   const InstructionBlock* block = InstructionBlockAt(rpo);
   CHECK(block->rpo_number() == rpo);
-
-  os << "B" << block->rpo_number();
-  os << ": AO#" << block->ao_number();
-  if (block->IsDeferred()) os << " (deferred)";
-  if (!block->needs_frame()) os << " (no frame)";
-  if (block->must_construct_frame()) os << " (construct frame)";
-  if (block->must_deconstruct_frame()) os << " (deconstruct frame)";
-  if (block->IsLoopHeader()) {
-    os << " loop blocks: [" << block->rpo_number() << ", " << block->loop_end()
-       << ")";
-  }
-  os << "  instructions: [" << block->code_start() << ", " << block->code_end()
-     << ")\n  predecessors:";
-
-  for (RpoNumber pred : block->predecessors()) {
-    os << " B" << pred.ToInt();
-  }
-  os << "\n";
-
-  for (const PhiInstruction* phi : block->phis()) {
-    PrintableInstructionOperand printable_op = {config, phi->output()};
-    os << "     phi: " << printable_op << " =";
-    for (int input : phi->operands()) {
-      os << " v" << input;
-    }
-    os << "\n";
-  }
-
-  ScopedVector<char> buf(32);
-  PrintableInstruction printable_instr;
-  printable_instr.register_configuration_ = config;
-  for (int j = block->first_instruction_index();
-       j <= block->last_instruction_index(); j++) {
-    // TODO(svenpanne) Add some basic formatting to our streams.
-    SNPrintF(buf, "%5d", j);
-    printable_instr.instr_ = InstructionAt(j);
-    os << "   " << buf.start() << ": " << printable_instr << "\n";
-  }
-
-  for (RpoNumber succ : block->successors()) {
-    os << " B" << succ.ToInt();
-  }
-  os << "\n";
+  PrintableInstructionBlock printable_block = {config, block, this};
+  os << printable_block << std::endl;
 }
 
 void InstructionSequence::PrintBlock(int block_id) const {
@@ -1020,8 +1033,11 @@
        it != code.constants_.end(); ++i, ++it) {
     os << "CST#" << i << ": v" << it->first << " = " << it->second << "\n";
   }
+  PrintableInstructionBlock printable_block = {
+      printable.register_configuration_, nullptr, printable.sequence_};
   for (int i = 0; i < code.InstructionBlockCount(); i++) {
-    printable.sequence_->PrintBlock(printable.register_configuration_, i);
+    printable_block.block_ = code.InstructionBlockAt(RpoNumber::FromInt(i));
+    os << printable_block;
   }
   return os;
 }
diff --git a/src/compiler/instruction.h b/src/compiler/instruction.h
index b5aea70..b5c5914 100644
--- a/src/compiler/instruction.h
+++ b/src/compiler/instruction.h
@@ -17,7 +17,7 @@
 #include "src/compiler/source-position.h"
 #include "src/macro-assembler.h"
 #include "src/register-configuration.h"
-#include "src/zone-allocator.h"
+#include "src/zone/zone-allocator.h"
 
 namespace v8 {
 namespace internal {
@@ -33,7 +33,17 @@
 
   // TODO(dcarney): recover bit. INVALID can be represented as UNALLOCATED with
   // kInvalidVirtualRegister and some DCHECKS.
-  enum Kind { INVALID, UNALLOCATED, CONSTANT, IMMEDIATE, EXPLICIT, ALLOCATED };
+  enum Kind {
+    INVALID,
+    UNALLOCATED,
+    CONSTANT,
+    IMMEDIATE,
+    // Location operand kinds.
+    EXPLICIT,
+    ALLOCATED,
+    FIRST_LOCATION_OPERAND_KIND = EXPLICIT
+    // Location operand kinds must be last.
+  };
 
   InstructionOperand() : InstructionOperand(INVALID) {}
 
@@ -64,12 +74,16 @@
   INSTRUCTION_OPERAND_PREDICATE(Allocated, ALLOCATED)
 #undef INSTRUCTION_OPERAND_PREDICATE
 
+  inline bool IsAnyLocationOperand() const;
+  inline bool IsLocationOperand() const;
+  inline bool IsFPLocationOperand() const;
   inline bool IsAnyRegister() const;
   inline bool IsRegister() const;
   inline bool IsFPRegister() const;
   inline bool IsFloatRegister() const;
   inline bool IsDoubleRegister() const;
   inline bool IsSimd128Register() const;
+  inline bool IsAnyStackSlot() const;
   inline bool IsStackSlot() const;
   inline bool IsFPStackSlot() const;
   inline bool IsFloatStackSlot() const;
@@ -105,6 +119,7 @@
 
   bool InterferesWith(const InstructionOperand& that) const;
 
+  // APIs to aid debugging. For general-stream APIs, use operator<<
   void Print(const RegisterConfiguration* config) const;
   void Print() const;
 
@@ -481,17 +496,17 @@
   }
 
   static LocationOperand* cast(InstructionOperand* op) {
-    DCHECK(ALLOCATED == op->kind() || EXPLICIT == op->kind());
+    DCHECK(op->IsAnyLocationOperand());
     return static_cast<LocationOperand*>(op);
   }
 
   static const LocationOperand* cast(const InstructionOperand* op) {
-    DCHECK(ALLOCATED == op->kind() || EXPLICIT == op->kind());
+    DCHECK(op->IsAnyLocationOperand());
     return static_cast<const LocationOperand*>(op);
   }
 
   static LocationOperand cast(const InstructionOperand& op) {
-    DCHECK(ALLOCATED == op.kind() || EXPLICIT == op.kind());
+    DCHECK(op.IsAnyLocationOperand());
     return *static_cast<const LocationOperand*>(&op);
   }
 
@@ -531,9 +546,22 @@
 
 #undef INSTRUCTION_OPERAND_CASTS
 
+bool InstructionOperand::IsAnyLocationOperand() const {
+  return this->kind() >= FIRST_LOCATION_OPERAND_KIND;
+}
+
+bool InstructionOperand::IsLocationOperand() const {
+  return IsAnyLocationOperand() &&
+         !IsFloatingPoint(LocationOperand::cast(this)->representation());
+}
+
+bool InstructionOperand::IsFPLocationOperand() const {
+  return IsAnyLocationOperand() &&
+         IsFloatingPoint(LocationOperand::cast(this)->representation());
+}
 
 bool InstructionOperand::IsAnyRegister() const {
-  return (IsAllocated() || IsExplicit()) &&
+  return IsAnyLocationOperand() &&
          LocationOperand::cast(this)->location_kind() ==
              LocationOperand::REGISTER;
 }
@@ -567,22 +595,24 @@
              MachineRepresentation::kSimd128;
 }
 
-bool InstructionOperand::IsStackSlot() const {
-  return (IsAllocated() || IsExplicit()) &&
+bool InstructionOperand::IsAnyStackSlot() const {
+  return IsAnyLocationOperand() &&
          LocationOperand::cast(this)->location_kind() ==
-             LocationOperand::STACK_SLOT &&
+             LocationOperand::STACK_SLOT;
+}
+
+bool InstructionOperand::IsStackSlot() const {
+  return IsAnyStackSlot() &&
          !IsFloatingPoint(LocationOperand::cast(this)->representation());
 }
 
 bool InstructionOperand::IsFPStackSlot() const {
-  return (IsAllocated() || IsExplicit()) &&
-         LocationOperand::cast(this)->location_kind() ==
-             LocationOperand::STACK_SLOT &&
+  return IsAnyStackSlot() &&
          IsFloatingPoint(LocationOperand::cast(this)->representation());
 }
 
 bool InstructionOperand::IsFloatStackSlot() const {
-  return (IsAllocated() || IsExplicit()) &&
+  return IsAnyLocationOperand() &&
          LocationOperand::cast(this)->location_kind() ==
              LocationOperand::STACK_SLOT &&
          LocationOperand::cast(this)->representation() ==
@@ -590,7 +620,7 @@
 }
 
 bool InstructionOperand::IsDoubleStackSlot() const {
-  return (IsAllocated() || IsExplicit()) &&
+  return IsAnyLocationOperand() &&
          LocationOperand::cast(this)->location_kind() ==
              LocationOperand::STACK_SLOT &&
          LocationOperand::cast(this)->representation() ==
@@ -598,7 +628,7 @@
 }
 
 bool InstructionOperand::IsSimd128StackSlot() const {
-  return (IsAllocated() || IsExplicit()) &&
+  return IsAnyLocationOperand() &&
          LocationOperand::cast(this)->location_kind() ==
              LocationOperand::STACK_SLOT &&
          LocationOperand::cast(this)->representation() ==
@@ -606,7 +636,7 @@
 }
 
 uint64_t InstructionOperand::GetCanonicalizedValue() const {
-  if (IsAllocated() || IsExplicit()) {
+  if (IsAnyLocationOperand()) {
     MachineRepresentation canonical = MachineRepresentation::kNone;
     if (IsFPRegister()) {
       // We treat all FP register operands the same for simple aliasing.
@@ -672,6 +702,7 @@
     return source_.IsInvalid();
   }
 
+  // APIs to aid debugging. For general-stream APIs, use operator<<
   void Print(const RegisterConfiguration* config) const;
   void Print() const;
 
@@ -856,10 +887,7 @@
     reference_map_ = nullptr;
   }
 
-  bool IsNop() const {
-    return arch_opcode() == kArchNop && InputCount() == 0 &&
-           OutputCount() == 0 && TempCount() == 0;
-  }
+  bool IsNop() const { return arch_opcode() == kArchNop; }
 
   bool IsDeoptimizeCall() const {
     return arch_opcode() == ArchOpcode::kArchDeoptimize ||
@@ -915,9 +943,18 @@
     block_ = block;
   }
 
+  // APIs to aid debugging. For general-stream APIs, use operator<<
   void Print(const RegisterConfiguration* config) const;
   void Print() const;
 
+  typedef BitField<size_t, 0, 8> OutputCountField;
+  typedef BitField<size_t, 8, 16> InputCountField;
+  typedef BitField<size_t, 24, 6> TempCountField;
+
+  static const size_t kMaxOutputCount = OutputCountField::kMax;
+  static const size_t kMaxInputCount = InputCountField::kMax;
+  static const size_t kMaxTempCount = TempCountField::kMax;
+
  private:
   explicit Instruction(InstructionCode opcode);
 
@@ -926,9 +963,6 @@
               InstructionOperand* inputs, size_t temp_count,
               InstructionOperand* temps);
 
-  typedef BitField<size_t, 0, 8> OutputCountField;
-  typedef BitField<size_t, 8, 16> InputCountField;
-  typedef BitField<size_t, 24, 6> TempCountField;
   typedef BitField<bool, 30, 1> IsCallField;
 
   InstructionCode opcode_;
@@ -1184,6 +1218,7 @@
   PhiInstruction(Zone* zone, int virtual_register, size_t input_count);
 
   void SetInput(size_t offset, int virtual_register);
+  void RenameInput(size_t offset, int virtual_register);
 
   int virtual_register() const { return virtual_register_; }
   const IntVector& operands() const { return operands_; }
@@ -1251,6 +1286,7 @@
 
   typedef ZoneVector<PhiInstruction*> PhiInstructions;
   const PhiInstructions& phis() const { return phis_; }
+  PhiInstruction* PhiAt(size_t i) const { return phis_[i]; }
   void AddPhi(PhiInstruction* phi) { phis_.push_back(phi); }
 
   void set_ao_number(RpoNumber ao_number) { ao_number_ = ao_number; }
@@ -1285,6 +1321,17 @@
   RpoNumber last_deferred_;
 };
 
+class InstructionSequence;
+
+struct PrintableInstructionBlock {
+  const RegisterConfiguration* register_configuration_;
+  const InstructionBlock* block_;
+  const InstructionSequence* code_;
+};
+
+std::ostream& operator<<(std::ostream& os,
+                         const PrintableInstructionBlock& printable_block);
+
 typedef ZoneDeque<Constant> ConstantDeque;
 typedef std::map<int, Constant, std::less<int>,
                  zone_allocator<std::pair<const int, Constant> > > ConstantMap;
@@ -1343,8 +1390,7 @@
   void MarkAsRepresentation(MachineRepresentation rep, int virtual_register);
 
   bool IsReference(int virtual_register) const {
-    return GetRepresentation(virtual_register) ==
-           MachineRepresentation::kTagged;
+    return CanBeTaggedPointer(GetRepresentation(virtual_register));
   }
   bool IsFP(int virtual_register) const {
     return IsFloatingPoint(GetRepresentation(virtual_register));
@@ -1445,6 +1491,8 @@
     }
     return false;
   }
+
+  // APIs to aid debugging. For general-stream APIs, use operator<<
   void Print(const RegisterConfiguration* config) const;
   void Print() const;
 
diff --git a/src/compiler/int64-lowering.cc b/src/compiler/int64-lowering.cc
index 737947a..539a372 100644
--- a/src/compiler/int64-lowering.cc
+++ b/src/compiler/int64-lowering.cc
@@ -13,7 +13,7 @@
 
 #include "src/compiler/node.h"
 #include "src/wasm/wasm-module.h"
-#include "src/zone.h"
+#include "src/zone/zone.h"
 
 namespace v8 {
 namespace internal {
@@ -778,6 +778,18 @@
       }
       break;
     }
+    case IrOpcode::kProjection: {
+      Node* call = node->InputAt(0);
+      DCHECK_EQ(IrOpcode::kCall, call->opcode());
+      CallDescriptor* descriptor =
+          const_cast<CallDescriptor*>(CallDescriptorOf(call->op()));
+      for (size_t i = 0; i < descriptor->ReturnCount(); i++) {
+        if (descriptor->GetReturnType(i) == MachineType::Int64()) {
+          UNREACHABLE();  // TODO(titzer): implement multiple i64 returns.
+        }
+      }
+      break;
+    }
     case IrOpcode::kWord64ReverseBytes: {
       Node* input = node->InputAt(0);
       ReplaceNode(node, graph()->NewNode(machine()->Word32ReverseBytes().op(),
diff --git a/src/compiler/int64-lowering.h b/src/compiler/int64-lowering.h
index 4ec4e82..084c07a 100644
--- a/src/compiler/int64-lowering.h
+++ b/src/compiler/int64-lowering.h
@@ -9,7 +9,7 @@
 #include "src/compiler/graph.h"
 #include "src/compiler/machine-operator.h"
 #include "src/compiler/node-marker.h"
-#include "src/zone-containers.h"
+#include "src/zone/zone-containers.h"
 
 namespace v8 {
 namespace internal {
diff --git a/src/compiler/js-builtin-reducer.cc b/src/compiler/js-builtin-reducer.cc
index 926bd3f..41d4a00 100644
--- a/src/compiler/js-builtin-reducer.cc
+++ b/src/compiler/js-builtin-reducer.cc
@@ -10,9 +10,9 @@
 #include "src/compiler/node-matchers.h"
 #include "src/compiler/node-properties.h"
 #include "src/compiler/simplified-operator.h"
+#include "src/compiler/type-cache.h"
+#include "src/compiler/types.h"
 #include "src/objects-inl.h"
-#include "src/type-cache.h"
-#include "src/types.h"
 
 namespace v8 {
 namespace internal {
@@ -275,8 +275,8 @@
     // here is to learn on deopt, i.e. disable Array.prototype.push inlining
     // for this function.
     if (IsFastSmiElementsKind(receiver_map->elements_kind())) {
-      value = effect = graph()->NewNode(simplified()->CheckTaggedSigned(),
-                                        value, effect, control);
+      value = effect =
+          graph()->NewNode(simplified()->CheckSmi(), value, effect, control);
     } else if (IsFastDoubleElementsKind(receiver_map->elements_kind())) {
       value = effect =
           graph()->NewNode(simplified()->CheckNumber(), value, effect, control);
@@ -323,6 +323,123 @@
   return NoChange();
 }
 
+namespace {
+
+bool HasInstanceTypeWitness(Node* receiver, Node* effect,
+                            InstanceType instance_type) {
+  for (Node* dominator = effect;;) {
+    if (dominator->opcode() == IrOpcode::kCheckMaps &&
+        dominator->InputAt(0) == receiver) {
+      // Check if all maps have the given {instance_type}.
+      for (int i = 1; i < dominator->op()->ValueInputCount(); ++i) {
+        Node* const map = NodeProperties::GetValueInput(dominator, i);
+        Type* const map_type = NodeProperties::GetType(map);
+        if (!map_type->IsConstant()) return false;
+        Handle<Map> const map_value =
+            Handle<Map>::cast(map_type->AsConstant()->Value());
+        if (map_value->instance_type() != instance_type) return false;
+      }
+      return true;
+    }
+    switch (dominator->opcode()) {
+      case IrOpcode::kStoreField: {
+        FieldAccess const& access = FieldAccessOf(dominator->op());
+        if (access.base_is_tagged == kTaggedBase &&
+            access.offset == HeapObject::kMapOffset) {
+          return false;
+        }
+        break;
+      }
+      case IrOpcode::kStoreElement:
+      case IrOpcode::kStoreTypedElement:
+        break;
+      default: {
+        DCHECK_EQ(1, dominator->op()->EffectOutputCount());
+        if (dominator->op()->EffectInputCount() != 1 ||
+            !dominator->op()->HasProperty(Operator::kNoWrite)) {
+          // Didn't find any appropriate CheckMaps node.
+          return false;
+        }
+        break;
+      }
+    }
+    dominator = NodeProperties::GetEffectInput(dominator);
+  }
+}
+
+}  // namespace
+
+// ES6 section 20.3.4.10 Date.prototype.getTime ( )
+Reduction JSBuiltinReducer::ReduceDateGetTime(Node* node) {
+  Node* receiver = NodeProperties::GetValueInput(node, 1);
+  Node* effect = NodeProperties::GetEffectInput(node);
+  Node* control = NodeProperties::GetControlInput(node);
+  if (HasInstanceTypeWitness(receiver, effect, JS_DATE_TYPE)) {
+    Node* value = effect = graph()->NewNode(
+        simplified()->LoadField(AccessBuilder::ForJSDateValue()), receiver,
+        effect, control);
+    ReplaceWithValue(node, value, effect, control);
+    return Replace(value);
+  }
+  return NoChange();
+}
+
+// ES6 section 19.2.3.6 Function.prototype [ @@hasInstance ] ( V )
+Reduction JSBuiltinReducer::ReduceFunctionHasInstance(Node* node) {
+  Node* receiver = NodeProperties::GetValueInput(node, 1);
+  Node* object = (node->op()->ValueInputCount() >= 3)
+                     ? NodeProperties::GetValueInput(node, 2)
+                     : jsgraph()->UndefinedConstant();
+  Node* context = NodeProperties::GetContextInput(node);
+  Node* frame_state = NodeProperties::GetFrameStateInput(node);
+  Node* effect = NodeProperties::GetEffectInput(node);
+  Node* control = NodeProperties::GetControlInput(node);
+
+  // TODO(turbofan): If JSOrdinaryToInstance raises an exception, the
+  // stack trace doesn't contain the @@hasInstance call; we have the
+  // corresponding bug in the baseline case. Some massaging of the frame
+  // state would be necessary here.
+
+  // Morph this {node} into a JSOrdinaryHasInstance node.
+  node->ReplaceInput(0, receiver);
+  node->ReplaceInput(1, object);
+  node->ReplaceInput(2, context);
+  node->ReplaceInput(3, frame_state);
+  node->ReplaceInput(4, effect);
+  node->ReplaceInput(5, control);
+  node->TrimInputCount(6);
+  NodeProperties::ChangeOp(node, javascript()->OrdinaryHasInstance());
+  return Changed(node);
+}
+
+// ES6 section 18.2.2 isFinite ( number )
+Reduction JSBuiltinReducer::ReduceGlobalIsFinite(Node* node) {
+  JSCallReduction r(node);
+  if (r.InputsMatchOne(Type::PlainPrimitive())) {
+    // isFinite(a:plain-primitive) -> NumberEqual(a', a')
+    // where a' = NumberSubtract(ToNumber(a), ToNumber(a))
+    Node* input = ToNumber(r.GetJSCallInput(0));
+    Node* diff = graph()->NewNode(simplified()->NumberSubtract(), input, input);
+    Node* value = graph()->NewNode(simplified()->NumberEqual(), diff, diff);
+    return Replace(value);
+  }
+  return NoChange();
+}
+
+// ES6 section 18.2.3 isNaN ( number )
+Reduction JSBuiltinReducer::ReduceGlobalIsNaN(Node* node) {
+  JSCallReduction r(node);
+  if (r.InputsMatchOne(Type::PlainPrimitive())) {
+    // isNaN(a:plain-primitive) -> BooleanNot(NumberEqual(a', a'))
+    // where a' = ToNumber(a)
+    Node* input = ToNumber(r.GetJSCallInput(0));
+    Node* check = graph()->NewNode(simplified()->NumberEqual(), input, input);
+    Node* value = graph()->NewNode(simplified()->BooleanNot(), check);
+    return Replace(value);
+  }
+  return NoChange();
+}
+
 // ES6 section 20.2.2.1 Math.abs ( x )
 Reduction JSBuiltinReducer::ReduceMathAbs(Node* node) {
   JSCallReduction r(node);
@@ -737,6 +854,60 @@
   return NoChange();
 }
 
+// ES6 section 20.1.2.2 Number.isFinite ( number )
+Reduction JSBuiltinReducer::ReduceNumberIsFinite(Node* node) {
+  JSCallReduction r(node);
+  if (r.InputsMatchOne(Type::Number())) {
+    // Number.isFinite(a:number) -> NumberEqual(a', a')
+    // where a' = NumberSubtract(a, a)
+    Node* input = r.GetJSCallInput(0);
+    Node* diff = graph()->NewNode(simplified()->NumberSubtract(), input, input);
+    Node* value = graph()->NewNode(simplified()->NumberEqual(), diff, diff);
+    return Replace(value);
+  }
+  return NoChange();
+}
+
+// ES6 section 20.1.2.3 Number.isInteger ( number )
+Reduction JSBuiltinReducer::ReduceNumberIsInteger(Node* node) {
+  JSCallReduction r(node);
+  if (r.InputsMatchOne(Type::Number())) {
+    // Number.isInteger(x:number) -> NumberEqual(NumberSubtract(x, x'), #0)
+    // where x' = NumberTrunc(x)
+    Node* input = r.GetJSCallInput(0);
+    Node* trunc = graph()->NewNode(simplified()->NumberTrunc(), input);
+    Node* diff = graph()->NewNode(simplified()->NumberSubtract(), input, trunc);
+    Node* value = graph()->NewNode(simplified()->NumberEqual(), diff,
+                                   jsgraph()->ZeroConstant());
+    return Replace(value);
+  }
+  return NoChange();
+}
+
+// ES6 section 20.1.2.4 Number.isNaN ( number )
+Reduction JSBuiltinReducer::ReduceNumberIsNaN(Node* node) {
+  JSCallReduction r(node);
+  if (r.InputsMatchOne(Type::Number())) {
+    // Number.isNaN(a:number) -> BooleanNot(NumberEqual(a, a))
+    Node* input = r.GetJSCallInput(0);
+    Node* check = graph()->NewNode(simplified()->NumberEqual(), input, input);
+    Node* value = graph()->NewNode(simplified()->BooleanNot(), check);
+    return Replace(value);
+  }
+  return NoChange();
+}
+
+// ES6 section 20.1.2.5 Number.isSafeInteger ( number )
+Reduction JSBuiltinReducer::ReduceNumberIsSafeInteger(Node* node) {
+  JSCallReduction r(node);
+  if (r.InputsMatchOne(type_cache_.kSafeInteger)) {
+    // Number.isInteger(x:safe-integer) -> #true
+    Node* value = jsgraph()->TrueConstant();
+    return Replace(value);
+  }
+  return NoChange();
+}
+
 // ES6 section 20.1.2.13 Number.parseInt ( string, radix )
 Reduction JSBuiltinReducer::ReduceNumberParseInt(Node* node) {
   JSCallReduction r(node);
@@ -887,51 +1058,146 @@
   return NoChange();
 }
 
-namespace {
+Reduction JSBuiltinReducer::ReduceStringIteratorNext(Node* node) {
+  Node* receiver = NodeProperties::GetValueInput(node, 1);
+  Node* effect = NodeProperties::GetEffectInput(node);
+  Node* control = NodeProperties::GetControlInput(node);
+  Node* context = NodeProperties::GetContextInput(node);
+  if (HasInstanceTypeWitness(receiver, effect, JS_STRING_ITERATOR_TYPE)) {
+    Node* string = effect = graph()->NewNode(
+        simplified()->LoadField(AccessBuilder::ForJSStringIteratorString()),
+        receiver, effect, control);
+    Node* index = effect = graph()->NewNode(
+        simplified()->LoadField(AccessBuilder::ForJSStringIteratorIndex()),
+        receiver, effect, control);
+    Node* length = effect = graph()->NewNode(
+        simplified()->LoadField(AccessBuilder::ForStringLength()), string,
+        effect, control);
 
-bool HasInstanceTypeWitness(Node* receiver, Node* effect,
-                            InstanceType instance_type) {
-  for (Node* dominator = effect;;) {
-    if (dominator->opcode() == IrOpcode::kCheckMaps &&
-        dominator->InputAt(0) == receiver) {
-      // Check if all maps have the given {instance_type}.
-      for (int i = 1; i < dominator->op()->ValueInputCount(); ++i) {
-        Node* const map = NodeProperties::GetValueInput(dominator, i);
-        Type* const map_type = NodeProperties::GetType(map);
-        if (!map_type->IsConstant()) return false;
-        Handle<Map> const map_value =
-            Handle<Map>::cast(map_type->AsConstant()->Value());
-        if (map_value->instance_type() != instance_type) return false;
-      }
-      return true;
-    }
-    switch (dominator->opcode()) {
-      case IrOpcode::kStoreField: {
-        FieldAccess const& access = FieldAccessOf(dominator->op());
-        if (access.base_is_tagged == kTaggedBase &&
-            access.offset == HeapObject::kMapOffset) {
-          return false;
+    // branch0: if (index < length)
+    Node* check0 =
+        graph()->NewNode(simplified()->NumberLessThan(), index, length);
+    Node* branch0 =
+        graph()->NewNode(common()->Branch(BranchHint::kTrue), check0, control);
+
+    Node* etrue0 = effect;
+    Node* if_true0 = graph()->NewNode(common()->IfTrue(), branch0);
+    Node* done_true;
+    Node* vtrue0;
+    {
+      done_true = jsgraph()->FalseConstant();
+      Node* lead = graph()->NewNode(simplified()->StringCharCodeAt(), string,
+                                    index, if_true0);
+
+      // branch1: if ((lead & 0xFC00) === 0xD800)
+      Node* check1 = graph()->NewNode(
+          simplified()->NumberEqual(),
+          graph()->NewNode(simplified()->NumberBitwiseAnd(), lead,
+                           jsgraph()->Int32Constant(0xFC00)),
+          jsgraph()->Int32Constant(0xD800));
+      Node* branch1 = graph()->NewNode(common()->Branch(BranchHint::kFalse),
+                                       check1, if_true0);
+      Node* if_true1 = graph()->NewNode(common()->IfTrue(), branch1);
+      Node* vtrue1;
+      {
+        Node* next_index = graph()->NewNode(simplified()->NumberAdd(), index,
+                                            jsgraph()->OneConstant());
+        // branch2: if ((index + 1) < length)
+        Node* check2 = graph()->NewNode(simplified()->NumberLessThan(),
+                                        next_index, length);
+        Node* branch2 = graph()->NewNode(common()->Branch(BranchHint::kTrue),
+                                         check2, if_true1);
+        Node* if_true2 = graph()->NewNode(common()->IfTrue(), branch2);
+        Node* vtrue2;
+        {
+          Node* trail = graph()->NewNode(simplified()->StringCharCodeAt(),
+                                         string, next_index, if_true2);
+          // branch3: if ((trail & 0xFC00) === 0xDC00)
+          Node* check3 = graph()->NewNode(
+              simplified()->NumberEqual(),
+              graph()->NewNode(simplified()->NumberBitwiseAnd(), trail,
+                               jsgraph()->Int32Constant(0xFC00)),
+              jsgraph()->Int32Constant(0xDC00));
+          Node* branch3 = graph()->NewNode(common()->Branch(BranchHint::kTrue),
+                                           check3, if_true2);
+          Node* if_true3 = graph()->NewNode(common()->IfTrue(), branch3);
+          Node* vtrue3;
+          {
+            vtrue3 = graph()->NewNode(
+                simplified()->NumberBitwiseOr(),
+// Need to swap the order for big-endian platforms
+#if V8_TARGET_BIG_ENDIAN
+                graph()->NewNode(simplified()->NumberShiftLeft(), lead,
+                                 jsgraph()->Int32Constant(16)),
+                trail);
+#else
+                graph()->NewNode(simplified()->NumberShiftLeft(), trail,
+                                 jsgraph()->Int32Constant(16)),
+                lead);
+#endif
+          }
+
+          Node* if_false3 = graph()->NewNode(common()->IfFalse(), branch3);
+          Node* vfalse3 = lead;
+          if_true2 = graph()->NewNode(common()->Merge(2), if_true3, if_false3);
+          vtrue2 =
+              graph()->NewNode(common()->Phi(MachineRepresentation::kWord32, 2),
+                               vtrue3, vfalse3, if_true2);
         }
-        break;
+
+        Node* if_false2 = graph()->NewNode(common()->IfFalse(), branch2);
+        Node* vfalse2 = lead;
+        if_true1 = graph()->NewNode(common()->Merge(2), if_true2, if_false2);
+        vtrue1 =
+            graph()->NewNode(common()->Phi(MachineRepresentation::kWord32, 2),
+                             vtrue2, vfalse2, if_true1);
       }
-      case IrOpcode::kStoreElement:
-        break;
-      default: {
-        DCHECK_EQ(1, dominator->op()->EffectOutputCount());
-        if (dominator->op()->EffectInputCount() != 1 ||
-            !dominator->op()->HasProperty(Operator::kNoWrite)) {
-          // Didn't find any appropriate CheckMaps node.
-          return false;
-        }
-        break;
-      }
+
+      Node* if_false1 = graph()->NewNode(common()->IfFalse(), branch1);
+      Node* vfalse1 = lead;
+      if_true0 = graph()->NewNode(common()->Merge(2), if_true1, if_false1);
+      vtrue0 =
+          graph()->NewNode(common()->Phi(MachineRepresentation::kWord32, 2),
+                           vtrue1, vfalse1, if_true0);
+      vtrue0 = graph()->NewNode(
+          simplified()->StringFromCodePoint(UnicodeEncoding::UTF16), vtrue0);
+
+      // Update iterator.[[NextIndex]]
+      Node* char_length = etrue0 = graph()->NewNode(
+          simplified()->LoadField(AccessBuilder::ForStringLength()), vtrue0,
+          etrue0, if_true0);
+      index = graph()->NewNode(simplified()->NumberAdd(), index, char_length);
+      etrue0 = graph()->NewNode(
+          simplified()->StoreField(AccessBuilder::ForJSStringIteratorIndex()),
+          receiver, index, etrue0, if_true0);
     }
-    dominator = NodeProperties::GetEffectInput(dominator);
+
+    Node* if_false0 = graph()->NewNode(common()->IfFalse(), branch0);
+    Node* done_false;
+    Node* vfalse0;
+    {
+      vfalse0 = jsgraph()->UndefinedConstant();
+      done_false = jsgraph()->TrueConstant();
+    }
+
+    control = graph()->NewNode(common()->Merge(2), if_true0, if_false0);
+    effect = graph()->NewNode(common()->EffectPhi(2), etrue0, effect, control);
+    Node* value =
+        graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
+                         vtrue0, vfalse0, control);
+    Node* done =
+        graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
+                         done_true, done_false, control);
+
+    value = effect = graph()->NewNode(javascript()->CreateIterResultObject(),
+                                      value, done, context, effect);
+
+    ReplaceWithValue(node, value, effect, control);
+    return Replace(value);
   }
+  return NoChange();
 }
 
-}  // namespace
-
 Reduction JSBuiltinReducer::ReduceArrayBufferViewAccessor(
     Node* node, InstanceType instance_type, FieldAccess const& access) {
   Node* receiver = NodeProperties::GetValueInput(node, 1);
@@ -939,27 +1205,21 @@
   Node* control = NodeProperties::GetControlInput(node);
   if (HasInstanceTypeWitness(receiver, effect, instance_type)) {
     // Load the {receiver}s field.
-    Node* receiver_length = effect = graph()->NewNode(
+    Node* receiver_value = effect = graph()->NewNode(
         simplified()->LoadField(access), receiver, effect, control);
 
     // Check if the {receiver}s buffer was neutered.
     Node* receiver_buffer = effect = graph()->NewNode(
         simplified()->LoadField(AccessBuilder::ForJSArrayBufferViewBuffer()),
         receiver, effect, control);
-    Node* receiver_buffer_bitfield = effect = graph()->NewNode(
-        simplified()->LoadField(AccessBuilder::ForJSArrayBufferBitField()),
-        receiver_buffer, effect, control);
-    Node* check = graph()->NewNode(
-        simplified()->NumberEqual(),
-        graph()->NewNode(
-            simplified()->NumberBitwiseAnd(), receiver_buffer_bitfield,
-            jsgraph()->Constant(JSArrayBuffer::WasNeutered::kMask)),
-        jsgraph()->ZeroConstant());
+    Node* check = effect =
+        graph()->NewNode(simplified()->ArrayBufferWasNeutered(),
+                         receiver_buffer, effect, control);
 
     // Default to zero if the {receiver}s buffer was neutered.
     Node* value = graph()->NewNode(
-        common()->Select(MachineRepresentation::kTagged, BranchHint::kTrue),
-        check, receiver_length, jsgraph()->ZeroConstant());
+        common()->Select(MachineRepresentation::kTagged, BranchHint::kFalse),
+        check, jsgraph()->ZeroConstant(), receiver_value);
 
     ReplaceWithValue(node, value, effect, control);
     return Replace(value);
@@ -978,6 +1238,17 @@
       return ReduceArrayPop(node);
     case kArrayPush:
       return ReduceArrayPush(node);
+    case kDateGetTime:
+      return ReduceDateGetTime(node);
+    case kFunctionHasInstance:
+      return ReduceFunctionHasInstance(node);
+      break;
+    case kGlobalIsFinite:
+      reduction = ReduceGlobalIsFinite(node);
+      break;
+    case kGlobalIsNaN:
+      reduction = ReduceGlobalIsNaN(node);
+      break;
     case kMathAbs:
       reduction = ReduceMathAbs(node);
       break;
@@ -1077,6 +1348,18 @@
     case kMathTrunc:
       reduction = ReduceMathTrunc(node);
       break;
+    case kNumberIsFinite:
+      reduction = ReduceNumberIsFinite(node);
+      break;
+    case kNumberIsInteger:
+      reduction = ReduceNumberIsInteger(node);
+      break;
+    case kNumberIsNaN:
+      reduction = ReduceNumberIsNaN(node);
+      break;
+    case kNumberIsSafeInteger:
+      reduction = ReduceNumberIsSafeInteger(node);
+      break;
     case kNumberParseInt:
       reduction = ReduceNumberParseInt(node);
       break;
@@ -1087,6 +1370,8 @@
       return ReduceStringCharAt(node);
     case kStringCharCodeAt:
       return ReduceStringCharCodeAt(node);
+    case kStringIteratorNext:
+      return ReduceStringIteratorNext(node);
     case kDataViewByteLength:
       return ReduceArrayBufferViewAccessor(
           node, JS_DATA_VIEW_TYPE,
@@ -1146,6 +1431,10 @@
   return jsgraph()->simplified();
 }
 
+JSOperatorBuilder* JSBuiltinReducer::javascript() const {
+  return jsgraph()->javascript();
+}
+
 }  // namespace compiler
 }  // namespace internal
 }  // namespace v8
diff --git a/src/compiler/js-builtin-reducer.h b/src/compiler/js-builtin-reducer.h
index 2da8347..524d006 100644
--- a/src/compiler/js-builtin-reducer.h
+++ b/src/compiler/js-builtin-reducer.h
@@ -14,7 +14,6 @@
 // Forward declarations.
 class CompilationDependencies;
 class Factory;
-class TypeCache;
 
 namespace compiler {
 
@@ -22,8 +21,9 @@
 class CommonOperatorBuilder;
 struct FieldAccess;
 class JSGraph;
+class JSOperatorBuilder;
 class SimplifiedOperatorBuilder;
-
+class TypeCache;
 
 class JSBuiltinReducer final : public AdvancedReducer {
  public:
@@ -43,6 +43,10 @@
  private:
   Reduction ReduceArrayPop(Node* node);
   Reduction ReduceArrayPush(Node* node);
+  Reduction ReduceDateGetTime(Node* node);
+  Reduction ReduceFunctionHasInstance(Node* node);
+  Reduction ReduceGlobalIsFinite(Node* node);
+  Reduction ReduceGlobalIsNaN(Node* node);
   Reduction ReduceMathAbs(Node* node);
   Reduction ReduceMathAcos(Node* node);
   Reduction ReduceMathAcosh(Node* node);
@@ -76,10 +80,15 @@
   Reduction ReduceMathTan(Node* node);
   Reduction ReduceMathTanh(Node* node);
   Reduction ReduceMathTrunc(Node* node);
+  Reduction ReduceNumberIsFinite(Node* node);
+  Reduction ReduceNumberIsInteger(Node* node);
+  Reduction ReduceNumberIsNaN(Node* node);
+  Reduction ReduceNumberIsSafeInteger(Node* node);
   Reduction ReduceNumberParseInt(Node* node);
   Reduction ReduceStringCharAt(Node* node);
   Reduction ReduceStringCharCodeAt(Node* node);
   Reduction ReduceStringFromCharCode(Node* node);
+  Reduction ReduceStringIteratorNext(Node* node);
   Reduction ReduceArrayBufferViewAccessor(Node* node,
                                           InstanceType instance_type,
                                           FieldAccess const& access);
@@ -94,6 +103,7 @@
   Isolate* isolate() const;
   CommonOperatorBuilder* common() const;
   SimplifiedOperatorBuilder* simplified() const;
+  JSOperatorBuilder* javascript() const;
   CompilationDependencies* dependencies() const { return dependencies_; }
 
   CompilationDependencies* const dependencies_;
diff --git a/src/compiler/js-call-reducer.cc b/src/compiler/js-call-reducer.cc
index e390214..dd8f064 100644
--- a/src/compiler/js-call-reducer.cc
+++ b/src/compiler/js-call-reducer.cc
@@ -14,30 +14,6 @@
 namespace internal {
 namespace compiler {
 
-namespace {
-
-VectorSlotPair CallCountFeedback(VectorSlotPair p) {
-  // Extract call count from {p}.
-  if (!p.IsValid()) return VectorSlotPair();
-  CallICNexus n(p.vector(), p.slot());
-  int const call_count = n.ExtractCallCount();
-  if (call_count <= 0) return VectorSlotPair();
-
-  // Create megamorphic CallIC feedback with the given {call_count}.
-  StaticFeedbackVectorSpec spec;
-  FeedbackVectorSlot slot = spec.AddCallICSlot();
-  Handle<TypeFeedbackMetadata> metadata =
-      TypeFeedbackMetadata::New(n.GetIsolate(), &spec);
-  Handle<TypeFeedbackVector> vector =
-      TypeFeedbackVector::New(n.GetIsolate(), metadata);
-  CallICNexus nexus(vector, slot);
-  nexus.ConfigureMegamorphic(call_count);
-  return VectorSlotPair(vector, slot);
-}
-
-}  // namespace
-
-
 Reduction JSCallReducer::Reduce(Node* node) {
   switch (node->opcode()) {
     case IrOpcode::kJSCallConstruct:
@@ -166,7 +142,7 @@
   }
   // Change {node} to the new {JSCallFunction} operator.
   NodeProperties::ChangeOp(
-      node, javascript()->CallFunction(arity, CallCountFeedback(p.feedback()),
+      node, javascript()->CallFunction(arity, p.frequency(), VectorSlotPair(),
                                        convert_mode, p.tail_call_mode()));
   // Change context of {node} to the Function.prototype.apply context,
   // to ensure any exception is thrown in the correct context.
@@ -206,7 +182,7 @@
     --arity;
   }
   NodeProperties::ChangeOp(
-      node, javascript()->CallFunction(arity, CallCountFeedback(p.feedback()),
+      node, javascript()->CallFunction(arity, p.frequency(), VectorSlotPair(),
                                        convert_mode, p.tail_call_mode()));
   // Try to further reduce the JSCallFunction {node}.
   Reduction const reduction = ReduceJSCallFunction(node);
@@ -287,7 +263,7 @@
         arity++;
       }
       NodeProperties::ChangeOp(node, javascript()->CallFunction(
-                                         arity, CallCountFeedback(p.feedback()),
+                                         arity, p.frequency(), VectorSlotPair(),
                                          convert_mode, p.tail_call_mode()));
       // Try to further reduce the JSCallFunction {node}.
       Reduction const reduction = ReduceJSCallFunction(node);
@@ -305,6 +281,20 @@
   // Extract feedback from the {node} using the CallICNexus.
   if (!p.feedback().IsValid()) return NoChange();
   CallICNexus nexus(p.feedback().vector(), p.feedback().slot());
+  if (nexus.IsUninitialized() && (flags() & kBailoutOnUninitialized)) {
+    Node* frame_state = NodeProperties::FindFrameStateBefore(node);
+    Node* deoptimize = graph()->NewNode(
+        common()->Deoptimize(
+            DeoptimizeKind::kSoft,
+            DeoptimizeReason::kInsufficientTypeFeedbackForCall),
+        frame_state, effect, control);
+    // TODO(bmeurer): This should be on the AdvancedReducer somehow.
+    NodeProperties::MergeControlToEnd(graph(), common(), deoptimize);
+    Revisit(graph()->end());
+    node->TrimInputCount(0);
+    NodeProperties::ChangeOp(node, common()->Dead());
+    return Changed(node);
+  }
   Handle<Object> feedback(nexus.GetFeedback(), isolate());
   if (feedback->IsAllocationSite()) {
     // Retrieve the Array function from the {node}.
@@ -386,8 +376,8 @@
         // Check if we have an allocation site.
         Handle<AllocationSite> site;
         if (p.feedback().IsValid()) {
-          Handle<Object> feedback(
-              p.feedback().vector()->Get(p.feedback().slot()), isolate());
+          CallICNexus nexus(p.feedback().vector(), p.feedback().slot());
+          Handle<Object> feedback(nexus.GetFeedback(), isolate());
           if (feedback->IsAllocationSite()) {
             site = Handle<AllocationSite>::cast(feedback);
           }
@@ -412,10 +402,9 @@
   // Not much we can do if deoptimization support is disabled.
   if (!(flags() & kDeoptimizationEnabled)) return NoChange();
 
-  // TODO(mvstanton): Use ConstructICNexus here, once available.
-  Handle<Object> feedback;
   if (!p.feedback().IsValid()) return NoChange();
-  feedback = handle(p.feedback().vector()->Get(p.feedback().slot()), isolate());
+  CallICNexus nexus(p.feedback().vector(), p.feedback().slot());
+  Handle<Object> feedback(nexus.GetFeedback(), isolate());
   if (feedback->IsAllocationSite()) {
     // The feedback is an AllocationSite, which means we have called the
     // Array function and collected transition (and pretenuring) feedback
diff --git a/src/compiler/js-call-reducer.h b/src/compiler/js-call-reducer.h
index 8d9700a..0c3835c 100644
--- a/src/compiler/js-call-reducer.h
+++ b/src/compiler/js-call-reducer.h
@@ -20,18 +20,22 @@
 
 // Performs strength reduction on {JSCallConstruct} and {JSCallFunction} nodes,
 // which might allow inlining or other optimizations to be performed afterwards.
-class JSCallReducer final : public Reducer {
+class JSCallReducer final : public AdvancedReducer {
  public:
   // Flags that control the mode of operation.
   enum Flag {
     kNoFlags = 0u,
-    kDeoptimizationEnabled = 1u << 0,
+    kBailoutOnUninitialized = 1u << 0,
+    kDeoptimizationEnabled = 1u << 1
   };
   typedef base::Flags<Flag> Flags;
 
-  JSCallReducer(JSGraph* jsgraph, Flags flags,
+  JSCallReducer(Editor* editor, JSGraph* jsgraph, Flags flags,
                 MaybeHandle<Context> native_context)
-      : jsgraph_(jsgraph), flags_(flags), native_context_(native_context) {}
+      : AdvancedReducer(editor),
+        jsgraph_(jsgraph),
+        flags_(flags),
+        native_context_(native_context) {}
 
   Reduction Reduce(Node* node) final;
 
diff --git a/src/compiler/js-create-lowering.cc b/src/compiler/js-create-lowering.cc
index f2c5edd..b68bb70 100644
--- a/src/compiler/js-create-lowering.cc
+++ b/src/compiler/js-create-lowering.cc
@@ -722,16 +722,25 @@
   DCHECK_EQ(IrOpcode::kJSCreateIterResultObject, node->opcode());
   Node* value = NodeProperties::GetValueInput(node, 0);
   Node* done = NodeProperties::GetValueInput(node, 1);
-  Node* context = NodeProperties::GetContextInput(node);
   Node* effect = NodeProperties::GetEffectInput(node);
 
-  // Load the JSIteratorResult map for the {context}.
-  Node* native_context = effect = graph()->NewNode(
-      javascript()->LoadContext(0, Context::NATIVE_CONTEXT_INDEX, true),
-      context, context, effect);
-  Node* iterator_result_map = effect = graph()->NewNode(
-      javascript()->LoadContext(0, Context::ITERATOR_RESULT_MAP_INDEX, true),
-      native_context, native_context, effect);
+  Node* iterator_result_map;
+  Handle<Context> native_context;
+  if (GetSpecializationNativeContext(node).ToHandle(&native_context)) {
+    // Specialize to the constant JSIteratorResult map to enable map check
+    // elimination to eliminate subsequent checks in case of inlining.
+    iterator_result_map = jsgraph()->HeapConstant(
+        handle(native_context->iterator_result_map(), isolate()));
+  } else {
+    // Load the JSIteratorResult map for the {context}.
+    Node* context = NodeProperties::GetContextInput(node);
+    Node* native_context = effect = graph()->NewNode(
+        javascript()->LoadContext(0, Context::NATIVE_CONTEXT_INDEX, true),
+        context, context, effect);
+    iterator_result_map = effect = graph()->NewNode(
+        javascript()->LoadContext(0, Context::ITERATOR_RESULT_MAP_INDEX, true),
+        native_context, native_context, effect);
+  }
 
   // Emit code to allocate the JSIteratorResult instance.
   AllocationBuilder a(jsgraph(), effect, graph()->start());
@@ -815,6 +824,7 @@
 
 Reduction JSCreateLowering::ReduceJSCreateWithContext(Node* node) {
   DCHECK_EQ(IrOpcode::kJSCreateWithContext, node->opcode());
+  Handle<ScopeInfo> scope_info = OpParameter<Handle<ScopeInfo>>(node);
   Node* object = NodeProperties::GetValueInput(node, 0);
   Node* closure = NodeProperties::GetValueInput(node, 1);
   Node* effect = NodeProperties::GetEffectInput(node);
@@ -823,12 +833,20 @@
   Node* native_context = effect = graph()->NewNode(
       javascript()->LoadContext(0, Context::NATIVE_CONTEXT_INDEX, true),
       context, context, effect);
-  AllocationBuilder a(jsgraph(), effect, control);
+
+  AllocationBuilder aa(jsgraph(), effect, control);
+  aa.Allocate(ContextExtension::kSize);
+  aa.Store(AccessBuilder::ForMap(), factory()->context_extension_map());
+  aa.Store(AccessBuilder::ForContextExtensionScopeInfo(), scope_info);
+  aa.Store(AccessBuilder::ForContextExtensionExtension(), object);
+  Node* extension = aa.Finish();
+
+  AllocationBuilder a(jsgraph(), extension, control);
   STATIC_ASSERT(Context::MIN_CONTEXT_SLOTS == 4);  // Ensure fully covered.
   a.AllocateArray(Context::MIN_CONTEXT_SLOTS, factory()->with_context_map());
   a.Store(AccessBuilder::ForContextSlot(Context::CLOSURE_INDEX), closure);
   a.Store(AccessBuilder::ForContextSlot(Context::PREVIOUS_INDEX), context);
-  a.Store(AccessBuilder::ForContextSlot(Context::EXTENSION_INDEX), object);
+  a.Store(AccessBuilder::ForContextSlot(Context::EXTENSION_INDEX), extension);
   a.Store(AccessBuilder::ForContextSlot(Context::NATIVE_CONTEXT_INDEX),
           native_context);
   RelaxControls(node);
@@ -838,7 +856,8 @@
 
 Reduction JSCreateLowering::ReduceJSCreateCatchContext(Node* node) {
   DCHECK_EQ(IrOpcode::kJSCreateCatchContext, node->opcode());
-  Handle<String> name = OpParameter<Handle<String>>(node);
+  const CreateCatchContextParameters& parameters =
+      CreateCatchContextParametersOf(node->op());
   Node* exception = NodeProperties::GetValueInput(node, 0);
   Node* closure = NodeProperties::GetValueInput(node, 1);
   Node* effect = NodeProperties::GetEffectInput(node);
@@ -847,13 +866,23 @@
   Node* native_context = effect = graph()->NewNode(
       javascript()->LoadContext(0, Context::NATIVE_CONTEXT_INDEX, true),
       context, context, effect);
-  AllocationBuilder a(jsgraph(), effect, control);
+
+  AllocationBuilder aa(jsgraph(), effect, control);
+  aa.Allocate(ContextExtension::kSize);
+  aa.Store(AccessBuilder::ForMap(), factory()->context_extension_map());
+  aa.Store(AccessBuilder::ForContextExtensionScopeInfo(),
+           parameters.scope_info());
+  aa.Store(AccessBuilder::ForContextExtensionExtension(),
+           parameters.catch_name());
+  Node* extension = aa.Finish();
+
+  AllocationBuilder a(jsgraph(), extension, control);
   STATIC_ASSERT(Context::MIN_CONTEXT_SLOTS == 4);  // Ensure fully covered.
   a.AllocateArray(Context::MIN_CONTEXT_SLOTS + 1,
                   factory()->catch_context_map());
   a.Store(AccessBuilder::ForContextSlot(Context::CLOSURE_INDEX), closure);
   a.Store(AccessBuilder::ForContextSlot(Context::PREVIOUS_INDEX), context);
-  a.Store(AccessBuilder::ForContextSlot(Context::EXTENSION_INDEX), name);
+  a.Store(AccessBuilder::ForContextSlot(Context::EXTENSION_INDEX), extension);
   a.Store(AccessBuilder::ForContextSlot(Context::NATIVE_CONTEXT_INDEX),
           native_context);
   a.Store(AccessBuilder::ForContextSlot(Context::THROWN_OBJECT_INDEX),
@@ -1013,10 +1042,17 @@
   ElementAccess access = IsFastDoubleElementsKind(elements_kind)
                              ? AccessBuilder::ForFixedDoubleArrayElement()
                              : AccessBuilder::ForFixedArrayElement();
-  Node* value =
-      IsFastDoubleElementsKind(elements_kind)
-          ? jsgraph()->Float64Constant(bit_cast<double>(kHoleNanInt64))
-          : jsgraph()->TheHoleConstant();
+  Node* value;
+  if (IsFastDoubleElementsKind(elements_kind)) {
+    // Load the hole NaN pattern from the canonical location.
+    value = effect = graph()->NewNode(
+        simplified()->LoadField(AccessBuilder::ForExternalDoubleValue()),
+        jsgraph()->ExternalConstant(
+            ExternalReference::address_of_the_hole_nan()),
+        effect, control);
+  } else {
+    value = jsgraph()->TheHoleConstant();
+  }
 
   // Actually allocate the backing store.
   AllocationBuilder a(jsgraph(), effect, control);
@@ -1065,8 +1101,8 @@
         boilerplate_map->instance_descriptors()->GetKey(i), isolate());
     FieldIndex index = FieldIndex::ForDescriptor(*boilerplate_map, i);
     FieldAccess access = {
-        kTaggedBase,    index.offset(),           property_name,
-        Type::Tagged(), MachineType::AnyTagged(), kFullWriteBarrier};
+        kTaggedBase, index.offset(),           property_name,
+        Type::Any(), MachineType::AnyTagged(), kFullWriteBarrier};
     Node* value;
     if (boilerplate->IsUnboxedDoubleField(index)) {
       access.machine_type = MachineType::Float64();
@@ -1169,18 +1205,18 @@
   if (elements_map->instance_type() == FIXED_DOUBLE_ARRAY_TYPE) {
     Handle<FixedDoubleArray> elements =
         Handle<FixedDoubleArray>::cast(boilerplate_elements);
+    Node* the_hole_value = nullptr;
     for (int i = 0; i < elements_length; ++i) {
       if (elements->is_the_hole(i)) {
-        // TODO(turbofan): We cannot currently safely pass thru the (signaling)
-        // hole NaN in C++ code, as the C++ compiler on Intel might use FPU
-        // instructions/registers for doubles and therefore make the NaN quiet.
-        // We should consider passing doubles in the compiler as raw int64
-        // values to prevent this.
-        elements_values[i] = effect =
-            graph()->NewNode(simplified()->LoadElement(
-                                 AccessBuilder::ForFixedDoubleArrayElement()),
-                             jsgraph()->HeapConstant(elements),
-                             jsgraph()->Constant(i), effect, control);
+        if (the_hole_value == nullptr) {
+          // Load the hole NaN pattern from the canonical location.
+          the_hole_value = effect = graph()->NewNode(
+              simplified()->LoadField(AccessBuilder::ForExternalDoubleValue()),
+              jsgraph()->ExternalConstant(
+                  ExternalReference::address_of_the_hole_nan()),
+              effect, control);
+        }
+        elements_values[i] = the_hole_value;
       } else {
         elements_values[i] = jsgraph()->Constant(elements->get_scalar(i));
       }
@@ -1244,6 +1280,13 @@
   return MaybeHandle<LiteralsArray>();
 }
 
+MaybeHandle<Context> JSCreateLowering::GetSpecializationNativeContext(
+    Node* node) {
+  Node* const context = NodeProperties::GetContextInput(node);
+  return NodeProperties::GetSpecializationNativeContext(context,
+                                                        native_context_);
+}
+
 Factory* JSCreateLowering::factory() const { return isolate()->factory(); }
 
 Graph* JSCreateLowering::graph() const { return jsgraph()->graph(); }
diff --git a/src/compiler/js-create-lowering.h b/src/compiler/js-create-lowering.h
index 2262e66..6248ca2 100644
--- a/src/compiler/js-create-lowering.h
+++ b/src/compiler/js-create-lowering.h
@@ -31,11 +31,12 @@
  public:
   JSCreateLowering(Editor* editor, CompilationDependencies* dependencies,
                    JSGraph* jsgraph, MaybeHandle<LiteralsArray> literals_array,
-                   Zone* zone)
+                   MaybeHandle<Context> native_context, Zone* zone)
       : AdvancedReducer(editor),
         dependencies_(dependencies),
         jsgraph_(jsgraph),
         literals_array_(literals_array),
+        native_context_(native_context),
         zone_(zone) {}
   ~JSCreateLowering() final {}
 
@@ -76,6 +77,8 @@
 
   // Infers the LiteralsArray to use for a given {node}.
   MaybeHandle<LiteralsArray> GetSpecializationLiterals(Node* node);
+  // Infers the native context to use for a given {node}.
+  MaybeHandle<Context> GetSpecializationNativeContext(Node* node);
 
   Factory* factory() const;
   Graph* graph() const;
@@ -91,6 +94,7 @@
   CompilationDependencies* const dependencies_;
   JSGraph* const jsgraph_;
   MaybeHandle<LiteralsArray> const literals_array_;
+  MaybeHandle<Context> const native_context_;
   Zone* const zone_;
 };
 
diff --git a/src/compiler/js-generic-lowering.cc b/src/compiler/js-generic-lowering.cc
index 812d3e7..22d6c86 100644
--- a/src/compiler/js-generic-lowering.cc
+++ b/src/compiler/js-generic-lowering.cc
@@ -2,10 +2,12 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
+#include "src/compiler/js-generic-lowering.h"
+
+#include "src/ast/ast.h"
 #include "src/code-factory.h"
 #include "src/code-stubs.h"
 #include "src/compiler/common-operator.h"
-#include "src/compiler/js-generic-lowering.h"
 #include "src/compiler/js-graph.h"
 #include "src/compiler/machine-operator.h"
 #include "src/compiler/node-matchers.h"
@@ -45,13 +47,6 @@
   }
   return Changed(node);
 }
-#define REPLACE_RUNTIME_CALL(op, fun)             \
-  void JSGenericLowering::Lower##op(Node* node) { \
-    ReplaceWithRuntimeCall(node, fun);            \
-  }
-REPLACE_RUNTIME_CALL(JSCreateWithContext, Runtime::kPushWithContext)
-REPLACE_RUNTIME_CALL(JSConvertReceiver, Runtime::kConvertReceiver)
-#undef REPLACE_RUNTIME_CALL
 
 #define REPLACE_STUB_CALL(Name)                                \
   void JSGenericLowering::LowerJS##Name(Node* node) {          \
@@ -93,8 +88,10 @@
 void JSGenericLowering::ReplaceWithStubCall(Node* node, Callable callable,
                                             CallDescriptor::Flags flags,
                                             Operator::Properties properties) {
+  const CallInterfaceDescriptor& descriptor = callable.descriptor();
   CallDescriptor* desc = Linkage::GetStubCallDescriptor(
-      isolate(), zone(), callable.descriptor(), 0, flags, properties);
+      isolate(), zone(), descriptor, descriptor.GetStackParameterCount(), flags,
+      properties);
   Node* stub_code = jsgraph()->HeapConstant(callable.code());
   node->InsertInput(zone(), 0, stub_code);
   NodeProperties::ChangeOp(node, common()->Call(desc));
@@ -346,6 +343,11 @@
   ReplaceWithStubCall(node, callable, flags);
 }
 
+void JSGenericLowering::LowerJSOrdinaryHasInstance(Node* node) {
+  CallDescriptor::Flags flags = FrameStateFlagForCall(node);
+  Callable callable = CodeFactory::OrdinaryHasInstance(isolate());
+  ReplaceWithStubCall(node, callable, flags);
+}
 
 void JSGenericLowering::LowerJSLoadContext(Node* node) {
   const ContextAccess& access = ContextAccessOf(node->op());
@@ -513,11 +515,20 @@
 
 
 void JSGenericLowering::LowerJSCreateCatchContext(Node* node) {
-  Handle<String> name = OpParameter<Handle<String>>(node);
-  node->InsertInput(zone(), 0, jsgraph()->HeapConstant(name));
+  const CreateCatchContextParameters& parameters =
+      CreateCatchContextParametersOf(node->op());
+  node->InsertInput(zone(), 0,
+                    jsgraph()->HeapConstant(parameters.catch_name()));
+  node->InsertInput(zone(), 2,
+                    jsgraph()->HeapConstant(parameters.scope_info()));
   ReplaceWithRuntimeCall(node, Runtime::kPushCatchContext);
 }
 
+void JSGenericLowering::LowerJSCreateWithContext(Node* node) {
+  Handle<ScopeInfo> scope_info = OpParameter<Handle<ScopeInfo>>(node);
+  node->InsertInput(zone(), 1, jsgraph()->HeapConstant(scope_info));
+  ReplaceWithRuntimeCall(node, Runtime::kPushWithContext);
+}
 
 void JSGenericLowering::LowerJSCreateBlockContext(Node* node) {
   Handle<ScopeInfo> scope_info = OpParameter<Handle<ScopeInfo>>(node);
@@ -577,12 +588,10 @@
   ReplaceWithRuntimeCall(node, p.id(), static_cast<int>(p.arity()));
 }
 
-
-void JSGenericLowering::LowerJSForInDone(Node* node) {
-  ReplaceWithRuntimeCall(node, Runtime::kForInDone);
+void JSGenericLowering::LowerJSConvertReceiver(Node* node) {
+  ReplaceWithRuntimeCall(node, Runtime::kConvertReceiver);
 }
 
-
 void JSGenericLowering::LowerJSForInNext(Node* node) {
   ReplaceWithRuntimeCall(node, Runtime::kForInNext);
 }
@@ -592,12 +601,6 @@
   ReplaceWithRuntimeCall(node, Runtime::kForInPrepare);
 }
 
-
-void JSGenericLowering::LowerJSForInStep(Node* node) {
-  ReplaceWithRuntimeCall(node, Runtime::kForInStep);
-}
-
-
 void JSGenericLowering::LowerJSLoadMessage(Node* node) {
   ExternalReference message_address =
       ExternalReference::address_of_pending_message_obj(isolate());
diff --git a/src/compiler/js-global-object-specialization.cc b/src/compiler/js-global-object-specialization.cc
index 2b4bf1c..10130f4 100644
--- a/src/compiler/js-global-object-specialization.cc
+++ b/src/compiler/js-global-object-specialization.cc
@@ -11,9 +11,9 @@
 #include "src/compiler/js-operator.h"
 #include "src/compiler/node-properties.h"
 #include "src/compiler/simplified-operator.h"
+#include "src/compiler/type-cache.h"
 #include "src/lookup.h"
 #include "src/objects-inl.h"
-#include "src/type-cache.h"
 
 namespace v8 {
 namespace internal {
@@ -48,6 +48,23 @@
   return NoChange();
 }
 
+namespace {
+
+FieldAccess ForPropertyCellValue(MachineRepresentation representation,
+                                 Type* type, Handle<Name> name) {
+  WriteBarrierKind kind = kFullWriteBarrier;
+  if (representation == MachineRepresentation::kTaggedSigned) {
+    kind = kNoWriteBarrier;
+  } else if (representation == MachineRepresentation::kTaggedPointer) {
+    kind = kPointerWriteBarrier;
+  }
+  MachineType r = MachineType::TypeForRepresentation(representation);
+  FieldAccess access = {kTaggedBase, PropertyCell::kValueOffset, name, type, r,
+                        kind};
+  return access;
+}
+}  // namespace
+
 Reduction JSGlobalObjectSpecialization::ReduceJSLoadGlobal(Node* node) {
   DCHECK_EQ(IrOpcode::kJSLoadGlobal, node->opcode());
   Handle<Name> name = LoadGlobalParametersOf(node->op()).name();
@@ -104,24 +121,31 @@
   }
 
   // Load from constant type cell can benefit from type feedback.
-  Type* property_cell_value_type = Type::Tagged();
+  Type* property_cell_value_type = Type::NonInternal();
+  MachineRepresentation representation = MachineRepresentation::kTagged;
   if (property_details.cell_type() == PropertyCellType::kConstantType) {
     // Compute proper type based on the current value in the cell.
     if (property_cell_value->IsSmi()) {
       property_cell_value_type = type_cache_.kSmi;
+      representation = MachineRepresentation::kTaggedSigned;
     } else if (property_cell_value->IsNumber()) {
+      // TODO(mvstanton): Remove kHeapNumber from type cache, it's just
+      // Type::Number().
       property_cell_value_type = type_cache_.kHeapNumber;
+      representation = MachineRepresentation::kTaggedPointer;
     } else {
+      // TODO(turbofan): Track the property_cell_value_map on the FieldAccess
+      // below and use it in LoadElimination to eliminate map checks.
       Handle<Map> property_cell_value_map(
           Handle<HeapObject>::cast(property_cell_value)->map(), isolate());
-      property_cell_value_type =
-          Type::Class(property_cell_value_map, graph()->zone());
+      property_cell_value_type = Type::For(property_cell_value_map);
+      representation = MachineRepresentation::kTaggedPointer;
     }
   }
-  Node* value = effect = graph()->NewNode(
-      simplified()->LoadField(
-          AccessBuilder::ForPropertyCellValue(property_cell_value_type)),
-      jsgraph()->HeapConstant(property_cell), effect, control);
+  Node* value = effect =
+      graph()->NewNode(simplified()->LoadField(ForPropertyCellValue(
+                           representation, property_cell_value_type, name)),
+                       jsgraph()->HeapConstant(property_cell), effect, control);
   ReplaceWithValue(node, value, effect, control);
   return Replace(value);
 }
@@ -180,6 +204,7 @@
       // values' type doesn't match the type of the previous value in the cell.
       dependencies()->AssumePropertyCell(property_cell);
       Type* property_cell_value_type;
+      MachineRepresentation representation = MachineRepresentation::kTagged;
       if (property_cell_value->IsHeapObject()) {
         // We cannot do anything if the {property_cell_value}s map is no
         // longer stable.
@@ -189,23 +214,25 @@
         dependencies()->AssumeMapStable(property_cell_value_map);
 
         // Check that the {value} is a HeapObject.
-        value = effect = graph()->NewNode(simplified()->CheckTaggedPointer(),
+        value = effect = graph()->NewNode(simplified()->CheckHeapObject(),
                                           value, effect, control);
 
         // Check {value} map agains the {property_cell} map.
         effect = graph()->NewNode(
             simplified()->CheckMaps(1), value,
             jsgraph()->HeapConstant(property_cell_value_map), effect, control);
-        property_cell_value_type = Type::TaggedPointer();
+        property_cell_value_type = Type::OtherInternal();
+        representation = MachineRepresentation::kTaggedPointer;
       } else {
         // Check that the {value} is a Smi.
-        value = effect = graph()->NewNode(simplified()->CheckTaggedSigned(),
-                                          value, effect, control);
-        property_cell_value_type = Type::TaggedSigned();
+        value = effect =
+            graph()->NewNode(simplified()->CheckSmi(), value, effect, control);
+        property_cell_value_type = Type::SignedSmall();
+        representation = MachineRepresentation::kTaggedSigned;
       }
       effect = graph()->NewNode(
-          simplified()->StoreField(
-              AccessBuilder::ForPropertyCellValue(property_cell_value_type)),
+          simplified()->StoreField(ForPropertyCellValue(
+              representation, property_cell_value_type, name)),
           jsgraph()->HeapConstant(property_cell), value, effect, control);
       break;
     }
@@ -219,7 +246,8 @@
         dependencies()->AssumePropertyCell(property_cell);
       }
       effect = graph()->NewNode(
-          simplified()->StoreField(AccessBuilder::ForPropertyCellValue()),
+          simplified()->StoreField(ForPropertyCellValue(
+              MachineRepresentation::kTagged, Type::NonInternal(), name)),
           jsgraph()->HeapConstant(property_cell), value, effect, control);
       break;
     }
@@ -251,7 +279,7 @@
   Handle<Context> script_context = ScriptContextTable::GetContext(
       script_context_table, lookup_result.context_index);
   result->context = script_context;
-  result->immutable = IsImmutableVariableMode(lookup_result.mode);
+  result->immutable = lookup_result.mode == CONST;
   result->index = lookup_result.slot_index;
   return true;
 }
diff --git a/src/compiler/js-global-object-specialization.h b/src/compiler/js-global-object-specialization.h
index 3ffc67a..a6c511e 100644
--- a/src/compiler/js-global-object-specialization.h
+++ b/src/compiler/js-global-object-specialization.h
@@ -12,8 +12,6 @@
 
 // Forward declarations.
 class CompilationDependencies;
-class TypeCache;
-
 
 namespace compiler {
 
@@ -22,7 +20,7 @@
 class JSGraph;
 class JSOperatorBuilder;
 class SimplifiedOperatorBuilder;
-
+class TypeCache;
 
 // Specializes a given JSGraph to a given global object, potentially constant
 // folding some {JSLoadGlobal} nodes or strength reducing some {JSStoreGlobal}
diff --git a/src/compiler/js-inlining-heuristic.cc b/src/compiler/js-inlining-heuristic.cc
index ce7b33b..5c626d1 100644
--- a/src/compiler/js-inlining-heuristic.cc
+++ b/src/compiler/js-inlining-heuristic.cc
@@ -4,14 +4,63 @@
 
 #include "src/compiler/js-inlining-heuristic.h"
 
-#include "src/compiler.h"
+#include "src/compilation-info.h"
+#include "src/compiler/common-operator.h"
 #include "src/compiler/node-matchers.h"
+#include "src/compiler/simplified-operator.h"
 #include "src/objects-inl.h"
 
 namespace v8 {
 namespace internal {
 namespace compiler {
 
+#define TRACE(...)                                      \
+  do {                                                  \
+    if (FLAG_trace_turbo_inlining) PrintF(__VA_ARGS__); \
+  } while (false)
+
+namespace {
+
+int CollectFunctions(Node* node, Handle<JSFunction>* functions,
+                     int functions_size) {
+  DCHECK_NE(0u, functions_size);
+  HeapObjectMatcher m(node);
+  if (m.HasValue() && m.Value()->IsJSFunction()) {
+    functions[0] = Handle<JSFunction>::cast(m.Value());
+    return 1;
+  }
+  if (m.IsPhi()) {
+    int const value_input_count = m.node()->op()->ValueInputCount();
+    if (value_input_count > functions_size) return 0;
+    for (int n = 0; n < value_input_count; ++n) {
+      HeapObjectMatcher m(node->InputAt(n));
+      if (!m.HasValue() || !m.Value()->IsJSFunction()) return 0;
+      functions[n] = Handle<JSFunction>::cast(m.Value());
+    }
+    return value_input_count;
+  }
+  return 0;
+}
+
+bool CanInlineFunction(Handle<JSFunction> function) {
+  // Built-in functions are handled by the JSBuiltinReducer.
+  if (function->shared()->HasBuiltinFunctionId()) return false;
+
+  // Don't inline builtins.
+  if (function->shared()->IsBuiltin()) return false;
+
+  // Quick check on the size of the AST to avoid parsing large candidate.
+  if (function->shared()->ast_node_count() > FLAG_max_inlined_nodes) {
+    return false;
+  }
+
+  // Avoid inlining across the boundary of asm.js code.
+  if (function->shared()->asm_function()) return false;
+  return true;
+}
+
+}  // namespace
+
 Reduction JSInliningHeuristic::Reduce(Node* node) {
   if (!IrOpcode::IsInlineeOpcode(node->opcode())) return NoChange();
 
@@ -19,14 +68,61 @@
   if (seen_.find(node->id()) != seen_.end()) return NoChange();
   seen_.insert(node->id());
 
+  // Check if the {node} is an appropriate candidate for inlining.
   Node* callee = node->InputAt(0);
-  HeapObjectMatcher match(callee);
-  if (!match.HasValue() || !match.Value()->IsJSFunction()) return NoChange();
-  Handle<JSFunction> function = Handle<JSFunction>::cast(match.Value());
+  Candidate candidate;
+  candidate.node = node;
+  candidate.num_functions =
+      CollectFunctions(callee, candidate.functions, kMaxCallPolymorphism);
+  if (candidate.num_functions == 0) {
+    return NoChange();
+  } else if (candidate.num_functions > 1 && !FLAG_polymorphic_inlining) {
+    TRACE(
+        "Not considering call site #%d:%s, because polymorphic inlining "
+        "is disabled\n",
+        node->id(), node->op()->mnemonic());
+    return NoChange();
+  }
 
   // Functions marked with %SetForceInlineFlag are immediately inlined.
-  if (function->shared()->force_inline()) {
-    return inliner_.ReduceJSCall(node, function);
+  bool can_inline = false, force_inline = true;
+  for (int i = 0; i < candidate.num_functions; ++i) {
+    Handle<JSFunction> function = candidate.functions[i];
+    if (!function->shared()->force_inline()) {
+      force_inline = false;
+    }
+    if (CanInlineFunction(function)) {
+      can_inline = true;
+    }
+  }
+  if (force_inline) return InlineCandidate(candidate);
+  if (!can_inline) return NoChange();
+
+  // Stop inlining once the maximum allowed level is reached.
+  int level = 0;
+  for (Node* frame_state = NodeProperties::GetFrameStateInput(node);
+       frame_state->opcode() == IrOpcode::kFrameState;
+       frame_state = NodeProperties::GetFrameStateInput(frame_state)) {
+    FrameStateInfo const& frame_info = OpParameter<FrameStateInfo>(frame_state);
+    if (FrameStateFunctionInfo::IsJSFunctionType(frame_info.type())) {
+      if (++level > FLAG_max_inlining_levels) {
+        TRACE(
+            "Not considering call site #%d:%s, because inlining depth "
+            "%d exceeds maximum allowed level %d\n",
+            node->id(), node->op()->mnemonic(), level,
+            FLAG_max_inlining_levels);
+        return NoChange();
+      }
+    }
+  }
+
+  // Gather feedback on how often this call site has been hit before.
+  if (node->opcode() == IrOpcode::kJSCallFunction) {
+    CallFunctionParameters const p = CallFunctionParametersOf(node->op());
+    candidate.frequency = p.frequency();
+  } else {
+    CallConstructParameters const p = CallConstructParametersOf(node->op());
+    candidate.frequency = p.frequency();
   }
 
   // Handling of special inlining modes right away:
@@ -36,75 +132,16 @@
     case kRestrictedInlining:
       return NoChange();
     case kStressInlining:
-      return inliner_.ReduceJSCall(node, function);
+      return InlineCandidate(candidate);
     case kGeneralInlining:
       break;
   }
 
-  // ---------------------------------------------------------------------------
-  // Everything below this line is part of the inlining heuristic.
-  // ---------------------------------------------------------------------------
-
-  // Built-in functions are handled by the JSBuiltinReducer.
-  if (function->shared()->HasBuiltinFunctionId()) return NoChange();
-
-  // Don't inline builtins.
-  if (function->shared()->IsBuiltin()) return NoChange();
-
-  // Quick check on source code length to avoid parsing large candidate.
-  if (function->shared()->SourceSize() > FLAG_max_inlined_source_size) {
-    return NoChange();
-  }
-
-  // Quick check on the size of the AST to avoid parsing large candidate.
-  if (function->shared()->ast_node_count() > FLAG_max_inlined_nodes) {
-    return NoChange();
-  }
-
-  // Avoid inlining within or across the boundary of asm.js code.
-  if (info_->shared_info()->asm_function()) return NoChange();
-  if (function->shared()->asm_function()) return NoChange();
-
-  // Stop inlinining once the maximum allowed level is reached.
-  int level = 0;
-  for (Node* frame_state = NodeProperties::GetFrameStateInput(node);
-       frame_state->opcode() == IrOpcode::kFrameState;
-       frame_state = NodeProperties::GetFrameStateInput(frame_state)) {
-    if (++level > FLAG_max_inlining_levels) return NoChange();
-  }
-
-  // Gather feedback on how often this call site has been hit before.
-  int calls = -1;  // Same default as CallICNexus::ExtractCallCount.
-  if (node->opcode() == IrOpcode::kJSCallFunction) {
-    CallFunctionParameters p = CallFunctionParametersOf(node->op());
-    if (p.feedback().IsValid()) {
-      CallICNexus nexus(p.feedback().vector(), p.feedback().slot());
-      calls = nexus.ExtractCallCount();
-    }
-  } else {
-    DCHECK_EQ(IrOpcode::kJSCallConstruct, node->opcode());
-    CallConstructParameters p = CallConstructParametersOf(node->op());
-    if (p.feedback().IsValid()) {
-      int const extra_index =
-          p.feedback().vector()->GetIndex(p.feedback().slot()) + 1;
-      Handle<Object> feedback_extra(p.feedback().vector()->get(extra_index),
-                                    function->GetIsolate());
-      if (feedback_extra->IsSmi()) {
-        calls = Handle<Smi>::cast(feedback_extra)->value();
-      }
-    }
-  }
-
-  // ---------------------------------------------------------------------------
-  // Everything above this line is part of the inlining heuristic.
-  // ---------------------------------------------------------------------------
-
   // In the general case we remember the candidate for later.
-  candidates_.insert({function, node, calls});
+  candidates_.insert(candidate);
   return NoChange();
 }
 
-
 void JSInliningHeuristic::Finalize() {
   if (candidates_.empty()) return;  // Nothing to do without candidates.
   if (FLAG_trace_turbo_inlining) PrintCandidates();
@@ -120,36 +157,147 @@
     candidates_.erase(i);
     // Make sure we don't try to inline dead candidate nodes.
     if (!candidate.node->IsDead()) {
-      Reduction r = inliner_.ReduceJSCall(candidate.node, candidate.function);
-      if (r.Changed()) {
-        cumulative_count_ += candidate.function->shared()->ast_node_count();
-        return;
-      }
+      Reduction const reduction = InlineCandidate(candidate);
+      if (reduction.Changed()) return;
     }
   }
 }
 
+Reduction JSInliningHeuristic::InlineCandidate(Candidate const& candidate) {
+  int const num_calls = candidate.num_functions;
+  Node* const node = candidate.node;
+  if (num_calls == 1) {
+    Handle<JSFunction> function = candidate.functions[0];
+    Reduction const reduction = inliner_.ReduceJSCall(node, function);
+    if (reduction.Changed()) {
+      cumulative_count_ += function->shared()->ast_node_count();
+    }
+    return reduction;
+  }
+
+  // Expand the JSCallFunction/JSCallConstruct node to a subgraph first if
+  // we have multiple known target functions.
+  DCHECK_LT(1, num_calls);
+  Node* calls[kMaxCallPolymorphism + 1];
+  Node* if_successes[kMaxCallPolymorphism];
+  Node* callee = NodeProperties::GetValueInput(node, 0);
+  Node* fallthrough_control = NodeProperties::GetControlInput(node);
+
+  // Setup the inputs for the cloned call nodes.
+  int const input_count = node->InputCount();
+  Node** inputs = graph()->zone()->NewArray<Node*>(input_count);
+  for (int i = 0; i < input_count; ++i) {
+    inputs[i] = node->InputAt(i);
+  }
+
+  // Create the appropriate control flow to dispatch to the cloned calls.
+  for (int i = 0; i < num_calls; ++i) {
+    Node* target = jsgraph()->HeapConstant(candidate.functions[i]);
+    if (i != (num_calls - 1)) {
+      Node* check =
+          graph()->NewNode(simplified()->ReferenceEqual(), callee, target);
+      Node* branch =
+          graph()->NewNode(common()->Branch(), check, fallthrough_control);
+      fallthrough_control = graph()->NewNode(common()->IfFalse(), branch);
+      if_successes[i] = graph()->NewNode(common()->IfTrue(), branch);
+    } else {
+      if_successes[i] = fallthrough_control;
+    }
+
+    // The first input to the call is the actual target (which we specialize
+    // to the known {target}); the last input is the control dependency.
+    inputs[0] = target;
+    inputs[input_count - 1] = if_successes[i];
+    calls[i] = graph()->NewNode(node->op(), input_count, inputs);
+    if_successes[i] = graph()->NewNode(common()->IfSuccess(), calls[i]);
+  }
+
+  // Check if we have an exception projection for the call {node}.
+  Node* if_exception = nullptr;
+  for (Edge const edge : node->use_edges()) {
+    if (NodeProperties::IsControlEdge(edge) &&
+        edge.from()->opcode() == IrOpcode::kIfException) {
+      if_exception = edge.from();
+      break;
+    }
+  }
+  if (if_exception != nullptr) {
+    // Morph the {if_exception} projection into a join.
+    Node* if_exceptions[kMaxCallPolymorphism + 1];
+    for (int i = 0; i < num_calls; ++i) {
+      if_exceptions[i] =
+          graph()->NewNode(common()->IfException(), calls[i], calls[i]);
+    }
+    Node* exception_control =
+        graph()->NewNode(common()->Merge(num_calls), num_calls, if_exceptions);
+    if_exceptions[num_calls] = exception_control;
+    Node* exception_effect = graph()->NewNode(common()->EffectPhi(num_calls),
+                                              num_calls + 1, if_exceptions);
+    Node* exception_value = graph()->NewNode(
+        common()->Phi(MachineRepresentation::kTagged, num_calls), num_calls + 1,
+        if_exceptions);
+    ReplaceWithValue(if_exception, exception_value, exception_effect,
+                     exception_control);
+  }
+
+  // Morph the call site into the dispatched call sites.
+  Node* control =
+      graph()->NewNode(common()->Merge(num_calls), num_calls, if_successes);
+  calls[num_calls] = control;
+  Node* effect =
+      graph()->NewNode(common()->EffectPhi(num_calls), num_calls + 1, calls);
+  Node* value =
+      graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, num_calls),
+                       num_calls + 1, calls);
+  ReplaceWithValue(node, value, effect, control);
+
+  // Inline the individual, cloned call sites.
+  for (int i = 0; i < num_calls; ++i) {
+    Handle<JSFunction> function = candidate.functions[i];
+    Node* node = calls[i];
+    Reduction const reduction = inliner_.ReduceJSCall(node, function);
+    if (reduction.Changed()) {
+      cumulative_count_ += function->shared()->ast_node_count();
+    }
+  }
+
+  return Replace(value);
+}
 
 bool JSInliningHeuristic::CandidateCompare::operator()(
     const Candidate& left, const Candidate& right) const {
-  if (left.calls != right.calls) {
-    return left.calls > right.calls;
+  if (left.frequency > right.frequency) {
+    return true;
+  } else if (left.frequency < right.frequency) {
+    return false;
+  } else {
+    return left.node->id() > right.node->id();
   }
-  return left.node < right.node;
 }
 
-
 void JSInliningHeuristic::PrintCandidates() {
   PrintF("Candidates for inlining (size=%zu):\n", candidates_.size());
   for (const Candidate& candidate : candidates_) {
-    PrintF("  id:%d, calls:%d, size[source]:%d, size[ast]:%d / %s\n",
-           candidate.node->id(), candidate.calls,
-           candidate.function->shared()->SourceSize(),
-           candidate.function->shared()->ast_node_count(),
-           candidate.function->shared()->DebugName()->ToCString().get());
+    PrintF("  #%d:%s, frequency:%g\n", candidate.node->id(),
+           candidate.node->op()->mnemonic(), candidate.frequency);
+    for (int i = 0; i < candidate.num_functions; ++i) {
+      Handle<JSFunction> function = candidate.functions[i];
+      PrintF("  - size:%d, name: %s\n", function->shared()->ast_node_count(),
+             function->shared()->DebugName()->ToCString().get());
+    }
   }
 }
 
+Graph* JSInliningHeuristic::graph() const { return jsgraph()->graph(); }
+
+CommonOperatorBuilder* JSInliningHeuristic::common() const {
+  return jsgraph()->common();
+}
+
+SimplifiedOperatorBuilder* JSInliningHeuristic::simplified() const {
+  return jsgraph()->simplified();
+}
+
 }  // namespace compiler
 }  // namespace internal
 }  // namespace v8
diff --git a/src/compiler/js-inlining-heuristic.h b/src/compiler/js-inlining-heuristic.h
index 7f57747..367e35a 100644
--- a/src/compiler/js-inlining-heuristic.h
+++ b/src/compiler/js-inlining-heuristic.h
@@ -21,7 +21,7 @@
         inliner_(editor, local_zone, info, jsgraph),
         candidates_(local_zone),
         seen_(local_zone),
-        info_(info) {}
+        jsgraph_(jsgraph) {}
 
   Reduction Reduce(Node* node) final;
 
@@ -30,10 +30,15 @@
   void Finalize() final;
 
  private:
+  // This limit currently matches what Crankshaft does. We may want to
+  // re-evaluate and come up with a proper limit for TurboFan.
+  static const int kMaxCallPolymorphism = 4;
+
   struct Candidate {
-    Handle<JSFunction> function;  // The call target being inlined.
-    Node* node;                   // The call site at which to inline.
-    int calls;                    // Number of times the call site was hit.
+    Handle<JSFunction> functions[kMaxCallPolymorphism];
+    int num_functions;
+    Node* node = nullptr;    // The call site at which to inline.
+    float frequency = 0.0f;  // Relative frequency of this call site.
   };
 
   // Comparator for candidates.
@@ -46,12 +51,18 @@
 
   // Dumps candidates to console.
   void PrintCandidates();
+  Reduction InlineCandidate(Candidate const& candidate);
+
+  CommonOperatorBuilder* common() const;
+  Graph* graph() const;
+  JSGraph* jsgraph() const { return jsgraph_; }
+  SimplifiedOperatorBuilder* simplified() const;
 
   Mode const mode_;
   JSInliner inliner_;
   Candidates candidates_;
   ZoneSet<NodeId> seen_;
-  CompilationInfo* info_;
+  JSGraph* const jsgraph_;
   int cumulative_count_ = 0;
 };
 
diff --git a/src/compiler/js-inlining.cc b/src/compiler/js-inlining.cc
index 635daa4..58e5a27 100644
--- a/src/compiler/js-inlining.cc
+++ b/src/compiler/js-inlining.cc
@@ -6,10 +6,12 @@
 
 #include "src/ast/ast-numbering.h"
 #include "src/ast/ast.h"
-#include "src/ast/scopes.h"
+#include "src/compilation-info.h"
 #include "src/compiler.h"
+#include "src/compiler/all-nodes.h"
 #include "src/compiler/ast-graph-builder.h"
 #include "src/compiler/ast-loop-assignment-analyzer.h"
+#include "src/compiler/bytecode-graph-builder.h"
 #include "src/compiler/common-operator.h"
 #include "src/compiler/graph-reducer.h"
 #include "src/compiler/js-operator.h"
@@ -68,13 +70,20 @@
     return call_->op()->ValueInputCount() - 2;
   }
 
+  float frequency() const {
+    return (call_->opcode() == IrOpcode::kJSCallFunction)
+               ? CallFunctionParametersOf(call_->op()).frequency()
+               : CallConstructParametersOf(call_->op()).frequency();
+  }
+
  private:
   Node* call_;
 };
 
-
 Reduction JSInliner::InlineCall(Node* call, Node* new_target, Node* context,
-                                Node* frame_state, Node* start, Node* end) {
+                                Node* frame_state, Node* start, Node* end,
+                                Node* exception_target,
+                                const NodeVector& uncaught_subcalls) {
   // The scheduler is smart enough to place our code; we just ensure {control}
   // becomes the control input of the start of the inlinee, and {effect} becomes
   // the effect input of the start of the inlinee.
@@ -131,6 +140,44 @@
     }
   }
 
+  if (exception_target != nullptr) {
+    // Link uncaught calls in the inlinee to {exception_target}
+    int subcall_count = static_cast<int>(uncaught_subcalls.size());
+    if (subcall_count > 0) {
+      TRACE(
+          "Inlinee contains %d calls without IfException; "
+          "linking to existing IfException\n",
+          subcall_count);
+    }
+    NodeVector on_exception_nodes(local_zone_);
+    for (Node* subcall : uncaught_subcalls) {
+      Node* on_exception =
+          graph()->NewNode(common()->IfException(), subcall, subcall);
+      on_exception_nodes.push_back(on_exception);
+    }
+
+    DCHECK_EQ(subcall_count, static_cast<int>(on_exception_nodes.size()));
+    if (subcall_count > 0) {
+      Node* control_output =
+          graph()->NewNode(common()->Merge(subcall_count), subcall_count,
+                           &on_exception_nodes.front());
+      NodeVector values_effects(local_zone_);
+      values_effects = on_exception_nodes;
+      values_effects.push_back(control_output);
+      Node* value_output = graph()->NewNode(
+          common()->Phi(MachineRepresentation::kTagged, subcall_count),
+          subcall_count + 1, &values_effects.front());
+      Node* effect_output =
+          graph()->NewNode(common()->EffectPhi(subcall_count),
+                           subcall_count + 1, &values_effects.front());
+      ReplaceWithValue(exception_target, value_output, effect_output,
+                       control_output);
+    } else {
+      ReplaceWithValue(exception_target, exception_target, exception_target,
+                       jsgraph()->Dead());
+    }
+  }
+
   NodeVector values(local_zone_);
   NodeVector effects(local_zone_);
   NodeVector controls(local_zone_);
@@ -235,6 +282,56 @@
 
 namespace {
 
+// TODO(bmeurer): Unify this with the witness helper functions in the
+// js-builtin-reducer.cc once we have a better understanding of the
+// map tracking we want to do, and eventually changed the CheckMaps
+// operator to carry map constants on the operator instead of inputs.
+// I.e. if the CheckMaps has some kind of SmallMapSet as operator
+// parameter, then this could be changed to call a generic
+//
+//   SmallMapSet NodeProperties::CollectMapWitness(receiver, effect)
+//
+// function, which either returns the map set from the CheckMaps or
+// a singleton set from a StoreField.
+bool NeedsConvertReceiver(Node* receiver, Node* effect) {
+  for (Node* dominator = effect;;) {
+    if (dominator->opcode() == IrOpcode::kCheckMaps &&
+        dominator->InputAt(0) == receiver) {
+      // Check if all maps have the given {instance_type}.
+      for (int i = 1; i < dominator->op()->ValueInputCount(); ++i) {
+        HeapObjectMatcher m(NodeProperties::GetValueInput(dominator, i));
+        if (!m.HasValue()) return true;
+        Handle<Map> const map = Handle<Map>::cast(m.Value());
+        if (!map->IsJSReceiverMap()) return true;
+      }
+      return false;
+    }
+    switch (dominator->opcode()) {
+      case IrOpcode::kStoreField: {
+        FieldAccess const& access = FieldAccessOf(dominator->op());
+        if (access.base_is_tagged == kTaggedBase &&
+            access.offset == HeapObject::kMapOffset) {
+          return true;
+        }
+        break;
+      }
+      case IrOpcode::kStoreElement:
+      case IrOpcode::kStoreTypedElement:
+        break;
+      default: {
+        DCHECK_EQ(1, dominator->op()->EffectOutputCount());
+        if (dominator->op()->EffectInputCount() != 1 ||
+            !dominator->op()->HasProperty(Operator::kNoWrite)) {
+          // Didn't find any appropriate CheckMaps node.
+          return true;
+        }
+        break;
+      }
+    }
+    dominator = NodeProperties::GetEffectInput(dominator);
+  }
+}
+
 // TODO(mstarzinger,verwaest): Move this predicate onto SharedFunctionInfo?
 bool NeedsImplicitReceiver(Handle<SharedFunctionInfo> shared_info) {
   DisallowHeapAllocation no_gc;
@@ -270,7 +367,6 @@
   return ReduceJSCall(node, function);
 }
 
-
 Reduction JSInliner::ReduceJSCall(Node* node, Handle<JSFunction> function) {
   DCHECK(IrOpcode::IsInlineeOpcode(node->opcode()));
   JSCallAccessor call(node);
@@ -344,12 +440,35 @@
     }
   }
 
-  // TODO(turbofan): Inlining into a try-block is not yet supported.
-  if (NodeProperties::IsExceptionalCall(node)) {
-    TRACE("Not inlining %s into %s because of surrounding try-block\n",
+  // Find the IfException node, if any.
+  Node* exception_target = nullptr;
+  for (Edge edge : node->use_edges()) {
+    if (NodeProperties::IsControlEdge(edge) &&
+        edge.from()->opcode() == IrOpcode::kIfException) {
+      DCHECK_NULL(exception_target);
+      exception_target = edge.from();
+    }
+  }
+
+  NodeVector uncaught_subcalls(local_zone_);
+
+  if (exception_target != nullptr) {
+    if (!FLAG_inline_into_try) {
+      TRACE(
+          "Try block surrounds #%d:%s and --no-inline-into-try active, so not "
+          "inlining %s into %s.\n",
+          exception_target->id(), exception_target->op()->mnemonic(),
           shared_info->DebugName()->ToCString().get(),
           info_->shared_info()->DebugName()->ToCString().get());
-    return NoChange();
+      return NoChange();
+    } else {
+      TRACE(
+          "Inlining %s into %s regardless of surrounding try-block to catcher "
+          "#%d:%s\n",
+          shared_info->DebugName()->ToCString().get(),
+          info_->shared_info()->DebugName()->ToCString().get(),
+          exception_target->id(), exception_target->op()->mnemonic());
+    }
   }
 
   Zone zone(info_->isolate()->allocator());
@@ -357,8 +476,20 @@
   CompilationInfo info(&parse_info, function);
   if (info_->is_deoptimization_enabled()) info.MarkAsDeoptimizationEnabled();
   if (info_->is_type_feedback_enabled()) info.MarkAsTypeFeedbackEnabled();
+  if (info_->is_optimizing_from_bytecode()) info.MarkAsOptimizeFromBytecode();
 
-  if (!Compiler::ParseAndAnalyze(info.parse_info())) {
+  if (info.is_optimizing_from_bytecode() && !Compiler::EnsureBytecode(&info)) {
+    TRACE("Not inlining %s into %s because bytecode generation failed\n",
+          shared_info->DebugName()->ToCString().get(),
+          info_->shared_info()->DebugName()->ToCString().get());
+    if (info_->isolate()->has_pending_exception()) {
+      info_->isolate()->clear_pending_exception();
+    }
+    return NoChange();
+  }
+
+  if (!info.is_optimizing_from_bytecode() &&
+      !Compiler::ParseAndAnalyze(info.parse_info())) {
     TRACE("Not inlining %s into %s because parsing failed\n",
           shared_info->DebugName()->ToCString().get(),
           info_->shared_info()->DebugName()->ToCString().get());
@@ -368,7 +499,8 @@
     return NoChange();
   }
 
-  if (!Compiler::EnsureDeoptimizationSupport(&info)) {
+  if (!info.is_optimizing_from_bytecode() &&
+      !Compiler::EnsureDeoptimizationSupport(&info)) {
     TRACE("Not inlining %s into %s because deoptimization support failed\n",
           shared_info->DebugName()->ToCString().get(),
           info_->shared_info()->DebugName()->ToCString().get());
@@ -388,13 +520,23 @@
         shared_info->DebugName()->ToCString().get(),
         info_->shared_info()->DebugName()->ToCString().get());
 
-  // If function was lazily compiled, it's literals array may not yet be set up.
+  // If function was lazily compiled, its literals array may not yet be set up.
   JSFunction::EnsureLiterals(function);
 
   // Create the subgraph for the inlinee.
   Node* start;
   Node* end;
-  {
+  if (info.is_optimizing_from_bytecode()) {
+    // Run the BytecodeGraphBuilder to create the subgraph.
+    Graph::SubgraphScope scope(graph());
+    BytecodeGraphBuilder graph_builder(&zone, &info, jsgraph(),
+                                       call.frequency());
+    graph_builder.CreateGraph();
+
+    // Extract the inlinee start/end nodes.
+    start = graph()->start();
+    end = graph()->end();
+  } else {
     // Run the loop assignment analyzer on the inlinee.
     AstLoopAssignmentAnalyzer loop_assignment_analyzer(&zone, &info);
     LoopAssignmentAnalysis* loop_assignment =
@@ -407,8 +549,8 @@
 
     // Run the AstGraphBuilder to create the subgraph.
     Graph::SubgraphScope scope(graph());
-    AstGraphBuilder graph_builder(&zone, &info, jsgraph(), loop_assignment,
-                                  type_hint_analysis);
+    AstGraphBuilder graph_builder(&zone, &info, jsgraph(), call.frequency(),
+                                  loop_assignment, type_hint_analysis);
     graph_builder.CreateGraph(false);
 
     // Extract the inlinee start/end nodes.
@@ -416,6 +558,29 @@
     end = graph()->end();
   }
 
+  if (exception_target != nullptr) {
+    // Find all uncaught 'calls' in the inlinee.
+    AllNodes inlined_nodes(local_zone_, end, graph());
+    for (Node* subnode : inlined_nodes.reachable) {
+      // Every possibly throwing node with an IfSuccess should get an
+      // IfException.
+      if (subnode->op()->HasProperty(Operator::kNoThrow)) {
+        continue;
+      }
+      bool hasIfException = false;
+      for (Node* use : subnode->uses()) {
+        if (use->opcode() == IrOpcode::kIfException) {
+          hasIfException = true;
+          break;
+        }
+      }
+      if (!hasIfException) {
+        DCHECK_EQ(2, subnode->op()->ControlOutputCount());
+        uncaught_subcalls.push_back(subnode);
+      }
+    }
+  }
+
   Node* frame_state = call.frame_state();
   Node* new_target = jsgraph()->UndefinedConstant();
 
@@ -475,15 +640,17 @@
   // in that frame state tho, as the conversion of the receiver can be repeated
   // any number of times, it's not observable.
   if (node->opcode() == IrOpcode::kJSCallFunction &&
-      is_sloppy(parse_info.language_mode()) && !shared_info->native()) {
-    const CallFunctionParameters& p = CallFunctionParametersOf(node->op());
-    Node* frame_state_before = NodeProperties::FindFrameStateBefore(node);
+      is_sloppy(shared_info->language_mode()) && !shared_info->native()) {
     Node* effect = NodeProperties::GetEffectInput(node);
-    Node* convert = graph()->NewNode(
-        javascript()->ConvertReceiver(p.convert_mode()), call.receiver(),
-        context, frame_state_before, effect, start);
-    NodeProperties::ReplaceValueInput(node, convert, 1);
-    NodeProperties::ReplaceEffectInput(node, convert);
+    if (NeedsConvertReceiver(call.receiver(), effect)) {
+      const CallFunctionParameters& p = CallFunctionParametersOf(node->op());
+      Node* frame_state_before = NodeProperties::FindFrameStateBefore(node);
+      Node* convert = effect = graph()->NewNode(
+          javascript()->ConvertReceiver(p.convert_mode()), call.receiver(),
+          context, frame_state_before, effect, start);
+      NodeProperties::ReplaceValueInput(node, convert, 1);
+      NodeProperties::ReplaceEffectInput(node, effect);
+    }
   }
 
   // If we are inlining a JS call at tail position then we have to pop current
@@ -504,7 +671,7 @@
   // count (i.e. value outputs of start node minus target, receiver, new target,
   // arguments count and context) have to match the number of arguments passed
   // to the call.
-  int parameter_count = info.literal()->parameter_count();
+  int parameter_count = shared_info->internal_formal_parameter_count();
   DCHECK_EQ(parameter_count, start->op()->ValueOutputCount() - 5);
   if (call.formal_arguments() != parameter_count) {
     frame_state = CreateArtificialFrameState(
@@ -512,7 +679,8 @@
         FrameStateType::kArgumentsAdaptor, shared_info);
   }
 
-  return InlineCall(node, new_target, context, frame_state, start, end);
+  return InlineCall(node, new_target, context, frame_state, start, end,
+                    exception_target, uncaught_subcalls);
 }
 
 Graph* JSInliner::graph() const { return jsgraph()->graph(); }
diff --git a/src/compiler/js-inlining.h b/src/compiler/js-inlining.h
index 49487f5..323c3ae 100644
--- a/src/compiler/js-inlining.h
+++ b/src/compiler/js-inlining.h
@@ -54,7 +54,9 @@
   Node* CreateTailCallerFrameState(Node* node, Node* outer_frame_state);
 
   Reduction InlineCall(Node* call, Node* new_target, Node* context,
-                       Node* frame_state, Node* start, Node* end);
+                       Node* frame_state, Node* start, Node* end,
+                       Node* exception_target,
+                       const NodeVector& uncaught_subcalls);
 };
 
 }  // namespace compiler
diff --git a/src/compiler/js-intrinsic-lowering.cc b/src/compiler/js-intrinsic-lowering.cc
index 3324508..7fc50e5 100644
--- a/src/compiler/js-intrinsic-lowering.cc
+++ b/src/compiler/js-intrinsic-lowering.cc
@@ -302,10 +302,10 @@
 
 Reduction JSIntrinsicLowering::ReduceCall(Node* node) {
   size_t const arity = CallRuntimeParametersOf(node->op()).arity();
-  NodeProperties::ChangeOp(node,
-                           javascript()->CallFunction(arity, VectorSlotPair(),
-                                                      ConvertReceiverMode::kAny,
-                                                      TailCallMode::kDisallow));
+  NodeProperties::ChangeOp(
+      node, javascript()->CallFunction(arity, 0.0f, VectorSlotPair(),
+                                       ConvertReceiverMode::kAny,
+                                       TailCallMode::kDisallow));
   return Changed(node);
 }
 
diff --git a/src/compiler/js-native-context-specialization.cc b/src/compiler/js-native-context-specialization.cc
index b76744e..ab20d93 100644
--- a/src/compiler/js-native-context-specialization.cc
+++ b/src/compiler/js-native-context-specialization.cc
@@ -13,9 +13,9 @@
 #include "src/compiler/js-operator.h"
 #include "src/compiler/linkage.h"
 #include "src/compiler/node-matchers.h"
+#include "src/compiler/type-cache.h"
 #include "src/field-index-inl.h"
 #include "src/isolate-inl.h"
-#include "src/type-cache.h"
 #include "src/type-feedback-vector.h"
 
 namespace v8 {
@@ -70,6 +70,8 @@
 
 Reduction JSNativeContextSpecialization::Reduce(Node* node) {
   switch (node->opcode()) {
+    case IrOpcode::kJSInstanceOf:
+      return ReduceJSInstanceOf(node);
     case IrOpcode::kJSLoadContext:
       return ReduceJSLoadContext(node);
     case IrOpcode::kJSLoadNamed:
@@ -86,6 +88,99 @@
   return NoChange();
 }
 
+Reduction JSNativeContextSpecialization::ReduceJSInstanceOf(Node* node) {
+  DCHECK_EQ(IrOpcode::kJSInstanceOf, node->opcode());
+  Node* object = NodeProperties::GetValueInput(node, 0);
+  Node* constructor = NodeProperties::GetValueInput(node, 1);
+  Node* context = NodeProperties::GetContextInput(node);
+  Node* effect = NodeProperties::GetEffectInput(node);
+  Node* control = NodeProperties::GetControlInput(node);
+
+  // Retrieve the native context from the given {node}.
+  Handle<Context> native_context;
+  if (!GetNativeContext(node).ToHandle(&native_context)) return NoChange();
+
+  // If deoptimization is disabled, we cannot optimize.
+  if (!(flags() & kDeoptimizationEnabled)) return NoChange();
+
+  // Check if the right hand side is a known {receiver}.
+  HeapObjectMatcher m(constructor);
+  if (!m.HasValue() || !m.Value()->IsJSObject()) return NoChange();
+  Handle<JSObject> receiver = Handle<JSObject>::cast(m.Value());
+  Handle<Map> receiver_map(receiver->map(), isolate());
+
+  // Compute property access info for @@hasInstance on {receiver}.
+  PropertyAccessInfo access_info;
+  AccessInfoFactory access_info_factory(dependencies(), native_context,
+                                        graph()->zone());
+  if (!access_info_factory.ComputePropertyAccessInfo(
+          receiver_map, factory()->has_instance_symbol(), AccessMode::kLoad,
+          &access_info)) {
+    return NoChange();
+  }
+
+  if (access_info.IsNotFound()) {
+    // If there's no @@hasInstance handler, the OrdinaryHasInstance operation
+    // takes over, but that requires the {receiver} to be callable.
+    if (receiver->IsCallable()) {
+      // Determine actual holder and perform prototype chain checks.
+      Handle<JSObject> holder;
+      if (access_info.holder().ToHandle(&holder)) {
+        AssumePrototypesStable(access_info.receiver_maps(), native_context,
+                               holder);
+      }
+
+      // Monomorphic property access.
+      effect =
+          BuildCheckMaps(constructor, effect, control, MapList{receiver_map});
+
+      // Lower to OrdinaryHasInstance(C, O).
+      NodeProperties::ReplaceValueInput(node, constructor, 0);
+      NodeProperties::ReplaceValueInput(node, object, 1);
+      NodeProperties::ReplaceEffectInput(node, effect);
+      NodeProperties::ChangeOp(node, javascript()->OrdinaryHasInstance());
+      return Changed(node);
+    }
+  } else if (access_info.IsDataConstant()) {
+    DCHECK(access_info.constant()->IsCallable());
+
+    // Determine actual holder and perform prototype chain checks.
+    Handle<JSObject> holder;
+    if (access_info.holder().ToHandle(&holder)) {
+      AssumePrototypesStable(access_info.receiver_maps(), native_context,
+                             holder);
+    }
+
+    // Monomorphic property access.
+    effect =
+        BuildCheckMaps(constructor, effect, control, MapList{receiver_map});
+
+    // Call the @@hasInstance handler.
+    Node* target = jsgraph()->Constant(access_info.constant());
+    node->InsertInput(graph()->zone(), 0, target);
+    node->ReplaceInput(1, constructor);
+    node->ReplaceInput(2, object);
+    node->ReplaceInput(5, effect);
+    NodeProperties::ChangeOp(
+        node,
+        javascript()->CallFunction(3, 0.0f, VectorSlotPair(),
+                                   ConvertReceiverMode::kNotNullOrUndefined));
+
+    // Rewire the value uses of {node} to ToBoolean conversion of the result.
+    Node* value = graph()->NewNode(javascript()->ToBoolean(ToBooleanHint::kAny),
+                                   node, context);
+    for (Edge edge : node->use_edges()) {
+      if (NodeProperties::IsValueEdge(edge) && edge.from() != value) {
+        edge.UpdateTo(value);
+        Revisit(edge.from());
+      }
+    }
+    return Changed(node);
+  }
+
+  return NoChange();
+}
+
 Reduction JSNativeContextSpecialization::ReduceJSLoadContext(Node* node) {
   DCHECK_EQ(IrOpcode::kJSLoadContext, node->opcode());
   ContextAccess const& access = ContextAccessOf(node->op());
@@ -168,7 +263,7 @@
                                            receiver, effect, control);
     } else {
       // Monomorphic property access.
-      effect = BuildCheckTaggedPointer(receiver, effect, control);
+      effect = BuildCheckHeapObject(receiver, effect, control);
       effect = BuildCheckMaps(receiver, effect, control,
                               access_info.receiver_maps());
     }
@@ -206,7 +301,7 @@
       receiverissmi_control = graph()->NewNode(common()->IfTrue(), branch);
       receiverissmi_effect = effect;
     } else {
-      effect = BuildCheckTaggedPointer(receiver, effect, control);
+      effect = BuildCheckHeapObject(receiver, effect, control);
     }
 
     // Load the {receiver} map. The resulting effect is the dominating effect
@@ -510,7 +605,7 @@
     }
 
     // Ensure that {receiver} is a heap object.
-    effect = BuildCheckTaggedPointer(receiver, effect, control);
+    effect = BuildCheckHeapObject(receiver, effect, control);
 
     // Check for the monomorphic case.
     if (access_infos.size() == 1) {
@@ -818,13 +913,14 @@
     DCHECK_EQ(AccessMode::kLoad, access_mode);
     value = jsgraph()->UndefinedConstant();
   } else if (access_info.IsDataConstant()) {
-    value = jsgraph()->Constant(access_info.constant());
+    Node* constant_value = jsgraph()->Constant(access_info.constant());
     if (access_mode == AccessMode::kStore) {
-      Node* check =
-          graph()->NewNode(simplified()->ReferenceEqual(), value, value);
+      Node* check = graph()->NewNode(simplified()->ReferenceEqual(), value,
+                                     constant_value);
       effect =
           graph()->NewNode(simplified()->CheckIf(), check, effect, control);
     }
+    value = constant_value;
   } else if (access_info.IsAccessorConstant()) {
     // TODO(bmeurer): Properly rewire the IfException edge here if there's any.
     Node* target = jsgraph()->Constant(access_info.constant());
@@ -849,7 +945,8 @@
         // Introduce the call to the getter function.
         value = effect = graph()->NewNode(
             javascript()->CallFunction(
-                2, VectorSlotPair(), ConvertReceiverMode::kNotNullOrUndefined),
+                2, 0.0f, VectorSlotPair(),
+                ConvertReceiverMode::kNotNullOrUndefined),
             target, receiver, context, frame_state0, effect, control);
         control = graph()->NewNode(common()->IfSuccess(), value);
         break;
@@ -869,10 +966,11 @@
             context, target, frame_state);
 
         // Introduce the call to the setter function.
-        effect = graph()->NewNode(
-            javascript()->CallFunction(
-                3, VectorSlotPair(), ConvertReceiverMode::kNotNullOrUndefined),
-            target, receiver, value, context, frame_state0, effect, control);
+        effect = graph()->NewNode(javascript()->CallFunction(
+                                      3, 0.0f, VectorSlotPair(),
+                                      ConvertReceiverMode::kNotNullOrUndefined),
+                                  target, receiver, value, context,
+                                  frame_state0, effect, control);
         control = graph()->NewNode(common()->IfSuccess(), effect);
         break;
       }
@@ -881,9 +979,25 @@
     DCHECK(access_info.IsDataField());
     FieldIndex const field_index = access_info.field_index();
     Type* const field_type = access_info.field_type();
-    if (access_mode == AccessMode::kLoad &&
-        access_info.holder().ToHandle(&holder)) {
-      receiver = jsgraph()->Constant(holder);
+    MachineRepresentation const field_representation =
+        access_info.field_representation();
+    if (access_mode == AccessMode::kLoad) {
+      if (access_info.holder().ToHandle(&holder)) {
+        receiver = jsgraph()->Constant(holder);
+      }
+      // Optimize immutable property loads.
+      HeapObjectMatcher m(receiver);
+      if (m.HasValue() && m.Value()->IsJSObject()) {
+        // TODO(turbofan): Given that we already have the field_index here, we
+        // might be smarter in the future and not rely on the LookupIterator,
+        // but for now let's just do what Crankshaft does.
+        LookupIterator it(m.Value(), name,
+                          LookupIterator::OWN_SKIP_INTERCEPTOR);
+        if (it.IsFound() && it.IsReadOnly() && !it.IsConfigurable()) {
+          Node* value = jsgraph()->Constant(JSReceiver::GetDataProperty(&it));
+          return ValueEffectControl(value, effect, control);
+        }
+      }
     }
     Node* storage = receiver;
     if (!field_index.is_inobject()) {
@@ -892,89 +1006,112 @@
           storage, effect, control);
     }
     FieldAccess field_access = {
-        kTaggedBase, field_index.offset(),     name,
-        field_type,  MachineType::AnyTagged(), kFullWriteBarrier};
+        kTaggedBase,
+        field_index.offset(),
+        name,
+        field_type,
+        MachineType::TypeForRepresentation(field_representation),
+        kFullWriteBarrier};
     if (access_mode == AccessMode::kLoad) {
-      if (field_type->Is(Type::UntaggedFloat64())) {
-        // TODO(turbofan): We remove the representation axis from the type to
-        // avoid uninhabited representation types. This is a workaround until
-        // the {PropertyAccessInfo} is using {MachineRepresentation} instead.
-        field_access.type = Type::Union(
-            field_type, Type::Representation(Type::Number(), zone()), zone());
+      if (field_representation == MachineRepresentation::kFloat64) {
         if (!field_index.is_inobject() || field_index.is_hidden_field() ||
             !FLAG_unbox_double_fields) {
-          storage = effect = graph()->NewNode(
-              simplified()->LoadField(field_access), storage, effect, control);
+          FieldAccess const storage_access = {kTaggedBase,
+                                              field_index.offset(),
+                                              name,
+                                              Type::OtherInternal(),
+                                              MachineType::TaggedPointer(),
+                                              kPointerWriteBarrier};
+          storage = effect =
+              graph()->NewNode(simplified()->LoadField(storage_access), storage,
+                               effect, control);
           field_access.offset = HeapNumber::kValueOffset;
           field_access.name = MaybeHandle<Name>();
         }
-        field_access.machine_type = MachineType::Float64();
       }
+      // TODO(turbofan): Track the field_map (if any) on the {field_access} and
+      // use it in LoadElimination to eliminate map checks.
       value = effect = graph()->NewNode(simplified()->LoadField(field_access),
                                         storage, effect, control);
     } else {
       DCHECK_EQ(AccessMode::kStore, access_mode);
-      if (field_type->Is(Type::UntaggedFloat64())) {
-        // TODO(turbofan): We remove the representation axis from the type to
-        // avoid uninhabited representation types. This is a workaround until
-        // the {PropertyAccessInfo} is using {MachineRepresentation} instead.
-        field_access.type = Type::Union(
-            field_type, Type::Representation(Type::Number(), zone()), zone());
-        value = effect = graph()->NewNode(simplified()->CheckNumber(), value,
-                                          effect, control);
+      switch (field_representation) {
+        case MachineRepresentation::kFloat64: {
+          value = effect = graph()->NewNode(simplified()->CheckNumber(), value,
+                                            effect, control);
+          if (!field_index.is_inobject() || field_index.is_hidden_field() ||
+              !FLAG_unbox_double_fields) {
+            if (access_info.HasTransitionMap()) {
+              // Allocate a MutableHeapNumber for the new property.
+              effect = graph()->NewNode(
+                  common()->BeginRegion(RegionObservability::kNotObservable),
+                  effect);
+              Node* box = effect = graph()->NewNode(
+                  simplified()->Allocate(NOT_TENURED),
+                  jsgraph()->Constant(HeapNumber::kSize), effect, control);
+              effect = graph()->NewNode(
+                  simplified()->StoreField(AccessBuilder::ForMap()), box,
+                  jsgraph()->HeapConstant(factory()->mutable_heap_number_map()),
+                  effect, control);
+              effect = graph()->NewNode(
+                  simplified()->StoreField(AccessBuilder::ForHeapNumberValue()),
+                  box, value, effect, control);
+              value = effect =
+                  graph()->NewNode(common()->FinishRegion(), box, effect);
 
-        if (!field_index.is_inobject() || field_index.is_hidden_field() ||
-            !FLAG_unbox_double_fields) {
-          if (access_info.HasTransitionMap()) {
-            // Allocate a MutableHeapNumber for the new property.
-            effect = graph()->NewNode(
-                common()->BeginRegion(RegionObservability::kNotObservable),
-                effect);
-            Node* box = effect = graph()->NewNode(
-                simplified()->Allocate(NOT_TENURED),
-                jsgraph()->Constant(HeapNumber::kSize), effect, control);
-            effect = graph()->NewNode(
-                simplified()->StoreField(AccessBuilder::ForMap()), box,
-                jsgraph()->HeapConstant(factory()->mutable_heap_number_map()),
-                effect, control);
-            effect = graph()->NewNode(
-                simplified()->StoreField(AccessBuilder::ForHeapNumberValue()),
-                box, value, effect, control);
-            value = effect =
-                graph()->NewNode(common()->FinishRegion(), box, effect);
-
-            field_access.type = Type::TaggedPointer();
-          } else {
-            // We just store directly to the MutableHeapNumber.
-            storage = effect =
-                graph()->NewNode(simplified()->LoadField(field_access), storage,
-                                 effect, control);
-            field_access.offset = HeapNumber::kValueOffset;
-            field_access.name = MaybeHandle<Name>();
-            field_access.machine_type = MachineType::Float64();
+              field_access.type = Type::Any();
+              field_access.machine_type = MachineType::TaggedPointer();
+              field_access.write_barrier_kind = kPointerWriteBarrier;
+            } else {
+              // We just store directly to the MutableHeapNumber.
+              FieldAccess const storage_access = {kTaggedBase,
+                                                  field_index.offset(),
+                                                  name,
+                                                  Type::OtherInternal(),
+                                                  MachineType::TaggedPointer(),
+                                                  kPointerWriteBarrier};
+              storage = effect =
+                  graph()->NewNode(simplified()->LoadField(storage_access),
+                                   storage, effect, control);
+              field_access.offset = HeapNumber::kValueOffset;
+              field_access.name = MaybeHandle<Name>();
+              field_access.machine_type = MachineType::Float64();
+            }
           }
-        } else {
-          // Unboxed double field, we store directly to the field.
-          field_access.machine_type = MachineType::Float64();
+          break;
         }
-      } else if (field_type->Is(Type::TaggedSigned())) {
-        value = effect = graph()->NewNode(simplified()->CheckTaggedSigned(),
-                                          value, effect, control);
-      } else if (field_type->Is(Type::TaggedPointer())) {
-        // Ensure that {value} is a HeapObject.
-        value = effect = graph()->NewNode(simplified()->CheckTaggedPointer(),
-                                          value, effect, control);
-        if (field_type->NumClasses() == 1) {
-          // Emit a map check for the value.
-          Node* field_map =
-              jsgraph()->Constant(field_type->Classes().Current());
-          effect = graph()->NewNode(simplified()->CheckMaps(1), value,
-                                    field_map, effect, control);
-        } else {
-          DCHECK_EQ(0, field_type->NumClasses());
+        case MachineRepresentation::kTaggedSigned: {
+          value = effect = graph()->NewNode(simplified()->CheckSmi(), value,
+                                            effect, control);
+          field_access.write_barrier_kind = kNoWriteBarrier;
+          break;
         }
-      } else {
-        DCHECK(field_type->Is(Type::Tagged()));
+        case MachineRepresentation::kTaggedPointer: {
+          // Ensure that {value} is a HeapObject.
+          value = effect = graph()->NewNode(simplified()->CheckHeapObject(),
+                                            value, effect, control);
+          Handle<Map> field_map;
+          if (access_info.field_map().ToHandle(&field_map)) {
+            // Emit a map check for the value.
+            effect = graph()->NewNode(simplified()->CheckMaps(1), value,
+                                      jsgraph()->HeapConstant(field_map),
+                                      effect, control);
+          }
+          field_access.write_barrier_kind = kPointerWriteBarrier;
+          break;
+        }
+        case MachineRepresentation::kTagged:
+          break;
+        case MachineRepresentation::kNone:
+        case MachineRepresentation::kBit:
+        case MachineRepresentation::kWord8:
+        case MachineRepresentation::kWord16:
+        case MachineRepresentation::kWord32:
+        case MachineRepresentation::kWord64:
+        case MachineRepresentation::kFloat32:
+        case MachineRepresentation::kSimd128:
+          UNREACHABLE();
+          break;
       }
       Handle<Map> transition_map;
       if (access_info.transition_map().ToHandle(&transition_map)) {
@@ -1048,20 +1185,13 @@
     Node* buffer = effect = graph()->NewNode(
         simplified()->LoadField(AccessBuilder::ForJSArrayBufferViewBuffer()),
         receiver, effect, control);
-    Node* buffer_bitfield = effect = graph()->NewNode(
-        simplified()->LoadField(AccessBuilder::ForJSArrayBufferBitField()),
-        buffer, effect, control);
-    Node* check = graph()->NewNode(
-        simplified()->NumberEqual(),
-        graph()->NewNode(
-            simplified()->NumberBitwiseAnd(), buffer_bitfield,
-            jsgraph()->Constant(JSArrayBuffer::WasNeutered::kMask)),
-        jsgraph()->ZeroConstant());
+    Node* check = effect = graph()->NewNode(
+        simplified()->ArrayBufferWasNeutered(), buffer, effect, control);
 
     // Default to zero if the {receiver}s buffer was neutered.
     length = graph()->NewNode(
-        common()->Select(MachineRepresentation::kTagged, BranchHint::kTrue),
-        check, length, jsgraph()->ZeroConstant());
+        common()->Select(MachineRepresentation::kTagged, BranchHint::kFalse),
+        check, jsgraph()->ZeroConstant(), length);
 
     if (store_mode == STORE_NO_TRANSITION_IGNORE_OUT_OF_BOUNDS) {
       // Check that the {index} is a valid array index, we do the actual
@@ -1175,6 +1305,7 @@
       element_machine_type = MachineType::Float64();
     } else if (IsFastSmiElementsKind(elements_kind)) {
       element_type = type_cache_.kSmi;
+      element_machine_type = MachineType::TaggedSigned();
     }
     ElementAccess element_access = {kTaggedBase, FixedArray::kHeaderSize,
                                     element_type, element_machine_type,
@@ -1188,6 +1319,7 @@
           elements_kind == FAST_HOLEY_SMI_ELEMENTS) {
         element_access.type =
             Type::Union(element_type, Type::Hole(), graph()->zone());
+        element_access.machine_type = MachineType::AnyTagged();
       }
       // Perform the actual backing store access.
       value = effect =
@@ -1221,8 +1353,8 @@
     } else {
       DCHECK_EQ(AccessMode::kStore, access_mode);
       if (IsFastSmiElementsKind(elements_kind)) {
-        value = effect = graph()->NewNode(simplified()->CheckTaggedSigned(),
-                                          value, effect, control);
+        value = effect =
+            graph()->NewNode(simplified()->CheckSmi(), value, effect, control);
       } else if (IsFastDoubleElementsKind(elements_kind)) {
         value = effect = graph()->NewNode(simplified()->CheckNumber(), value,
                                           effect, control);
@@ -1293,9 +1425,9 @@
                           inputs);
 }
 
-Node* JSNativeContextSpecialization::BuildCheckTaggedPointer(Node* receiver,
-                                                             Node* effect,
-                                                             Node* control) {
+Node* JSNativeContextSpecialization::BuildCheckHeapObject(Node* receiver,
+                                                          Node* effect,
+                                                          Node* control) {
   switch (receiver->opcode()) {
     case IrOpcode::kHeapConstant:
     case IrOpcode::kJSCreate:
@@ -1314,8 +1446,8 @@
       return effect;
     }
     default: {
-      return graph()->NewNode(simplified()->CheckTaggedPointer(), receiver,
-                              effect, control);
+      return graph()->NewNode(simplified()->CheckHeapObject(), receiver, effect,
+                              control);
     }
   }
 }
diff --git a/src/compiler/js-native-context-specialization.h b/src/compiler/js-native-context-specialization.h
index 549dc93..c015de0 100644
--- a/src/compiler/js-native-context-specialization.h
+++ b/src/compiler/js-native-context-specialization.h
@@ -16,8 +16,6 @@
 class CompilationDependencies;
 class Factory;
 class FeedbackNexus;
-class TypeCache;
-
 
 namespace compiler {
 
@@ -30,7 +28,7 @@
 class MachineOperatorBuilder;
 class PropertyAccessInfo;
 class SimplifiedOperatorBuilder;
-
+class TypeCache;
 
 // Specializes a given JSGraph to a given native context, potentially constant
 // folding some {LoadGlobal} nodes or strength reducing some {StoreGlobal}
@@ -55,6 +53,7 @@
   Reduction Reduce(Node* node) final;
 
  private:
+  Reduction ReduceJSInstanceOf(Node* node);
   Reduction ReduceJSLoadContext(Node* node);
   Reduction ReduceJSLoadNamed(Node* node);
   Reduction ReduceJSStoreNamed(Node* node);
@@ -120,7 +119,7 @@
                        std::vector<Handle<Map>> const& maps);
 
   // Construct an appropriate heap object check.
-  Node* BuildCheckTaggedPointer(Node* receiver, Node* effect, Node* control);
+  Node* BuildCheckHeapObject(Node* receiver, Node* effect, Node* control);
 
   // Adds stability dependencies on all prototypes of every class in
   // {receiver_type} up to (and including) the {holder}.
diff --git a/src/compiler/js-operator.cc b/src/compiler/js-operator.cc
index d19bb76..21e905a 100644
--- a/src/compiler/js-operator.cc
+++ b/src/compiler/js-operator.cc
@@ -54,7 +54,8 @@
 
 bool operator==(CallConstructParameters const& lhs,
                 CallConstructParameters const& rhs) {
-  return lhs.arity() == rhs.arity() && lhs.feedback() == rhs.feedback();
+  return lhs.arity() == rhs.arity() && lhs.frequency() == rhs.frequency() &&
+         lhs.feedback() == rhs.feedback();
 }
 
 
@@ -65,12 +66,12 @@
 
 
 size_t hash_value(CallConstructParameters const& p) {
-  return base::hash_combine(p.arity(), p.feedback());
+  return base::hash_combine(p.arity(), p.frequency(), p.feedback());
 }
 
 
 std::ostream& operator<<(std::ostream& os, CallConstructParameters const& p) {
-  return os << p.arity();
+  return os << p.arity() << ", " << p.frequency();
 }
 
 
@@ -81,7 +82,8 @@
 
 
 std::ostream& operator<<(std::ostream& os, CallFunctionParameters const& p) {
-  os << p.arity() << ", " << p.convert_mode() << ", " << p.tail_call_mode();
+  os << p.arity() << ", " << p.frequency() << ", " << p.convert_mode() << ", "
+     << p.tail_call_mode();
   return os;
 }
 
@@ -157,6 +159,37 @@
   return OpParameter<ContextAccess>(op);
 }
 
+CreateCatchContextParameters::CreateCatchContextParameters(
+    Handle<String> catch_name, Handle<ScopeInfo> scope_info)
+    : catch_name_(catch_name), scope_info_(scope_info) {}
+
+bool operator==(CreateCatchContextParameters const& lhs,
+                CreateCatchContextParameters const& rhs) {
+  return lhs.catch_name().location() == rhs.catch_name().location() &&
+         lhs.scope_info().location() == rhs.scope_info().location();
+}
+
+bool operator!=(CreateCatchContextParameters const& lhs,
+                CreateCatchContextParameters const& rhs) {
+  return !(lhs == rhs);
+}
+
+size_t hash_value(CreateCatchContextParameters const& parameters) {
+  return base::hash_combine(parameters.catch_name().location(),
+                            parameters.scope_info().location());
+}
+
+std::ostream& operator<<(std::ostream& os,
+                         CreateCatchContextParameters const& parameters) {
+  return os << Brief(*parameters.catch_name()) << ", "
+            << Brief(*parameters.scope_info());
+}
+
+CreateCatchContextParameters const& CreateCatchContextParametersOf(
+    Operator const* op) {
+  DCHECK_EQ(IrOpcode::kJSCreateCatchContext, op->opcode());
+  return OpParameter<CreateCatchContextParameters>(op);
+}
 
 bool operator==(NamedAccess const& lhs, NamedAccess const& rhs) {
   return lhs.name().location() == rhs.name().location() &&
@@ -376,7 +409,7 @@
   return OpParameter<CreateLiteralParameters>(op);
 }
 
-const BinaryOperationHint BinaryOperationHintOf(const Operator* op) {
+BinaryOperationHint BinaryOperationHintOf(const Operator* op) {
   DCHECK(op->opcode() == IrOpcode::kJSBitwiseOr ||
          op->opcode() == IrOpcode::kJSBitwiseXor ||
          op->opcode() == IrOpcode::kJSBitwiseAnd ||
@@ -391,7 +424,7 @@
   return OpParameter<BinaryOperationHint>(op);
 }
 
-const CompareOperationHint CompareOperationHintOf(const Operator* op) {
+CompareOperationHint CompareOperationHintOf(const Operator* op) {
   DCHECK(op->opcode() == IrOpcode::kJSEqual ||
          op->opcode() == IrOpcode::kJSNotEqual ||
          op->opcode() == IrOpcode::kJSStrictEqual ||
@@ -415,15 +448,13 @@
   V(HasProperty, Operator::kNoProperties, 2, 1)             \
   V(TypeOf, Operator::kPure, 1, 1)                          \
   V(InstanceOf, Operator::kNoProperties, 2, 1)              \
-  V(ForInDone, Operator::kPure, 2, 1)                       \
+  V(OrdinaryHasInstance, Operator::kNoProperties, 2, 1)     \
   V(ForInNext, Operator::kNoProperties, 4, 1)               \
   V(ForInPrepare, Operator::kNoProperties, 1, 3)            \
-  V(ForInStep, Operator::kPure, 1, 1)                       \
   V(LoadMessage, Operator::kNoThrow, 0, 1)                  \
   V(StoreMessage, Operator::kNoThrow, 1, 0)                 \
   V(GeneratorRestoreContinuation, Operator::kNoThrow, 1, 1) \
-  V(StackCheck, Operator::kNoWrite, 0, 0)                   \
-  V(CreateWithContext, Operator::kNoProperties, 2, 1)
+  V(StackCheck, Operator::kNoWrite, 0, 0)
 
 #define BINARY_OP_LIST(V) \
   V(BitwiseOr)            \
@@ -476,6 +507,7 @@
   Name##Operator<BinaryOperationHint::kSigned32> k##Name##Signed32Operator;   \
   Name##Operator<BinaryOperationHint::kNumberOrOddball>                       \
       k##Name##NumberOrOddballOperator;                                       \
+  Name##Operator<BinaryOperationHint::kString> k##Name##StringOperator;       \
   Name##Operator<BinaryOperationHint::kAny> k##Name##AnyOperator;
   BINARY_OP_LIST(BINARY_OP)
 #undef BINARY_OP
@@ -523,6 +555,8 @@
         return &cache_.k##Name##Signed32Operator;                     \
       case BinaryOperationHint::kNumberOrOddball:                     \
         return &cache_.k##Name##NumberOrOddballOperator;              \
+      case BinaryOperationHint::kString:                              \
+        return &cache_.k##Name##StringOperator;                       \
       case BinaryOperationHint::kAny:                                 \
         return &cache_.k##Name##AnyOperator;                          \
     }                                                                 \
@@ -562,9 +596,9 @@
 }
 
 const Operator* JSOperatorBuilder::CallFunction(
-    size_t arity, VectorSlotPair const& feedback,
+    size_t arity, float frequency, VectorSlotPair const& feedback,
     ConvertReceiverMode convert_mode, TailCallMode tail_call_mode) {
-  CallFunctionParameters parameters(arity, feedback, tail_call_mode,
+  CallFunctionParameters parameters(arity, frequency, feedback, tail_call_mode,
                                     convert_mode);
   return new (zone()) Operator1<CallFunctionParameters>(   // --
       IrOpcode::kJSCallFunction, Operator::kNoProperties,  // opcode
@@ -598,10 +632,9 @@
       parameters);                                        // parameter
 }
 
-
 const Operator* JSOperatorBuilder::CallConstruct(
-    size_t arity, VectorSlotPair const& feedback) {
-  CallConstructParameters parameters(arity, feedback);
+    uint32_t arity, float frequency, VectorSlotPair const& feedback) {
+  CallConstructParameters parameters(arity, frequency, feedback);
   return new (zone()) Operator1<CallConstructParameters>(   // --
       IrOpcode::kJSCallConstruct, Operator::kNoProperties,  // opcode
       "JSCallConstruct",                                    // name
@@ -811,16 +844,24 @@
       slot_count);                                                  // parameter
 }
 
-
 const Operator* JSOperatorBuilder::CreateCatchContext(
-    const Handle<String>& name) {
-  return new (zone()) Operator1<Handle<String>>(                 // --
+    const Handle<String>& name, const Handle<ScopeInfo>& scope_info) {
+  CreateCatchContextParameters parameters(name, scope_info);
+  return new (zone()) Operator1<CreateCatchContextParameters>(
       IrOpcode::kJSCreateCatchContext, Operator::kNoProperties,  // opcode
       "JSCreateCatchContext",                                    // name
       2, 1, 1, 1, 1, 2,                                          // counts
-      name);                                                     // parameter
+      parameters);                                               // parameter
 }
 
+const Operator* JSOperatorBuilder::CreateWithContext(
+    const Handle<ScopeInfo>& scope_info) {
+  return new (zone()) Operator1<Handle<ScopeInfo>>(
+      IrOpcode::kJSCreateWithContext, Operator::kNoProperties,  // opcode
+      "JSCreateWithContext",                                    // name
+      2, 1, 1, 1, 1, 2,                                         // counts
+      scope_info);                                              // parameter
+}
 
 const Operator* JSOperatorBuilder::CreateBlockContext(
     const Handle<ScopeInfo>& scpope_info) {
diff --git a/src/compiler/js-operator.h b/src/compiler/js-operator.h
index 19022fa..2374ae6 100644
--- a/src/compiler/js-operator.h
+++ b/src/compiler/js-operator.h
@@ -5,8 +5,8 @@
 #ifndef V8_COMPILER_JS_OPERATOR_H_
 #define V8_COMPILER_JS_OPERATOR_H_
 
-#include "src/compiler/type-hints.h"
 #include "src/runtime/runtime.h"
+#include "src/type-hints.h"
 
 namespace v8 {
 namespace internal {
@@ -55,14 +55,17 @@
 // used as a parameter by JSCallConstruct operators.
 class CallConstructParameters final {
  public:
-  CallConstructParameters(size_t arity, VectorSlotPair const& feedback)
-      : arity_(arity), feedback_(feedback) {}
+  CallConstructParameters(uint32_t arity, float frequency,
+                          VectorSlotPair const& feedback)
+      : arity_(arity), frequency_(frequency), feedback_(feedback) {}
 
-  size_t arity() const { return arity_; }
+  uint32_t arity() const { return arity_; }
+  float frequency() const { return frequency_; }
   VectorSlotPair const& feedback() const { return feedback_; }
 
  private:
-  size_t const arity_;
+  uint32_t const arity_;
+  float const frequency_;
   VectorSlotPair const feedback_;
 };
 
@@ -80,15 +83,18 @@
 // used as a parameter by JSCallFunction operators.
 class CallFunctionParameters final {
  public:
-  CallFunctionParameters(size_t arity, VectorSlotPair const& feedback,
+  CallFunctionParameters(size_t arity, float frequency,
+                         VectorSlotPair const& feedback,
                          TailCallMode tail_call_mode,
                          ConvertReceiverMode convert_mode)
       : bit_field_(ArityField::encode(arity) |
                    ConvertReceiverModeField::encode(convert_mode) |
                    TailCallModeField::encode(tail_call_mode)),
+        frequency_(frequency),
         feedback_(feedback) {}
 
   size_t arity() const { return ArityField::decode(bit_field_); }
+  float frequency() const { return frequency_; }
   ConvertReceiverMode convert_mode() const {
     return ConvertReceiverModeField::decode(bit_field_);
   }
@@ -99,6 +105,7 @@
 
   bool operator==(CallFunctionParameters const& that) const {
     return this->bit_field_ == that.bit_field_ &&
+           this->frequency_ == that.frequency_ &&
            this->feedback_ == that.feedback_;
   }
   bool operator!=(CallFunctionParameters const& that) const {
@@ -107,15 +114,16 @@
 
  private:
   friend size_t hash_value(CallFunctionParameters const& p) {
-    return base::hash_combine(p.bit_field_, p.feedback_);
+    return base::hash_combine(p.bit_field_, p.frequency_, p.feedback_);
   }
 
   typedef BitField<size_t, 0, 29> ArityField;
   typedef BitField<ConvertReceiverMode, 29, 2> ConvertReceiverModeField;
   typedef BitField<TailCallMode, 31, 1> TailCallModeField;
 
-  const uint32_t bit_field_;
-  const VectorSlotPair feedback_;
+  uint32_t const bit_field_;
+  float const frequency_;
+  VectorSlotPair const feedback_;
 };
 
 size_t hash_value(CallFunctionParameters const&);
@@ -178,6 +186,33 @@
 
 ContextAccess const& ContextAccessOf(Operator const*);
 
+// Defines the name and ScopeInfo for a new catch context. This is used as a
+// parameter by the JSCreateCatchContext operator.
+class CreateCatchContextParameters final {
+ public:
+  CreateCatchContextParameters(Handle<String> catch_name,
+                               Handle<ScopeInfo> scope_info);
+
+  Handle<String> catch_name() const { return catch_name_; }
+  Handle<ScopeInfo> scope_info() const { return scope_info_; }
+
+ private:
+  Handle<String> const catch_name_;
+  Handle<ScopeInfo> const scope_info_;
+};
+
+bool operator==(CreateCatchContextParameters const& lhs,
+                CreateCatchContextParameters const& rhs);
+bool operator!=(CreateCatchContextParameters const& lhs,
+                CreateCatchContextParameters const& rhs);
+
+size_t hash_value(CreateCatchContextParameters const& parameters);
+
+std::ostream& operator<<(std::ostream& os,
+                         CreateCatchContextParameters const& parameters);
+
+CreateCatchContextParameters const& CreateCatchContextParametersOf(
+    Operator const*);
 
 // Defines the property of an object for a named access. This is
 // used as a parameter by the JSLoadNamed and JSStoreNamed operators.
@@ -374,9 +409,9 @@
 
 const CreateLiteralParameters& CreateLiteralParametersOf(const Operator* op);
 
-const BinaryOperationHint BinaryOperationHintOf(const Operator* op);
+BinaryOperationHint BinaryOperationHintOf(const Operator* op);
 
-const CompareOperationHint CompareOperationHintOf(const Operator* op);
+CompareOperationHint CompareOperationHintOf(const Operator* op);
 
 // Interface for building JavaScript-level operators, e.g. directly from the
 // AST. Most operators have no parameters, thus can be globally shared for all
@@ -430,13 +465,15 @@
                                       int literal_flags, int literal_index);
 
   const Operator* CallFunction(
-      size_t arity, VectorSlotPair const& feedback = VectorSlotPair(),
+      size_t arity, float frequency = 0.0f,
+      VectorSlotPair const& feedback = VectorSlotPair(),
       ConvertReceiverMode convert_mode = ConvertReceiverMode::kAny,
       TailCallMode tail_call_mode = TailCallMode::kDisallow);
   const Operator* CallRuntime(Runtime::FunctionId id);
   const Operator* CallRuntime(Runtime::FunctionId id, size_t arity);
   const Operator* CallRuntime(const Runtime::Function* function, size_t arity);
-  const Operator* CallConstruct(size_t arity, VectorSlotPair const& feedback);
+  const Operator* CallConstruct(uint32_t arity, float frequency,
+                                VectorSlotPair const& feedback);
 
   const Operator* ConvertReceiver(ConvertReceiverMode convert_mode);
 
@@ -464,11 +501,10 @@
 
   const Operator* TypeOf();
   const Operator* InstanceOf();
+  const Operator* OrdinaryHasInstance();
 
-  const Operator* ForInDone();
   const Operator* ForInNext();
   const Operator* ForInPrepare();
-  const Operator* ForInStep();
 
   const Operator* LoadMessage();
   const Operator* StoreMessage();
@@ -483,8 +519,9 @@
   const Operator* StackCheck();
 
   const Operator* CreateFunctionContext(int slot_count);
-  const Operator* CreateCatchContext(const Handle<String>& name);
-  const Operator* CreateWithContext();
+  const Operator* CreateCatchContext(const Handle<String>& name,
+                                     const Handle<ScopeInfo>& scope_info);
+  const Operator* CreateWithContext(const Handle<ScopeInfo>& scope_info);
   const Operator* CreateBlockContext(const Handle<ScopeInfo>& scpope_info);
   const Operator* CreateModuleContext();
   const Operator* CreateScriptContext(const Handle<ScopeInfo>& scpope_info);
diff --git a/src/compiler/js-typed-lowering.cc b/src/compiler/js-typed-lowering.cc
index 89ab0de..82df4ed 100644
--- a/src/compiler/js-typed-lowering.cc
+++ b/src/compiler/js-typed-lowering.cc
@@ -13,8 +13,8 @@
 #include "src/compiler/node-matchers.h"
 #include "src/compiler/node-properties.h"
 #include "src/compiler/operator-properties.h"
-#include "src/type-cache.h"
-#include "src/types.h"
+#include "src/compiler/type-cache.h"
+#include "src/compiler/types.h"
 
 namespace v8 {
 namespace internal {
@@ -46,6 +46,7 @@
           return true;
         case BinaryOperationHint::kAny:
         case BinaryOperationHint::kNone:
+        case BinaryOperationHint::kString:
           break;
       }
     }
@@ -73,6 +74,37 @@
     return false;
   }
 
+  // Check if a string addition will definitely result in creating a ConsString,
+  // i.e. if the combined length of the resulting string exceeds the ConsString
+  // minimum length.
+  bool ShouldCreateConsString() {
+    DCHECK_EQ(IrOpcode::kJSAdd, node_->opcode());
+    if (BothInputsAre(Type::String()) ||
+        ((lowering_->flags() & JSTypedLowering::kDeoptimizationEnabled) &&
+         BinaryOperationHintOf(node_->op()) == BinaryOperationHint::kString)) {
+      if (right_type()->IsConstant() &&
+          right_type()->AsConstant()->Value()->IsString()) {
+        Handle<String> right_string =
+            Handle<String>::cast(right_type()->AsConstant()->Value());
+        if (right_string->length() >= ConsString::kMinLength) return true;
+      }
+      if (left_type()->IsConstant() &&
+          left_type()->AsConstant()->Value()->IsString()) {
+        Handle<String> left_string =
+            Handle<String>::cast(left_type()->AsConstant()->Value());
+        if (left_string->length() >= ConsString::kMinLength) {
+          // The invariant for ConsString requires the left hand side to be
+          // a sequential or external string if the right hand side is the
+          // empty string. Since we don't know anything about the right hand
+          // side here, we must ensure that the left hand side satisfy the
+          // constraints independent of the right hand side.
+          return left_string->IsSeqString() || left_string->IsExternalString();
+        }
+      }
+    }
+    return false;
+  }
+
   void ConvertInputsToNumber() {
     // To convert the inputs to numbers, we have to provide frame states
     // for lazy bailouts in the ToNumber conversions.
@@ -430,8 +462,6 @@
       dependencies_(dependencies),
       flags_(flags),
       jsgraph_(jsgraph),
-      true_type_(Type::Constant(factory()->true_value(), graph()->zone())),
-      false_type_(Type::Constant(factory()->false_value(), graph()->zone())),
       the_hole_type_(
           Type::Constant(factory()->the_hole_value(), graph()->zone())),
       type_cache_(TypeCache::Get()) {
@@ -469,6 +499,9 @@
     return r.ChangeToPureOperator(simplified()->NumberAdd(), Type::Number());
   }
   if (r.OneInputIs(Type::String())) {
+    if (r.ShouldCreateConsString()) {
+      return ReduceCreateConsString(node);
+    }
     StringAddFlags flags = STRING_ADD_CHECK_NONE;
     if (!r.LeftInputIs(Type::String())) {
       flags = STRING_ADD_CONVERT_LEFT;
@@ -546,6 +579,123 @@
   return NoChange();
 }
 
+Reduction JSTypedLowering::ReduceCreateConsString(Node* node) {
+  Node* first = NodeProperties::GetValueInput(node, 0);
+  Node* second = NodeProperties::GetValueInput(node, 1);
+  Node* context = NodeProperties::GetContextInput(node);
+  Node* frame_state = NodeProperties::GetFrameStateInput(node);
+  Node* effect = NodeProperties::GetEffectInput(node);
+  Node* control = NodeProperties::GetControlInput(node);
+
+  // Make sure {first} is actually a String.
+  Type* first_type = NodeProperties::GetType(first);
+  if (!first_type->Is(Type::String())) {
+    first = effect =
+        graph()->NewNode(simplified()->CheckString(), first, effect, control);
+    first_type = NodeProperties::GetType(first);
+  }
+
+  // Make sure {second} is actually a String.
+  Type* second_type = NodeProperties::GetType(second);
+  if (!second_type->Is(Type::String())) {
+    second = effect =
+        graph()->NewNode(simplified()->CheckString(), second, effect, control);
+    second_type = NodeProperties::GetType(second);
+  }
+
+  // Determine the {first} length.
+  Node* first_length =
+      first_type->IsConstant()
+          ? jsgraph()->Constant(
+                Handle<String>::cast(first_type->AsConstant()->Value())
+                    ->length())
+          : effect = graph()->NewNode(
+                simplified()->LoadField(AccessBuilder::ForStringLength()),
+                first, effect, control);
+
+  // Determine the {second} length.
+  Node* second_length =
+      second_type->IsConstant()
+          ? jsgraph()->Constant(
+                Handle<String>::cast(second_type->AsConstant()->Value())
+                    ->length())
+          : effect = graph()->NewNode(
+                simplified()->LoadField(AccessBuilder::ForStringLength()),
+                second, effect, control);
+
+  // Compute the resulting length.
+  Node* length =
+      graph()->NewNode(simplified()->NumberAdd(), first_length, second_length);
+
+  // Check if we would overflow the allowed maximum string length.
+  Node* check = graph()->NewNode(simplified()->NumberLessThanOrEqual(), length,
+                                 jsgraph()->Constant(String::kMaxLength));
+  Node* branch =
+      graph()->NewNode(common()->Branch(BranchHint::kTrue), check, control);
+  Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
+  Node* efalse = effect;
+  {
+    // Throw a RangeError in case of overflow.
+    Node* vfalse = efalse = graph()->NewNode(
+        javascript()->CallRuntime(Runtime::kThrowInvalidStringLength), context,
+        frame_state, efalse, if_false);
+    if_false = graph()->NewNode(common()->IfSuccess(), vfalse);
+    if_false = graph()->NewNode(common()->Throw(), vfalse, efalse, if_false);
+    // TODO(bmeurer): This should be on the AdvancedReducer somehow.
+    NodeProperties::MergeControlToEnd(graph(), common(), if_false);
+    Revisit(graph()->end());
+
+    // Update potential {IfException} uses of {node} to point to the
+    // %ThrowInvalidStringLength runtime call node instead.
+    for (Edge edge : node->use_edges()) {
+      if (edge.from()->opcode() == IrOpcode::kIfException) {
+        DCHECK(NodeProperties::IsControlEdge(edge) ||
+               NodeProperties::IsEffectEdge(edge));
+        edge.UpdateTo(vfalse);
+        Revisit(edge.from());
+      }
+    }
+  }
+  control = graph()->NewNode(common()->IfTrue(), branch);
+
+  // Figure out the map for the resulting ConsString.
+  // TODO(turbofan): We currently just use the cons_string_map here for
+  // the sake of simplicity; we could also try to be smarter here and
+  // use the one_byte_cons_string_map instead when the resulting ConsString
+  // contains only one byte characters.
+  Node* value_map = jsgraph()->HeapConstant(factory()->cons_string_map());
+
+  // Allocate the resulting ConsString.
+  effect = graph()->NewNode(
+      common()->BeginRegion(RegionObservability::kNotObservable), effect);
+  Node* value = effect =
+      graph()->NewNode(simplified()->Allocate(NOT_TENURED),
+                       jsgraph()->Constant(ConsString::kSize), effect, control);
+  NodeProperties::SetType(value, Type::OtherString());
+  effect = graph()->NewNode(simplified()->StoreField(AccessBuilder::ForMap()),
+                            value, value_map, effect, control);
+  effect = graph()->NewNode(
+      simplified()->StoreField(AccessBuilder::ForNameHashField()), value,
+      jsgraph()->Uint32Constant(Name::kEmptyHashField), effect, control);
+  effect = graph()->NewNode(
+      simplified()->StoreField(AccessBuilder::ForStringLength()), value, length,
+      effect, control);
+  effect = graph()->NewNode(
+      simplified()->StoreField(AccessBuilder::ForConsStringFirst()), value,
+      first, effect, control);
+  effect = graph()->NewNode(
+      simplified()->StoreField(AccessBuilder::ForConsStringSecond()), value,
+      second, effect, control);
+
+  // Morph the {node} into a {FinishRegion}.
+  ReplaceWithValue(node, node, node, control);
+  node->ReplaceInput(0, value);
+  node->ReplaceInput(1, effect);
+  node->TrimInputCount(2);
+  NodeProperties::ChangeOp(node, common()->FinishRegion());
+  return Changed(node);
+}
+
 Reduction JSTypedLowering::ReduceJSComparison(Node* node) {
   JSBinopReduction r(this, node);
   if (r.BothInputsAre(Type::String())) {
@@ -779,22 +929,10 @@
     NodeProperties::ChangeOp(node, simplified()->BooleanNot());
     return Changed(node);
   } else if (input_type->Is(Type::Number())) {
-    // JSToBoolean(x:number) => NumberLessThan(#0,NumberAbs(x))
+    // JSToBoolean(x:number) => NumberToBoolean(x)
     RelaxEffectsAndControls(node);
-    node->ReplaceInput(0, jsgraph()->ZeroConstant());
-    node->ReplaceInput(1, graph()->NewNode(simplified()->NumberAbs(), input));
-    node->TrimInputCount(2);
-    NodeProperties::ChangeOp(node, simplified()->NumberLessThan());
-    return Changed(node);
-  } else if (input_type->Is(Type::String())) {
-    // JSToBoolean(x:string) => NumberLessThan(#0,x.length)
-    FieldAccess const access = AccessBuilder::ForStringLength();
-    Node* length = graph()->NewNode(simplified()->LoadField(access), input,
-                                    graph()->start(), graph()->start());
-    ReplaceWithValue(node, node, length);
-    node->ReplaceInput(0, jsgraph()->ZeroConstant());
-    node->ReplaceInput(1, length);
-    NodeProperties::ChangeOp(node, simplified()->NumberLessThan());
+    node->TrimInputCount(1);
+    NodeProperties::ChangeOp(node, simplified()->NumberToBoolean());
     return Changed(node);
   }
   return NoChange();
@@ -821,23 +959,12 @@
       input = jsgraph()->Constant(kMaxSafeInteger);
     } else {
       if (input_type->Min() <= 0.0) {
-        input = graph()->NewNode(
-            common()->Select(MachineRepresentation::kTagged),
-            graph()->NewNode(simplified()->NumberLessThanOrEqual(), input,
-                             jsgraph()->ZeroConstant()),
-            jsgraph()->ZeroConstant(), input);
-        input_type = Type::Range(0.0, input_type->Max(), graph()->zone());
-        NodeProperties::SetType(input, input_type);
+        input = graph()->NewNode(simplified()->NumberMax(),
+                                 jsgraph()->ZeroConstant(), input);
       }
       if (input_type->Max() > kMaxSafeInteger) {
-        input = graph()->NewNode(
-            common()->Select(MachineRepresentation::kTagged),
-            graph()->NewNode(simplified()->NumberLessThanOrEqual(),
-                             jsgraph()->Constant(kMaxSafeInteger), input),
-            jsgraph()->Constant(kMaxSafeInteger), input);
-        input_type =
-            Type::Range(input_type->Min(), kMaxSafeInteger, graph()->zone());
-        NodeProperties::SetType(input, input_type);
+        input = graph()->NewNode(simplified()->NumberMin(),
+                                 jsgraph()->Constant(kMaxSafeInteger), input);
       }
     }
     ReplaceWithValue(node, input);
@@ -1132,169 +1259,162 @@
   return NoChange();
 }
 
-Reduction JSTypedLowering::ReduceJSInstanceOf(Node* node) {
-  DCHECK_EQ(IrOpcode::kJSInstanceOf, node->opcode());
-  Node* const context = NodeProperties::GetContextInput(node);
-  Node* const frame_state = NodeProperties::GetFrameStateInput(node);
+Reduction JSTypedLowering::ReduceJSOrdinaryHasInstance(Node* node) {
+  DCHECK_EQ(IrOpcode::kJSOrdinaryHasInstance, node->opcode());
+  Node* constructor = NodeProperties::GetValueInput(node, 0);
+  Type* constructor_type = NodeProperties::GetType(constructor);
+  Node* object = NodeProperties::GetValueInput(node, 1);
+  Node* context = NodeProperties::GetContextInput(node);
+  Node* frame_state = NodeProperties::GetFrameStateInput(node);
+  Node* effect = NodeProperties::GetEffectInput(node);
+  Node* control = NodeProperties::GetControlInput(node);
 
-  // If deoptimization is disabled, we cannot optimize.
-  if (!(flags() & kDeoptimizationEnabled)) return NoChange();
-
-  // If we are in a try block, don't optimize since the runtime call
-  // in the proxy case can throw.
-  if (NodeProperties::IsExceptionalCall(node)) return NoChange();
-
-  JSBinopReduction r(this, node);
-  Node* effect = r.effect();
-  Node* control = r.control();
-
-  if (!r.right_type()->IsConstant() ||
-      !r.right_type()->AsConstant()->Value()->IsJSFunction()) {
+  // Check if the {constructor} is a (known) JSFunction.
+  if (!constructor_type->IsConstant() ||
+      !constructor_type->AsConstant()->Value()->IsJSFunction()) {
     return NoChange();
   }
-
   Handle<JSFunction> function =
-      Handle<JSFunction>::cast(r.right_type()->AsConstant()->Value());
-  Handle<SharedFunctionInfo> shared(function->shared(), isolate());
+      Handle<JSFunction>::cast(constructor_type->AsConstant()->Value());
 
-  // Make sure the prototype of {function} is the %FunctionPrototype%, and it
-  // already has a meaningful initial map (i.e. we constructed at least one
-  // instance using the constructor {function}).
-  if (function->map()->prototype() != function->native_context()->closure() ||
-      function->map()->has_non_instance_prototype() ||
-      !function->has_initial_map()) {
-    return NoChange();
-  }
+  // Check if the {function} already has an initial map (i.e. the
+  // {function} has been used as a constructor at least once).
+  if (!function->has_initial_map()) return NoChange();
 
-  // We can only use the fast case if @@hasInstance was not used so far.
-  if (!isolate()->IsHasInstanceLookupChainIntact()) return NoChange();
-  dependencies()->AssumePropertyCell(factory()->has_instance_protector());
+  // Check if the {function}s "prototype" is a JSReceiver.
+  if (!function->prototype()->IsJSReceiver()) return NoChange();
 
+  // Install a code dependency on the {function}s initial map.
   Handle<Map> initial_map(function->initial_map(), isolate());
   dependencies()->AssumeInitialMapCantChange(initial_map);
+
   Node* prototype =
       jsgraph()->Constant(handle(initial_map->prototype(), isolate()));
 
-  // If the left hand side is an object, no smi check is needed.
-  Node* is_smi = graph()->NewNode(simplified()->ObjectIsSmi(), r.left());
-  Node* branch_is_smi =
-      graph()->NewNode(common()->Branch(BranchHint::kFalse), is_smi, control);
-  Node* if_is_smi = graph()->NewNode(common()->IfTrue(), branch_is_smi);
-  Node* e_is_smi = effect;
-  control = graph()->NewNode(common()->IfFalse(), branch_is_smi);
+  Node* check0 = graph()->NewNode(simplified()->ObjectIsSmi(), object);
+  Node* branch0 =
+      graph()->NewNode(common()->Branch(BranchHint::kFalse), check0, control);
 
-  Node* object_map = effect =
-      graph()->NewNode(simplified()->LoadField(AccessBuilder::ForMap()),
-                       r.left(), effect, control);
+  Node* if_true0 = graph()->NewNode(common()->IfTrue(), branch0);
+  Node* etrue0 = effect;
+  Node* vtrue0 = jsgraph()->FalseConstant();
+
+  control = graph()->NewNode(common()->IfFalse(), branch0);
 
   // Loop through the {object}s prototype chain looking for the {prototype}.
   Node* loop = control = graph()->NewNode(common()->Loop(2), control, control);
-
-  Node* loop_effect = effect =
+  Node* eloop = effect =
       graph()->NewNode(common()->EffectPhi(2), effect, effect, loop);
+  Node* vloop = object = graph()->NewNode(
+      common()->Phi(MachineRepresentation::kTagged, 2), object, object, loop);
+  // TODO(jarin): This is a very ugly hack to work-around the super-smart
+  // implicit typing of the Phi, which goes completely nuts if the {object}
+  // is for example a HeapConstant.
+  NodeProperties::SetType(vloop, Type::NonInternal());
 
-  Node* loop_object_map =
-      graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
-                       object_map, r.left(), loop);
+  // Load the {object} map and instance type.
+  Node* object_map = effect =
+      graph()->NewNode(simplified()->LoadField(AccessBuilder::ForMap()), object,
+                       effect, control);
+  Node* object_instance_type = effect = graph()->NewNode(
+      simplified()->LoadField(AccessBuilder::ForMapInstanceType()), object_map,
+      effect, control);
 
-  // Check if the lhs needs access checks.
-  Node* map_bit_field = effect =
-      graph()->NewNode(simplified()->LoadField(AccessBuilder::ForMapBitField()),
-                       loop_object_map, loop_effect, control);
-  int is_access_check_needed_bit = 1 << Map::kIsAccessCheckNeeded;
-  Node* is_access_check_needed_num =
-      graph()->NewNode(simplified()->NumberBitwiseAnd(), map_bit_field,
-                       jsgraph()->Constant(is_access_check_needed_bit));
-  Node* is_access_check_needed =
-      graph()->NewNode(simplified()->NumberEqual(), is_access_check_needed_num,
-                       jsgraph()->Constant(is_access_check_needed_bit));
+  // Check if the {object} is a special receiver, because for special
+  // receivers, i.e. proxies or API objects that need access checks,
+  // we have to use the %HasInPrototypeChain runtime function instead.
+  Node* check1 = graph()->NewNode(
+      simplified()->NumberLessThanOrEqual(), object_instance_type,
+      jsgraph()->Constant(LAST_SPECIAL_RECEIVER_TYPE));
+  Node* branch1 =
+      graph()->NewNode(common()->Branch(BranchHint::kFalse), check1, control);
 
-  Node* branch_is_access_check_needed = graph()->NewNode(
-      common()->Branch(BranchHint::kFalse), is_access_check_needed, control);
-  Node* if_is_access_check_needed =
-      graph()->NewNode(common()->IfTrue(), branch_is_access_check_needed);
-  Node* e_is_access_check_needed = effect;
+  control = graph()->NewNode(common()->IfFalse(), branch1);
 
-  control =
-      graph()->NewNode(common()->IfFalse(), branch_is_access_check_needed);
+  Node* if_true1 = graph()->NewNode(common()->IfTrue(), branch1);
+  Node* etrue1 = effect;
+  Node* vtrue1;
 
-  // Check if the lhs is a proxy.
-  Node* map_instance_type = effect = graph()->NewNode(
-      simplified()->LoadField(AccessBuilder::ForMapInstanceType()),
-      loop_object_map, loop_effect, control);
-  Node* is_proxy =
-      graph()->NewNode(simplified()->NumberEqual(), map_instance_type,
-                       jsgraph()->Constant(JS_PROXY_TYPE));
-  Node* branch_is_proxy =
-      graph()->NewNode(common()->Branch(BranchHint::kFalse), is_proxy, control);
-  Node* if_is_proxy = graph()->NewNode(common()->IfTrue(), branch_is_proxy);
-  Node* e_is_proxy = effect;
+  // Check if the {object} is not a receiver at all.
+  Node* check10 =
+      graph()->NewNode(simplified()->NumberLessThan(), object_instance_type,
+                       jsgraph()->Constant(FIRST_JS_RECEIVER_TYPE));
+  Node* branch10 =
+      graph()->NewNode(common()->Branch(BranchHint::kTrue), check10, if_true1);
 
-  control = graph()->NewNode(common()->Merge(2), if_is_access_check_needed,
-                             if_is_proxy);
-  effect = graph()->NewNode(common()->EffectPhi(2), e_is_access_check_needed,
-                            e_is_proxy, control);
+  // A primitive value cannot match the {prototype} we're looking for.
+  if_true1 = graph()->NewNode(common()->IfTrue(), branch10);
+  vtrue1 = jsgraph()->FalseConstant();
 
-  // If we need an access check or the object is a Proxy, make a runtime call
-  // to finish the lowering.
-  Node* runtimecall = graph()->NewNode(
-      javascript()->CallRuntime(Runtime::kHasInPrototypeChain), r.left(),
-      prototype, context, frame_state, effect, control);
+  Node* if_false1 = graph()->NewNode(common()->IfFalse(), branch10);
+  Node* efalse1 = etrue1;
+  Node* vfalse1;
+  {
+    // Slow path, need to call the %HasInPrototypeChain runtime function.
+    vfalse1 = efalse1 = graph()->NewNode(
+        javascript()->CallRuntime(Runtime::kHasInPrototypeChain), object,
+        prototype, context, frame_state, efalse1, if_false1);
+    if_false1 = graph()->NewNode(common()->IfSuccess(), vfalse1);
 
-  Node* runtimecall_control =
-      graph()->NewNode(common()->IfSuccess(), runtimecall);
+    // Replace any potential IfException on {node} to catch exceptions
+    // from this %HasInPrototypeChain runtime call instead.
+    for (Edge edge : node->use_edges()) {
+      if (edge.from()->opcode() == IrOpcode::kIfException) {
+        edge.UpdateTo(vfalse1);
+        Revisit(edge.from());
+      }
+    }
+  }
 
-  control = graph()->NewNode(common()->IfFalse(), branch_is_proxy);
-
+  // Load the {object} prototype.
   Node* object_prototype = effect = graph()->NewNode(
-      simplified()->LoadField(AccessBuilder::ForMapPrototype()),
-      loop_object_map, loop_effect, control);
+      simplified()->LoadField(AccessBuilder::ForMapPrototype()), object_map,
+      effect, control);
 
-  // If not, check if object prototype is the null prototype.
-  Node* null_proto =
-      graph()->NewNode(simplified()->ReferenceEqual(), object_prototype,
-                       jsgraph()->NullConstant());
-  Node* branch_null_proto = graph()->NewNode(
-      common()->Branch(BranchHint::kFalse), null_proto, control);
-  Node* if_null_proto = graph()->NewNode(common()->IfTrue(), branch_null_proto);
-  Node* e_null_proto = effect;
+  // Check if we reached the end of {object}s prototype chain.
+  Node* check2 = graph()->NewNode(simplified()->ReferenceEqual(),
+                                  object_prototype, jsgraph()->NullConstant());
+  Node* branch2 = graph()->NewNode(common()->Branch(), check2, control);
 
-  control = graph()->NewNode(common()->IfFalse(), branch_null_proto);
+  Node* if_true2 = graph()->NewNode(common()->IfTrue(), branch2);
+  Node* etrue2 = effect;
+  Node* vtrue2 = jsgraph()->FalseConstant();
 
-  // Check if object prototype is equal to function prototype.
-  Node* eq_proto = graph()->NewNode(simplified()->ReferenceEqual(),
-                                    object_prototype, prototype);
-  Node* branch_eq_proto =
-      graph()->NewNode(common()->Branch(BranchHint::kFalse), eq_proto, control);
-  Node* if_eq_proto = graph()->NewNode(common()->IfTrue(), branch_eq_proto);
-  Node* e_eq_proto = effect;
+  control = graph()->NewNode(common()->IfFalse(), branch2);
 
-  control = graph()->NewNode(common()->IfFalse(), branch_eq_proto);
+  // Check if we reached the {prototype}.
+  Node* check3 = graph()->NewNode(simplified()->ReferenceEqual(),
+                                  object_prototype, prototype);
+  Node* branch3 = graph()->NewNode(common()->Branch(), check3, control);
 
-  Node* load_object_map = effect =
-      graph()->NewNode(simplified()->LoadField(AccessBuilder::ForMap()),
-                       object_prototype, effect, control);
+  Node* if_true3 = graph()->NewNode(common()->IfTrue(), branch3);
+  Node* etrue3 = effect;
+  Node* vtrue3 = jsgraph()->TrueConstant();
+
+  control = graph()->NewNode(common()->IfFalse(), branch3);
+
   // Close the loop.
-  loop_effect->ReplaceInput(1, effect);
-  loop_object_map->ReplaceInput(1, load_object_map);
+  vloop->ReplaceInput(1, object_prototype);
+  eloop->ReplaceInput(1, effect);
   loop->ReplaceInput(1, control);
 
-  control = graph()->NewNode(common()->Merge(3), runtimecall_control,
-                             if_eq_proto, if_null_proto);
-  effect = graph()->NewNode(common()->EffectPhi(3), runtimecall, e_eq_proto,
-                            e_null_proto, control);
+  control = graph()->NewNode(common()->Merge(5), if_true0, if_true1, if_true2,
+                             if_true3, if_false1);
+  effect = graph()->NewNode(common()->EffectPhi(5), etrue0, etrue1, etrue2,
+                            etrue3, efalse1, control);
 
-  Node* result = graph()->NewNode(
-      common()->Phi(MachineRepresentation::kTagged, 3), runtimecall,
-      jsgraph()->TrueConstant(), jsgraph()->FalseConstant(), control);
-
-  control = graph()->NewNode(common()->Merge(2), if_is_smi, control);
-  effect = graph()->NewNode(common()->EffectPhi(2), e_is_smi, effect, control);
-  result = graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
-                            jsgraph()->FalseConstant(), result, control);
-
-  ReplaceWithValue(node, result, effect, control);
-  return Changed(result);
+  // Morph the {node} into an appropriate Phi.
+  ReplaceWithValue(node, node, effect, control);
+  node->ReplaceInput(0, vtrue0);
+  node->ReplaceInput(1, vtrue1);
+  node->ReplaceInput(2, vtrue2);
+  node->ReplaceInput(3, vtrue3);
+  node->ReplaceInput(4, vfalse1);
+  node->ReplaceInput(5, control);
+  node->TrimInputCount(6);
+  NodeProperties::ChangeOp(node,
+                           common()->Phi(MachineRepresentation::kTagged, 5));
+  return Changed(node);
 }
 
 Reduction JSTypedLowering::ReduceJSLoadContext(Node* node) {
@@ -1546,16 +1666,18 @@
   const int argc = arity + BuiltinArguments::kNumExtraArgsWithReceiver;
   Node* argc_node = jsgraph->Int32Constant(argc);
 
-  node->InsertInput(zone, arity + 2, argc_node);
-  node->InsertInput(zone, arity + 3, target);
-  node->InsertInput(zone, arity + 4, new_target);
+  static const int kStubAndReceiver = 2;
+  int cursor = arity + kStubAndReceiver;
+  node->InsertInput(zone, cursor++, argc_node);
+  node->InsertInput(zone, cursor++, target);
+  node->InsertInput(zone, cursor++, new_target);
 
   Address entry = Builtins::CppEntryOf(builtin_index);
   ExternalReference entry_ref(ExternalReference(entry, isolate));
   Node* entry_node = jsgraph->ExternalConstant(entry_ref);
 
-  node->InsertInput(zone, arity + 5, entry_node);
-  node->InsertInput(zone, arity + 6, argc_node);
+  node->InsertInput(zone, cursor++, entry_node);
+  node->InsertInput(zone, cursor++, argc_node);
 
   static const int kReturnCount = 1;
   const char* debug_name = Builtins::name(builtin_index);
@@ -1566,6 +1688,12 @@
   NodeProperties::ChangeOp(node, jsgraph->common()->Call(desc));
 }
 
+bool NeedsArgumentAdaptorFrame(Handle<SharedFunctionInfo> shared, int arity) {
+  static const int sentinel = SharedFunctionInfo::kDontAdaptArgumentsSentinel;
+  const int num_decl_parms = shared->internal_formal_parameter_count();
+  return (num_decl_parms != arity && num_decl_parms != sentinel);
+}
+
 }  // namespace
 
 Reduction JSTypedLowering::ReduceJSCallConstruct(Node* node) {
@@ -1591,9 +1719,7 @@
     CallDescriptor::Flags flags = CallDescriptor::kNeedsFrameState;
 
     if (is_builtin && Builtins::HasCppImplementation(builtin_index) &&
-        (shared->internal_formal_parameter_count() == arity ||
-         shared->internal_formal_parameter_count() ==
-             SharedFunctionInfo::kDontAdaptArgumentsSentinel)) {
+        !NeedsArgumentAdaptorFrame(shared, arity)) {
       // Patch {node} to a direct CEntryStub call.
 
       // Load the context from the {target}.
@@ -1705,22 +1831,7 @@
 
     Node* new_target = jsgraph()->UndefinedConstant();
     Node* argument_count = jsgraph()->Int32Constant(arity);
-    if (is_builtin && Builtins::HasCppImplementation(builtin_index) &&
-        (shared->internal_formal_parameter_count() == arity ||
-         shared->internal_formal_parameter_count() ==
-             SharedFunctionInfo::kDontAdaptArgumentsSentinel)) {
-      // Patch {node} to a direct CEntryStub call.
-      ReduceBuiltin(isolate(), jsgraph(), node, builtin_index, arity, flags);
-    } else if (shared->internal_formal_parameter_count() == arity ||
-               shared->internal_formal_parameter_count() ==
-                   SharedFunctionInfo::kDontAdaptArgumentsSentinel) {
-      // Patch {node} to a direct call.
-      node->InsertInput(graph()->zone(), arity + 2, new_target);
-      node->InsertInput(graph()->zone(), arity + 3, argument_count);
-      NodeProperties::ChangeOp(node,
-                               common()->Call(Linkage::GetJSCallDescriptor(
-                                   graph()->zone(), false, 1 + arity, flags)));
-    } else {
+    if (NeedsArgumentAdaptorFrame(shared, arity)) {
       // Patch {node} to an indirect call via the ArgumentsAdaptorTrampoline.
       Callable callable = CodeFactory::ArgumentAdaptor(isolate());
       node->InsertInput(graph()->zone(), 0,
@@ -1734,6 +1845,16 @@
           node, common()->Call(Linkage::GetStubCallDescriptor(
                     isolate(), graph()->zone(), callable.descriptor(),
                     1 + arity, flags)));
+    } else if (is_builtin && Builtins::HasCppImplementation(builtin_index)) {
+      // Patch {node} to a direct CEntryStub call.
+      ReduceBuiltin(isolate(), jsgraph(), node, builtin_index, arity, flags);
+    } else {
+      // Patch {node} to a direct call.
+      node->InsertInput(graph()->zone(), arity + 2, new_target);
+      node->InsertInput(graph()->zone(), arity + 3, argument_count);
+      NodeProperties::ChangeOp(node,
+                               common()->Call(Linkage::GetJSCallDescriptor(
+                                   graph()->zone(), false, 1 + arity, flags)));
     }
     return Changed(node);
   }
@@ -1761,8 +1882,8 @@
   // Maybe we did at least learn something about the {receiver}.
   if (p.convert_mode() != convert_mode) {
     NodeProperties::ChangeOp(
-        node, javascript()->CallFunction(p.arity(), p.feedback(), convert_mode,
-                                         p.tail_call_mode()));
+        node, javascript()->CallFunction(p.arity(), p.frequency(), p.feedback(),
+                                         convert_mode, p.tail_call_mode()));
     return Changed(node);
   }
 
@@ -1770,14 +1891,6 @@
 }
 
 
-Reduction JSTypedLowering::ReduceJSForInDone(Node* node) {
-  DCHECK_EQ(IrOpcode::kJSForInDone, node->opcode());
-  node->TrimInputCount(2);
-  NodeProperties::ChangeOp(node, machine()->Word32Equal());
-  return Changed(node);
-}
-
-
 Reduction JSTypedLowering::ReduceJSForInNext(Node* node) {
   DCHECK_EQ(IrOpcode::kJSForInNext, node->opcode());
   Node* receiver = NodeProperties::GetValueInput(node, 0);
@@ -1843,14 +1956,6 @@
   return Changed(node);
 }
 
-
-Reduction JSTypedLowering::ReduceJSForInStep(Node* node) {
-  DCHECK_EQ(IrOpcode::kJSForInStep, node->opcode());
-  node->ReplaceInput(1, jsgraph()->Int32Constant(1));
-  NodeProperties::ChangeOp(node, machine()->Int32Add());
-  return Changed(node);
-}
-
 Reduction JSTypedLowering::ReduceJSGeneratorStore(Node* node) {
   DCHECK_EQ(IrOpcode::kJSGeneratorStore, node->opcode());
   Node* generator = NodeProperties::GetValueInput(node, 0);
@@ -1930,174 +2035,7 @@
   return Changed(element);
 }
 
-Reduction JSTypedLowering::ReduceSelect(Node* node) {
-  DCHECK_EQ(IrOpcode::kSelect, node->opcode());
-  Node* const condition = NodeProperties::GetValueInput(node, 0);
-  Type* const condition_type = NodeProperties::GetType(condition);
-  Node* const vtrue = NodeProperties::GetValueInput(node, 1);
-  Type* const vtrue_type = NodeProperties::GetType(vtrue);
-  Node* const vfalse = NodeProperties::GetValueInput(node, 2);
-  Type* const vfalse_type = NodeProperties::GetType(vfalse);
-  if (condition_type->Is(true_type_)) {
-    // Select(condition:true, vtrue, vfalse) => vtrue
-    return Replace(vtrue);
-  }
-  if (condition_type->Is(false_type_)) {
-    // Select(condition:false, vtrue, vfalse) => vfalse
-    return Replace(vfalse);
-  }
-  if (vtrue_type->Is(true_type_) && vfalse_type->Is(false_type_)) {
-    // Select(condition, vtrue:true, vfalse:false) => condition
-    return Replace(condition);
-  }
-  if (vtrue_type->Is(false_type_) && vfalse_type->Is(true_type_)) {
-    // Select(condition, vtrue:false, vfalse:true) => BooleanNot(condition)
-    node->TrimInputCount(1);
-    NodeProperties::ChangeOp(node, simplified()->BooleanNot());
-    return Changed(node);
-  }
-  return NoChange();
-}
-
-namespace {
-
-MaybeHandle<Map> GetStableMapFromObjectType(Type* object_type) {
-  if (object_type->IsConstant() &&
-      object_type->AsConstant()->Value()->IsHeapObject()) {
-    Handle<Map> object_map(
-        Handle<HeapObject>::cast(object_type->AsConstant()->Value())->map());
-    if (object_map->is_stable()) return object_map;
-  } else if (object_type->IsClass()) {
-    Handle<Map> object_map = object_type->AsClass()->Map();
-    if (object_map->is_stable()) return object_map;
-  }
-  return MaybeHandle<Map>();
-}
-
-}  // namespace
-
-Reduction JSTypedLowering::ReduceCheckMaps(Node* node) {
-  // TODO(bmeurer): Find a better home for this thing!
-  // The CheckMaps(o, ...map...) can be eliminated if map is stable and
-  // either
-  //  (a) o has type Constant(object) and map == object->map, or
-  //  (b) o has type Class(map),
-  // and either
-  //  (1) map cannot transition further, or
-  //  (2) we can add a code dependency on the stability of map
-  //      (to guard the Constant type information).
-  Node* const object = NodeProperties::GetValueInput(node, 0);
-  Type* const object_type = NodeProperties::GetType(object);
-  Node* const effect = NodeProperties::GetEffectInput(node);
-  Handle<Map> object_map;
-  if (GetStableMapFromObjectType(object_type).ToHandle(&object_map)) {
-    for (int i = 1; i < node->op()->ValueInputCount(); ++i) {
-      Node* const map = NodeProperties::GetValueInput(node, i);
-      Type* const map_type = NodeProperties::GetType(map);
-      if (map_type->IsConstant() &&
-          map_type->AsConstant()->Value().is_identical_to(object_map)) {
-        if (object_map->CanTransition()) {
-          DCHECK(flags() & kDeoptimizationEnabled);
-          dependencies()->AssumeMapStable(object_map);
-        }
-        return Replace(effect);
-      }
-    }
-  }
-  return NoChange();
-}
-
-Reduction JSTypedLowering::ReduceCheckString(Node* node) {
-  // TODO(bmeurer): Find a better home for this thing!
-  Node* const input = NodeProperties::GetValueInput(node, 0);
-  Type* const input_type = NodeProperties::GetType(input);
-  if (input_type->Is(Type::String())) {
-    ReplaceWithValue(node, input);
-    return Replace(input);
-  }
-  return NoChange();
-}
-
-Reduction JSTypedLowering::ReduceLoadField(Node* node) {
-  // TODO(bmeurer): Find a better home for this thing!
-  Node* const object = NodeProperties::GetValueInput(node, 0);
-  Type* const object_type = NodeProperties::GetType(object);
-  FieldAccess const& access = FieldAccessOf(node->op());
-  if (access.base_is_tagged == kTaggedBase &&
-      access.offset == HeapObject::kMapOffset) {
-    // We can replace LoadField[Map](o) with map if is stable and either
-    //  (a) o has type Constant(object) and map == object->map, or
-    //  (b) o has type Class(map),
-    // and either
-    //  (1) map cannot transition further, or
-    //  (2) deoptimization is enabled and we can add a code dependency on the
-    //      stability of map (to guard the Constant type information).
-    Handle<Map> object_map;
-    if (GetStableMapFromObjectType(object_type).ToHandle(&object_map)) {
-      if (object_map->CanTransition()) {
-        if (flags() & kDeoptimizationEnabled) {
-          dependencies()->AssumeMapStable(object_map);
-        } else {
-          return NoChange();
-        }
-      }
-      Node* const value = jsgraph()->HeapConstant(object_map);
-      ReplaceWithValue(node, value);
-      return Replace(value);
-    }
-  }
-  return NoChange();
-}
-
-Reduction JSTypedLowering::ReduceNumberRoundop(Node* node) {
-  // TODO(bmeurer): Find a better home for this thing!
-  Node* const input = NodeProperties::GetValueInput(node, 0);
-  Type* const input_type = NodeProperties::GetType(input);
-  if (input_type->Is(type_cache_.kIntegerOrMinusZeroOrNaN)) {
-    return Replace(input);
-  }
-  return NoChange();
-}
-
 Reduction JSTypedLowering::Reduce(Node* node) {
-  // Check if the output type is a singleton.  In that case we already know the
-  // result value and can simply replace the node if it's eliminable.
-  if (!NodeProperties::IsConstant(node) && NodeProperties::IsTyped(node) &&
-      node->op()->HasProperty(Operator::kEliminatable)) {
-    // We can only constant-fold nodes here, that are known to not cause any
-    // side-effect, may it be a JavaScript observable side-effect or a possible
-    // eager deoptimization exit (i.e. {node} has an operator that doesn't have
-    // the Operator::kNoDeopt property).
-    Type* upper = NodeProperties::GetType(node);
-    if (upper->IsInhabited()) {
-      if (upper->IsConstant()) {
-        Node* replacement = jsgraph()->Constant(upper->AsConstant()->Value());
-        ReplaceWithValue(node, replacement);
-        return Changed(replacement);
-      } else if (upper->Is(Type::MinusZero())) {
-        Node* replacement = jsgraph()->Constant(factory()->minus_zero_value());
-        ReplaceWithValue(node, replacement);
-        return Changed(replacement);
-      } else if (upper->Is(Type::NaN())) {
-        Node* replacement = jsgraph()->NaNConstant();
-        ReplaceWithValue(node, replacement);
-        return Changed(replacement);
-      } else if (upper->Is(Type::Null())) {
-        Node* replacement = jsgraph()->NullConstant();
-        ReplaceWithValue(node, replacement);
-        return Changed(replacement);
-      } else if (upper->Is(Type::PlainNumber()) &&
-                 upper->Min() == upper->Max()) {
-        Node* replacement = jsgraph()->Constant(upper->Min());
-        ReplaceWithValue(node, replacement);
-        return Changed(replacement);
-      } else if (upper->Is(Type::Undefined())) {
-        Node* replacement = jsgraph()->UndefinedConstant();
-        ReplaceWithValue(node, replacement);
-        return Changed(replacement);
-      }
-    }
-  }
   switch (node->opcode()) {
     case IrOpcode::kJSEqual:
       return ReduceJSEqual(node, false);
@@ -2128,6 +2066,8 @@
     case IrOpcode::kJSDivide:
     case IrOpcode::kJSModulus:
       return ReduceNumberBinop(node);
+    case IrOpcode::kJSOrdinaryHasInstance:
+      return ReduceJSOrdinaryHasInstance(node);
     case IrOpcode::kJSToBoolean:
       return ReduceJSToBoolean(node);
     case IrOpcode::kJSToInteger:
@@ -2146,8 +2086,6 @@
       return ReduceJSLoadProperty(node);
     case IrOpcode::kJSStoreProperty:
       return ReduceJSStoreProperty(node);
-    case IrOpcode::kJSInstanceOf:
-      return ReduceJSInstanceOf(node);
     case IrOpcode::kJSLoadContext:
       return ReduceJSLoadContext(node);
     case IrOpcode::kJSStoreContext:
@@ -2158,31 +2096,14 @@
       return ReduceJSCallConstruct(node);
     case IrOpcode::kJSCallFunction:
       return ReduceJSCallFunction(node);
-    case IrOpcode::kJSForInDone:
-      return ReduceJSForInDone(node);
     case IrOpcode::kJSForInNext:
       return ReduceJSForInNext(node);
-    case IrOpcode::kJSForInStep:
-      return ReduceJSForInStep(node);
     case IrOpcode::kJSGeneratorStore:
       return ReduceJSGeneratorStore(node);
     case IrOpcode::kJSGeneratorRestoreContinuation:
       return ReduceJSGeneratorRestoreContinuation(node);
     case IrOpcode::kJSGeneratorRestoreRegister:
       return ReduceJSGeneratorRestoreRegister(node);
-    case IrOpcode::kSelect:
-      return ReduceSelect(node);
-    case IrOpcode::kCheckMaps:
-      return ReduceCheckMaps(node);
-    case IrOpcode::kCheckString:
-      return ReduceCheckString(node);
-    case IrOpcode::kNumberCeil:
-    case IrOpcode::kNumberFloor:
-    case IrOpcode::kNumberRound:
-    case IrOpcode::kNumberTrunc:
-      return ReduceNumberRoundop(node);
-    case IrOpcode::kLoadField:
-      return ReduceLoadField(node);
     default:
       break;
   }
@@ -2208,10 +2129,6 @@
   return jsgraph()->common();
 }
 
-MachineOperatorBuilder* JSTypedLowering::machine() const {
-  return jsgraph()->machine();
-}
-
 SimplifiedOperatorBuilder* JSTypedLowering::simplified() const {
   return jsgraph()->simplified();
 }
diff --git a/src/compiler/js-typed-lowering.h b/src/compiler/js-typed-lowering.h
index 35c397f..b0cf1f4 100644
--- a/src/compiler/js-typed-lowering.h
+++ b/src/compiler/js-typed-lowering.h
@@ -15,8 +15,6 @@
 // Forward declarations.
 class CompilationDependencies;
 class Factory;
-class TypeCache;
-
 
 namespace compiler {
 
@@ -24,9 +22,8 @@
 class CommonOperatorBuilder;
 class JSGraph;
 class JSOperatorBuilder;
-class MachineOperatorBuilder;
 class SimplifiedOperatorBuilder;
-
+class TypeCache;
 
 // Lowers JS-level operators to simplified operators based on types.
 class JSTypedLowering final : public AdvancedReducer {
@@ -52,7 +49,7 @@
   Reduction ReduceJSLoadNamed(Node* node);
   Reduction ReduceJSLoadProperty(Node* node);
   Reduction ReduceJSStoreProperty(Node* node);
-  Reduction ReduceJSInstanceOf(Node* node);
+  Reduction ReduceJSOrdinaryHasInstance(Node* node);
   Reduction ReduceJSLoadContext(Node* node);
   Reduction ReduceJSStoreContext(Node* node);
   Reduction ReduceJSEqualTypeOf(Node* node, bool invert);
@@ -69,20 +66,14 @@
   Reduction ReduceJSConvertReceiver(Node* node);
   Reduction ReduceJSCallConstruct(Node* node);
   Reduction ReduceJSCallFunction(Node* node);
-  Reduction ReduceJSForInDone(Node* node);
   Reduction ReduceJSForInNext(Node* node);
-  Reduction ReduceJSForInStep(Node* node);
   Reduction ReduceJSGeneratorStore(Node* node);
   Reduction ReduceJSGeneratorRestoreContinuation(Node* node);
   Reduction ReduceJSGeneratorRestoreRegister(Node* node);
-  Reduction ReduceCheckMaps(Node* node);
-  Reduction ReduceCheckString(Node* node);
-  Reduction ReduceLoadField(Node* node);
-  Reduction ReduceNumberRoundop(Node* node);
-  Reduction ReduceSelect(Node* node);
   Reduction ReduceNumberBinop(Node* node);
   Reduction ReduceInt32Binop(Node* node);
   Reduction ReduceUI32Shift(Node* node, Signedness signedness);
+  Reduction ReduceCreateConsString(Node* node);
 
   Factory* factory() const;
   Graph* graph() const;
@@ -91,7 +82,6 @@
   JSOperatorBuilder* javascript() const;
   CommonOperatorBuilder* common() const;
   SimplifiedOperatorBuilder* simplified() const;
-  MachineOperatorBuilder* machine() const;
   CompilationDependencies* dependencies() const;
   Flags flags() const { return flags_; }
 
@@ -99,8 +89,6 @@
   Flags flags_;
   JSGraph* jsgraph_;
   Type* shifted_int32_ranges_[4];
-  Type* const true_type_;
-  Type* const false_type_;
   Type* const the_hole_type_;
   TypeCache const& type_cache_;
 };
diff --git a/src/compiler/linkage.cc b/src/compiler/linkage.cc
index e4df58d..523ce47 100644
--- a/src/compiler/linkage.cc
+++ b/src/compiler/linkage.cc
@@ -7,7 +7,7 @@
 #include "src/ast/scopes.h"
 #include "src/builtins/builtins-utils.h"
 #include "src/code-stubs.h"
-#include "src/compiler.h"
+#include "src/compilation-info.h"
 #include "src/compiler/common-operator.h"
 #include "src/compiler/frame.h"
 #include "src/compiler/node.h"
@@ -24,34 +24,6 @@
   return LinkageLocation::ForRegister(reg.code(), type);
 }
 
-MachineType reptyp(Representation representation) {
-  switch (representation.kind()) {
-    case Representation::kInteger8:
-      return MachineType::Int8();
-    case Representation::kUInteger8:
-      return MachineType::Uint8();
-    case Representation::kInteger16:
-      return MachineType::Int16();
-    case Representation::kUInteger16:
-      return MachineType::Uint16();
-    case Representation::kInteger32:
-      return MachineType::Int32();
-    case Representation::kSmi:
-    case Representation::kTagged:
-    case Representation::kHeapObject:
-      return MachineType::AnyTagged();
-    case Representation::kDouble:
-      return MachineType::Float64();
-    case Representation::kExternal:
-      return MachineType::Pointer();
-    case Representation::kNone:
-    case Representation::kNumRepresentations:
-      break;
-  }
-  UNREACHABLE();
-  return MachineType::None();
-}
-
 }  // namespace
 
 
@@ -152,17 +124,16 @@
 
 // static
 bool Linkage::NeedsFrameStateInput(Runtime::FunctionId function) {
-  // Most runtime functions need a FrameState. A few chosen ones that we know
-  // not to call into arbitrary JavaScript, not to throw, and not to deoptimize
-  // are blacklisted here and can be called without a FrameState.
   switch (function) {
+    // Most runtime functions need a FrameState. A few chosen ones that we know
+    // not to call into arbitrary JavaScript, not to throw, and not to
+    // deoptimize are whitelisted here and can be called without a
+    // FrameState.
     case Runtime::kAbort:
     case Runtime::kAllocateInTargetSpace:
     case Runtime::kCreateIterResultObject:
     case Runtime::kDefineGetterPropertyUnchecked:  // TODO(jarin): Is it safe?
     case Runtime::kDefineSetterPropertyUnchecked:  // TODO(jarin): Is it safe?
-    case Runtime::kForInDone:
-    case Runtime::kForInStep:
     case Runtime::kGeneratorGetContinuation:
     case Runtime::kGetSuperConstructor:
     case Runtime::kIsFunction:
@@ -183,29 +154,29 @@
     case Runtime::kTraceEnter:
     case Runtime::kTraceExit:
       return false;
-    case Runtime::kInlineCall:
-    case Runtime::kInlineDeoptimizeNow:
-    case Runtime::kInlineGetPrototype:
-    case Runtime::kInlineNewObject:
-    case Runtime::kInlineRegExpConstructResult:
-    case Runtime::kInlineRegExpExec:
-    case Runtime::kInlineSubString:
-    case Runtime::kInlineThrowNotDateError:
-    case Runtime::kInlineToInteger:
-    case Runtime::kInlineToLength:
-    case Runtime::kInlineToNumber:
-    case Runtime::kInlineToObject:
-    case Runtime::kInlineToString:
-      return true;
+
+    // Some inline intrinsics are also safe to call without a FrameState.
+    case Runtime::kInlineCreateIterResultObject:
+    case Runtime::kInlineFixedArrayGet:
+    case Runtime::kInlineFixedArraySet:
+    case Runtime::kInlineGeneratorClose:
+    case Runtime::kInlineGeneratorGetInputOrDebugPos:
+    case Runtime::kInlineGeneratorGetResumeMode:
+    case Runtime::kInlineGetSuperConstructor:
+    case Runtime::kInlineIsArray:
+    case Runtime::kInlineIsJSReceiver:
+    case Runtime::kInlineIsRegExp:
+    case Runtime::kInlineIsSmi:
+    case Runtime::kInlineIsTypedArray:
+    case Runtime::kInlineRegExpFlags:
+    case Runtime::kInlineRegExpSource:
+      return false;
+
     default:
       break;
   }
 
-  // Most inlined runtime functions (except the ones listed above) can be called
-  // without a FrameState or will be lowered by JSIntrinsicLowering internally.
-  const Runtime::Function* const f = Runtime::FunctionForId(function);
-  if (f->intrinsic_type == Runtime::IntrinsicType::INLINE) return false;
-
+  // For safety, default to needing a FrameState unless whitelisted.
   return true;
 }
 
@@ -382,8 +353,7 @@
     if (i < register_parameter_count) {
       // The first parameters go in registers.
       Register reg = descriptor.GetRegisterParameter(i);
-      MachineType type =
-          reptyp(RepresentationFromType(descriptor.GetParameterType(i)));
+      MachineType type = descriptor.GetParameterType(i);
       locations.AddParam(regloc(reg, type));
     } else {
       // The rest of the parameters go on the stack.
@@ -452,8 +422,7 @@
     if (i < register_parameter_count) {
       // The first parameters go in registers.
       Register reg = descriptor.GetRegisterParameter(i);
-      MachineType type =
-          reptyp(RepresentationFromType(descriptor.GetParameterType(i)));
+      MachineType type = descriptor.GetParameterType(i);
       locations.AddParam(regloc(reg, type));
     } else {
       // The rest of the parameters go on the stack.
diff --git a/src/compiler/linkage.h b/src/compiler/linkage.h
index 1c02508..6f302bc 100644
--- a/src/compiler/linkage.h
+++ b/src/compiler/linkage.h
@@ -11,7 +11,7 @@
 #include "src/frames.h"
 #include "src/machine-type.h"
 #include "src/runtime/runtime.h"
-#include "src/zone.h"
+#include "src/zone/zone.h"
 
 namespace v8 {
 namespace internal {
diff --git a/src/compiler/live-range-separator.cc b/src/compiler/live-range-separator.cc
index e3cd0a3..db65593 100644
--- a/src/compiler/live-range-separator.cc
+++ b/src/compiler/live-range-separator.cc
@@ -58,6 +58,15 @@
   }
 }
 
+void SetSlotUse(TopLevelLiveRange *range) {
+  range->set_has_slot_use(false);
+  for (const UsePosition *pos = range->first_pos();
+       !range->has_slot_use() && pos != nullptr; pos = pos->next()) {
+    if (pos->type() == UsePositionType::kRequiresSlot) {
+      range->set_has_slot_use(true);
+    }
+  }
+}
 
 void SplinterLiveRange(TopLevelLiveRange *range, RegisterAllocationData *data) {
   const InstructionSequence *code = data->code();
@@ -99,7 +108,14 @@
   if (first_cut.IsValid()) {
     CreateSplinter(range, data, first_cut, last_cut);
   }
+
+  // Redo has_slot_use
+  if (range->has_slot_use() && range->splinter() != nullptr) {
+    SetSlotUse(range);
+    SetSlotUse(range->splinter());
+  }
 }
+
 }  // namespace
 
 
diff --git a/src/compiler/live-range-separator.h b/src/compiler/live-range-separator.h
index 57bc982..6aaf6b6 100644
--- a/src/compiler/live-range-separator.h
+++ b/src/compiler/live-range-separator.h
@@ -5,8 +5,7 @@
 #ifndef V8_LIVE_RANGE_SEPARATOR_H_
 #define V8_LIVE_RANGE_SEPARATOR_H_
 
-
-#include <src/zone.h>
+#include "src/zone/zone.h"
 namespace v8 {
 namespace internal {
 
diff --git a/src/compiler/liveness-analyzer.h b/src/compiler/liveness-analyzer.h
index 9b09724..8a3d715 100644
--- a/src/compiler/liveness-analyzer.h
+++ b/src/compiler/liveness-analyzer.h
@@ -7,7 +7,7 @@
 
 #include "src/bit-vector.h"
 #include "src/compiler/node.h"
-#include "src/zone-containers.h"
+#include "src/zone/zone-containers.h"
 
 namespace v8 {
 namespace internal {
diff --git a/src/compiler/load-elimination.cc b/src/compiler/load-elimination.cc
index ad787f8..93c24a0 100644
--- a/src/compiler/load-elimination.cc
+++ b/src/compiler/load-elimination.cc
@@ -4,6 +4,7 @@
 
 #include "src/compiler/load-elimination.h"
 
+#include "src/compiler/common-operator.h"
 #include "src/compiler/js-graph.h"
 #include "src/compiler/node-properties.h"
 #include "src/compiler/simplified-operator.h"
@@ -21,28 +22,38 @@
   if (!NodeProperties::GetType(a)->Maybe(NodeProperties::GetType(b))) {
     return kNoAlias;
   }
-  if (b->opcode() == IrOpcode::kAllocate) {
-    switch (a->opcode()) {
-      case IrOpcode::kAllocate:
-      case IrOpcode::kHeapConstant:
-      case IrOpcode::kParameter:
-        return kNoAlias;
-      case IrOpcode::kFinishRegion:
-        return QueryAlias(a->InputAt(0), b);
-      default:
-        break;
+  switch (b->opcode()) {
+    case IrOpcode::kAllocate: {
+      switch (a->opcode()) {
+        case IrOpcode::kAllocate:
+        case IrOpcode::kHeapConstant:
+        case IrOpcode::kParameter:
+          return kNoAlias;
+        default:
+          break;
+      }
+      break;
     }
+    case IrOpcode::kFinishRegion:
+      return QueryAlias(a, b->InputAt(0));
+    default:
+      break;
   }
-  if (a->opcode() == IrOpcode::kAllocate) {
-    switch (b->opcode()) {
-      case IrOpcode::kHeapConstant:
-      case IrOpcode::kParameter:
-        return kNoAlias;
-      case IrOpcode::kFinishRegion:
-        return QueryAlias(a, b->InputAt(0));
-      default:
-        break;
+  switch (a->opcode()) {
+    case IrOpcode::kAllocate: {
+      switch (b->opcode()) {
+        case IrOpcode::kHeapConstant:
+        case IrOpcode::kParameter:
+          return kNoAlias;
+        default:
+          break;
+      }
+      break;
     }
+    case IrOpcode::kFinishRegion:
+      return QueryAlias(a->InputAt(0), b);
+    default:
+      break;
   }
   return kMayAlias;
 }
@@ -54,7 +65,35 @@
 }  // namespace
 
 Reduction LoadElimination::Reduce(Node* node) {
+  if (FLAG_trace_turbo_load_elimination) {
+    if (node->op()->EffectInputCount() > 0) {
+      PrintF(" visit #%d:%s", node->id(), node->op()->mnemonic());
+      if (node->op()->ValueInputCount() > 0) {
+        PrintF("(");
+        for (int i = 0; i < node->op()->ValueInputCount(); ++i) {
+          if (i > 0) PrintF(", ");
+          Node* const value = NodeProperties::GetValueInput(node, i);
+          PrintF("#%d:%s", value->id(), value->op()->mnemonic());
+        }
+        PrintF(")");
+      }
+      PrintF("\n");
+      for (int i = 0; i < node->op()->EffectInputCount(); ++i) {
+        Node* const effect = NodeProperties::GetEffectInput(node, i);
+        if (AbstractState const* const state = node_states_.Get(effect)) {
+          PrintF("  state[%i]: #%d:%s\n", i, effect->id(),
+                 effect->op()->mnemonic());
+          state->Print();
+        } else {
+          PrintF("  no state[%i]: #%d:%s\n", i, effect->id(),
+                 effect->op()->mnemonic());
+        }
+      }
+    }
+  }
   switch (node->opcode()) {
+    case IrOpcode::kArrayBufferWasNeutered:
+      return ReduceArrayBufferWasNeutered(node);
     case IrOpcode::kCheckMaps:
       return ReduceCheckMaps(node);
     case IrOpcode::kEnsureWritableFastElements:
@@ -85,6 +124,73 @@
   return NoChange();
 }
 
+namespace {
+
+bool IsCompatibleCheck(Node const* a, Node const* b) {
+  if (a->op() != b->op()) return false;
+  for (int i = a->op()->ValueInputCount(); --i >= 0;) {
+    if (!MustAlias(a->InputAt(i), b->InputAt(i))) return false;
+  }
+  return true;
+}
+
+}  // namespace
+
+Node* LoadElimination::AbstractChecks::Lookup(Node* node) const {
+  for (Node* const check : nodes_) {
+    if (check && IsCompatibleCheck(check, node)) {
+      return check;
+    }
+  }
+  return nullptr;
+}
+
+bool LoadElimination::AbstractChecks::Equals(AbstractChecks const* that) const {
+  if (this == that) return true;
+  for (size_t i = 0; i < arraysize(nodes_); ++i) {
+    if (Node* this_node = this->nodes_[i]) {
+      for (size_t j = 0;; ++j) {
+        if (j == arraysize(nodes_)) return false;
+        if (that->nodes_[j] == this_node) break;
+      }
+    }
+  }
+  for (size_t i = 0; i < arraysize(nodes_); ++i) {
+    if (Node* that_node = that->nodes_[i]) {
+      for (size_t j = 0;; ++j) {
+        if (j == arraysize(nodes_)) return false;
+        if (this->nodes_[j] == that_node) break;
+      }
+    }
+  }
+  return true;
+}
+
+LoadElimination::AbstractChecks const* LoadElimination::AbstractChecks::Merge(
+    AbstractChecks const* that, Zone* zone) const {
+  if (this->Equals(that)) return this;
+  AbstractChecks* copy = new (zone) AbstractChecks(zone);
+  for (Node* const this_node : this->nodes_) {
+    if (this_node == nullptr) continue;
+    for (Node* const that_node : that->nodes_) {
+      if (this_node == that_node) {
+        copy->nodes_[copy->next_index_++] = this_node;
+        break;
+      }
+    }
+  }
+  copy->next_index_ %= arraysize(nodes_);
+  return copy;
+}
+
+void LoadElimination::AbstractChecks::Print() const {
+  for (Node* const node : nodes_) {
+    if (node != nullptr) {
+      PrintF("    #%d:%s\n", node->id(), node->op()->mnemonic());
+    }
+  }
+}
+
 Node* LoadElimination::AbstractElements::Lookup(Node* object,
                                                 Node* index) const {
   for (Element const element : elements_) {
@@ -110,7 +216,8 @@
         DCHECK_NOT_NULL(element.index);
         DCHECK_NOT_NULL(element.value);
         if (!MayAlias(object, element.object) ||
-            !MayAlias(index, element.index)) {
+            !NodeProperties::GetType(index)->Maybe(
+                NodeProperties::GetType(element.index))) {
           that->elements_[that->next_index_++] = element;
         }
       }
@@ -165,6 +272,7 @@
           this_element.index == that_element.index &&
           this_element.value == that_element.value) {
         copy->elements_[copy->next_index_++] = this_element;
+        break;
       }
     }
   }
@@ -172,6 +280,17 @@
   return copy;
 }
 
+void LoadElimination::AbstractElements::Print() const {
+  for (Element const& element : elements_) {
+    if (element.object) {
+      PrintF("    #%d:%s @ #%d:%s -> #%d:%s\n", element.object->id(),
+             element.object->op()->mnemonic(), element.index->id(),
+             element.index->op()->mnemonic(), element.value->id(),
+             element.value->op()->mnemonic());
+    }
+  }
+}
+
 Node* LoadElimination::AbstractField::Lookup(Node* object) const {
   for (auto pair : info_for_node_) {
     if (MustAlias(object, pair.first)) return pair.second;
@@ -193,7 +312,22 @@
   return this;
 }
 
+void LoadElimination::AbstractField::Print() const {
+  for (auto pair : info_for_node_) {
+    PrintF("    #%d:%s -> #%d:%s\n", pair.first->id(),
+           pair.first->op()->mnemonic(), pair.second->id(),
+           pair.second->op()->mnemonic());
+  }
+}
+
 bool LoadElimination::AbstractState::Equals(AbstractState const* that) const {
+  if (this->checks_) {
+    if (!that->checks_ || !that->checks_->Equals(this->checks_)) {
+      return false;
+    }
+  } else if (that->checks_) {
+    return false;
+  }
   if (this->elements_) {
     if (!that->elements_ || !that->elements_->Equals(this->elements_)) {
       return false;
@@ -215,13 +349,17 @@
 
 void LoadElimination::AbstractState::Merge(AbstractState const* that,
                                            Zone* zone) {
+  // Merge the information we have about the checks.
+  if (this->checks_) {
+    this->checks_ =
+        that->checks_ ? that->checks_->Merge(this->checks_, zone) : nullptr;
+  }
+
   // Merge the information we have about the elements.
   if (this->elements_) {
     this->elements_ = that->elements_
                           ? that->elements_->Merge(this->elements_, zone)
-                          : that->elements_;
-  } else {
-    this->elements_ = that->elements_;
+                          : nullptr;
   }
 
   // Merge the information we have about the fields.
@@ -236,6 +374,21 @@
   }
 }
 
+Node* LoadElimination::AbstractState::LookupCheck(Node* node) const {
+  return this->checks_ ? this->checks_->Lookup(node) : nullptr;
+}
+
+LoadElimination::AbstractState const* LoadElimination::AbstractState::AddCheck(
+    Node* node, Zone* zone) const {
+  AbstractState* that = new (zone) AbstractState(*this);
+  if (that->checks_) {
+    that->checks_ = that->checks_->Extend(node, zone);
+  } else {
+    that->checks_ = new (zone) AbstractChecks(node, zone);
+  }
+  return that;
+}
+
 Node* LoadElimination::AbstractState::LookupElement(Node* object,
                                                     Node* index) const {
   if (this->elements_) {
@@ -303,6 +456,23 @@
   return nullptr;
 }
 
+void LoadElimination::AbstractState::Print() const {
+  if (checks_) {
+    PrintF("   checks:\n");
+    checks_->Print();
+  }
+  if (elements_) {
+    PrintF("   elements:\n");
+    elements_->Print();
+  }
+  for (size_t i = 0; i < arraysize(fields_); ++i) {
+    if (AbstractField const* const field = fields_[i]) {
+      PrintF("   field %zu:\n", i);
+      field->Print();
+    }
+  }
+}
+
 LoadElimination::AbstractState const*
 LoadElimination::AbstractStateForEffectNodes::Get(Node* node) const {
   size_t const id = node->id();
@@ -317,13 +487,26 @@
   info_for_node_[id] = state;
 }
 
+Reduction LoadElimination::ReduceArrayBufferWasNeutered(Node* node) {
+  Node* const effect = NodeProperties::GetEffectInput(node);
+  AbstractState const* state = node_states_.Get(effect);
+  if (state == nullptr) return NoChange();
+  if (Node* const check = state->LookupCheck(node)) {
+    ReplaceWithValue(node, check, effect);
+    return Replace(check);
+  }
+  state = state->AddCheck(node, zone());
+  return UpdateState(node, state);
+}
+
 Reduction LoadElimination::ReduceCheckMaps(Node* node) {
   Node* const object = NodeProperties::GetValueInput(node, 0);
   Node* const effect = NodeProperties::GetEffectInput(node);
   AbstractState const* state = node_states_.Get(effect);
   if (state == nullptr) return NoChange();
   int const map_input_count = node->op()->ValueInputCount() - 1;
-  if (Node* const object_map = state->LookupField(object, 0)) {
+  if (Node* const object_map =
+          state->LookupField(object, FieldIndexOf(HeapObject::kMapOffset))) {
     for (int i = 0; i < map_input_count; ++i) {
       Node* map = NodeProperties::GetValueInput(node, 1 + i);
       if (map == object_map) return Replace(effect);
@@ -331,7 +514,8 @@
   }
   if (map_input_count == 1) {
     Node* const map0 = NodeProperties::GetValueInput(node, 1);
-    state = state->AddField(object, 0, map0, zone());
+    state = state->AddField(object, FieldIndexOf(HeapObject::kMapOffset), map0,
+                            zone());
   }
   return UpdateState(node, state);
 }
@@ -343,7 +527,8 @@
   AbstractState const* state = node_states_.Get(effect);
   if (state == nullptr) return NoChange();
   Node* fixed_array_map = jsgraph()->FixedArrayMapConstant();
-  if (Node* const elements_map = state->LookupField(elements, 0)) {
+  if (Node* const elements_map =
+          state->LookupField(elements, FieldIndexOf(HeapObject::kMapOffset))) {
     // Check if the {elements} already have the fixed array map.
     if (elements_map == fixed_array_map) {
       ReplaceWithValue(node, elements, effect);
@@ -351,11 +536,14 @@
     }
   }
   // We know that the resulting elements have the fixed array map.
-  state = state->AddField(node, 0, fixed_array_map, zone());
+  state = state->AddField(node, FieldIndexOf(HeapObject::kMapOffset),
+                          fixed_array_map, zone());
   // Kill the previous elements on {object}.
-  state = state->KillField(object, 2, zone());
+  state =
+      state->KillField(object, FieldIndexOf(JSObject::kElementsOffset), zone());
   // Add the new elements on {object}.
-  state = state->AddField(object, 2, node, zone());
+  state = state->AddField(object, FieldIndexOf(JSObject::kElementsOffset), node,
+                          zone());
   return UpdateState(node, state);
 }
 
@@ -368,20 +556,25 @@
   if (flags & GrowFastElementsFlag::kDoubleElements) {
     // We know that the resulting elements have the fixed double array map.
     Node* fixed_double_array_map = jsgraph()->FixedDoubleArrayMapConstant();
-    state = state->AddField(node, 0, fixed_double_array_map, zone());
+    state = state->AddField(node, FieldIndexOf(HeapObject::kMapOffset),
+                            fixed_double_array_map, zone());
   } else {
     // We know that the resulting elements have the fixed array map.
     Node* fixed_array_map = jsgraph()->FixedArrayMapConstant();
-    state = state->AddField(node, 0, fixed_array_map, zone());
+    state = state->AddField(node, FieldIndexOf(HeapObject::kMapOffset),
+                            fixed_array_map, zone());
   }
   if (flags & GrowFastElementsFlag::kArrayObject) {
     // Kill the previous Array::length on {object}.
-    state = state->KillField(object, 3, zone());
+    state =
+        state->KillField(object, FieldIndexOf(JSArray::kLengthOffset), zone());
   }
   // Kill the previous elements on {object}.
-  state = state->KillField(object, 2, zone());
+  state =
+      state->KillField(object, FieldIndexOf(JSObject::kElementsOffset), zone());
   // Add the new elements on {object}.
-  state = state->AddField(object, 2, node, zone());
+  state = state->AddField(object, FieldIndexOf(JSObject::kElementsOffset), node,
+                          zone());
   return UpdateState(node, state);
 }
 
@@ -392,18 +585,22 @@
   Node* const effect = NodeProperties::GetEffectInput(node);
   AbstractState const* state = node_states_.Get(effect);
   if (state == nullptr) return NoChange();
-  if (Node* const object_map = state->LookupField(object, 0)) {
+  if (Node* const object_map =
+          state->LookupField(object, FieldIndexOf(HeapObject::kMapOffset))) {
     if (target_map == object_map) {
       // The {object} already has the {target_map}, so this TransitionElements
       // {node} is fully redundant (independent of what {source_map} is).
       return Replace(effect);
     }
-    state = state->KillField(object, 0, zone());
+    state =
+        state->KillField(object, FieldIndexOf(HeapObject::kMapOffset), zone());
     if (source_map == object_map) {
-      state = state->AddField(object, 0, target_map, zone());
+      state = state->AddField(object, FieldIndexOf(HeapObject::kMapOffset),
+                              target_map, zone());
     }
   } else {
-    state = state->KillField(object, 0, zone());
+    state =
+        state->KillField(object, FieldIndexOf(HeapObject::kMapOffset), zone());
   }
   ElementsTransition transition = ElementsTransitionOf(node->op());
   switch (transition) {
@@ -411,7 +608,8 @@
       break;
     case ElementsTransition::kSlowTransition:
       // Kill the elements as well.
-      state = state->KillField(object, 2, zone());
+      state = state->KillField(object, FieldIndexOf(JSObject::kElementsOffset),
+                               zone());
       break;
   }
   return UpdateState(node, state);
@@ -421,16 +619,21 @@
   FieldAccess const& access = FieldAccessOf(node->op());
   Node* const object = NodeProperties::GetValueInput(node, 0);
   Node* const effect = NodeProperties::GetEffectInput(node);
+  Node* const control = NodeProperties::GetControlInput(node);
   AbstractState const* state = node_states_.Get(effect);
   if (state == nullptr) return NoChange();
   int field_index = FieldIndexOf(access);
   if (field_index >= 0) {
-    if (Node* const replacement = state->LookupField(object, field_index)) {
-      // Make sure the {replacement} has at least as good type
-      // as the original {node}.
-      if (!replacement->IsDead() &&
-          NodeProperties::GetType(replacement)
-              ->Is(NodeProperties::GetType(node))) {
+    if (Node* replacement = state->LookupField(object, field_index)) {
+      // Make sure we don't resurrect dead {replacement} nodes.
+      if (!replacement->IsDead()) {
+        // We might need to guard the {replacement} if the type of the
+        // {node} is more precise than the type of the {replacement}.
+        Type* const node_type = NodeProperties::GetType(node);
+        if (!NodeProperties::GetType(replacement)->Is(node_type)) {
+          replacement = graph()->NewNode(common()->TypeGuard(node_type),
+                                         replacement, control);
+        }
         ReplaceWithValue(node, replacement, effect);
         return Replace(replacement);
       }
@@ -468,14 +671,19 @@
   Node* const object = NodeProperties::GetValueInput(node, 0);
   Node* const index = NodeProperties::GetValueInput(node, 1);
   Node* const effect = NodeProperties::GetEffectInput(node);
+  Node* const control = NodeProperties::GetControlInput(node);
   AbstractState const* state = node_states_.Get(effect);
   if (state == nullptr) return NoChange();
-  if (Node* const replacement = state->LookupElement(object, index)) {
-    // Make sure the {replacement} has at least as good type
-    // as the original {node}.
-    if (!replacement->IsDead() &&
-        NodeProperties::GetType(replacement)
-            ->Is(NodeProperties::GetType(node))) {
+  if (Node* replacement = state->LookupElement(object, index)) {
+    // Make sure we don't resurrect dead {replacement} nodes.
+    if (!replacement->IsDead()) {
+      // We might need to guard the {replacement} if the type of the
+      // {node} is more precise than the type of the {replacement}.
+      Type* const node_type = NodeProperties::GetType(node);
+      if (!NodeProperties::GetType(replacement)->Is(node_type)) {
+        replacement = graph()->NewNode(common()->TypeGuard(node_type),
+                                       replacement, control);
+      }
       ReplaceWithValue(node, replacement, effect);
       return Replace(replacement);
     }
@@ -620,23 +828,28 @@
         switch (current->opcode()) {
           case IrOpcode::kEnsureWritableFastElements: {
             Node* const object = NodeProperties::GetValueInput(current, 0);
-            state = state->KillField(object, 2, zone());
+            state = state->KillField(
+                object, FieldIndexOf(JSObject::kElementsOffset), zone());
             break;
           }
           case IrOpcode::kMaybeGrowFastElements: {
             GrowFastElementsFlags flags =
                 GrowFastElementsFlagsOf(current->op());
             Node* const object = NodeProperties::GetValueInput(current, 0);
-            state = state->KillField(object, 2, zone());
+            state = state->KillField(
+                object, FieldIndexOf(JSObject::kElementsOffset), zone());
             if (flags & GrowFastElementsFlag::kArrayObject) {
-              state = state->KillField(object, 3, zone());
+              state = state->KillField(
+                  object, FieldIndexOf(JSArray::kLengthOffset), zone());
             }
             break;
           }
           case IrOpcode::kTransitionElementsKind: {
             Node* const object = NodeProperties::GetValueInput(current, 0);
-            state = state->KillField(object, 0, zone());
-            state = state->KillField(object, 2, zone());
+            state = state->KillField(
+                object, FieldIndexOf(HeapObject::kMapOffset), zone());
+            state = state->KillField(
+                object, FieldIndexOf(JSObject::kElementsOffset), zone());
             break;
           }
           case IrOpcode::kStoreField: {
@@ -671,6 +884,14 @@
 }
 
 // static
+int LoadElimination::FieldIndexOf(int offset) {
+  DCHECK_EQ(0, offset % kPointerSize);
+  int field_index = offset / kPointerSize;
+  if (field_index >= static_cast<int>(kMaxTrackedFields)) return -1;
+  return field_index;
+}
+
+// static
 int LoadElimination::FieldIndexOf(FieldAccess const& access) {
   MachineRepresentation rep = access.machine_type.representation();
   switch (rep) {
@@ -699,12 +920,15 @@
       break;
   }
   DCHECK_EQ(kTaggedBase, access.base_is_tagged);
-  DCHECK_EQ(0, access.offset % kPointerSize);
-  int field_index = access.offset / kPointerSize;
-  if (field_index >= static_cast<int>(kMaxTrackedFields)) return -1;
-  return field_index;
+  return FieldIndexOf(access.offset);
 }
 
+CommonOperatorBuilder* LoadElimination::common() const {
+  return jsgraph()->common();
+}
+
+Graph* LoadElimination::graph() const { return jsgraph()->graph(); }
+
 }  // namespace compiler
 }  // namespace internal
 }  // namespace v8
diff --git a/src/compiler/load-elimination.h b/src/compiler/load-elimination.h
index 2a4ee40..985e690 100644
--- a/src/compiler/load-elimination.h
+++ b/src/compiler/load-elimination.h
@@ -12,7 +12,9 @@
 namespace compiler {
 
 // Foward declarations.
+class CommonOperatorBuilder;
 struct FieldAccess;
+class Graph;
 class JSGraph;
 
 class LoadElimination final : public AdvancedReducer {
@@ -24,6 +26,39 @@
   Reduction Reduce(Node* node) final;
 
  private:
+  static const size_t kMaxTrackedChecks = 8;
+
+  // Abstract state to approximate the current state of checks that are
+  // only invalidated by calls, i.e. array buffer neutering checks, along
+  // the effect paths through the graph.
+  class AbstractChecks final : public ZoneObject {
+   public:
+    explicit AbstractChecks(Zone* zone) {
+      for (size_t i = 0; i < arraysize(nodes_); ++i) {
+        nodes_[i] = nullptr;
+      }
+    }
+    AbstractChecks(Node* node, Zone* zone) : AbstractChecks(zone) {
+      nodes_[next_index_++] = node;
+    }
+
+    AbstractChecks const* Extend(Node* node, Zone* zone) const {
+      AbstractChecks* that = new (zone) AbstractChecks(*this);
+      that->nodes_[that->next_index_] = node;
+      that->next_index_ = (that->next_index_ + 1) % arraysize(nodes_);
+      return that;
+    }
+    Node* Lookup(Node* node) const;
+    bool Equals(AbstractChecks const* that) const;
+    AbstractChecks const* Merge(AbstractChecks const* that, Zone* zone) const;
+
+    void Print() const;
+
+   private:
+    Node* nodes_[kMaxTrackedChecks];
+    size_t next_index_ = 0;
+  };
+
   static const size_t kMaxTrackedElements = 8;
 
   // Abstract state to approximate the current state of an element along the
@@ -53,6 +88,8 @@
     AbstractElements const* Merge(AbstractElements const* that,
                                   Zone* zone) const;
 
+    void Print() const;
+
    private:
     struct Element {
       Element() {}
@@ -104,6 +141,8 @@
       return copy;
     }
 
+    void Print() const;
+
    private:
     ZoneMap<Node*, Node*> info_for_node_;
   };
@@ -133,7 +172,13 @@
                                      Zone* zone) const;
     Node* LookupElement(Node* object, Node* index) const;
 
+    AbstractState const* AddCheck(Node* node, Zone* zone) const;
+    Node* LookupCheck(Node* node) const;
+
+    void Print() const;
+
    private:
+    AbstractChecks const* checks_ = nullptr;
     AbstractElements const* elements_ = nullptr;
     AbstractField const* fields_[kMaxTrackedFields];
   };
@@ -150,6 +195,7 @@
     ZoneVector<AbstractState const*> info_for_node_;
   };
 
+  Reduction ReduceArrayBufferWasNeutered(Node* node);
   Reduction ReduceCheckMaps(Node* node);
   Reduction ReduceEnsureWritableFastElements(Node* node);
   Reduction ReduceMaybeGrowFastElements(Node* node);
@@ -168,9 +214,12 @@
   AbstractState const* ComputeLoopState(Node* node,
                                         AbstractState const* state) const;
 
+  static int FieldIndexOf(int offset);
   static int FieldIndexOf(FieldAccess const& access);
 
+  CommonOperatorBuilder* common() const;
   AbstractState const* empty_state() const { return &empty_state_; }
+  Graph* graph() const;
   JSGraph* jsgraph() const { return jsgraph_; }
   Zone* zone() const { return node_states_.zone(); }
 
diff --git a/src/compiler/loop-analysis.cc b/src/compiler/loop-analysis.cc
index 2a81aee..f3a7933 100644
--- a/src/compiler/loop-analysis.cc
+++ b/src/compiler/loop-analysis.cc
@@ -5,10 +5,10 @@
 #include "src/compiler/loop-analysis.h"
 
 #include "src/compiler/graph.h"
-#include "src/compiler/node.h"
 #include "src/compiler/node-marker.h"
 #include "src/compiler/node-properties.h"
-#include "src/zone.h"
+#include "src/compiler/node.h"
+#include "src/zone/zone.h"
 
 namespace v8 {
 namespace internal {
diff --git a/src/compiler/loop-analysis.h b/src/compiler/loop-analysis.h
index a8c3bca..2d0f27b 100644
--- a/src/compiler/loop-analysis.h
+++ b/src/compiler/loop-analysis.h
@@ -8,7 +8,7 @@
 #include "src/base/iterator.h"
 #include "src/compiler/graph.h"
 #include "src/compiler/node.h"
-#include "src/zone-containers.h"
+#include "src/zone/zone-containers.h"
 
 namespace v8 {
 namespace internal {
diff --git a/src/compiler/loop-peeling.cc b/src/compiler/loop-peeling.cc
index 9535df5..5f8857c 100644
--- a/src/compiler/loop-peeling.cc
+++ b/src/compiler/loop-peeling.cc
@@ -2,13 +2,13 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
+#include "src/compiler/loop-peeling.h"
 #include "src/compiler/common-operator.h"
 #include "src/compiler/graph.h"
-#include "src/compiler/loop-peeling.h"
-#include "src/compiler/node.h"
 #include "src/compiler/node-marker.h"
 #include "src/compiler/node-properties.h"
-#include "src/zone.h"
+#include "src/compiler/node.h"
+#include "src/zone/zone.h"
 
 // Loop peeling is an optimization that copies the body of a loop, creating
 // a new copy of the body called the "peeled iteration" that represents the
diff --git a/src/compiler/loop-variable-optimizer.cc b/src/compiler/loop-variable-optimizer.cc
index 8331963..55cce26 100644
--- a/src/compiler/loop-variable-optimizer.cc
+++ b/src/compiler/loop-variable-optimizer.cc
@@ -9,8 +9,8 @@
 #include "src/compiler/node-marker.h"
 #include "src/compiler/node-properties.h"
 #include "src/compiler/node.h"
-#include "src/zone-containers.h"
-#include "src/zone.h"
+#include "src/zone/zone-containers.h"
+#include "src/zone/zone.h"
 
 namespace v8 {
 namespace internal {
@@ -28,7 +28,7 @@
     : graph_(graph),
       common_(common),
       zone_(zone),
-      limits_(zone),
+      limits_(graph->NodeCount(), zone),
       induction_vars_(zone) {}
 
 void LoopVariableOptimizer::Run() {
@@ -40,14 +40,13 @@
     queue.pop();
     queued.Set(node, false);
 
-    DCHECK(limits_.find(node->id()) == limits_.end());
+    DCHECK_NULL(limits_[node->id()]);
     bool all_inputs_visited = true;
     int inputs_end = (node->opcode() == IrOpcode::kLoop)
                          ? kFirstBackedge
                          : node->op()->ControlInputCount();
     for (int i = 0; i < inputs_end; i++) {
-      auto input = limits_.find(NodeProperties::GetControlInput(node, i)->id());
-      if (input == limits_.end()) {
+      if (limits_[NodeProperties::GetControlInput(node, i)->id()] == nullptr) {
         all_inputs_visited = false;
         break;
       }
@@ -55,7 +54,7 @@
     if (!all_inputs_visited) continue;
 
     VisitNode(node);
-    DCHECK(limits_.find(node->id()) != limits_.end());
+    DCHECK_NOT_NULL(limits_[node->id()]);
 
     // Queue control outputs.
     for (Edge edge : node->use_edges()) {
diff --git a/src/compiler/loop-variable-optimizer.h b/src/compiler/loop-variable-optimizer.h
index a5c1ad4..8054ec1 100644
--- a/src/compiler/loop-variable-optimizer.h
+++ b/src/compiler/loop-variable-optimizer.h
@@ -5,7 +5,7 @@
 #ifndef V8_COMPILER_LOOP_VARIABLE_OPTIMIZER_H_
 #define V8_COMPILER_LOOP_VARIABLE_OPTIMIZER_H_
 
-#include "src/zone-containers.h"
+#include "src/zone/zone-containers.h"
 
 namespace v8 {
 namespace internal {
@@ -106,7 +106,7 @@
   Graph* graph_;
   CommonOperatorBuilder* common_;
   Zone* zone_;
-  ZoneMap<int, const VariableLimits*> limits_;
+  ZoneVector<const VariableLimits*> limits_;
   ZoneMap<int, InductionVariable*> induction_vars_;
 };
 
diff --git a/src/compiler/machine-graph-verifier.cc b/src/compiler/machine-graph-verifier.cc
new file mode 100644
index 0000000..d33ee4e
--- /dev/null
+++ b/src/compiler/machine-graph-verifier.cc
@@ -0,0 +1,667 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/machine-graph-verifier.h"
+
+#include "src/compiler/common-operator.h"
+#include "src/compiler/graph.h"
+#include "src/compiler/linkage.h"
+#include "src/compiler/machine-operator.h"
+#include "src/compiler/node-properties.h"
+#include "src/compiler/node.h"
+#include "src/compiler/schedule.h"
+#include "src/zone/zone.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+namespace {
+
+class MachineRepresentationInferrer {
+ public:
+  MachineRepresentationInferrer(Schedule const* schedule, Graph const* graph,
+                                Linkage* linkage, Zone* zone)
+      : schedule_(schedule),
+        linkage_(linkage),
+        representation_vector_(graph->NodeCount(), zone) {
+    Run();
+  }
+
+  MachineRepresentation GetRepresentation(Node const* node) const {
+    return representation_vector_.at(node->id());
+  }
+
+ private:
+  MachineRepresentation GetProjectionType(Node const* projection) {
+    size_t index = ProjectionIndexOf(projection->op());
+    Node* input = projection->InputAt(0);
+    switch (input->opcode()) {
+      case IrOpcode::kInt32AddWithOverflow:
+      case IrOpcode::kInt32SubWithOverflow:
+      case IrOpcode::kInt32MulWithOverflow:
+        CHECK_LE(index, static_cast<size_t>(1));
+        return index == 0 ? MachineRepresentation::kWord32
+                          : MachineRepresentation::kBit;
+      case IrOpcode::kInt64AddWithOverflow:
+      case IrOpcode::kInt64SubWithOverflow:
+        CHECK_LE(index, static_cast<size_t>(1));
+        return index == 0 ? MachineRepresentation::kWord64
+                          : MachineRepresentation::kBit;
+      case IrOpcode::kTryTruncateFloat32ToInt64:
+      case IrOpcode::kTryTruncateFloat64ToInt64:
+      case IrOpcode::kTryTruncateFloat32ToUint64:
+      case IrOpcode::kTryTruncateFloat64ToUint64:
+        CHECK_LE(index, static_cast<size_t>(1));
+        return index == 0 ? MachineRepresentation::kWord64
+                          : MachineRepresentation::kBit;
+      case IrOpcode::kCall: {
+        CallDescriptor const* desc = CallDescriptorOf(input->op());
+        return desc->GetReturnType(index).representation();
+      }
+      default:
+        return MachineRepresentation::kNone;
+    }
+  }
+
+  void Run() {
+    auto blocks = schedule_->all_blocks();
+    for (BasicBlock* block : *blocks) {
+      for (size_t i = 0; i <= block->NodeCount(); ++i) {
+        Node const* node =
+            i < block->NodeCount() ? block->NodeAt(i) : block->control_input();
+        if (node == nullptr) {
+          DCHECK_EQ(block->NodeCount(), i);
+          break;
+        }
+        switch (node->opcode()) {
+          case IrOpcode::kParameter:
+            representation_vector_[node->id()] =
+                linkage_->GetParameterType(ParameterIndexOf(node->op()))
+                    .representation();
+            break;
+          case IrOpcode::kProjection: {
+            representation_vector_[node->id()] = GetProjectionType(node);
+          } break;
+          case IrOpcode::kTypedStateValues:
+            representation_vector_[node->id()] = MachineRepresentation::kNone;
+            break;
+          case IrOpcode::kAtomicLoad:
+          case IrOpcode::kLoad:
+          case IrOpcode::kProtectedLoad:
+            representation_vector_[node->id()] =
+                LoadRepresentationOf(node->op()).representation();
+            break;
+          case IrOpcode::kCheckedLoad:
+            representation_vector_[node->id()] =
+                CheckedLoadRepresentationOf(node->op()).representation();
+            break;
+          case IrOpcode::kLoadStackPointer:
+          case IrOpcode::kLoadFramePointer:
+          case IrOpcode::kLoadParentFramePointer:
+            representation_vector_[node->id()] =
+                MachineType::PointerRepresentation();
+            break;
+          case IrOpcode::kPhi:
+            representation_vector_[node->id()] =
+                PhiRepresentationOf(node->op());
+            break;
+          case IrOpcode::kCall: {
+            CallDescriptor const* desc = CallDescriptorOf(node->op());
+            if (desc->ReturnCount() > 0) {
+              representation_vector_[node->id()] =
+                  desc->GetReturnType(0).representation();
+            } else {
+              representation_vector_[node->id()] =
+                  MachineRepresentation::kTagged;
+            }
+            break;
+          }
+          case IrOpcode::kUnalignedLoad:
+            representation_vector_[node->id()] =
+                UnalignedLoadRepresentationOf(node->op()).representation();
+            break;
+          case IrOpcode::kHeapConstant:
+          case IrOpcode::kNumberConstant:
+          case IrOpcode::kChangeBitToTagged:
+          case IrOpcode::kIfException:
+          case IrOpcode::kOsrValue:
+          case IrOpcode::kChangeInt32ToTagged:
+          case IrOpcode::kChangeUint32ToTagged:
+          case IrOpcode::kBitcastWordToTagged:
+            representation_vector_[node->id()] = MachineRepresentation::kTagged;
+            break;
+          case IrOpcode::kExternalConstant:
+            representation_vector_[node->id()] =
+                MachineType::PointerRepresentation();
+            break;
+          case IrOpcode::kBitcastTaggedToWord:
+            representation_vector_[node->id()] =
+                MachineType::PointerRepresentation();
+            break;
+          case IrOpcode::kBitcastWordToTaggedSigned:
+            representation_vector_[node->id()] =
+                MachineRepresentation::kTaggedSigned;
+            break;
+          case IrOpcode::kWord32Equal:
+          case IrOpcode::kInt32LessThan:
+          case IrOpcode::kInt32LessThanOrEqual:
+          case IrOpcode::kUint32LessThan:
+          case IrOpcode::kUint32LessThanOrEqual:
+          case IrOpcode::kWord64Equal:
+          case IrOpcode::kInt64LessThan:
+          case IrOpcode::kInt64LessThanOrEqual:
+          case IrOpcode::kUint64LessThan:
+          case IrOpcode::kUint64LessThanOrEqual:
+          case IrOpcode::kFloat32Equal:
+          case IrOpcode::kFloat32LessThan:
+          case IrOpcode::kFloat32LessThanOrEqual:
+          case IrOpcode::kFloat64Equal:
+          case IrOpcode::kFloat64LessThan:
+          case IrOpcode::kFloat64LessThanOrEqual:
+          case IrOpcode::kChangeTaggedToBit:
+            representation_vector_[node->id()] = MachineRepresentation::kBit;
+            break;
+#define LABEL(opcode) case IrOpcode::k##opcode:
+          case IrOpcode::kTruncateInt64ToInt32:
+          case IrOpcode::kTruncateFloat32ToInt32:
+          case IrOpcode::kTruncateFloat32ToUint32:
+          case IrOpcode::kBitcastFloat32ToInt32:
+          case IrOpcode::kInt32x4ExtractLane:
+          case IrOpcode::kInt32Constant:
+          case IrOpcode::kRelocatableInt32Constant:
+          case IrOpcode::kTruncateFloat64ToWord32:
+          case IrOpcode::kTruncateFloat64ToUint32:
+          case IrOpcode::kChangeFloat64ToInt32:
+          case IrOpcode::kChangeFloat64ToUint32:
+          case IrOpcode::kRoundFloat64ToInt32:
+          case IrOpcode::kFloat64ExtractLowWord32:
+          case IrOpcode::kFloat64ExtractHighWord32:
+            MACHINE_UNOP_32_LIST(LABEL)
+            MACHINE_BINOP_32_LIST(LABEL) {
+              representation_vector_[node->id()] =
+                  MachineRepresentation::kWord32;
+            }
+            break;
+          case IrOpcode::kChangeInt32ToInt64:
+          case IrOpcode::kChangeUint32ToUint64:
+          case IrOpcode::kInt64Constant:
+          case IrOpcode::kRelocatableInt64Constant:
+          case IrOpcode::kBitcastFloat64ToInt64:
+            MACHINE_BINOP_64_LIST(LABEL) {
+              representation_vector_[node->id()] =
+                  MachineRepresentation::kWord64;
+            }
+            break;
+          case IrOpcode::kRoundInt32ToFloat32:
+          case IrOpcode::kRoundUint32ToFloat32:
+          case IrOpcode::kRoundInt64ToFloat32:
+          case IrOpcode::kRoundUint64ToFloat32:
+          case IrOpcode::kFloat32Constant:
+          case IrOpcode::kTruncateFloat64ToFloat32:
+            MACHINE_FLOAT32_BINOP_LIST(LABEL)
+            MACHINE_FLOAT32_UNOP_LIST(LABEL) {
+              representation_vector_[node->id()] =
+                  MachineRepresentation::kFloat32;
+            }
+            break;
+          case IrOpcode::kRoundInt64ToFloat64:
+          case IrOpcode::kRoundUint64ToFloat64:
+          case IrOpcode::kChangeFloat32ToFloat64:
+          case IrOpcode::kChangeInt32ToFloat64:
+          case IrOpcode::kChangeUint32ToFloat64:
+          case IrOpcode::kFloat64Constant:
+          case IrOpcode::kFloat64SilenceNaN:
+            MACHINE_FLOAT64_BINOP_LIST(LABEL)
+            MACHINE_FLOAT64_UNOP_LIST(LABEL) {
+              representation_vector_[node->id()] =
+                  MachineRepresentation::kFloat64;
+            }
+            break;
+#undef LABEL
+          default:
+            break;
+        }
+      }
+    }
+  }
+
+  Schedule const* const schedule_;
+  Linkage const* const linkage_;
+  ZoneVector<MachineRepresentation> representation_vector_;
+};
+
+class MachineRepresentationChecker {
+ public:
+  MachineRepresentationChecker(Schedule const* const schedule,
+                               MachineRepresentationInferrer const* const typer)
+      : schedule_(schedule), typer_(typer) {}
+
+  void Run() {
+    BasicBlockVector const* blocks = schedule_->all_blocks();
+    for (BasicBlock* block : *blocks) {
+      for (size_t i = 0; i <= block->NodeCount(); ++i) {
+        Node const* node =
+            i < block->NodeCount() ? block->NodeAt(i) : block->control_input();
+        if (node == nullptr) {
+          DCHECK_EQ(block->NodeCount(), i);
+          break;
+        }
+        switch (node->opcode()) {
+          case IrOpcode::kCall:
+          case IrOpcode::kTailCall:
+            CheckCallInputs(node);
+            break;
+          case IrOpcode::kChangeBitToTagged:
+            CHECK_EQ(MachineRepresentation::kBit,
+                     typer_->GetRepresentation(node->InputAt(0)));
+            break;
+          case IrOpcode::kChangeTaggedToBit:
+            CHECK_EQ(MachineRepresentation::kTagged,
+                     typer_->GetRepresentation(node->InputAt(0)));
+            break;
+          case IrOpcode::kRoundInt64ToFloat64:
+          case IrOpcode::kRoundUint64ToFloat64:
+          case IrOpcode::kRoundInt64ToFloat32:
+          case IrOpcode::kRoundUint64ToFloat32:
+          case IrOpcode::kTruncateInt64ToInt32:
+            CheckValueInputForInt64Op(node, 0);
+            break;
+          case IrOpcode::kBitcastWordToTagged:
+          case IrOpcode::kBitcastWordToTaggedSigned:
+            CheckValueInputRepresentationIs(
+                node, 0, MachineType::PointerRepresentation());
+            break;
+          case IrOpcode::kBitcastTaggedToWord:
+            CheckValueInputIsTagged(node, 0);
+            break;
+          case IrOpcode::kTruncateFloat64ToWord32:
+          case IrOpcode::kTruncateFloat64ToUint32:
+          case IrOpcode::kTruncateFloat64ToFloat32:
+          case IrOpcode::kChangeFloat64ToInt32:
+          case IrOpcode::kChangeFloat64ToUint32:
+          case IrOpcode::kRoundFloat64ToInt32:
+          case IrOpcode::kFloat64ExtractLowWord32:
+          case IrOpcode::kFloat64ExtractHighWord32:
+          case IrOpcode::kBitcastFloat64ToInt64:
+            CheckValueInputForFloat64Op(node, 0);
+            break;
+          case IrOpcode::kWord64Equal:
+            CheckValueInputIsTaggedOrPointer(node, 0);
+            CheckValueInputRepresentationIs(
+                node, 1, typer_->GetRepresentation(node->InputAt(0)));
+            break;
+          case IrOpcode::kInt64LessThan:
+          case IrOpcode::kInt64LessThanOrEqual:
+          case IrOpcode::kUint64LessThan:
+          case IrOpcode::kUint64LessThanOrEqual:
+            CheckValueInputForInt64Op(node, 0);
+            CheckValueInputForInt64Op(node, 1);
+            break;
+          case IrOpcode::kInt32x4ExtractLane:
+            CheckValueInputRepresentationIs(node, 0,
+                                            MachineRepresentation::kSimd128);
+            break;
+#define LABEL(opcode) case IrOpcode::k##opcode:
+          case IrOpcode::kChangeInt32ToTagged:
+          case IrOpcode::kChangeUint32ToTagged:
+          case IrOpcode::kChangeInt32ToFloat64:
+          case IrOpcode::kChangeUint32ToFloat64:
+          case IrOpcode::kRoundInt32ToFloat32:
+          case IrOpcode::kRoundUint32ToFloat32:
+          case IrOpcode::kChangeInt32ToInt64:
+          case IrOpcode::kChangeUint32ToUint64:
+            MACHINE_UNOP_32_LIST(LABEL) { CheckValueInputForInt32Op(node, 0); }
+            break;
+          case IrOpcode::kWord32Equal:
+          case IrOpcode::kInt32LessThan:
+          case IrOpcode::kInt32LessThanOrEqual:
+          case IrOpcode::kUint32LessThan:
+          case IrOpcode::kUint32LessThanOrEqual:
+            MACHINE_BINOP_32_LIST(LABEL) {
+              CheckValueInputForInt32Op(node, 0);
+              CheckValueInputForInt32Op(node, 1);
+            }
+            break;
+            MACHINE_BINOP_64_LIST(LABEL) {
+              CheckValueInputForInt64Op(node, 0);
+              CheckValueInputForInt64Op(node, 1);
+            }
+            break;
+          case IrOpcode::kFloat32Equal:
+          case IrOpcode::kFloat32LessThan:
+          case IrOpcode::kFloat32LessThanOrEqual:
+            MACHINE_FLOAT32_BINOP_LIST(LABEL) {
+              CheckValueInputForFloat32Op(node, 0);
+              CheckValueInputForFloat32Op(node, 1);
+            }
+            break;
+          case IrOpcode::kChangeFloat32ToFloat64:
+          case IrOpcode::kTruncateFloat32ToInt32:
+          case IrOpcode::kTruncateFloat32ToUint32:
+          case IrOpcode::kBitcastFloat32ToInt32:
+            MACHINE_FLOAT32_UNOP_LIST(LABEL) {
+              CheckValueInputForFloat32Op(node, 0);
+            }
+            break;
+          case IrOpcode::kFloat64Equal:
+          case IrOpcode::kFloat64LessThan:
+          case IrOpcode::kFloat64LessThanOrEqual:
+            MACHINE_FLOAT64_BINOP_LIST(LABEL) {
+              CheckValueInputForFloat64Op(node, 0);
+              CheckValueInputForFloat64Op(node, 1);
+            }
+            break;
+          case IrOpcode::kFloat64SilenceNaN:
+            MACHINE_FLOAT64_UNOP_LIST(LABEL) {
+              CheckValueInputForFloat64Op(node, 0);
+            }
+            break;
+#undef LABEL
+          case IrOpcode::kParameter:
+          case IrOpcode::kProjection:
+            break;
+          case IrOpcode::kLoad:
+          case IrOpcode::kAtomicLoad:
+            CheckValueInputIsTaggedOrPointer(node, 0);
+            CheckValueInputRepresentationIs(
+                node, 1, MachineType::PointerRepresentation());
+            break;
+          case IrOpcode::kStore:
+            CheckValueInputIsTaggedOrPointer(node, 0);
+            CheckValueInputRepresentationIs(
+                node, 1, MachineType::PointerRepresentation());
+            switch (StoreRepresentationOf(node->op()).representation()) {
+              case MachineRepresentation::kTagged:
+              case MachineRepresentation::kTaggedPointer:
+              case MachineRepresentation::kTaggedSigned:
+                CheckValueInputIsTagged(node, 2);
+                break;
+              default:
+                CheckValueInputRepresentationIs(
+                    node, 2,
+                    StoreRepresentationOf(node->op()).representation());
+            }
+            break;
+          case IrOpcode::kAtomicStore:
+            CheckValueInputIsTaggedOrPointer(node, 0);
+            CheckValueInputRepresentationIs(
+                node, 1, MachineType::PointerRepresentation());
+            switch (AtomicStoreRepresentationOf(node->op())) {
+              case MachineRepresentation::kTagged:
+              case MachineRepresentation::kTaggedPointer:
+              case MachineRepresentation::kTaggedSigned:
+                CheckValueInputIsTagged(node, 2);
+                break;
+              default:
+                CheckValueInputRepresentationIs(
+                    node, 2, AtomicStoreRepresentationOf(node->op()));
+            }
+            break;
+          case IrOpcode::kPhi:
+            switch (typer_->GetRepresentation(node)) {
+              case MachineRepresentation::kTagged:
+              case MachineRepresentation::kTaggedPointer:
+              case MachineRepresentation::kTaggedSigned:
+                for (int i = 0; i < node->op()->ValueInputCount(); ++i) {
+                  CheckValueInputIsTagged(node, i);
+                }
+                break;
+              default:
+                for (int i = 0; i < node->op()->ValueInputCount(); ++i) {
+                  CheckValueInputRepresentationIs(
+                      node, i, typer_->GetRepresentation(node));
+                }
+                break;
+            }
+            break;
+          case IrOpcode::kBranch:
+          case IrOpcode::kSwitch:
+            CheckValueInputForInt32Op(node, 0);
+            break;
+          case IrOpcode::kReturn:
+            // TODO(epertoso): use the linkage to determine which type we
+            // should have here.
+            break;
+          case IrOpcode::kTypedStateValues:
+          case IrOpcode::kFrameState:
+            break;
+          default:
+            if (node->op()->ValueInputCount() != 0) {
+              std::stringstream str;
+              str << "Node #" << node->id() << ":" << *node->op()
+                  << " in the machine graph is not being checked.";
+              FATAL(str.str().c_str());
+            }
+            break;
+        }
+      }
+    }
+  }
+
+ private:
+  void CheckValueInputRepresentationIs(Node const* node, int index,
+                                       MachineRepresentation representation) {
+    Node const* input = node->InputAt(index);
+    if (typer_->GetRepresentation(input) != representation) {
+      std::stringstream str;
+      str << "TypeError: node #" << node->id() << ":" << *node->op()
+          << " uses node #" << input->id() << ":" << *input->op()
+          << " which doesn't have a " << MachineReprToString(representation)
+          << " representation.";
+      FATAL(str.str().c_str());
+    }
+  }
+
+  void CheckValueInputIsTagged(Node const* node, int index) {
+    Node const* input = node->InputAt(index);
+    switch (typer_->GetRepresentation(input)) {
+      case MachineRepresentation::kTagged:
+      case MachineRepresentation::kTaggedPointer:
+      case MachineRepresentation::kTaggedSigned:
+        return;
+      default:
+        break;
+    }
+    std::ostringstream str;
+    str << "TypeError: node #" << node->id() << ":" << *node->op()
+        << " uses node #" << input->id() << ":" << *input->op()
+        << " which doesn't have a tagged representation.";
+    FATAL(str.str().c_str());
+  }
+
+  void CheckValueInputIsTaggedOrPointer(Node const* node, int index) {
+    Node const* input = node->InputAt(index);
+    switch (typer_->GetRepresentation(input)) {
+      case MachineRepresentation::kTagged:
+      case MachineRepresentation::kTaggedPointer:
+      case MachineRepresentation::kTaggedSigned:
+        return;
+      default:
+        break;
+    }
+    if (typer_->GetRepresentation(input) !=
+        MachineType::PointerRepresentation()) {
+      std::ostringstream str;
+      str << "TypeError: node #" << node->id() << ":" << *node->op()
+          << " uses node #" << input->id() << ":" << *input->op()
+          << " which doesn't have a tagged or pointer representation.";
+      FATAL(str.str().c_str());
+    }
+  }
+
+  void CheckValueInputForInt32Op(Node const* node, int index) {
+    Node const* input = node->InputAt(index);
+    switch (typer_->GetRepresentation(input)) {
+      case MachineRepresentation::kBit:
+      case MachineRepresentation::kWord8:
+      case MachineRepresentation::kWord16:
+      case MachineRepresentation::kWord32:
+        return;
+      case MachineRepresentation::kNone: {
+        std::ostringstream str;
+        str << "TypeError: node #" << input->id() << ":" << *input->op()
+            << " is untyped.";
+        FATAL(str.str().c_str());
+        break;
+      }
+      default:
+        break;
+    }
+    std::ostringstream str;
+    str << "TypeError: node #" << node->id() << ":" << *node->op()
+        << " uses node #" << input->id() << ":" << *input->op()
+        << " which doesn't have an int32-compatible representation.";
+    FATAL(str.str().c_str());
+  }
+
+  void CheckValueInputForInt64Op(Node const* node, int index) {
+    Node const* input = node->InputAt(index);
+    switch (typer_->GetRepresentation(input)) {
+      case MachineRepresentation::kWord64:
+        return;
+      case MachineRepresentation::kNone: {
+        std::ostringstream str;
+        str << "TypeError: node #" << input->id() << ":" << *input->op()
+            << " is untyped.";
+        FATAL(str.str().c_str());
+        break;
+      }
+
+      default:
+        break;
+    }
+    std::ostringstream str;
+    str << "TypeError: node #" << node->id() << ":" << *node->op()
+        << " uses node #" << input->id() << ":" << *input->op()
+        << " which doesn't have a kWord64 representation.";
+    FATAL(str.str().c_str());
+  }
+
+  void CheckValueInputForFloat32Op(Node const* node, int index) {
+    Node const* input = node->InputAt(index);
+    if (MachineRepresentation::kFloat32 == typer_->GetRepresentation(input)) {
+      return;
+    }
+    std::ostringstream str;
+    str << "TypeError: node #" << node->id() << ":" << *node->op()
+        << " uses node #" << input->id() << ":" << *input->op()
+        << " which doesn't have a kFloat32 representation.";
+    FATAL(str.str().c_str());
+  }
+
+  void CheckValueInputForFloat64Op(Node const* node, int index) {
+    Node const* input = node->InputAt(index);
+    if (MachineRepresentation::kFloat64 == typer_->GetRepresentation(input)) {
+      return;
+    }
+    std::ostringstream str;
+    str << "TypeError: node #" << node->id() << ":" << *node->op()
+        << " uses node #" << input->id() << ":" << *input->op()
+        << " which doesn't have a kFloat64 representation.";
+    FATAL(str.str().c_str());
+  }
+
+  void CheckCallInputs(Node const* node) {
+    CallDescriptor const* desc = CallDescriptorOf(node->op());
+    std::ostringstream str;
+    bool should_log_error = false;
+    for (size_t i = 0; i < desc->InputCount(); ++i) {
+      Node const* input = node->InputAt(static_cast<int>(i));
+      MachineRepresentation const input_type = typer_->GetRepresentation(input);
+      MachineRepresentation const expected_input_type =
+          desc->GetInputType(i).representation();
+      if (!IsCompatible(expected_input_type, input_type)) {
+        if (!should_log_error) {
+          should_log_error = true;
+          str << "TypeError: node #" << node->id() << ":" << *node->op()
+              << " has wrong type for:" << std::endl;
+        } else {
+          str << std::endl;
+        }
+        str << " * input " << i << " (" << input->id() << ":" << *input->op()
+            << ") doesn't have a " << MachineReprToString(expected_input_type)
+            << " representation.";
+      }
+    }
+    if (should_log_error) {
+      FATAL(str.str().c_str());
+    }
+  }
+
+  bool Intersect(MachineRepresentation lhs, MachineRepresentation rhs) {
+    return (GetRepresentationProperties(lhs) &
+            GetRepresentationProperties(rhs)) != 0;
+  }
+
+  enum RepresentationProperties { kIsPointer = 1, kIsTagged = 2 };
+
+  int GetRepresentationProperties(MachineRepresentation representation) {
+    switch (representation) {
+      case MachineRepresentation::kTagged:
+      case MachineRepresentation::kTaggedPointer:
+        return kIsPointer | kIsTagged;
+      case MachineRepresentation::kTaggedSigned:
+        return kIsTagged;
+      case MachineRepresentation::kWord32:
+        return MachineRepresentation::kWord32 ==
+                       MachineType::PointerRepresentation()
+                   ? kIsPointer
+                   : 0;
+      case MachineRepresentation::kWord64:
+        return MachineRepresentation::kWord64 ==
+                       MachineType::PointerRepresentation()
+                   ? kIsPointer
+                   : 0;
+      default:
+        return 0;
+    }
+  }
+
+  bool IsCompatible(MachineRepresentation expected,
+                    MachineRepresentation actual) {
+    switch (expected) {
+      case MachineRepresentation::kTagged:
+        return (actual == MachineRepresentation::kTagged ||
+                actual == MachineRepresentation::kTaggedSigned ||
+                actual == MachineRepresentation::kTaggedPointer);
+      case MachineRepresentation::kTaggedSigned:
+      case MachineRepresentation::kTaggedPointer:
+      case MachineRepresentation::kFloat32:
+      case MachineRepresentation::kFloat64:
+      case MachineRepresentation::kSimd128:
+      case MachineRepresentation::kBit:
+      case MachineRepresentation::kWord8:
+      case MachineRepresentation::kWord16:
+      case MachineRepresentation::kWord64:
+        return expected == actual;
+        break;
+      case MachineRepresentation::kWord32:
+        return (actual == MachineRepresentation::kBit ||
+                actual == MachineRepresentation::kWord8 ||
+                actual == MachineRepresentation::kWord16 ||
+                actual == MachineRepresentation::kWord32);
+      case MachineRepresentation::kNone:
+        UNREACHABLE();
+    }
+    return false;
+  }
+
+  Schedule const* const schedule_;
+  MachineRepresentationInferrer const* const typer_;
+};
+
+}  // namespace
+
+void MachineGraphVerifier::Run(Graph* graph, Schedule const* const schedule,
+                               Linkage* linkage, Zone* temp_zone) {
+  MachineRepresentationInferrer representation_inferrer(schedule, graph,
+                                                        linkage, temp_zone);
+  MachineRepresentationChecker checker(schedule, &representation_inferrer);
+  checker.Run();
+}
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
diff --git a/src/compiler/machine-graph-verifier.h b/src/compiler/machine-graph-verifier.h
new file mode 100644
index 0000000..b7d7b61
--- /dev/null
+++ b/src/compiler/machine-graph-verifier.h
@@ -0,0 +1,31 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_MACHINE_GRAPH_VERIFIER_H_
+#define V8_COMPILER_MACHINE_GRAPH_VERIFIER_H_
+
+#include "src/base/macros.h"
+
+namespace v8 {
+namespace internal {
+class Zone;
+namespace compiler {
+
+class Graph;
+class Linkage;
+class Schedule;
+
+// Verifies properties of a scheduled graph, such as that the nodes' inputs are
+// of the correct type.
+class MachineGraphVerifier {
+ public:
+  static void Run(Graph* graph, Schedule const* const schedule,
+                  Linkage* linkage, Zone* temp_zone);
+};
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_COMPILER_MACHINE_GRAPH_VERIFIER_H_
diff --git a/src/compiler/machine-operator-reducer.cc b/src/compiler/machine-operator-reducer.cc
index 99044aa..0ad20f0 100644
--- a/src/compiler/machine-operator-reducer.cc
+++ b/src/compiler/machine-operator-reducer.cc
@@ -150,21 +150,8 @@
       return ReduceWord32And(node);
     case IrOpcode::kWord32Or:
       return ReduceWord32Or(node);
-    case IrOpcode::kWord32Xor: {
-      Int32BinopMatcher m(node);
-      if (m.right().Is(0)) return Replace(m.left().node());  // x ^ 0 => x
-      if (m.IsFoldable()) {                                  // K ^ K => K
-        return ReplaceInt32(m.left().Value() ^ m.right().Value());
-      }
-      if (m.LeftEqualsRight()) return ReplaceInt32(0);  // x ^ x => 0
-      if (m.left().IsWord32Xor() && m.right().Is(-1)) {
-        Int32BinopMatcher mleft(m.left().node());
-        if (mleft.right().Is(-1)) {  // (x ^ -1) ^ -1 => x
-          return Replace(mleft.left().node());
-        }
-      }
-      break;
-    }
+    case IrOpcode::kWord32Xor:
+      return ReduceWord32Xor(node);
     case IrOpcode::kWord32Shl:
       return ReduceWord32Shl(node);
     case IrOpcode::kWord64Shl:
@@ -418,6 +405,11 @@
       if (m.IsFoldable()) {  // K * K => K
         return ReplaceFloat64(m.left().Value() * m.right().Value());
       }
+      if (m.right().Is(2)) {  // x * 2.0 => x + x
+        node->ReplaceInput(1, m.left().node());
+        NodeProperties::ChangeOp(node, machine()->Float64Add());
+        return Changed(node);
+      }
       break;
     }
     case IrOpcode::kFloat64Div: {
@@ -432,6 +424,19 @@
       if (m.IsFoldable()) {  // K / K => K
         return ReplaceFloat64(m.left().Value() / m.right().Value());
       }
+      if (m.right().Is(-1)) {  // x / -1.0 => -x
+        node->RemoveInput(1);
+        NodeProperties::ChangeOp(node, machine()->Float64Neg());
+        return Changed(node);
+      }
+      if (m.right().IsNormal() && m.right().IsPositiveOrNegativePowerOf2()) {
+        // All reciprocals of non-denormal powers of two can be represented
+        // exactly, so division by power of two can be reduced to
+        // multiplication by reciprocal, with the same result.
+        node->ReplaceInput(1, Float64Constant(1.0 / m.right().Value()));
+        NodeProperties::ChangeOp(node, machine()->Float64Mul());
+        return Changed(node);
+      }
       break;
     }
     case IrOpcode::kFloat64Mod: {
@@ -541,8 +546,9 @@
     }
     case IrOpcode::kFloat64Pow: {
       Float64BinopMatcher m(node);
-      // TODO(bmeurer): Constant fold once we have a unified pow implementation.
-      if (m.right().Is(0.0)) {  // x ** +-0.0 => 1.0
+      if (m.IsFoldable()) {
+        return ReplaceFloat64(Pow(m.left().Value(), m.right().Value()));
+      } else if (m.right().Is(0.0)) {  // x ** +-0.0 => 1.0
         return ReplaceFloat64(1.0);
       } else if (m.right().Is(-2.0)) {  // x ** -2.0 => 1 / (x * x)
         node->ReplaceInput(0, Float64Constant(1.0));
@@ -1221,22 +1227,17 @@
   return NoChange();
 }
 
-
-Reduction MachineOperatorReducer::ReduceWord32Or(Node* node) {
-  DCHECK_EQ(IrOpcode::kWord32Or, node->opcode());
+Reduction MachineOperatorReducer::TryMatchWord32Ror(Node* node) {
+  DCHECK(IrOpcode::kWord32Or == node->opcode() ||
+         IrOpcode::kWord32Xor == node->opcode());
   Int32BinopMatcher m(node);
-  if (m.right().Is(0)) return Replace(m.left().node());    // x | 0  => x
-  if (m.right().Is(-1)) return Replace(m.right().node());  // x | -1 => -1
-  if (m.IsFoldable()) {                                    // K | K  => K
-    return ReplaceInt32(m.left().Value() | m.right().Value());
-  }
-  if (m.LeftEqualsRight()) return Replace(m.left().node());  // x | x => x
-
   Node* shl = nullptr;
   Node* shr = nullptr;
-  // Recognize rotation, we are matching either:
+  // Recognize rotation, we are matching:
   //  * x << y | x >>> (32 - y) => x ror (32 - y), i.e  x rol y
   //  * x << (32 - y) | x >>> y => x ror y
+  //  * x << y ^ x >>> (32 - y) => x ror (32 - y), i.e. x rol y
+  //  * x << (32 - y) ^ x >>> y => x ror y
   // as well as their commuted form.
   if (m.left().IsWord32Shl() && m.right().IsWord32Shr()) {
     shl = m.left().node();
@@ -1278,6 +1279,36 @@
   return Changed(node);
 }
 
+Reduction MachineOperatorReducer::ReduceWord32Or(Node* node) {
+  DCHECK_EQ(IrOpcode::kWord32Or, node->opcode());
+  Int32BinopMatcher m(node);
+  if (m.right().Is(0)) return Replace(m.left().node());    // x | 0  => x
+  if (m.right().Is(-1)) return Replace(m.right().node());  // x | -1 => -1
+  if (m.IsFoldable()) {                                    // K | K  => K
+    return ReplaceInt32(m.left().Value() | m.right().Value());
+  }
+  if (m.LeftEqualsRight()) return Replace(m.left().node());  // x | x => x
+
+  return TryMatchWord32Ror(node);
+}
+
+Reduction MachineOperatorReducer::ReduceWord32Xor(Node* node) {
+  DCHECK_EQ(IrOpcode::kWord32Xor, node->opcode());
+  Int32BinopMatcher m(node);
+  if (m.right().Is(0)) return Replace(m.left().node());  // x ^ 0 => x
+  if (m.IsFoldable()) {                                  // K ^ K => K
+    return ReplaceInt32(m.left().Value() ^ m.right().Value());
+  }
+  if (m.LeftEqualsRight()) return ReplaceInt32(0);  // x ^ x => 0
+  if (m.left().IsWord32Xor() && m.right().Is(-1)) {
+    Int32BinopMatcher mleft(m.left().node());
+    if (mleft.right().Is(-1)) {  // (x ^ -1) ^ -1 => x
+      return Replace(mleft.left().node());
+    }
+  }
+
+  return TryMatchWord32Ror(node);
+}
 
 Reduction MachineOperatorReducer::ReduceFloat64InsertLowWord32(Node* node) {
   DCHECK_EQ(IrOpcode::kFloat64InsertLowWord32, node->opcode());
diff --git a/src/compiler/machine-operator-reducer.h b/src/compiler/machine-operator-reducer.h
index 167bf7e..574f45c 100644
--- a/src/compiler/machine-operator-reducer.h
+++ b/src/compiler/machine-operator-reducer.h
@@ -87,7 +87,9 @@
   Reduction ReduceWord32Sar(Node* node);
   Reduction ReduceWord64Sar(Node* node);
   Reduction ReduceWord32And(Node* node);
+  Reduction TryMatchWord32Ror(Node* node);
   Reduction ReduceWord32Or(Node* node);
+  Reduction ReduceWord32Xor(Node* node);
   Reduction ReduceFloat64InsertLowWord32(Node* node);
   Reduction ReduceFloat64InsertHighWord32(Node* node);
   Reduction ReduceFloat64Compare(Node* node);
diff --git a/src/compiler/machine-operator.cc b/src/compiler/machine-operator.cc
index 43c6202..e36a61e 100644
--- a/src/compiler/machine-operator.cc
+++ b/src/compiler/machine-operator.cc
@@ -36,6 +36,7 @@
 
 LoadRepresentation LoadRepresentationOf(Operator const* op) {
   DCHECK(IrOpcode::kLoad == op->opcode() ||
+         IrOpcode::kProtectedLoad == op->opcode() ||
          IrOpcode::kAtomicLoad == op->opcode());
   return OpParameter<LoadRepresentation>(op);
 }
@@ -78,315 +79,317 @@
   return OpParameter<MachineRepresentation>(op);
 }
 
-#define PURE_OP_LIST(V)                                                      \
-  V(Word32And, Operator::kAssociative | Operator::kCommutative, 2, 0, 1)     \
-  V(Word32Or, Operator::kAssociative | Operator::kCommutative, 2, 0, 1)      \
-  V(Word32Xor, Operator::kAssociative | Operator::kCommutative, 2, 0, 1)     \
-  V(Word32Shl, Operator::kNoProperties, 2, 0, 1)                             \
-  V(Word32Shr, Operator::kNoProperties, 2, 0, 1)                             \
-  V(Word32Sar, Operator::kNoProperties, 2, 0, 1)                             \
-  V(Word32Ror, Operator::kNoProperties, 2, 0, 1)                             \
-  V(Word32Equal, Operator::kCommutative, 2, 0, 1)                            \
-  V(Word32Clz, Operator::kNoProperties, 1, 0, 1)                             \
-  V(Word64And, Operator::kAssociative | Operator::kCommutative, 2, 0, 1)     \
-  V(Word64Or, Operator::kAssociative | Operator::kCommutative, 2, 0, 1)      \
-  V(Word64Xor, Operator::kAssociative | Operator::kCommutative, 2, 0, 1)     \
-  V(Word64Shl, Operator::kNoProperties, 2, 0, 1)                             \
-  V(Word64Shr, Operator::kNoProperties, 2, 0, 1)                             \
-  V(Word64Sar, Operator::kNoProperties, 2, 0, 1)                             \
-  V(Word64Ror, Operator::kNoProperties, 2, 0, 1)                             \
-  V(Word64Clz, Operator::kNoProperties, 1, 0, 1)                             \
-  V(Word64Equal, Operator::kCommutative, 2, 0, 1)                            \
-  V(Int32Add, Operator::kAssociative | Operator::kCommutative, 2, 0, 1)      \
-  V(Int32Sub, Operator::kNoProperties, 2, 0, 1)                              \
-  V(Int32Mul, Operator::kAssociative | Operator::kCommutative, 2, 0, 1)      \
-  V(Int32MulHigh, Operator::kAssociative | Operator::kCommutative, 2, 0, 1)  \
-  V(Int32Div, Operator::kNoProperties, 2, 1, 1)                              \
-  V(Int32Mod, Operator::kNoProperties, 2, 1, 1)                              \
-  V(Int32LessThan, Operator::kNoProperties, 2, 0, 1)                         \
-  V(Int32LessThanOrEqual, Operator::kNoProperties, 2, 0, 1)                  \
-  V(Uint32Div, Operator::kNoProperties, 2, 1, 1)                             \
-  V(Uint32LessThan, Operator::kNoProperties, 2, 0, 1)                        \
-  V(Uint32LessThanOrEqual, Operator::kNoProperties, 2, 0, 1)                 \
-  V(Uint32Mod, Operator::kNoProperties, 2, 1, 1)                             \
-  V(Uint32MulHigh, Operator::kAssociative | Operator::kCommutative, 2, 0, 1) \
-  V(Int64Add, Operator::kAssociative | Operator::kCommutative, 2, 0, 1)      \
-  V(Int64Sub, Operator::kNoProperties, 2, 0, 1)                              \
-  V(Int64Mul, Operator::kAssociative | Operator::kCommutative, 2, 0, 1)      \
-  V(Int64Div, Operator::kNoProperties, 2, 1, 1)                              \
-  V(Int64Mod, Operator::kNoProperties, 2, 1, 1)                              \
-  V(Int64LessThan, Operator::kNoProperties, 2, 0, 1)                         \
-  V(Int64LessThanOrEqual, Operator::kNoProperties, 2, 0, 1)                  \
-  V(Uint64Div, Operator::kNoProperties, 2, 1, 1)                             \
-  V(Uint64Mod, Operator::kNoProperties, 2, 1, 1)                             \
-  V(Uint64LessThan, Operator::kNoProperties, 2, 0, 1)                        \
-  V(Uint64LessThanOrEqual, Operator::kNoProperties, 2, 0, 1)                 \
-  V(BitcastWordToTagged, Operator::kNoProperties, 1, 0, 1)                   \
-  V(TruncateFloat64ToWord32, Operator::kNoProperties, 1, 0, 1)               \
-  V(ChangeFloat32ToFloat64, Operator::kNoProperties, 1, 0, 1)                \
-  V(ChangeFloat64ToInt32, Operator::kNoProperties, 1, 0, 1)                  \
-  V(ChangeFloat64ToUint32, Operator::kNoProperties, 1, 0, 1)                 \
-  V(TruncateFloat64ToUint32, Operator::kNoProperties, 1, 0, 1)               \
-  V(TruncateFloat32ToInt32, Operator::kNoProperties, 1, 0, 1)                \
-  V(TruncateFloat32ToUint32, Operator::kNoProperties, 1, 0, 1)               \
-  V(TryTruncateFloat32ToInt64, Operator::kNoProperties, 1, 0, 2)             \
-  V(TryTruncateFloat64ToInt64, Operator::kNoProperties, 1, 0, 2)             \
-  V(TryTruncateFloat32ToUint64, Operator::kNoProperties, 1, 0, 2)            \
-  V(TryTruncateFloat64ToUint64, Operator::kNoProperties, 1, 0, 2)            \
-  V(ChangeInt32ToFloat64, Operator::kNoProperties, 1, 0, 1)                  \
-  V(Float64SilenceNaN, Operator::kNoProperties, 1, 0, 1)                     \
-  V(RoundFloat64ToInt32, Operator::kNoProperties, 1, 0, 1)                   \
-  V(RoundInt32ToFloat32, Operator::kNoProperties, 1, 0, 1)                   \
-  V(RoundInt64ToFloat32, Operator::kNoProperties, 1, 0, 1)                   \
-  V(RoundInt64ToFloat64, Operator::kNoProperties, 1, 0, 1)                   \
-  V(RoundUint32ToFloat32, Operator::kNoProperties, 1, 0, 1)                  \
-  V(RoundUint64ToFloat32, Operator::kNoProperties, 1, 0, 1)                  \
-  V(RoundUint64ToFloat64, Operator::kNoProperties, 1, 0, 1)                  \
-  V(ChangeInt32ToInt64, Operator::kNoProperties, 1, 0, 1)                    \
-  V(ChangeUint32ToFloat64, Operator::kNoProperties, 1, 0, 1)                 \
-  V(ChangeUint32ToUint64, Operator::kNoProperties, 1, 0, 1)                  \
-  V(ImpossibleToWord32, Operator::kNoProperties, 1, 0, 1)                    \
-  V(ImpossibleToWord64, Operator::kNoProperties, 1, 0, 1)                    \
-  V(ImpossibleToFloat32, Operator::kNoProperties, 1, 0, 1)                   \
-  V(ImpossibleToFloat64, Operator::kNoProperties, 1, 0, 1)                   \
-  V(ImpossibleToTagged, Operator::kNoProperties, 1, 0, 1)                    \
-  V(ImpossibleToBit, Operator::kNoProperties, 1, 0, 1)                       \
-  V(TruncateFloat64ToFloat32, Operator::kNoProperties, 1, 0, 1)              \
-  V(TruncateInt64ToInt32, Operator::kNoProperties, 1, 0, 1)                  \
-  V(BitcastFloat32ToInt32, Operator::kNoProperties, 1, 0, 1)                 \
-  V(BitcastFloat64ToInt64, Operator::kNoProperties, 1, 0, 1)                 \
-  V(BitcastInt32ToFloat32, Operator::kNoProperties, 1, 0, 1)                 \
-  V(BitcastInt64ToFloat64, Operator::kNoProperties, 1, 0, 1)                 \
-  V(Float32Abs, Operator::kNoProperties, 1, 0, 1)                            \
-  V(Float32Add, Operator::kCommutative, 2, 0, 1)                             \
-  V(Float32Sub, Operator::kNoProperties, 2, 0, 1)                            \
-  V(Float32Mul, Operator::kCommutative, 2, 0, 1)                             \
-  V(Float32Div, Operator::kNoProperties, 2, 0, 1)                            \
-  V(Float32Neg, Operator::kNoProperties, 1, 0, 1)                            \
-  V(Float32Sqrt, Operator::kNoProperties, 1, 0, 1)                           \
-  V(Float32Max, Operator::kAssociative | Operator::kCommutative, 2, 0, 1)    \
-  V(Float32Min, Operator::kAssociative | Operator::kCommutative, 2, 0, 1)    \
-  V(Float64Abs, Operator::kNoProperties, 1, 0, 1)                            \
-  V(Float64Acos, Operator::kNoProperties, 1, 0, 1)                           \
-  V(Float64Acosh, Operator::kNoProperties, 1, 0, 1)                          \
-  V(Float64Asin, Operator::kNoProperties, 1, 0, 1)                           \
-  V(Float64Asinh, Operator::kNoProperties, 1, 0, 1)                          \
-  V(Float64Atan, Operator::kNoProperties, 1, 0, 1)                           \
-  V(Float64Atan2, Operator::kNoProperties, 2, 0, 1)                          \
-  V(Float64Atanh, Operator::kNoProperties, 1, 0, 1)                          \
-  V(Float64Cbrt, Operator::kNoProperties, 1, 0, 1)                           \
-  V(Float64Cos, Operator::kNoProperties, 1, 0, 1)                            \
-  V(Float64Cosh, Operator::kNoProperties, 1, 0, 1)                           \
-  V(Float64Exp, Operator::kNoProperties, 1, 0, 1)                            \
-  V(Float64Expm1, Operator::kNoProperties, 1, 0, 1)                          \
-  V(Float64Log, Operator::kNoProperties, 1, 0, 1)                            \
-  V(Float64Log1p, Operator::kNoProperties, 1, 0, 1)                          \
-  V(Float64Log2, Operator::kNoProperties, 1, 0, 1)                           \
-  V(Float64Log10, Operator::kNoProperties, 1, 0, 1)                          \
-  V(Float64Max, Operator::kAssociative | Operator::kCommutative, 2, 0, 1)    \
-  V(Float64Min, Operator::kAssociative | Operator::kCommutative, 2, 0, 1)    \
-  V(Float64Neg, Operator::kNoProperties, 1, 0, 1)                            \
-  V(Float64Add, Operator::kCommutative, 2, 0, 1)                             \
-  V(Float64Sub, Operator::kNoProperties, 2, 0, 1)                            \
-  V(Float64Mul, Operator::kCommutative, 2, 0, 1)                             \
-  V(Float64Div, Operator::kNoProperties, 2, 0, 1)                            \
-  V(Float64Mod, Operator::kNoProperties, 2, 0, 1)                            \
-  V(Float64Pow, Operator::kNoProperties, 2, 0, 1)                            \
-  V(Float64Sin, Operator::kNoProperties, 1, 0, 1)                            \
-  V(Float64Sinh, Operator::kNoProperties, 1, 0, 1)                           \
-  V(Float64Sqrt, Operator::kNoProperties, 1, 0, 1)                           \
-  V(Float64Tan, Operator::kNoProperties, 1, 0, 1)                            \
-  V(Float64Tanh, Operator::kNoProperties, 1, 0, 1)                           \
-  V(Float32Equal, Operator::kCommutative, 2, 0, 1)                           \
-  V(Float32LessThan, Operator::kNoProperties, 2, 0, 1)                       \
-  V(Float32LessThanOrEqual, Operator::kNoProperties, 2, 0, 1)                \
-  V(Float64Equal, Operator::kCommutative, 2, 0, 1)                           \
-  V(Float64LessThan, Operator::kNoProperties, 2, 0, 1)                       \
-  V(Float64LessThanOrEqual, Operator::kNoProperties, 2, 0, 1)                \
-  V(Float64ExtractLowWord32, Operator::kNoProperties, 1, 0, 1)               \
-  V(Float64ExtractHighWord32, Operator::kNoProperties, 1, 0, 1)              \
-  V(Float64InsertLowWord32, Operator::kNoProperties, 2, 0, 1)                \
-  V(Float64InsertHighWord32, Operator::kNoProperties, 2, 0, 1)               \
-  V(LoadStackPointer, Operator::kNoProperties, 0, 0, 1)                      \
-  V(LoadFramePointer, Operator::kNoProperties, 0, 0, 1)                      \
-  V(LoadParentFramePointer, Operator::kNoProperties, 0, 0, 1)                \
-  V(Int32PairAdd, Operator::kNoProperties, 4, 0, 2)                          \
-  V(Int32PairSub, Operator::kNoProperties, 4, 0, 2)                          \
-  V(Int32PairMul, Operator::kNoProperties, 4, 0, 2)                          \
-  V(Word32PairShl, Operator::kNoProperties, 3, 0, 2)                         \
-  V(Word32PairShr, Operator::kNoProperties, 3, 0, 2)                         \
-  V(Word32PairSar, Operator::kNoProperties, 3, 0, 2)                         \
-  V(CreateFloat32x4, Operator::kNoProperties, 4, 0, 1)                       \
-  V(Float32x4ExtractLane, Operator::kNoProperties, 2, 0, 1)                  \
-  V(Float32x4ReplaceLane, Operator::kNoProperties, 3, 0, 1)                  \
-  V(Float32x4Abs, Operator::kNoProperties, 1, 0, 1)                          \
-  V(Float32x4Neg, Operator::kNoProperties, 1, 0, 1)                          \
-  V(Float32x4Sqrt, Operator::kNoProperties, 1, 0, 1)                         \
-  V(Float32x4RecipApprox, Operator::kNoProperties, 1, 0, 1)                  \
-  V(Float32x4RecipSqrtApprox, Operator::kNoProperties, 1, 0, 1)              \
-  V(Float32x4Add, Operator::kCommutative, 2, 0, 1)                           \
-  V(Float32x4Sub, Operator::kNoProperties, 2, 0, 1)                          \
-  V(Float32x4Mul, Operator::kCommutative, 2, 0, 1)                           \
-  V(Float32x4Div, Operator::kNoProperties, 2, 0, 1)                          \
-  V(Float32x4Min, Operator::kCommutative, 2, 0, 1)                           \
-  V(Float32x4Max, Operator::kCommutative, 2, 0, 1)                           \
-  V(Float32x4MinNum, Operator::kCommutative, 2, 0, 1)                        \
-  V(Float32x4MaxNum, Operator::kCommutative, 2, 0, 1)                        \
-  V(Float32x4Equal, Operator::kCommutative, 2, 0, 1)                         \
-  V(Float32x4NotEqual, Operator::kCommutative, 2, 0, 1)                      \
-  V(Float32x4LessThan, Operator::kNoProperties, 2, 0, 1)                     \
-  V(Float32x4LessThanOrEqual, Operator::kNoProperties, 2, 0, 1)              \
-  V(Float32x4GreaterThan, Operator::kNoProperties, 2, 0, 1)                  \
-  V(Float32x4GreaterThanOrEqual, Operator::kNoProperties, 2, 0, 1)           \
-  V(Float32x4Select, Operator::kNoProperties, 3, 0, 1)                       \
-  V(Float32x4Swizzle, Operator::kNoProperties, 5, 0, 1)                      \
-  V(Float32x4Shuffle, Operator::kNoProperties, 6, 0, 1)                      \
-  V(Float32x4FromInt32x4, Operator::kNoProperties, 1, 0, 1)                  \
-  V(Float32x4FromUint32x4, Operator::kNoProperties, 1, 0, 1)                 \
-  V(CreateInt32x4, Operator::kNoProperties, 4, 0, 1)                         \
-  V(Int32x4ExtractLane, Operator::kNoProperties, 2, 0, 1)                    \
-  V(Int32x4ReplaceLane, Operator::kNoProperties, 3, 0, 1)                    \
-  V(Int32x4Neg, Operator::kNoProperties, 1, 0, 1)                            \
-  V(Int32x4Add, Operator::kCommutative, 2, 0, 1)                             \
-  V(Int32x4Sub, Operator::kNoProperties, 2, 0, 1)                            \
-  V(Int32x4Mul, Operator::kCommutative, 2, 0, 1)                             \
-  V(Int32x4Min, Operator::kCommutative, 2, 0, 1)                             \
-  V(Int32x4Max, Operator::kCommutative, 2, 0, 1)                             \
-  V(Int32x4ShiftLeftByScalar, Operator::kNoProperties, 2, 0, 1)              \
-  V(Int32x4ShiftRightByScalar, Operator::kNoProperties, 2, 0, 1)             \
-  V(Int32x4Equal, Operator::kCommutative, 2, 0, 1)                           \
-  V(Int32x4NotEqual, Operator::kCommutative, 2, 0, 1)                        \
-  V(Int32x4LessThan, Operator::kNoProperties, 2, 0, 1)                       \
-  V(Int32x4LessThanOrEqual, Operator::kNoProperties, 2, 0, 1)                \
-  V(Int32x4GreaterThan, Operator::kNoProperties, 2, 0, 1)                    \
-  V(Int32x4GreaterThanOrEqual, Operator::kNoProperties, 2, 0, 1)             \
-  V(Int32x4Select, Operator::kNoProperties, 3, 0, 1)                         \
-  V(Int32x4Swizzle, Operator::kNoProperties, 5, 0, 1)                        \
-  V(Int32x4Shuffle, Operator::kNoProperties, 6, 0, 1)                        \
-  V(Int32x4FromFloat32x4, Operator::kNoProperties, 1, 0, 1)                  \
-  V(Uint32x4Min, Operator::kCommutative, 2, 0, 1)                            \
-  V(Uint32x4Max, Operator::kCommutative, 2, 0, 1)                            \
-  V(Uint32x4ShiftLeftByScalar, Operator::kNoProperties, 2, 0, 1)             \
-  V(Uint32x4ShiftRightByScalar, Operator::kNoProperties, 2, 0, 1)            \
-  V(Uint32x4LessThan, Operator::kNoProperties, 2, 0, 1)                      \
-  V(Uint32x4LessThanOrEqual, Operator::kNoProperties, 2, 0, 1)               \
-  V(Uint32x4GreaterThan, Operator::kNoProperties, 2, 0, 1)                   \
-  V(Uint32x4GreaterThanOrEqual, Operator::kNoProperties, 2, 0, 1)            \
-  V(Uint32x4FromFloat32x4, Operator::kNoProperties, 1, 0, 1)                 \
-  V(CreateBool32x4, Operator::kNoProperties, 4, 0, 1)                        \
-  V(Bool32x4ExtractLane, Operator::kNoProperties, 2, 0, 1)                   \
-  V(Bool32x4ReplaceLane, Operator::kNoProperties, 3, 0, 1)                   \
-  V(Bool32x4And, Operator::kAssociative | Operator::kCommutative, 2, 0, 1)   \
-  V(Bool32x4Or, Operator::kAssociative | Operator::kCommutative, 2, 0, 1)    \
-  V(Bool32x4Xor, Operator::kAssociative | Operator::kCommutative, 2, 0, 1)   \
-  V(Bool32x4Not, Operator::kNoProperties, 1, 0, 1)                           \
-  V(Bool32x4AnyTrue, Operator::kNoProperties, 1, 0, 1)                       \
-  V(Bool32x4AllTrue, Operator::kNoProperties, 1, 0, 1)                       \
-  V(Bool32x4Swizzle, Operator::kNoProperties, 5, 0, 1)                       \
-  V(Bool32x4Shuffle, Operator::kNoProperties, 6, 0, 1)                       \
-  V(Bool32x4Equal, Operator::kCommutative, 2, 0, 1)                          \
-  V(Bool32x4NotEqual, Operator::kCommutative, 2, 0, 1)                       \
-  V(CreateInt16x8, Operator::kNoProperties, 8, 0, 1)                         \
-  V(Int16x8ExtractLane, Operator::kNoProperties, 2, 0, 1)                    \
-  V(Int16x8ReplaceLane, Operator::kNoProperties, 3, 0, 1)                    \
-  V(Int16x8Neg, Operator::kNoProperties, 1, 0, 1)                            \
-  V(Int16x8Add, Operator::kCommutative, 2, 0, 1)                             \
-  V(Int16x8AddSaturate, Operator::kCommutative, 2, 0, 1)                     \
-  V(Int16x8Sub, Operator::kNoProperties, 2, 0, 1)                            \
-  V(Int16x8SubSaturate, Operator::kNoProperties, 2, 0, 1)                    \
-  V(Int16x8Mul, Operator::kCommutative, 2, 0, 1)                             \
-  V(Int16x8Min, Operator::kCommutative, 2, 0, 1)                             \
-  V(Int16x8Max, Operator::kCommutative, 2, 0, 1)                             \
-  V(Int16x8ShiftLeftByScalar, Operator::kNoProperties, 2, 0, 1)              \
-  V(Int16x8ShiftRightByScalar, Operator::kNoProperties, 2, 0, 1)             \
-  V(Int16x8Equal, Operator::kCommutative, 2, 0, 1)                           \
-  V(Int16x8NotEqual, Operator::kCommutative, 2, 0, 1)                        \
-  V(Int16x8LessThan, Operator::kNoProperties, 2, 0, 1)                       \
-  V(Int16x8LessThanOrEqual, Operator::kNoProperties, 2, 0, 1)                \
-  V(Int16x8GreaterThan, Operator::kNoProperties, 2, 0, 1)                    \
-  V(Int16x8GreaterThanOrEqual, Operator::kNoProperties, 2, 0, 1)             \
-  V(Int16x8Select, Operator::kNoProperties, 3, 0, 1)                         \
-  V(Int16x8Swizzle, Operator::kNoProperties, 9, 0, 1)                        \
-  V(Int16x8Shuffle, Operator::kNoProperties, 10, 0, 1)                       \
-  V(Uint16x8AddSaturate, Operator::kCommutative, 2, 0, 1)                    \
-  V(Uint16x8SubSaturate, Operator::kNoProperties, 2, 0, 1)                   \
-  V(Uint16x8Min, Operator::kCommutative, 2, 0, 1)                            \
-  V(Uint16x8Max, Operator::kCommutative, 2, 0, 1)                            \
-  V(Uint16x8ShiftLeftByScalar, Operator::kNoProperties, 2, 0, 1)             \
-  V(Uint16x8ShiftRightByScalar, Operator::kNoProperties, 2, 0, 1)            \
-  V(Uint16x8LessThan, Operator::kNoProperties, 2, 0, 1)                      \
-  V(Uint16x8LessThanOrEqual, Operator::kNoProperties, 2, 0, 1)               \
-  V(Uint16x8GreaterThan, Operator::kNoProperties, 2, 0, 1)                   \
-  V(Uint16x8GreaterThanOrEqual, Operator::kNoProperties, 2, 0, 1)            \
-  V(CreateBool16x8, Operator::kNoProperties, 8, 0, 1)                        \
-  V(Bool16x8ExtractLane, Operator::kNoProperties, 2, 0, 1)                   \
-  V(Bool16x8ReplaceLane, Operator::kNoProperties, 3, 0, 1)                   \
-  V(Bool16x8And, Operator::kAssociative | Operator::kCommutative, 2, 0, 1)   \
-  V(Bool16x8Or, Operator::kAssociative | Operator::kCommutative, 2, 0, 1)    \
-  V(Bool16x8Xor, Operator::kAssociative | Operator::kCommutative, 2, 0, 1)   \
-  V(Bool16x8Not, Operator::kNoProperties, 1, 0, 1)                           \
-  V(Bool16x8AnyTrue, Operator::kNoProperties, 1, 0, 1)                       \
-  V(Bool16x8AllTrue, Operator::kNoProperties, 1, 0, 1)                       \
-  V(Bool16x8Swizzle, Operator::kNoProperties, 9, 0, 1)                       \
-  V(Bool16x8Shuffle, Operator::kNoProperties, 10, 0, 1)                      \
-  V(Bool16x8Equal, Operator::kCommutative, 2, 0, 1)                          \
-  V(Bool16x8NotEqual, Operator::kCommutative, 2, 0, 1)                       \
-  V(CreateInt8x16, Operator::kNoProperties, 16, 0, 1)                        \
-  V(Int8x16ExtractLane, Operator::kNoProperties, 2, 0, 1)                    \
-  V(Int8x16ReplaceLane, Operator::kNoProperties, 3, 0, 1)                    \
-  V(Int8x16Neg, Operator::kNoProperties, 1, 0, 1)                            \
-  V(Int8x16Add, Operator::kCommutative, 2, 0, 1)                             \
-  V(Int8x16AddSaturate, Operator::kCommutative, 2, 0, 1)                     \
-  V(Int8x16Sub, Operator::kNoProperties, 2, 0, 1)                            \
-  V(Int8x16SubSaturate, Operator::kNoProperties, 2, 0, 1)                    \
-  V(Int8x16Mul, Operator::kCommutative, 2, 0, 1)                             \
-  V(Int8x16Min, Operator::kCommutative, 2, 0, 1)                             \
-  V(Int8x16Max, Operator::kCommutative, 2, 0, 1)                             \
-  V(Int8x16ShiftLeftByScalar, Operator::kNoProperties, 2, 0, 1)              \
-  V(Int8x16ShiftRightByScalar, Operator::kNoProperties, 2, 0, 1)             \
-  V(Int8x16Equal, Operator::kCommutative, 2, 0, 1)                           \
-  V(Int8x16NotEqual, Operator::kCommutative, 2, 0, 1)                        \
-  V(Int8x16LessThan, Operator::kNoProperties, 2, 0, 1)                       \
-  V(Int8x16LessThanOrEqual, Operator::kNoProperties, 2, 0, 1)                \
-  V(Int8x16GreaterThan, Operator::kNoProperties, 2, 0, 1)                    \
-  V(Int8x16GreaterThanOrEqual, Operator::kNoProperties, 2, 0, 1)             \
-  V(Int8x16Select, Operator::kNoProperties, 3, 0, 1)                         \
-  V(Int8x16Swizzle, Operator::kNoProperties, 17, 0, 1)                       \
-  V(Int8x16Shuffle, Operator::kNoProperties, 18, 0, 1)                       \
-  V(Uint8x16AddSaturate, Operator::kCommutative, 2, 0, 1)                    \
-  V(Uint8x16SubSaturate, Operator::kNoProperties, 2, 0, 1)                   \
-  V(Uint8x16Min, Operator::kCommutative, 2, 0, 1)                            \
-  V(Uint8x16Max, Operator::kCommutative, 2, 0, 1)                            \
-  V(Uint8x16ShiftLeftByScalar, Operator::kNoProperties, 2, 0, 1)             \
-  V(Uint8x16ShiftRightByScalar, Operator::kNoProperties, 2, 0, 1)            \
-  V(Uint8x16LessThan, Operator::kNoProperties, 2, 0, 1)                      \
-  V(Uint8x16LessThanOrEqual, Operator::kNoProperties, 2, 0, 1)               \
-  V(Uint8x16GreaterThan, Operator::kNoProperties, 2, 0, 1)                   \
-  V(Uint8x16GreaterThanOrEqual, Operator::kNoProperties, 2, 0, 1)            \
-  V(CreateBool8x16, Operator::kNoProperties, 16, 0, 1)                       \
-  V(Bool8x16ExtractLane, Operator::kNoProperties, 2, 0, 1)                   \
-  V(Bool8x16ReplaceLane, Operator::kNoProperties, 3, 0, 1)                   \
-  V(Bool8x16And, Operator::kAssociative | Operator::kCommutative, 2, 0, 1)   \
-  V(Bool8x16Or, Operator::kAssociative | Operator::kCommutative, 2, 0, 1)    \
-  V(Bool8x16Xor, Operator::kAssociative | Operator::kCommutative, 2, 0, 1)   \
-  V(Bool8x16Not, Operator::kNoProperties, 1, 0, 1)                           \
-  V(Bool8x16AnyTrue, Operator::kNoProperties, 1, 0, 1)                       \
-  V(Bool8x16AllTrue, Operator::kNoProperties, 1, 0, 1)                       \
-  V(Bool8x16Swizzle, Operator::kNoProperties, 17, 0, 1)                      \
-  V(Bool8x16Shuffle, Operator::kNoProperties, 18, 0, 1)                      \
-  V(Bool8x16Equal, Operator::kCommutative, 2, 0, 1)                          \
-  V(Bool8x16NotEqual, Operator::kCommutative, 2, 0, 1)                       \
-  V(Simd128Load, Operator::kNoProperties, 2, 0, 1)                           \
-  V(Simd128Load1, Operator::kNoProperties, 2, 0, 1)                          \
-  V(Simd128Load2, Operator::kNoProperties, 2, 0, 1)                          \
-  V(Simd128Load3, Operator::kNoProperties, 2, 0, 1)                          \
-  V(Simd128Store, Operator::kNoProperties, 3, 0, 1)                          \
-  V(Simd128Store1, Operator::kNoProperties, 3, 0, 1)                         \
-  V(Simd128Store2, Operator::kNoProperties, 3, 0, 1)                         \
-  V(Simd128Store3, Operator::kNoProperties, 3, 0, 1)                         \
-  V(Simd128And, Operator::kAssociative | Operator::kCommutative, 2, 0, 1)    \
-  V(Simd128Or, Operator::kAssociative | Operator::kCommutative, 2, 0, 1)     \
-  V(Simd128Xor, Operator::kAssociative | Operator::kCommutative, 2, 0, 1)    \
+#define PURE_BINARY_OP_LIST_32(V)                                           \
+  V(Word32And, Operator::kAssociative | Operator::kCommutative, 2, 0, 1)    \
+  V(Word32Or, Operator::kAssociative | Operator::kCommutative, 2, 0, 1)     \
+  V(Word32Xor, Operator::kAssociative | Operator::kCommutative, 2, 0, 1)    \
+  V(Word32Shl, Operator::kNoProperties, 2, 0, 1)                            \
+  V(Word32Shr, Operator::kNoProperties, 2, 0, 1)                            \
+  V(Word32Sar, Operator::kNoProperties, 2, 0, 1)                            \
+  V(Word32Ror, Operator::kNoProperties, 2, 0, 1)                            \
+  V(Word32Equal, Operator::kCommutative, 2, 0, 1)                           \
+  V(Int32Add, Operator::kAssociative | Operator::kCommutative, 2, 0, 1)     \
+  V(Int32Sub, Operator::kNoProperties, 2, 0, 1)                             \
+  V(Int32Mul, Operator::kAssociative | Operator::kCommutative, 2, 0, 1)     \
+  V(Int32MulHigh, Operator::kAssociative | Operator::kCommutative, 2, 0, 1) \
+  V(Int32Div, Operator::kNoProperties, 2, 1, 1)                             \
+  V(Int32Mod, Operator::kNoProperties, 2, 1, 1)                             \
+  V(Int32LessThan, Operator::kNoProperties, 2, 0, 1)                        \
+  V(Int32LessThanOrEqual, Operator::kNoProperties, 2, 0, 1)                 \
+  V(Uint32Div, Operator::kNoProperties, 2, 1, 1)                            \
+  V(Uint32LessThan, Operator::kNoProperties, 2, 0, 1)                       \
+  V(Uint32LessThanOrEqual, Operator::kNoProperties, 2, 0, 1)                \
+  V(Uint32Mod, Operator::kNoProperties, 2, 1, 1)                            \
+  V(Uint32MulHigh, Operator::kAssociative | Operator::kCommutative, 2, 0, 1)
+
+#define PURE_BINARY_OP_LIST_64(V)                                        \
+  V(Word64And, Operator::kAssociative | Operator::kCommutative, 2, 0, 1) \
+  V(Word64Or, Operator::kAssociative | Operator::kCommutative, 2, 0, 1)  \
+  V(Word64Xor, Operator::kAssociative | Operator::kCommutative, 2, 0, 1) \
+  V(Word64Shl, Operator::kNoProperties, 2, 0, 1)                         \
+  V(Word64Shr, Operator::kNoProperties, 2, 0, 1)                         \
+  V(Word64Sar, Operator::kNoProperties, 2, 0, 1)                         \
+  V(Word64Ror, Operator::kNoProperties, 2, 0, 1)                         \
+  V(Word64Equal, Operator::kCommutative, 2, 0, 1)                        \
+  V(Int64Add, Operator::kAssociative | Operator::kCommutative, 2, 0, 1)  \
+  V(Int64Sub, Operator::kNoProperties, 2, 0, 1)                          \
+  V(Int64Mul, Operator::kAssociative | Operator::kCommutative, 2, 0, 1)  \
+  V(Int64Div, Operator::kNoProperties, 2, 1, 1)                          \
+  V(Int64Mod, Operator::kNoProperties, 2, 1, 1)                          \
+  V(Int64LessThan, Operator::kNoProperties, 2, 0, 1)                     \
+  V(Int64LessThanOrEqual, Operator::kNoProperties, 2, 0, 1)              \
+  V(Uint64Div, Operator::kNoProperties, 2, 1, 1)                         \
+  V(Uint64Mod, Operator::kNoProperties, 2, 1, 1)                         \
+  V(Uint64LessThan, Operator::kNoProperties, 2, 0, 1)                    \
+  V(Uint64LessThanOrEqual, Operator::kNoProperties, 2, 0, 1)
+
+#define PURE_OP_LIST(V)                                                    \
+  PURE_BINARY_OP_LIST_32(V)                                                \
+  PURE_BINARY_OP_LIST_64(V)                                                \
+  V(Word32Clz, Operator::kNoProperties, 1, 0, 1)                           \
+  V(Word64Clz, Operator::kNoProperties, 1, 0, 1)                           \
+  V(BitcastTaggedToWord, Operator::kNoProperties, 1, 0, 1)                 \
+  V(BitcastWordToTagged, Operator::kNoProperties, 1, 0, 1)                 \
+  V(BitcastWordToTaggedSigned, Operator::kNoProperties, 1, 0, 1)           \
+  V(TruncateFloat64ToWord32, Operator::kNoProperties, 1, 0, 1)             \
+  V(ChangeFloat32ToFloat64, Operator::kNoProperties, 1, 0, 1)              \
+  V(ChangeFloat64ToInt32, Operator::kNoProperties, 1, 0, 1)                \
+  V(ChangeFloat64ToUint32, Operator::kNoProperties, 1, 0, 1)               \
+  V(TruncateFloat64ToUint32, Operator::kNoProperties, 1, 0, 1)             \
+  V(TruncateFloat32ToInt32, Operator::kNoProperties, 1, 0, 1)              \
+  V(TruncateFloat32ToUint32, Operator::kNoProperties, 1, 0, 1)             \
+  V(TryTruncateFloat32ToInt64, Operator::kNoProperties, 1, 0, 2)           \
+  V(TryTruncateFloat64ToInt64, Operator::kNoProperties, 1, 0, 2)           \
+  V(TryTruncateFloat32ToUint64, Operator::kNoProperties, 1, 0, 2)          \
+  V(TryTruncateFloat64ToUint64, Operator::kNoProperties, 1, 0, 2)          \
+  V(ChangeInt32ToFloat64, Operator::kNoProperties, 1, 0, 1)                \
+  V(Float64SilenceNaN, Operator::kNoProperties, 1, 0, 1)                   \
+  V(RoundFloat64ToInt32, Operator::kNoProperties, 1, 0, 1)                 \
+  V(RoundInt32ToFloat32, Operator::kNoProperties, 1, 0, 1)                 \
+  V(RoundInt64ToFloat32, Operator::kNoProperties, 1, 0, 1)                 \
+  V(RoundInt64ToFloat64, Operator::kNoProperties, 1, 0, 1)                 \
+  V(RoundUint32ToFloat32, Operator::kNoProperties, 1, 0, 1)                \
+  V(RoundUint64ToFloat32, Operator::kNoProperties, 1, 0, 1)                \
+  V(RoundUint64ToFloat64, Operator::kNoProperties, 1, 0, 1)                \
+  V(ChangeInt32ToInt64, Operator::kNoProperties, 1, 0, 1)                  \
+  V(ChangeUint32ToFloat64, Operator::kNoProperties, 1, 0, 1)               \
+  V(ChangeUint32ToUint64, Operator::kNoProperties, 1, 0, 1)                \
+  V(TruncateFloat64ToFloat32, Operator::kNoProperties, 1, 0, 1)            \
+  V(TruncateInt64ToInt32, Operator::kNoProperties, 1, 0, 1)                \
+  V(BitcastFloat32ToInt32, Operator::kNoProperties, 1, 0, 1)               \
+  V(BitcastFloat64ToInt64, Operator::kNoProperties, 1, 0, 1)               \
+  V(BitcastInt32ToFloat32, Operator::kNoProperties, 1, 0, 1)               \
+  V(BitcastInt64ToFloat64, Operator::kNoProperties, 1, 0, 1)               \
+  V(Float32Abs, Operator::kNoProperties, 1, 0, 1)                          \
+  V(Float32Add, Operator::kCommutative, 2, 0, 1)                           \
+  V(Float32Sub, Operator::kNoProperties, 2, 0, 1)                          \
+  V(Float32Mul, Operator::kCommutative, 2, 0, 1)                           \
+  V(Float32Div, Operator::kNoProperties, 2, 0, 1)                          \
+  V(Float32Neg, Operator::kNoProperties, 1, 0, 1)                          \
+  V(Float32Sqrt, Operator::kNoProperties, 1, 0, 1)                         \
+  V(Float32Max, Operator::kAssociative | Operator::kCommutative, 2, 0, 1)  \
+  V(Float32Min, Operator::kAssociative | Operator::kCommutative, 2, 0, 1)  \
+  V(Float64Abs, Operator::kNoProperties, 1, 0, 1)                          \
+  V(Float64Acos, Operator::kNoProperties, 1, 0, 1)                         \
+  V(Float64Acosh, Operator::kNoProperties, 1, 0, 1)                        \
+  V(Float64Asin, Operator::kNoProperties, 1, 0, 1)                         \
+  V(Float64Asinh, Operator::kNoProperties, 1, 0, 1)                        \
+  V(Float64Atan, Operator::kNoProperties, 1, 0, 1)                         \
+  V(Float64Atan2, Operator::kNoProperties, 2, 0, 1)                        \
+  V(Float64Atanh, Operator::kNoProperties, 1, 0, 1)                        \
+  V(Float64Cbrt, Operator::kNoProperties, 1, 0, 1)                         \
+  V(Float64Cos, Operator::kNoProperties, 1, 0, 1)                          \
+  V(Float64Cosh, Operator::kNoProperties, 1, 0, 1)                         \
+  V(Float64Exp, Operator::kNoProperties, 1, 0, 1)                          \
+  V(Float64Expm1, Operator::kNoProperties, 1, 0, 1)                        \
+  V(Float64Log, Operator::kNoProperties, 1, 0, 1)                          \
+  V(Float64Log1p, Operator::kNoProperties, 1, 0, 1)                        \
+  V(Float64Log2, Operator::kNoProperties, 1, 0, 1)                         \
+  V(Float64Log10, Operator::kNoProperties, 1, 0, 1)                        \
+  V(Float64Max, Operator::kAssociative | Operator::kCommutative, 2, 0, 1)  \
+  V(Float64Min, Operator::kAssociative | Operator::kCommutative, 2, 0, 1)  \
+  V(Float64Neg, Operator::kNoProperties, 1, 0, 1)                          \
+  V(Float64Add, Operator::kCommutative, 2, 0, 1)                           \
+  V(Float64Sub, Operator::kNoProperties, 2, 0, 1)                          \
+  V(Float64Mul, Operator::kCommutative, 2, 0, 1)                           \
+  V(Float64Div, Operator::kNoProperties, 2, 0, 1)                          \
+  V(Float64Mod, Operator::kNoProperties, 2, 0, 1)                          \
+  V(Float64Pow, Operator::kNoProperties, 2, 0, 1)                          \
+  V(Float64Sin, Operator::kNoProperties, 1, 0, 1)                          \
+  V(Float64Sinh, Operator::kNoProperties, 1, 0, 1)                         \
+  V(Float64Sqrt, Operator::kNoProperties, 1, 0, 1)                         \
+  V(Float64Tan, Operator::kNoProperties, 1, 0, 1)                          \
+  V(Float64Tanh, Operator::kNoProperties, 1, 0, 1)                         \
+  V(Float32Equal, Operator::kCommutative, 2, 0, 1)                         \
+  V(Float32LessThan, Operator::kNoProperties, 2, 0, 1)                     \
+  V(Float32LessThanOrEqual, Operator::kNoProperties, 2, 0, 1)              \
+  V(Float64Equal, Operator::kCommutative, 2, 0, 1)                         \
+  V(Float64LessThan, Operator::kNoProperties, 2, 0, 1)                     \
+  V(Float64LessThanOrEqual, Operator::kNoProperties, 2, 0, 1)              \
+  V(Float64ExtractLowWord32, Operator::kNoProperties, 1, 0, 1)             \
+  V(Float64ExtractHighWord32, Operator::kNoProperties, 1, 0, 1)            \
+  V(Float64InsertLowWord32, Operator::kNoProperties, 2, 0, 1)              \
+  V(Float64InsertHighWord32, Operator::kNoProperties, 2, 0, 1)             \
+  V(LoadStackPointer, Operator::kNoProperties, 0, 0, 1)                    \
+  V(LoadFramePointer, Operator::kNoProperties, 0, 0, 1)                    \
+  V(LoadParentFramePointer, Operator::kNoProperties, 0, 0, 1)              \
+  V(Int32PairAdd, Operator::kNoProperties, 4, 0, 2)                        \
+  V(Int32PairSub, Operator::kNoProperties, 4, 0, 2)                        \
+  V(Int32PairMul, Operator::kNoProperties, 4, 0, 2)                        \
+  V(Word32PairShl, Operator::kNoProperties, 3, 0, 2)                       \
+  V(Word32PairShr, Operator::kNoProperties, 3, 0, 2)                       \
+  V(Word32PairSar, Operator::kNoProperties, 3, 0, 2)                       \
+  V(CreateFloat32x4, Operator::kNoProperties, 4, 0, 1)                     \
+  V(Float32x4ExtractLane, Operator::kNoProperties, 2, 0, 1)                \
+  V(Float32x4ReplaceLane, Operator::kNoProperties, 3, 0, 1)                \
+  V(Float32x4Abs, Operator::kNoProperties, 1, 0, 1)                        \
+  V(Float32x4Neg, Operator::kNoProperties, 1, 0, 1)                        \
+  V(Float32x4Sqrt, Operator::kNoProperties, 1, 0, 1)                       \
+  V(Float32x4RecipApprox, Operator::kNoProperties, 1, 0, 1)                \
+  V(Float32x4RecipSqrtApprox, Operator::kNoProperties, 1, 0, 1)            \
+  V(Float32x4Add, Operator::kCommutative, 2, 0, 1)                         \
+  V(Float32x4Sub, Operator::kNoProperties, 2, 0, 1)                        \
+  V(Float32x4Mul, Operator::kCommutative, 2, 0, 1)                         \
+  V(Float32x4Div, Operator::kNoProperties, 2, 0, 1)                        \
+  V(Float32x4Min, Operator::kCommutative, 2, 0, 1)                         \
+  V(Float32x4Max, Operator::kCommutative, 2, 0, 1)                         \
+  V(Float32x4MinNum, Operator::kCommutative, 2, 0, 1)                      \
+  V(Float32x4MaxNum, Operator::kCommutative, 2, 0, 1)                      \
+  V(Float32x4Equal, Operator::kCommutative, 2, 0, 1)                       \
+  V(Float32x4NotEqual, Operator::kCommutative, 2, 0, 1)                    \
+  V(Float32x4LessThan, Operator::kNoProperties, 2, 0, 1)                   \
+  V(Float32x4LessThanOrEqual, Operator::kNoProperties, 2, 0, 1)            \
+  V(Float32x4GreaterThan, Operator::kNoProperties, 2, 0, 1)                \
+  V(Float32x4GreaterThanOrEqual, Operator::kNoProperties, 2, 0, 1)         \
+  V(Float32x4Select, Operator::kNoProperties, 3, 0, 1)                     \
+  V(Float32x4Swizzle, Operator::kNoProperties, 5, 0, 1)                    \
+  V(Float32x4Shuffle, Operator::kNoProperties, 6, 0, 1)                    \
+  V(Float32x4FromInt32x4, Operator::kNoProperties, 1, 0, 1)                \
+  V(Float32x4FromUint32x4, Operator::kNoProperties, 1, 0, 1)               \
+  V(CreateInt32x4, Operator::kNoProperties, 4, 0, 1)                       \
+  V(Int32x4ExtractLane, Operator::kNoProperties, 2, 0, 1)                  \
+  V(Int32x4ReplaceLane, Operator::kNoProperties, 3, 0, 1)                  \
+  V(Int32x4Neg, Operator::kNoProperties, 1, 0, 1)                          \
+  V(Int32x4Add, Operator::kCommutative, 2, 0, 1)                           \
+  V(Int32x4Sub, Operator::kNoProperties, 2, 0, 1)                          \
+  V(Int32x4Mul, Operator::kCommutative, 2, 0, 1)                           \
+  V(Int32x4Min, Operator::kCommutative, 2, 0, 1)                           \
+  V(Int32x4Max, Operator::kCommutative, 2, 0, 1)                           \
+  V(Int32x4ShiftLeftByScalar, Operator::kNoProperties, 2, 0, 1)            \
+  V(Int32x4ShiftRightByScalar, Operator::kNoProperties, 2, 0, 1)           \
+  V(Int32x4Equal, Operator::kCommutative, 2, 0, 1)                         \
+  V(Int32x4NotEqual, Operator::kCommutative, 2, 0, 1)                      \
+  V(Int32x4LessThan, Operator::kNoProperties, 2, 0, 1)                     \
+  V(Int32x4LessThanOrEqual, Operator::kNoProperties, 2, 0, 1)              \
+  V(Int32x4GreaterThan, Operator::kNoProperties, 2, 0, 1)                  \
+  V(Int32x4GreaterThanOrEqual, Operator::kNoProperties, 2, 0, 1)           \
+  V(Int32x4Select, Operator::kNoProperties, 3, 0, 1)                       \
+  V(Int32x4Swizzle, Operator::kNoProperties, 5, 0, 1)                      \
+  V(Int32x4Shuffle, Operator::kNoProperties, 6, 0, 1)                      \
+  V(Int32x4FromFloat32x4, Operator::kNoProperties, 1, 0, 1)                \
+  V(Uint32x4Min, Operator::kCommutative, 2, 0, 1)                          \
+  V(Uint32x4Max, Operator::kCommutative, 2, 0, 1)                          \
+  V(Uint32x4ShiftLeftByScalar, Operator::kNoProperties, 2, 0, 1)           \
+  V(Uint32x4ShiftRightByScalar, Operator::kNoProperties, 2, 0, 1)          \
+  V(Uint32x4LessThan, Operator::kNoProperties, 2, 0, 1)                    \
+  V(Uint32x4LessThanOrEqual, Operator::kNoProperties, 2, 0, 1)             \
+  V(Uint32x4GreaterThan, Operator::kNoProperties, 2, 0, 1)                 \
+  V(Uint32x4GreaterThanOrEqual, Operator::kNoProperties, 2, 0, 1)          \
+  V(Uint32x4FromFloat32x4, Operator::kNoProperties, 1, 0, 1)               \
+  V(CreateBool32x4, Operator::kNoProperties, 4, 0, 1)                      \
+  V(Bool32x4ExtractLane, Operator::kNoProperties, 2, 0, 1)                 \
+  V(Bool32x4ReplaceLane, Operator::kNoProperties, 3, 0, 1)                 \
+  V(Bool32x4And, Operator::kAssociative | Operator::kCommutative, 2, 0, 1) \
+  V(Bool32x4Or, Operator::kAssociative | Operator::kCommutative, 2, 0, 1)  \
+  V(Bool32x4Xor, Operator::kAssociative | Operator::kCommutative, 2, 0, 1) \
+  V(Bool32x4Not, Operator::kNoProperties, 1, 0, 1)                         \
+  V(Bool32x4AnyTrue, Operator::kNoProperties, 1, 0, 1)                     \
+  V(Bool32x4AllTrue, Operator::kNoProperties, 1, 0, 1)                     \
+  V(Bool32x4Swizzle, Operator::kNoProperties, 5, 0, 1)                     \
+  V(Bool32x4Shuffle, Operator::kNoProperties, 6, 0, 1)                     \
+  V(Bool32x4Equal, Operator::kCommutative, 2, 0, 1)                        \
+  V(Bool32x4NotEqual, Operator::kCommutative, 2, 0, 1)                     \
+  V(CreateInt16x8, Operator::kNoProperties, 8, 0, 1)                       \
+  V(Int16x8ExtractLane, Operator::kNoProperties, 2, 0, 1)                  \
+  V(Int16x8ReplaceLane, Operator::kNoProperties, 3, 0, 1)                  \
+  V(Int16x8Neg, Operator::kNoProperties, 1, 0, 1)                          \
+  V(Int16x8Add, Operator::kCommutative, 2, 0, 1)                           \
+  V(Int16x8AddSaturate, Operator::kCommutative, 2, 0, 1)                   \
+  V(Int16x8Sub, Operator::kNoProperties, 2, 0, 1)                          \
+  V(Int16x8SubSaturate, Operator::kNoProperties, 2, 0, 1)                  \
+  V(Int16x8Mul, Operator::kCommutative, 2, 0, 1)                           \
+  V(Int16x8Min, Operator::kCommutative, 2, 0, 1)                           \
+  V(Int16x8Max, Operator::kCommutative, 2, 0, 1)                           \
+  V(Int16x8ShiftLeftByScalar, Operator::kNoProperties, 2, 0, 1)            \
+  V(Int16x8ShiftRightByScalar, Operator::kNoProperties, 2, 0, 1)           \
+  V(Int16x8Equal, Operator::kCommutative, 2, 0, 1)                         \
+  V(Int16x8NotEqual, Operator::kCommutative, 2, 0, 1)                      \
+  V(Int16x8LessThan, Operator::kNoProperties, 2, 0, 1)                     \
+  V(Int16x8LessThanOrEqual, Operator::kNoProperties, 2, 0, 1)              \
+  V(Int16x8GreaterThan, Operator::kNoProperties, 2, 0, 1)                  \
+  V(Int16x8GreaterThanOrEqual, Operator::kNoProperties, 2, 0, 1)           \
+  V(Int16x8Select, Operator::kNoProperties, 3, 0, 1)                       \
+  V(Int16x8Swizzle, Operator::kNoProperties, 9, 0, 1)                      \
+  V(Int16x8Shuffle, Operator::kNoProperties, 10, 0, 1)                     \
+  V(Uint16x8AddSaturate, Operator::kCommutative, 2, 0, 1)                  \
+  V(Uint16x8SubSaturate, Operator::kNoProperties, 2, 0, 1)                 \
+  V(Uint16x8Min, Operator::kCommutative, 2, 0, 1)                          \
+  V(Uint16x8Max, Operator::kCommutative, 2, 0, 1)                          \
+  V(Uint16x8ShiftLeftByScalar, Operator::kNoProperties, 2, 0, 1)           \
+  V(Uint16x8ShiftRightByScalar, Operator::kNoProperties, 2, 0, 1)          \
+  V(Uint16x8LessThan, Operator::kNoProperties, 2, 0, 1)                    \
+  V(Uint16x8LessThanOrEqual, Operator::kNoProperties, 2, 0, 1)             \
+  V(Uint16x8GreaterThan, Operator::kNoProperties, 2, 0, 1)                 \
+  V(Uint16x8GreaterThanOrEqual, Operator::kNoProperties, 2, 0, 1)          \
+  V(CreateBool16x8, Operator::kNoProperties, 8, 0, 1)                      \
+  V(Bool16x8ExtractLane, Operator::kNoProperties, 2, 0, 1)                 \
+  V(Bool16x8ReplaceLane, Operator::kNoProperties, 3, 0, 1)                 \
+  V(Bool16x8And, Operator::kAssociative | Operator::kCommutative, 2, 0, 1) \
+  V(Bool16x8Or, Operator::kAssociative | Operator::kCommutative, 2, 0, 1)  \
+  V(Bool16x8Xor, Operator::kAssociative | Operator::kCommutative, 2, 0, 1) \
+  V(Bool16x8Not, Operator::kNoProperties, 1, 0, 1)                         \
+  V(Bool16x8AnyTrue, Operator::kNoProperties, 1, 0, 1)                     \
+  V(Bool16x8AllTrue, Operator::kNoProperties, 1, 0, 1)                     \
+  V(Bool16x8Swizzle, Operator::kNoProperties, 9, 0, 1)                     \
+  V(Bool16x8Shuffle, Operator::kNoProperties, 10, 0, 1)                    \
+  V(Bool16x8Equal, Operator::kCommutative, 2, 0, 1)                        \
+  V(Bool16x8NotEqual, Operator::kCommutative, 2, 0, 1)                     \
+  V(CreateInt8x16, Operator::kNoProperties, 16, 0, 1)                      \
+  V(Int8x16ExtractLane, Operator::kNoProperties, 2, 0, 1)                  \
+  V(Int8x16ReplaceLane, Operator::kNoProperties, 3, 0, 1)                  \
+  V(Int8x16Neg, Operator::kNoProperties, 1, 0, 1)                          \
+  V(Int8x16Add, Operator::kCommutative, 2, 0, 1)                           \
+  V(Int8x16AddSaturate, Operator::kCommutative, 2, 0, 1)                   \
+  V(Int8x16Sub, Operator::kNoProperties, 2, 0, 1)                          \
+  V(Int8x16SubSaturate, Operator::kNoProperties, 2, 0, 1)                  \
+  V(Int8x16Mul, Operator::kCommutative, 2, 0, 1)                           \
+  V(Int8x16Min, Operator::kCommutative, 2, 0, 1)                           \
+  V(Int8x16Max, Operator::kCommutative, 2, 0, 1)                           \
+  V(Int8x16ShiftLeftByScalar, Operator::kNoProperties, 2, 0, 1)            \
+  V(Int8x16ShiftRightByScalar, Operator::kNoProperties, 2, 0, 1)           \
+  V(Int8x16Equal, Operator::kCommutative, 2, 0, 1)                         \
+  V(Int8x16NotEqual, Operator::kCommutative, 2, 0, 1)                      \
+  V(Int8x16LessThan, Operator::kNoProperties, 2, 0, 1)                     \
+  V(Int8x16LessThanOrEqual, Operator::kNoProperties, 2, 0, 1)              \
+  V(Int8x16GreaterThan, Operator::kNoProperties, 2, 0, 1)                  \
+  V(Int8x16GreaterThanOrEqual, Operator::kNoProperties, 2, 0, 1)           \
+  V(Int8x16Select, Operator::kNoProperties, 3, 0, 1)                       \
+  V(Int8x16Swizzle, Operator::kNoProperties, 17, 0, 1)                     \
+  V(Int8x16Shuffle, Operator::kNoProperties, 18, 0, 1)                     \
+  V(Uint8x16AddSaturate, Operator::kCommutative, 2, 0, 1)                  \
+  V(Uint8x16SubSaturate, Operator::kNoProperties, 2, 0, 1)                 \
+  V(Uint8x16Min, Operator::kCommutative, 2, 0, 1)                          \
+  V(Uint8x16Max, Operator::kCommutative, 2, 0, 1)                          \
+  V(Uint8x16ShiftLeftByScalar, Operator::kNoProperties, 2, 0, 1)           \
+  V(Uint8x16ShiftRightByScalar, Operator::kNoProperties, 2, 0, 1)          \
+  V(Uint8x16LessThan, Operator::kNoProperties, 2, 0, 1)                    \
+  V(Uint8x16LessThanOrEqual, Operator::kNoProperties, 2, 0, 1)             \
+  V(Uint8x16GreaterThan, Operator::kNoProperties, 2, 0, 1)                 \
+  V(Uint8x16GreaterThanOrEqual, Operator::kNoProperties, 2, 0, 1)          \
+  V(CreateBool8x16, Operator::kNoProperties, 16, 0, 1)                     \
+  V(Bool8x16ExtractLane, Operator::kNoProperties, 2, 0, 1)                 \
+  V(Bool8x16ReplaceLane, Operator::kNoProperties, 3, 0, 1)                 \
+  V(Bool8x16And, Operator::kAssociative | Operator::kCommutative, 2, 0, 1) \
+  V(Bool8x16Or, Operator::kAssociative | Operator::kCommutative, 2, 0, 1)  \
+  V(Bool8x16Xor, Operator::kAssociative | Operator::kCommutative, 2, 0, 1) \
+  V(Bool8x16Not, Operator::kNoProperties, 1, 0, 1)                         \
+  V(Bool8x16AnyTrue, Operator::kNoProperties, 1, 0, 1)                     \
+  V(Bool8x16AllTrue, Operator::kNoProperties, 1, 0, 1)                     \
+  V(Bool8x16Swizzle, Operator::kNoProperties, 17, 0, 1)                    \
+  V(Bool8x16Shuffle, Operator::kNoProperties, 18, 0, 1)                    \
+  V(Bool8x16Equal, Operator::kCommutative, 2, 0, 1)                        \
+  V(Bool8x16NotEqual, Operator::kCommutative, 2, 0, 1)                     \
+  V(Simd128Load, Operator::kNoProperties, 2, 0, 1)                         \
+  V(Simd128Load1, Operator::kNoProperties, 2, 0, 1)                        \
+  V(Simd128Load2, Operator::kNoProperties, 2, 0, 1)                        \
+  V(Simd128Load3, Operator::kNoProperties, 2, 0, 1)                        \
+  V(Simd128Store, Operator::kNoProperties, 3, 0, 1)                        \
+  V(Simd128Store1, Operator::kNoProperties, 3, 0, 1)                       \
+  V(Simd128Store2, Operator::kNoProperties, 3, 0, 1)                       \
+  V(Simd128Store3, Operator::kNoProperties, 3, 0, 1)                       \
+  V(Simd128And, Operator::kAssociative | Operator::kCommutative, 2, 0, 1)  \
+  V(Simd128Or, Operator::kAssociative | Operator::kCommutative, 2, 0, 1)   \
+  V(Simd128Xor, Operator::kAssociative | Operator::kCommutative, 2, 0, 1)  \
   V(Simd128Not, Operator::kNoProperties, 1, 0, 1)
 
 #define PURE_OPTIONAL_OP_LIST(V)                            \
@@ -428,6 +431,8 @@
   V(Int64)                   \
   V(Uint64)                  \
   V(Pointer)                 \
+  V(TaggedSigned)            \
+  V(TaggedPointer)           \
   V(AnyTagged)
 
 #define MACHINE_REPRESENTATION_LIST(V) \
@@ -504,9 +509,18 @@
               Operator::kNoDeopt | Operator::kNoThrow | Operator::kNoWrite,  \
               "CheckedLoad", 3, 1, 1, 1, 1, 0, MachineType::Type()) {}       \
   };                                                                         \
+  struct ProtectedLoad##Type##Operator final                                 \
+      : public Operator1<ProtectedLoadRepresentation> {                      \
+    ProtectedLoad##Type##Operator()                                          \
+        : Operator1<ProtectedLoadRepresentation>(                            \
+              IrOpcode::kProtectedLoad,                                      \
+              Operator::kNoDeopt | Operator::kNoThrow | Operator::kNoWrite,  \
+              "ProtectedLoad", 4, 1, 1, 1, 1, 0, MachineType::Type()) {}     \
+  };                                                                         \
   Load##Type##Operator kLoad##Type;                                          \
   UnalignedLoad##Type##Operator kUnalignedLoad##Type;                        \
-  CheckedLoad##Type##Operator kCheckedLoad##Type;
+  CheckedLoad##Type##Operator kCheckedLoad##Type;                            \
+  ProtectedLoad##Type##Operator kProtectedLoad##Type;
   MACHINE_TYPE_LIST(LOAD)
 #undef LOAD
 
@@ -701,6 +715,17 @@
   return nullptr;
 }
 
+const Operator* MachineOperatorBuilder::ProtectedLoad(LoadRepresentation rep) {
+#define LOAD(Type)                       \
+  if (rep == MachineType::Type()) {      \
+    return &cache_.kProtectedLoad##Type; \
+  }
+  MACHINE_TYPE_LIST(LOAD)
+#undef LOAD
+  UNREACHABLE();
+  return nullptr;
+}
+
 const Operator* MachineOperatorBuilder::StackSlot(MachineRepresentation rep) {
 #define STACKSLOT(Type)                              \
   if (rep == MachineType::Type().representation()) { \
diff --git a/src/compiler/machine-operator.h b/src/compiler/machine-operator.h
index 611846a..56cefc5 100644
--- a/src/compiler/machine-operator.h
+++ b/src/compiler/machine-operator.h
@@ -41,6 +41,7 @@
 
 // A Load needs a MachineType.
 typedef MachineType LoadRepresentation;
+typedef LoadRepresentation ProtectedLoadRepresentation;
 
 LoadRepresentation LoadRepresentationOf(Operator const*);
 
@@ -276,9 +277,15 @@
   const Operator* Uint64LessThanOrEqual();
   const Operator* Uint64Mod();
 
+  // This operator reinterprets the bits of a tagged pointer as word.
+  const Operator* BitcastTaggedToWord();
+
   // This operator reinterprets the bits of a word as tagged pointer.
   const Operator* BitcastWordToTagged();
 
+  // This operator reinterprets the bits of a word as a Smi.
+  const Operator* BitcastWordToTaggedSigned();
+
   // JavaScript float64 to int32/uint32 truncation.
   const Operator* TruncateFloat64ToWord32();
 
@@ -302,16 +309,6 @@
   const Operator* ChangeUint32ToFloat64();
   const Operator* ChangeUint32ToUint64();
 
-  // These are changes from impossible values (for example a smi-checked
-  // string).  They can safely emit an abort instruction, which should
-  // never be reached.
-  const Operator* ImpossibleToWord32();
-  const Operator* ImpossibleToWord64();
-  const Operator* ImpossibleToFloat32();
-  const Operator* ImpossibleToFloat64();
-  const Operator* ImpossibleToTagged();
-  const Operator* ImpossibleToBit();
-
   // These operators truncate or round numbers, both changing the representation
   // of the number and mapping multiple input values onto the same output value.
   const Operator* TruncateFloat64ToFloat32();
@@ -611,6 +608,7 @@
 
   // load [base + index]
   const Operator* Load(LoadRepresentation rep);
+  const Operator* ProtectedLoad(LoadRepresentation rep);
 
   // store [base + index], value
   const Operator* Store(StoreRepresentation rep);
diff --git a/src/compiler/memory-optimizer.cc b/src/compiler/memory-optimizer.cc
index 97c4362..66fcbb9 100644
--- a/src/compiler/memory-optimizer.cc
+++ b/src/compiler/memory-optimizer.cc
@@ -107,7 +107,38 @@
   Node* size = node->InputAt(0);
   Node* effect = node->InputAt(1);
   Node* control = node->InputAt(2);
-  PretenureFlag pretenure = OpParameter<PretenureFlag>(node->op());
+  PretenureFlag pretenure = PretenureFlagOf(node->op());
+
+  // Propagate tenuring from outer allocations to inner allocations, i.e.
+  // when we allocate an object in old space and store a newly allocated
+  // child object into the pretenured object, then the newly allocated
+  // child object also should get pretenured to old space.
+  if (pretenure == TENURED) {
+    for (Edge const edge : node->use_edges()) {
+      Node* const user = edge.from();
+      if (user->opcode() == IrOpcode::kStoreField && edge.index() == 0) {
+        Node* const child = user->InputAt(1);
+        if (child->opcode() == IrOpcode::kAllocate &&
+            PretenureFlagOf(child->op()) == NOT_TENURED) {
+          NodeProperties::ChangeOp(child, node->op());
+          break;
+        }
+      }
+    }
+  } else {
+    DCHECK_EQ(NOT_TENURED, pretenure);
+    for (Edge const edge : node->use_edges()) {
+      Node* const user = edge.from();
+      if (user->opcode() == IrOpcode::kStoreField && edge.index() == 1) {
+        Node* const parent = user->InputAt(0);
+        if (parent->opcode() == IrOpcode::kAllocate &&
+            PretenureFlagOf(parent->op()) == TENURED) {
+          pretenure = TENURED;
+          break;
+        }
+      }
+    }
+  }
 
   // Determine the top/limit addresses.
   Node* top_address = jsgraph()->ExternalConstant(
@@ -122,9 +153,9 @@
   // Check if we can fold this allocation into a previous allocation represented
   // by the incoming {state}.
   Int32Matcher m(size);
-  if (m.HasValue() && m.Value() < Page::kMaxRegularHeapObjectSize) {
+  if (m.HasValue() && m.Value() < kMaxRegularHeapObjectSize) {
     int32_t const object_size = m.Value();
-    if (state->size() <= Page::kMaxRegularHeapObjectSize - object_size &&
+    if (state->size() <= kMaxRegularHeapObjectSize - object_size &&
         state->group()->pretenure() == pretenure) {
       // We can fold this Allocate {node} into the allocation {group}
       // represented by the given {state}. Compute the upper bound for
@@ -282,8 +313,9 @@
 
     control = graph()->NewNode(common()->Merge(2), if_true, if_false);
     effect = graph()->NewNode(common()->EffectPhi(2), etrue, efalse, control);
-    value = graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
-                             vtrue, vfalse, control);
+    value = graph()->NewNode(
+        common()->Phi(MachineRepresentation::kTaggedPointer, 2), vtrue, vfalse,
+        control);
 
     // Create an unfoldable allocation group.
     AllocationGroup* group =
diff --git a/src/compiler/memory-optimizer.h b/src/compiler/memory-optimizer.h
index f0cd546..ba1d6dd 100644
--- a/src/compiler/memory-optimizer.h
+++ b/src/compiler/memory-optimizer.h
@@ -5,7 +5,7 @@
 #ifndef V8_COMPILER_MEMORY_OPTIMIZER_H_
 #define V8_COMPILER_MEMORY_OPTIMIZER_H_
 
-#include "src/zone-containers.h"
+#include "src/zone/zone-containers.h"
 
 namespace v8 {
 namespace internal {
diff --git a/src/compiler/mips/code-generator-mips.cc b/src/compiler/mips/code-generator-mips.cc
index d06bc30..12ab4af 100644
--- a/src/compiler/mips/code-generator-mips.cc
+++ b/src/compiler/mips/code-generator-mips.cc
@@ -2,8 +2,8 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
-#include "src/ast/scopes.h"
 #include "src/compiler/code-generator.h"
+#include "src/compilation-info.h"
 #include "src/compiler/code-generator-impl.h"
 #include "src/compiler/gap-resolver.h"
 #include "src/compiler/node-matchers.h"
@@ -693,9 +693,6 @@
     case kArchDebugBreak:
       __ stop("kArchDebugBreak");
       break;
-    case kArchImpossible:
-      __ Abort(kConversionFromImpossibleValue);
-      break;
     case kArchComment: {
       Address comment_string = i.InputExternalReference(0).address();
       __ RecordComment(reinterpret_cast<const char*>(comment_string));
@@ -710,8 +707,8 @@
           BuildTranslation(instr, -1, 0, OutputFrameStateCombine::Ignore());
       Deoptimizer::BailoutType bailout_type =
           Deoptimizer::BailoutType(MiscField::decode(instr->opcode()));
-      CodeGenResult result =
-          AssembleDeoptimizerCall(deopt_state_id, bailout_type);
+      CodeGenResult result = AssembleDeoptimizerCall(
+          deopt_state_id, bailout_type, current_source_position_);
       if (result != kSuccess) return result;
       break;
     }
@@ -1121,6 +1118,38 @@
       __ sub_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
                i.InputDoubleRegister(1));
       break;
+    case kMipsMaddS:
+      __ madd_s(i.OutputFloatRegister(), i.InputFloatRegister(0),
+                i.InputFloatRegister(1), i.InputFloatRegister(2));
+      break;
+    case kMipsMaddD:
+      __ madd_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
+                i.InputDoubleRegister(1), i.InputDoubleRegister(2));
+      break;
+    case kMipsMaddfS:
+      __ maddf_s(i.OutputFloatRegister(), i.InputFloatRegister(1),
+                 i.InputFloatRegister(2));
+      break;
+    case kMipsMaddfD:
+      __ maddf_d(i.OutputDoubleRegister(), i.InputDoubleRegister(1),
+                 i.InputDoubleRegister(2));
+      break;
+    case kMipsMsubS:
+      __ msub_s(i.OutputFloatRegister(), i.InputFloatRegister(0),
+                i.InputFloatRegister(1), i.InputFloatRegister(2));
+      break;
+    case kMipsMsubD:
+      __ msub_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
+                i.InputDoubleRegister(1), i.InputDoubleRegister(2));
+      break;
+    case kMipsMsubfS:
+      __ msubf_s(i.OutputFloatRegister(), i.InputFloatRegister(1),
+                 i.InputFloatRegister(2));
+      break;
+    case kMipsMsubfD:
+      __ msubf_d(i.OutputDoubleRegister(), i.InputDoubleRegister(1),
+                 i.InputDoubleRegister(2));
+      break;
     case kMipsMulD:
       // TODO(plind): add special case: right op is -1.0, see arm port.
       __ mul_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
@@ -1358,7 +1387,12 @@
       break;
 
     // ... more basic instructions ...
-
+    case kMipsSeb:
+      __ seb(i.OutputRegister(), i.InputRegister(0));
+      break;
+    case kMipsSeh:
+      __ seh(i.OutputRegister(), i.InputRegister(0));
+      break;
     case kMipsLbu:
       __ lbu(i.OutputRegister(), i.MemoryOperand());
       break;
@@ -1843,13 +1877,14 @@
 }
 
 CodeGenerator::CodeGenResult CodeGenerator::AssembleDeoptimizerCall(
-    int deoptimization_id, Deoptimizer::BailoutType bailout_type) {
+    int deoptimization_id, Deoptimizer::BailoutType bailout_type,
+    SourcePosition pos) {
   Address deopt_entry = Deoptimizer::GetDeoptimizationEntry(
       isolate(), deoptimization_id, bailout_type);
   if (deopt_entry == nullptr) return kTooManyDeoptimizationBailouts;
   DeoptimizeReason deoptimization_reason =
       GetDeoptimizationReason(deoptimization_id);
-  __ RecordDeoptReason(deoptimization_reason, 0, deoptimization_id);
+  __ RecordDeoptReason(deoptimization_reason, pos.raw(), deoptimization_id);
   __ Call(deopt_entry, RelocInfo::RUNTIME_ENTRY);
   return kSuccess;
 }
@@ -2028,9 +2063,14 @@
     } else if (src.type() == Constant::kFloat32) {
       if (destination->IsFPStackSlot()) {
         MemOperand dst = g.ToMemOperand(destination);
-        __ li(at, Operand(bit_cast<int32_t>(src.ToFloat32())));
-        __ sw(at, dst);
+        if (bit_cast<int32_t>(src.ToFloat32()) == 0) {
+          __ sw(zero_reg, dst);
+        } else {
+          __ li(at, Operand(bit_cast<int32_t>(src.ToFloat32())));
+          __ sw(at, dst);
+        }
       } else {
+        DCHECK(destination->IsFPRegister());
         FloatRegister dst = g.ToSingleRegister(destination);
         __ Move(dst, src.ToFloat32());
       }
diff --git a/src/compiler/mips/instruction-codes-mips.h b/src/compiler/mips/instruction-codes-mips.h
index 269ac0f..45ed041 100644
--- a/src/compiler/mips/instruction-codes-mips.h
+++ b/src/compiler/mips/instruction-codes-mips.h
@@ -69,6 +69,14 @@
   V(MipsAddPair)                   \
   V(MipsSubPair)                   \
   V(MipsMulPair)                   \
+  V(MipsMaddS)                     \
+  V(MipsMaddD)                     \
+  V(MipsMaddfS)                    \
+  V(MipsMaddfD)                    \
+  V(MipsMsubS)                     \
+  V(MipsMsubD)                     \
+  V(MipsMsubfS)                    \
+  V(MipsMsubfD)                    \
   V(MipsFloat32RoundDown)          \
   V(MipsFloat32RoundTruncate)      \
   V(MipsFloat32RoundUp)            \
@@ -126,7 +134,9 @@
   V(MipsPush)                      \
   V(MipsStoreToStackSlot)          \
   V(MipsByteSwap32)                \
-  V(MipsStackClaim)
+  V(MipsStackClaim)                \
+  V(MipsSeb)                       \
+  V(MipsSeh)
 
 // Addressing modes represent the "shape" of inputs to an instruction.
 // Many instructions support multiple addressing modes. Addressing modes
diff --git a/src/compiler/mips/instruction-selector-mips.cc b/src/compiler/mips/instruction-selector-mips.cc
index 4c35369..0a98930 100644
--- a/src/compiler/mips/instruction-selector-mips.cc
+++ b/src/compiler/mips/instruction-selector-mips.cc
@@ -185,6 +185,10 @@
   }
 }
 
+void InstructionSelector::VisitProtectedLoad(Node* node) {
+  // TODO(eholk)
+  UNIMPLEMENTED();
+}
 
 void InstructionSelector::VisitStore(Node* node) {
   MipsOperandGenerator g(this);
@@ -198,7 +202,7 @@
 
   // TODO(mips): I guess this could be done in a better way.
   if (write_barrier_kind != kNoWriteBarrier) {
-    DCHECK_EQ(MachineRepresentation::kTagged, rep);
+    DCHECK(CanBeTaggedPointer(rep));
     InstructionOperand inputs[3];
     size_t input_count = 0;
     inputs[input_count++] = g.UseUniqueRegister(base);
@@ -403,6 +407,24 @@
 
 
 void InstructionSelector::VisitWord32Sar(Node* node) {
+  Int32BinopMatcher m(node);
+  if (m.left().IsWord32Shl() && CanCover(node, m.left().node())) {
+    Int32BinopMatcher mleft(m.left().node());
+    if (m.right().HasValue() && mleft.right().HasValue()) {
+      MipsOperandGenerator g(this);
+      uint32_t sar = m.right().Value();
+      uint32_t shl = mleft.right().Value();
+      if ((sar == shl) && (sar == 16)) {
+        Emit(kMipsSeh, g.DefineAsRegister(node),
+             g.UseRegister(mleft.left().node()));
+        return;
+      } else if ((sar == shl) && (sar == 24)) {
+        Emit(kMipsSeb, g.DefineAsRegister(node),
+             g.UseRegister(mleft.left().node()));
+        return;
+      }
+    }
+  }
   VisitRRO(this, kMipsSar, node);
 }
 
@@ -759,20 +781,126 @@
 
 
 void InstructionSelector::VisitFloat32Add(Node* node) {
+  MipsOperandGenerator g(this);
+  Float32BinopMatcher m(node);
+  if (m.left().IsFloat32Mul() && CanCover(node, m.left().node())) {
+    // For Add.S(Mul.S(x, y), z):
+    Float32BinopMatcher mleft(m.left().node());
+    if (IsMipsArchVariant(kMips32r2)) {  // Select Madd.S(z, x, y).
+      Emit(kMipsMaddS, g.DefineAsRegister(node),
+           g.UseRegister(m.right().node()), g.UseRegister(mleft.left().node()),
+           g.UseRegister(mleft.right().node()));
+      return;
+    } else if (IsMipsArchVariant(kMips32r6)) {  // Select Maddf.S(z, x, y).
+      Emit(kMipsMaddfS, g.DefineSameAsFirst(node),
+           g.UseRegister(m.right().node()), g.UseRegister(mleft.left().node()),
+           g.UseRegister(mleft.right().node()));
+      return;
+    }
+  }
+  if (m.right().IsFloat32Mul() && CanCover(node, m.right().node())) {
+    // For Add.S(x, Mul.S(y, z)):
+    Float32BinopMatcher mright(m.right().node());
+    if (IsMipsArchVariant(kMips32r2)) {  // Select Madd.S(x, y, z).
+      Emit(kMipsMaddS, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
+           g.UseRegister(mright.left().node()),
+           g.UseRegister(mright.right().node()));
+      return;
+    } else if (IsMipsArchVariant(kMips32r6)) {  // Select Maddf.S(x, y, z).
+      Emit(kMipsMaddfS, g.DefineSameAsFirst(node),
+           g.UseRegister(m.left().node()), g.UseRegister(mright.left().node()),
+           g.UseRegister(mright.right().node()));
+      return;
+    }
+  }
   VisitRRR(this, kMipsAddS, node);
 }
 
 
 void InstructionSelector::VisitFloat64Add(Node* node) {
+  MipsOperandGenerator g(this);
+  Float64BinopMatcher m(node);
+  if (m.left().IsFloat64Mul() && CanCover(node, m.left().node())) {
+    // For Add.D(Mul.D(x, y), z):
+    Float64BinopMatcher mleft(m.left().node());
+    if (IsMipsArchVariant(kMips32r2)) {  // Select Madd.D(z, x, y).
+      Emit(kMipsMaddD, g.DefineAsRegister(node),
+           g.UseRegister(m.right().node()), g.UseRegister(mleft.left().node()),
+           g.UseRegister(mleft.right().node()));
+      return;
+    } else if (IsMipsArchVariant(kMips32r6)) {  // Select Maddf.D(z, x, y).
+      Emit(kMipsMaddfD, g.DefineSameAsFirst(node),
+           g.UseRegister(m.right().node()), g.UseRegister(mleft.left().node()),
+           g.UseRegister(mleft.right().node()));
+      return;
+    }
+  }
+  if (m.right().IsFloat64Mul() && CanCover(node, m.right().node())) {
+    // For Add.D(x, Mul.D(y, z)):
+    Float64BinopMatcher mright(m.right().node());
+    if (IsMipsArchVariant(kMips32r2)) {  // Select Madd.D(x, y, z).
+      Emit(kMipsMaddD, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
+           g.UseRegister(mright.left().node()),
+           g.UseRegister(mright.right().node()));
+      return;
+    } else if (IsMipsArchVariant(kMips32r6)) {  // Select Maddf.D(x, y, z).
+      Emit(kMipsMaddfD, g.DefineSameAsFirst(node),
+           g.UseRegister(m.left().node()), g.UseRegister(mright.left().node()),
+           g.UseRegister(mright.right().node()));
+      return;
+    }
+  }
   VisitRRR(this, kMipsAddD, node);
 }
 
 
 void InstructionSelector::VisitFloat32Sub(Node* node) {
+  MipsOperandGenerator g(this);
+  Float32BinopMatcher m(node);
+  if (m.left().IsFloat32Mul() && CanCover(node, m.left().node())) {
+    if (IsMipsArchVariant(kMips32r2)) {
+      // For Sub.S(Mul.S(x,y), z) select Msub.S(z, x, y).
+      Float32BinopMatcher mleft(m.left().node());
+      Emit(kMipsMsubS, g.DefineAsRegister(node),
+           g.UseRegister(m.right().node()), g.UseRegister(mleft.left().node()),
+           g.UseRegister(mleft.right().node()));
+      return;
+    }
+  } else if (m.right().IsFloat32Mul() && CanCover(node, m.right().node())) {
+    if (IsMipsArchVariant(kMips32r6)) {
+      // For Sub.S(x,Mul.S(y,z)) select Msubf.S(x, y, z).
+      Float32BinopMatcher mright(m.right().node());
+      Emit(kMipsMsubfS, g.DefineSameAsFirst(node),
+           g.UseRegister(m.left().node()), g.UseRegister(mright.left().node()),
+           g.UseRegister(mright.right().node()));
+      return;
+    }
+  }
   VisitRRR(this, kMipsSubS, node);
 }
 
 void InstructionSelector::VisitFloat64Sub(Node* node) {
+  MipsOperandGenerator g(this);
+  Float64BinopMatcher m(node);
+  if (m.left().IsFloat64Mul() && CanCover(node, m.left().node())) {
+    if (IsMipsArchVariant(kMips32r2)) {
+      // For Sub.D(Mul.D(x,y), z) select Msub.D(z, x, y).
+      Float64BinopMatcher mleft(m.left().node());
+      Emit(kMipsMsubD, g.DefineAsRegister(node),
+           g.UseRegister(m.right().node()), g.UseRegister(mleft.left().node()),
+           g.UseRegister(mleft.right().node()));
+      return;
+    }
+  } else if (m.right().IsFloat64Mul() && CanCover(node, m.right().node())) {
+    if (IsMipsArchVariant(kMips32r6)) {
+      // For Sub.D(x,Mul.D(y,z)) select Msubf.D(x, y, z).
+      Float64BinopMatcher mright(m.right().node());
+      Emit(kMipsMsubfD, g.DefineSameAsFirst(node),
+           g.UseRegister(m.left().node()), g.UseRegister(mright.left().node()),
+           g.UseRegister(mright.right().node()));
+      return;
+    }
+  }
   VisitRRR(this, kMipsSubD, node);
 }
 
diff --git a/src/compiler/mips64/code-generator-mips64.cc b/src/compiler/mips64/code-generator-mips64.cc
index 3e2e8e2..9ed72ae 100644
--- a/src/compiler/mips64/code-generator-mips64.cc
+++ b/src/compiler/mips64/code-generator-mips64.cc
@@ -2,8 +2,8 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
-#include "src/ast/scopes.h"
 #include "src/compiler/code-generator.h"
+#include "src/compilation-info.h"
 #include "src/compiler/code-generator-impl.h"
 #include "src/compiler/gap-resolver.h"
 #include "src/compiler/node-matchers.h"
@@ -702,9 +702,6 @@
     case kArchDebugBreak:
       __ stop("kArchDebugBreak");
       break;
-    case kArchImpossible:
-      __ Abort(kConversionFromImpossibleValue);
-      break;
     case kArchComment: {
       Address comment_string = i.InputExternalReference(0).address();
       __ RecordComment(reinterpret_cast<const char*>(comment_string));
@@ -719,8 +716,8 @@
           BuildTranslation(instr, -1, 0, OutputFrameStateCombine::Ignore());
       Deoptimizer::BailoutType bailout_type =
           Deoptimizer::BailoutType(MiscField::decode(instr->opcode()));
-      CodeGenResult result =
-          AssembleDeoptimizerCall(deopt_state_id, bailout_type);
+      CodeGenResult result = AssembleDeoptimizerCall(
+          deopt_state_id, bailout_type, current_source_position_);
       if (result != kSuccess) return result;
       break;
     }
@@ -1317,6 +1314,38 @@
       __ sub_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
                i.InputDoubleRegister(1));
       break;
+    case kMips64MaddS:
+      __ madd_s(i.OutputFloatRegister(), i.InputFloatRegister(0),
+                i.InputFloatRegister(1), i.InputFloatRegister(2));
+      break;
+    case kMips64MaddD:
+      __ madd_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
+                i.InputDoubleRegister(1), i.InputDoubleRegister(2));
+      break;
+    case kMips64MaddfS:
+      __ maddf_s(i.OutputFloatRegister(), i.InputFloatRegister(1),
+                 i.InputFloatRegister(2));
+      break;
+    case kMips64MaddfD:
+      __ maddf_d(i.OutputDoubleRegister(), i.InputDoubleRegister(1),
+                 i.InputDoubleRegister(2));
+      break;
+    case kMips64MsubS:
+      __ msub_s(i.OutputFloatRegister(), i.InputFloatRegister(0),
+                i.InputFloatRegister(1), i.InputFloatRegister(2));
+      break;
+    case kMips64MsubD:
+      __ msub_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
+                i.InputDoubleRegister(1), i.InputDoubleRegister(2));
+      break;
+    case kMips64MsubfS:
+      __ msubf_s(i.OutputFloatRegister(), i.InputFloatRegister(1),
+                 i.InputFloatRegister(2));
+      break;
+    case kMips64MsubfD:
+      __ msubf_d(i.OutputDoubleRegister(), i.InputDoubleRegister(1),
+                 i.InputDoubleRegister(2));
+      break;
     case kMips64MulD:
       // TODO(plind): add special case: right op is -1.0, see arm port.
       __ mul_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
@@ -1644,6 +1673,12 @@
       break;
     // ... more basic instructions ...
 
+    case kMips64Seb:
+      __ seb(i.OutputRegister(), i.InputRegister(0));
+      break;
+    case kMips64Seh:
+      __ seh(i.OutputRegister(), i.InputRegister(0));
+      break;
     case kMips64Lbu:
       __ lbu(i.OutputRegister(), i.MemoryOperand());
       break;
@@ -2164,13 +2199,14 @@
 }
 
 CodeGenerator::CodeGenResult CodeGenerator::AssembleDeoptimizerCall(
-    int deoptimization_id, Deoptimizer::BailoutType bailout_type) {
+    int deoptimization_id, Deoptimizer::BailoutType bailout_type,
+    SourcePosition pos) {
   Address deopt_entry = Deoptimizer::GetDeoptimizationEntry(
       isolate(), deoptimization_id, bailout_type);
   if (deopt_entry == nullptr) return kTooManyDeoptimizationBailouts;
   DeoptimizeReason deoptimization_reason =
       GetDeoptimizationReason(deoptimization_id);
-  __ RecordDeoptReason(deoptimization_reason, 0, deoptimization_id);
+  __ RecordDeoptReason(deoptimization_reason, pos.raw(), deoptimization_id);
   __ Call(deopt_entry, RelocInfo::RUNTIME_ENTRY);
   return kSuccess;
 }
@@ -2350,9 +2386,14 @@
     } else if (src.type() == Constant::kFloat32) {
       if (destination->IsFPStackSlot()) {
         MemOperand dst = g.ToMemOperand(destination);
-        __ li(at, Operand(bit_cast<int32_t>(src.ToFloat32())));
-        __ sw(at, dst);
+        if (bit_cast<int32_t>(src.ToFloat32()) == 0) {
+          __ sw(zero_reg, dst);
+        } else {
+          __ li(at, Operand(bit_cast<int32_t>(src.ToFloat32())));
+          __ sw(at, dst);
+        }
       } else {
+        DCHECK(destination->IsFPRegister());
         FloatRegister dst = g.ToSingleRegister(destination);
         __ Move(dst, src.ToFloat32());
       }
diff --git a/src/compiler/mips64/instruction-codes-mips64.h b/src/compiler/mips64/instruction-codes-mips64.h
index e3dedd1..6a44434 100644
--- a/src/compiler/mips64/instruction-codes-mips64.h
+++ b/src/compiler/mips64/instruction-codes-mips64.h
@@ -85,6 +85,14 @@
   V(Mips64SqrtD)                    \
   V(Mips64MaxD)                     \
   V(Mips64MinD)                     \
+  V(Mips64MaddS)                    \
+  V(Mips64MaddD)                    \
+  V(Mips64MaddfS)                   \
+  V(Mips64MaddfD)                   \
+  V(Mips64MsubS)                    \
+  V(Mips64MsubD)                    \
+  V(Mips64MsubfS)                   \
+  V(Mips64MsubfD)                   \
   V(Mips64Float64RoundDown)         \
   V(Mips64Float64RoundTruncate)     \
   V(Mips64Float64RoundUp)           \
@@ -159,7 +167,9 @@
   V(Mips64StoreToStackSlot)         \
   V(Mips64ByteSwap64)               \
   V(Mips64ByteSwap32)               \
-  V(Mips64StackClaim)
+  V(Mips64StackClaim)               \
+  V(Mips64Seb)                      \
+  V(Mips64Seh)
 
 // Addressing modes represent the "shape" of inputs to an instruction.
 // Many instructions support multiple addressing modes. Addressing modes
diff --git a/src/compiler/mips64/instruction-selector-mips64.cc b/src/compiler/mips64/instruction-selector-mips64.cc
index 1167117..6e937e2 100644
--- a/src/compiler/mips64/instruction-selector-mips64.cc
+++ b/src/compiler/mips64/instruction-selector-mips64.cc
@@ -199,6 +199,10 @@
   EmitLoad(this, node, opcode);
 }
 
+void InstructionSelector::VisitProtectedLoad(Node* node) {
+  // TODO(eholk)
+  UNIMPLEMENTED();
+}
 
 void InstructionSelector::VisitStore(Node* node) {
   Mips64OperandGenerator g(this);
@@ -212,7 +216,7 @@
 
   // TODO(mips): I guess this could be done in a better way.
   if (write_barrier_kind != kNoWriteBarrier) {
-    DCHECK_EQ(MachineRepresentation::kTagged, rep);
+    DCHECK(CanBeTaggedPointer(rep));
     InstructionOperand inputs[3];
     size_t input_count = 0;
     inputs[input_count++] = g.UseUniqueRegister(base);
@@ -500,6 +504,28 @@
 
 
 void InstructionSelector::VisitWord32Sar(Node* node) {
+  Int32BinopMatcher m(node);
+  if (m.left().IsWord32Shl() && CanCover(node, m.left().node())) {
+    Int32BinopMatcher mleft(m.left().node());
+    if (m.right().HasValue() && mleft.right().HasValue()) {
+      Mips64OperandGenerator g(this);
+      uint32_t sar = m.right().Value();
+      uint32_t shl = mleft.right().Value();
+      if ((sar == shl) && (sar == 16)) {
+        Emit(kMips64Seh, g.DefineAsRegister(node),
+             g.UseRegister(mleft.left().node()));
+        return;
+      } else if ((sar == shl) && (sar == 24)) {
+        Emit(kMips64Seb, g.DefineAsRegister(node),
+             g.UseRegister(mleft.left().node()));
+        return;
+      } else if ((sar == shl) && (sar == 32)) {
+        Emit(kMips64Shl, g.DefineAsRegister(node),
+             g.UseRegister(mleft.left().node()), g.TempImmediate(0));
+        return;
+      }
+    }
+  }
   VisitRRO(this, kMips64Sar, node);
 }
 
@@ -1198,20 +1224,126 @@
 
 
 void InstructionSelector::VisitFloat32Add(Node* node) {
+  Mips64OperandGenerator g(this);
+  Float32BinopMatcher m(node);
+  if (m.left().IsFloat32Mul() && CanCover(node, m.left().node())) {
+    // For Add.S(Mul.S(x, y), z):
+    Float32BinopMatcher mleft(m.left().node());
+    if (kArchVariant == kMips64r2) {  // Select Madd.S(z, x, y).
+      Emit(kMips64MaddS, g.DefineAsRegister(node),
+           g.UseRegister(m.right().node()), g.UseRegister(mleft.left().node()),
+           g.UseRegister(mleft.right().node()));
+      return;
+    } else if (kArchVariant == kMips64r6) {  // Select Maddf.S(z, x, y).
+      Emit(kMips64MaddfS, g.DefineSameAsFirst(node),
+           g.UseRegister(m.right().node()), g.UseRegister(mleft.left().node()),
+           g.UseRegister(mleft.right().node()));
+      return;
+    }
+  }
+  if (m.right().IsFloat32Mul() && CanCover(node, m.right().node())) {
+    // For Add.S(x, Mul.S(y, z)):
+    Float32BinopMatcher mright(m.right().node());
+    if (kArchVariant == kMips64r2) {  // Select Madd.S(x, y, z).
+      Emit(kMips64MaddS, g.DefineAsRegister(node),
+           g.UseRegister(m.left().node()), g.UseRegister(mright.left().node()),
+           g.UseRegister(mright.right().node()));
+      return;
+    } else if (kArchVariant == kMips64r6) {  // Select Maddf.S(x, y, z).
+      Emit(kMips64MaddfS, g.DefineSameAsFirst(node),
+           g.UseRegister(m.left().node()), g.UseRegister(mright.left().node()),
+           g.UseRegister(mright.right().node()));
+      return;
+    }
+  }
   VisitRRR(this, kMips64AddS, node);
 }
 
 
 void InstructionSelector::VisitFloat64Add(Node* node) {
+  Mips64OperandGenerator g(this);
+  Float64BinopMatcher m(node);
+  if (m.left().IsFloat64Mul() && CanCover(node, m.left().node())) {
+    // For Add.D(Mul.D(x, y), z):
+    Float64BinopMatcher mleft(m.left().node());
+    if (kArchVariant == kMips64r2) {  // Select Madd.D(z, x, y).
+      Emit(kMips64MaddD, g.DefineAsRegister(node),
+           g.UseRegister(m.right().node()), g.UseRegister(mleft.left().node()),
+           g.UseRegister(mleft.right().node()));
+      return;
+    } else if (kArchVariant == kMips64r6) {  // Select Maddf.D(z, x, y).
+      Emit(kMips64MaddfD, g.DefineSameAsFirst(node),
+           g.UseRegister(m.right().node()), g.UseRegister(mleft.left().node()),
+           g.UseRegister(mleft.right().node()));
+      return;
+    }
+  }
+  if (m.right().IsFloat64Mul() && CanCover(node, m.right().node())) {
+    // For Add.D(x, Mul.D(y, z)):
+    Float64BinopMatcher mright(m.right().node());
+    if (kArchVariant == kMips64r2) {  // Select Madd.D(x, y, z).
+      Emit(kMips64MaddD, g.DefineAsRegister(node),
+           g.UseRegister(m.left().node()), g.UseRegister(mright.left().node()),
+           g.UseRegister(mright.right().node()));
+      return;
+    } else if (kArchVariant == kMips64r6) {  // Select Maddf.D(x, y, z).
+      Emit(kMips64MaddfD, g.DefineSameAsFirst(node),
+           g.UseRegister(m.left().node()), g.UseRegister(mright.left().node()),
+           g.UseRegister(mright.right().node()));
+      return;
+    }
+  }
   VisitRRR(this, kMips64AddD, node);
 }
 
 
 void InstructionSelector::VisitFloat32Sub(Node* node) {
+  Mips64OperandGenerator g(this);
+  Float32BinopMatcher m(node);
+  if (m.left().IsFloat32Mul() && CanCover(node, m.left().node())) {
+    if (kArchVariant == kMips64r2) {
+      // For Sub.S(Mul.S(x,y), z) select Msub.S(z, x, y).
+      Float32BinopMatcher mleft(m.left().node());
+      Emit(kMips64MsubS, g.DefineAsRegister(node),
+           g.UseRegister(m.right().node()), g.UseRegister(mleft.left().node()),
+           g.UseRegister(mleft.right().node()));
+      return;
+    }
+  } else if (m.right().IsFloat32Mul() && CanCover(node, m.right().node())) {
+    if (kArchVariant == kMips64r6) {
+      // For Sub.S(x,Mul.S(y,z)) select Msubf.S(x, y, z).
+      Float32BinopMatcher mright(m.right().node());
+      Emit(kMips64MsubfS, g.DefineSameAsFirst(node),
+           g.UseRegister(m.left().node()), g.UseRegister(mright.left().node()),
+           g.UseRegister(mright.right().node()));
+      return;
+    }
+  }
   VisitRRR(this, kMips64SubS, node);
 }
 
 void InstructionSelector::VisitFloat64Sub(Node* node) {
+  Mips64OperandGenerator g(this);
+  Float64BinopMatcher m(node);
+  if (m.left().IsFloat64Mul() && CanCover(node, m.left().node())) {
+    if (kArchVariant == kMips64r2) {
+      // For Sub.D(Mul.D(x,y), z) select Msub.D(z, x, y).
+      Float64BinopMatcher mleft(m.left().node());
+      Emit(kMips64MsubD, g.DefineAsRegister(node),
+           g.UseRegister(m.right().node()), g.UseRegister(mleft.left().node()),
+           g.UseRegister(mleft.right().node()));
+      return;
+    }
+  } else if (m.right().IsFloat64Mul() && CanCover(node, m.right().node())) {
+    if (kArchVariant == kMips64r6) {
+      // For Sub.D(x,Mul.D(y,z)) select Msubf.D(x, y, z).
+      Float64BinopMatcher mright(m.right().node());
+      Emit(kMips64MsubfD, g.DefineSameAsFirst(node),
+           g.UseRegister(m.left().node()), g.UseRegister(mright.left().node()),
+           g.UseRegister(mright.right().node()));
+      return;
+    }
+  }
   VisitRRR(this, kMips64SubD, node);
 }
 
diff --git a/src/compiler/move-optimizer.cc b/src/compiler/move-optimizer.cc
index 482c254..d87ece3 100644
--- a/src/compiler/move-optimizer.cc
+++ b/src/compiler/move-optimizer.cc
@@ -424,7 +424,7 @@
 namespace {
 
 bool IsSlot(const InstructionOperand& op) {
-  return op.IsStackSlot() || op.IsDoubleStackSlot();
+  return op.IsStackSlot() || op.IsFPStackSlot();
 }
 
 
diff --git a/src/compiler/move-optimizer.h b/src/compiler/move-optimizer.h
index 8e932a0..ce26a7f 100644
--- a/src/compiler/move-optimizer.h
+++ b/src/compiler/move-optimizer.h
@@ -6,7 +6,7 @@
 #define V8_COMPILER_MOVE_OPTIMIZER_
 
 #include "src/compiler/instruction.h"
-#include "src/zone-containers.h"
+#include "src/zone/zone-containers.h"
 
 namespace v8 {
 namespace internal {
diff --git a/src/compiler/node-aux-data.h b/src/compiler/node-aux-data.h
index 7a88292..b50ff38 100644
--- a/src/compiler/node-aux-data.h
+++ b/src/compiler/node-aux-data.h
@@ -6,7 +6,7 @@
 #define V8_COMPILER_NODE_AUX_DATA_H_
 
 #include "src/compiler/node.h"
-#include "src/zone-containers.h"
+#include "src/zone/zone-containers.h"
 
 namespace v8 {
 namespace internal {
diff --git a/src/compiler/node-cache.cc b/src/compiler/node-cache.cc
index 061a3ae..0be6f81 100644
--- a/src/compiler/node-cache.cc
+++ b/src/compiler/node-cache.cc
@@ -6,8 +6,8 @@
 
 #include <cstring>
 
-#include "src/zone.h"
-#include "src/zone-containers.h"
+#include "src/zone/zone-containers.h"
+#include "src/zone/zone.h"
 
 namespace v8 {
 namespace internal {
diff --git a/src/compiler/node-matchers.h b/src/compiler/node-matchers.h
index 10aed51..6c283dc 100644
--- a/src/compiler/node-matchers.h
+++ b/src/compiler/node-matchers.h
@@ -11,6 +11,7 @@
 #include "src/assembler.h"
 #include "src/compiler/node.h"
 #include "src/compiler/operator.h"
+#include "src/double.h"
 
 namespace v8 {
 namespace internal {
@@ -161,6 +162,17 @@
   bool IsNegative() const { return this->HasValue() && this->Value() < 0.0; }
   bool IsNaN() const { return this->HasValue() && std::isnan(this->Value()); }
   bool IsZero() const { return this->Is(0.0) && !std::signbit(this->Value()); }
+  bool IsNormal() const {
+    return this->HasValue() && std::isnormal(this->Value());
+  }
+  bool IsPositiveOrNegativePowerOf2() const {
+    if (!this->HasValue() || (this->Value() == 0.0)) {
+      return false;
+    }
+    Double value = Double(this->Value());
+    return !value.IsInfinite() &&
+           base::bits::IsPowerOfTwo64(value.Significand());
+  }
 };
 
 typedef FloatMatcher<float, IrOpcode::kFloat32Constant> Float32Matcher;
diff --git a/src/compiler/node-properties.h b/src/compiler/node-properties.h
index 9812158..ed3c117 100644
--- a/src/compiler/node-properties.h
+++ b/src/compiler/node-properties.h
@@ -6,7 +6,7 @@
 #define V8_COMPILER_NODE_PROPERTIES_H_
 
 #include "src/compiler/node.h"
-#include "src/types.h"
+#include "src/compiler/types.h"
 
 namespace v8 {
 namespace internal {
diff --git a/src/compiler/node.h b/src/compiler/node.h
index 4935187..e940371 100644
--- a/src/compiler/node.h
+++ b/src/compiler/node.h
@@ -7,8 +7,8 @@
 
 #include "src/compiler/opcodes.h"
 #include "src/compiler/operator.h"
-#include "src/types.h"
-#include "src/zone-containers.h"
+#include "src/compiler/types.h"
+#include "src/zone/zone-containers.h"
 
 namespace v8 {
 namespace internal {
diff --git a/src/compiler/opcodes.h b/src/compiler/opcodes.h
index c1b5945..5ac2012 100644
--- a/src/compiler/opcodes.h
+++ b/src/compiler/opcodes.h
@@ -134,7 +134,8 @@
   V(JSStoreGlobal)            \
   V(JSDeleteProperty)         \
   V(JSHasProperty)            \
-  V(JSInstanceOf)
+  V(JSInstanceOf)             \
+  V(JSOrdinaryHasInstance)
 
 #define JS_CONTEXT_OP_LIST(V) \
   V(JSLoadContext)            \
@@ -150,10 +151,8 @@
   V(JSCallFunction)                 \
   V(JSCallRuntime)                  \
   V(JSConvertReceiver)              \
-  V(JSForInDone)                    \
   V(JSForInNext)                    \
   V(JSForInPrepare)                 \
-  V(JSForInStep)                    \
   V(JSLoadMessage)                  \
   V(JSStoreMessage)                 \
   V(JSGeneratorStore)               \
@@ -181,7 +180,8 @@
   V(ChangeTaggedToBit)               \
   V(ChangeBitToTagged)               \
   V(TruncateTaggedToWord32)          \
-  V(TruncateTaggedToFloat64)
+  V(TruncateTaggedToFloat64)         \
+  V(TruncateTaggedToBit)
 
 #define SIMPLIFIED_CHECKED_OP_LIST(V) \
   V(CheckedInt32Add)                  \
@@ -191,12 +191,15 @@
   V(CheckedUint32Div)                 \
   V(CheckedUint32Mod)                 \
   V(CheckedInt32Mul)                  \
+  V(CheckedInt32ToTaggedSigned)       \
   V(CheckedUint32ToInt32)             \
+  V(CheckedUint32ToTaggedSigned)      \
   V(CheckedFloat64ToInt32)            \
   V(CheckedTaggedSignedToInt32)       \
   V(CheckedTaggedToInt32)             \
   V(CheckedTruncateTaggedToWord32)    \
-  V(CheckedTaggedToFloat64)
+  V(CheckedTaggedToFloat64)           \
+  V(CheckedTaggedToTaggedSigned)
 
 #define SIMPLIFIED_COMPARE_BINOP_LIST(V) \
   V(NumberEqual)                         \
@@ -270,6 +273,7 @@
   V(NumberTan)                         \
   V(NumberTanh)                        \
   V(NumberTrunc)                       \
+  V(NumberToBoolean)                   \
   V(NumberToInt32)                     \
   V(NumberToUint32)                    \
   V(NumberSilenceNaN)
@@ -281,13 +285,14 @@
   V(BooleanNot)                     \
   V(StringCharCodeAt)               \
   V(StringFromCharCode)             \
+  V(StringFromCodePoint)            \
   V(CheckBounds)                    \
   V(CheckIf)                        \
   V(CheckMaps)                      \
   V(CheckNumber)                    \
   V(CheckString)                    \
-  V(CheckTaggedPointer)             \
-  V(CheckTaggedSigned)              \
+  V(CheckSmi)                       \
+  V(CheckHeapObject)                \
   V(CheckFloat64Hole)               \
   V(CheckTaggedHole)                \
   V(ConvertTaggedHoleToUndefined)   \
@@ -306,6 +311,7 @@
   V(ObjectIsSmi)                    \
   V(ObjectIsString)                 \
   V(ObjectIsUndetectable)           \
+  V(ArrayBufferWasNeutered)         \
   V(EnsureWritableFastElements)     \
   V(MaybeGrowFastElements)          \
   V(TransitionElementsKind)
@@ -338,59 +344,131 @@
   V(Float64LessThan)                  \
   V(Float64LessThanOrEqual)
 
+#define MACHINE_UNOP_32_LIST(V) \
+  V(Word32Clz)                  \
+  V(Word32Ctz)                  \
+  V(Word32ReverseBits)          \
+  V(Word32ReverseBytes)
+
+#define MACHINE_BINOP_32_LIST(V) \
+  V(Word32And)                   \
+  V(Word32Or)                    \
+  V(Word32Xor)                   \
+  V(Word32Shl)                   \
+  V(Word32Shr)                   \
+  V(Word32Sar)                   \
+  V(Word32Ror)                   \
+  V(Int32Add)                    \
+  V(Int32AddWithOverflow)        \
+  V(Int32Sub)                    \
+  V(Int32SubWithOverflow)        \
+  V(Int32Mul)                    \
+  V(Int32MulWithOverflow)        \
+  V(Int32MulHigh)                \
+  V(Int32Div)                    \
+  V(Int32Mod)                    \
+  V(Uint32Div)                   \
+  V(Uint32Mod)                   \
+  V(Uint32MulHigh)
+
+#define MACHINE_BINOP_64_LIST(V) \
+  V(Word64And)                   \
+  V(Word64Or)                    \
+  V(Word64Xor)                   \
+  V(Word64Shl)                   \
+  V(Word64Shr)                   \
+  V(Word64Sar)                   \
+  V(Word64Ror)                   \
+  V(Int64Add)                    \
+  V(Int64AddWithOverflow)        \
+  V(Int64Sub)                    \
+  V(Int64SubWithOverflow)        \
+  V(Int64Mul)                    \
+  V(Int64Div)                    \
+  V(Int64Mod)                    \
+  V(Uint64Div)                   \
+  V(Uint64Mod)
+
+#define MACHINE_FLOAT32_UNOP_LIST(V) \
+  V(Float32Abs)                      \
+  V(Float32Neg)                      \
+  V(Float32RoundDown)                \
+  V(Float32RoundTiesEven)            \
+  V(Float32RoundTruncate)            \
+  V(Float32RoundUp)                  \
+  V(Float32Sqrt)
+
+#define MACHINE_FLOAT32_BINOP_LIST(V) \
+  V(Float32Add)                       \
+  V(Float32Sub)                       \
+  V(Float32Mul)                       \
+  V(Float32Div)                       \
+  V(Float32Max)                       \
+  V(Float32Min)
+
+#define MACHINE_FLOAT64_UNOP_LIST(V) \
+  V(Float64Abs)                      \
+  V(Float64Acos)                     \
+  V(Float64Acosh)                    \
+  V(Float64Asin)                     \
+  V(Float64Asinh)                    \
+  V(Float64Atan)                     \
+  V(Float64Atanh)                    \
+  V(Float64Cbrt)                     \
+  V(Float64Cos)                      \
+  V(Float64Cosh)                     \
+  V(Float64Exp)                      \
+  V(Float64Expm1)                    \
+  V(Float64Log)                      \
+  V(Float64Log1p)                    \
+  V(Float64Log10)                    \
+  V(Float64Log2)                     \
+  V(Float64Neg)                      \
+  V(Float64RoundDown)                \
+  V(Float64RoundTiesAway)            \
+  V(Float64RoundTiesEven)            \
+  V(Float64RoundTruncate)            \
+  V(Float64RoundUp)                  \
+  V(Float64Sin)                      \
+  V(Float64Sinh)                     \
+  V(Float64Sqrt)                     \
+  V(Float64Tan)                      \
+  V(Float64Tanh)
+
+#define MACHINE_FLOAT64_BINOP_LIST(V) \
+  V(Float64Atan2)                     \
+  V(Float64Max)                       \
+  V(Float64Min)                       \
+  V(Float64Add)                       \
+  V(Float64Sub)                       \
+  V(Float64Mul)                       \
+  V(Float64Div)                       \
+  V(Float64Mod)                       \
+  V(Float64Pow)
+
 #define MACHINE_OP_LIST(V)      \
+  MACHINE_UNOP_32_LIST(V)       \
+  MACHINE_BINOP_32_LIST(V)      \
+  MACHINE_BINOP_64_LIST(V)      \
   MACHINE_COMPARE_BINOP_LIST(V) \
+  MACHINE_FLOAT32_BINOP_LIST(V) \
+  MACHINE_FLOAT32_UNOP_LIST(V)  \
+  MACHINE_FLOAT64_BINOP_LIST(V) \
+  MACHINE_FLOAT64_UNOP_LIST(V)  \
   V(DebugBreak)                 \
   V(Comment)                    \
   V(Load)                       \
   V(Store)                      \
   V(StackSlot)                  \
-  V(Word32And)                  \
-  V(Word32Or)                   \
-  V(Word32Xor)                  \
-  V(Word32Shl)                  \
-  V(Word32Shr)                  \
-  V(Word32Sar)                  \
-  V(Word32Ror)                  \
-  V(Word32Clz)                  \
-  V(Word32Ctz)                  \
-  V(Word32ReverseBits)          \
-  V(Word32ReverseBytes)         \
   V(Word32Popcnt)               \
   V(Word64Popcnt)               \
-  V(Word64And)                  \
-  V(Word64Or)                   \
-  V(Word64Xor)                  \
-  V(Word64Shl)                  \
-  V(Word64Shr)                  \
-  V(Word64Sar)                  \
-  V(Word64Ror)                  \
   V(Word64Clz)                  \
   V(Word64Ctz)                  \
   V(Word64ReverseBits)          \
   V(Word64ReverseBytes)         \
-  V(Int32Add)                   \
-  V(Int32AddWithOverflow)       \
-  V(Int32Sub)                   \
-  V(Int32SubWithOverflow)       \
-  V(Int32Mul)                   \
-  V(Int32MulWithOverflow)       \
-  V(Int32MulHigh)               \
-  V(Int32Div)                   \
-  V(Int32Mod)                   \
-  V(Uint32Div)                  \
-  V(Uint32Mod)                  \
-  V(Uint32MulHigh)              \
-  V(Int64Add)                   \
-  V(Int64AddWithOverflow)       \
-  V(Int64Sub)                   \
-  V(Int64SubWithOverflow)       \
-  V(Int64Mul)                   \
-  V(Int64Div)                   \
-  V(Int64Mod)                   \
-  V(Uint64Div)                  \
-  V(Uint64Mod)                  \
+  V(BitcastTaggedToWord)        \
   V(BitcastWordToTagged)        \
+  V(BitcastWordToTaggedSigned)  \
   V(TruncateFloat64ToWord32)    \
   V(ChangeFloat32ToFloat64)     \
   V(ChangeFloat64ToInt32)       \
@@ -407,12 +485,6 @@
   V(ChangeInt32ToInt64)         \
   V(ChangeUint32ToFloat64)      \
   V(ChangeUint32ToUint64)       \
-  V(ImpossibleToBit)            \
-  V(ImpossibleToWord32)         \
-  V(ImpossibleToWord64)         \
-  V(ImpossibleToFloat32)        \
-  V(ImpossibleToFloat64)        \
-  V(ImpossibleToTagged)         \
   V(TruncateFloat64ToFloat32)   \
   V(TruncateInt64ToInt32)       \
   V(RoundFloat64ToInt32)        \
@@ -426,55 +498,6 @@
   V(BitcastFloat64ToInt64)      \
   V(BitcastInt32ToFloat32)      \
   V(BitcastInt64ToFloat64)      \
-  V(Float32Add)                 \
-  V(Float32Sub)                 \
-  V(Float32Neg)                 \
-  V(Float32Mul)                 \
-  V(Float32Div)                 \
-  V(Float32Abs)                 \
-  V(Float32Sqrt)                \
-  V(Float32RoundDown)           \
-  V(Float32Max)                 \
-  V(Float32Min)                 \
-  V(Float64Add)                 \
-  V(Float64Sub)                 \
-  V(Float64Neg)                 \
-  V(Float64Mul)                 \
-  V(Float64Div)                 \
-  V(Float64Mod)                 \
-  V(Float64Max)                 \
-  V(Float64Min)                 \
-  V(Float64Abs)                 \
-  V(Float64Acos)                \
-  V(Float64Acosh)               \
-  V(Float64Asin)                \
-  V(Float64Asinh)               \
-  V(Float64Atan)                \
-  V(Float64Atanh)               \
-  V(Float64Atan2)               \
-  V(Float64Cbrt)                \
-  V(Float64Cos)                 \
-  V(Float64Cosh)                \
-  V(Float64Exp)                 \
-  V(Float64Expm1)               \
-  V(Float64Log)                 \
-  V(Float64Log1p)               \
-  V(Float64Log10)               \
-  V(Float64Log2)                \
-  V(Float64Pow)                 \
-  V(Float64Sin)                 \
-  V(Float64Sinh)                \
-  V(Float64Sqrt)                \
-  V(Float64Tan)                 \
-  V(Float64Tanh)                \
-  V(Float64RoundDown)           \
-  V(Float32RoundUp)             \
-  V(Float64RoundUp)             \
-  V(Float32RoundTruncate)       \
-  V(Float64RoundTruncate)       \
-  V(Float64RoundTiesAway)       \
-  V(Float32RoundTiesEven)       \
-  V(Float64RoundTiesEven)       \
   V(Float64ExtractLowWord32)    \
   V(Float64ExtractHighWord32)   \
   V(Float64InsertLowWord32)     \
@@ -492,6 +515,7 @@
   V(Word32PairShl)              \
   V(Word32PairShr)              \
   V(Word32PairSar)              \
+  V(ProtectedLoad)              \
   V(AtomicLoad)                 \
   V(AtomicStore)                \
   V(UnsafePointerAdd)
diff --git a/src/compiler/operation-typer.cc b/src/compiler/operation-typer.cc
index f3ef778..4295a22 100644
--- a/src/compiler/operation-typer.cc
+++ b/src/compiler/operation-typer.cc
@@ -5,10 +5,10 @@
 #include "src/compiler/operation-typer.h"
 
 #include "src/compiler/common-operator.h"
+#include "src/compiler/type-cache.h"
+#include "src/compiler/types.h"
 #include "src/factory.h"
 #include "src/isolate.h"
-#include "src/type-cache.h"
-#include "src/types.h"
 
 #include "src/objects-inl.h"
 
@@ -460,6 +460,16 @@
   return cache_.kIntegerOrMinusZeroOrNaN;
 }
 
+Type* OperationTyper::NumberToBoolean(Type* type) {
+  DCHECK(type->Is(Type::Number()));
+  if (!type->IsInhabited()) return Type::None();
+  if (type->Is(cache_.kZeroish)) return singleton_false_;
+  if (type->Is(Type::PlainNumber()) && (type->Max() < 0 || 0 < type->Min())) {
+    return singleton_true_;  // Ruled out nan, -0 and +0.
+  }
+  return Type::Boolean();
+}
+
 Type* OperationTyper::NumberToInt32(Type* type) {
   DCHECK(type->Is(Type::Number()));
 
diff --git a/src/compiler/operation-typer.h b/src/compiler/operation-typer.h
index dcfe0c4..09f063c 100644
--- a/src/compiler/operation-typer.h
+++ b/src/compiler/operation-typer.h
@@ -11,15 +11,17 @@
 namespace v8 {
 namespace internal {
 
+// Forward declarations.
 class Isolate;
 class RangeType;
-class Type;
-class TypeCache;
 class Zone;
 
 namespace compiler {
 
+// Forward declarations.
 class Operator;
+class Type;
+class TypeCache;
 
 class OperationTyper {
  public:
diff --git a/src/compiler/operator-properties.cc b/src/compiler/operator-properties.cc
index 68d884d..0a9e644 100644
--- a/src/compiler/operator-properties.cc
+++ b/src/compiler/operator-properties.cc
@@ -61,6 +61,7 @@
     case IrOpcode::kJSLessThanOrEqual:
     case IrOpcode::kJSHasProperty:
     case IrOpcode::kJSInstanceOf:
+    case IrOpcode::kJSOrdinaryHasInstance:
 
     // Object operations
     case IrOpcode::kJSCreate:
diff --git a/src/compiler/operator.h b/src/compiler/operator.h
index b6ec2c6..8e3a9d1 100644
--- a/src/compiler/operator.h
+++ b/src/compiler/operator.h
@@ -10,7 +10,7 @@
 #include "src/base/flags.h"
 #include "src/base/functional.h"
 #include "src/handles.h"
-#include "src/zone.h"
+#include "src/zone/zone.h"
 
 namespace v8 {
 namespace internal {
diff --git a/src/compiler/osr.cc b/src/compiler/osr.cc
index 187e612..6d61aff 100644
--- a/src/compiler/osr.cc
+++ b/src/compiler/osr.cc
@@ -2,22 +2,23 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
+#include "src/compiler/osr.h"
 #include "src/ast/scopes.h"
-#include "src/compiler.h"
+#include "src/compilation-info.h"
 #include "src/compiler/all-nodes.h"
-#include "src/compiler/common-operator.h"
 #include "src/compiler/common-operator-reducer.h"
+#include "src/compiler/common-operator.h"
 #include "src/compiler/dead-code-elimination.h"
 #include "src/compiler/frame.h"
-#include "src/compiler/graph.h"
 #include "src/compiler/graph-reducer.h"
 #include "src/compiler/graph-trimmer.h"
 #include "src/compiler/graph-visualizer.h"
+#include "src/compiler/graph.h"
 #include "src/compiler/js-graph.h"
 #include "src/compiler/loop-analysis.h"
-#include "src/compiler/node.h"
 #include "src/compiler/node-marker.h"
-#include "src/compiler/osr.h"
+#include "src/compiler/node.h"
+#include "src/objects-inl.h"
 
 namespace v8 {
 namespace internal {
@@ -270,11 +271,8 @@
     }
   }
 
-  if (osr_loop_entry == nullptr) {
-    // No OSR entry found, do nothing.
-    CHECK(osr_normal_entry);
-    return;
-  }
+  CHECK_NOT_NULL(osr_normal_entry);  // Should have found the OSR normal entry.
+  CHECK_NOT_NULL(osr_loop_entry);    // Should have found the OSR loop entry.
 
   for (Node* use : osr_loop_entry->uses()) {
     if (use->opcode() == IrOpcode::kLoop) {
diff --git a/src/compiler/osr.h b/src/compiler/osr.h
index 89773f0..1f562c5 100644
--- a/src/compiler/osr.h
+++ b/src/compiler/osr.h
@@ -5,7 +5,7 @@
 #ifndef V8_COMPILER_OSR_H_
 #define V8_COMPILER_OSR_H_
 
-#include "src/zone.h"
+#include "src/zone/zone.h"
 
 // TurboFan structures OSR graphs in a way that separates almost all phases of
 // compilation from OSR implementation details. This is accomplished with
diff --git a/src/compiler/pipeline-statistics.cc b/src/compiler/pipeline-statistics.cc
index 5b97abe..a032c3d 100644
--- a/src/compiler/pipeline-statistics.cc
+++ b/src/compiler/pipeline-statistics.cc
@@ -4,9 +4,10 @@
 
 #include <memory>
 
-#include "src/compiler.h"
+#include "src/compilation-info.h"
 #include "src/compiler/pipeline-statistics.h"
 #include "src/compiler/zone-pool.h"
+#include "src/isolate.h"
 
 namespace v8 {
 namespace internal {
diff --git a/src/compiler/pipeline.cc b/src/compiler/pipeline.cc
index ba7aa96..805b687 100644
--- a/src/compiler/pipeline.cc
+++ b/src/compiler/pipeline.cc
@@ -10,6 +10,8 @@
 
 #include "src/base/adapters.h"
 #include "src/base/platform/elapsed-timer.h"
+#include "src/compilation-info.h"
+#include "src/compiler.h"
 #include "src/compiler/ast-graph-builder.h"
 #include "src/compiler/ast-loop-assignment-analyzer.h"
 #include "src/compiler/basic-block-instrumentor.h"
@@ -46,6 +48,7 @@
 #include "src/compiler/loop-analysis.h"
 #include "src/compiler/loop-peeling.h"
 #include "src/compiler/loop-variable-optimizer.h"
+#include "src/compiler/machine-graph-verifier.h"
 #include "src/compiler/machine-operator-reducer.h"
 #include "src/compiler/memory-optimizer.h"
 #include "src/compiler/move-optimizer.h"
@@ -63,6 +66,7 @@
 #include "src/compiler/store-store-elimination.h"
 #include "src/compiler/tail-call-optimization.h"
 #include "src/compiler/type-hint-analyzer.h"
+#include "src/compiler/typed-optimization.h"
 #include "src/compiler/typer.h"
 #include "src/compiler/value-numbering-reducer.h"
 #include "src/compiler/verifier.h"
@@ -426,7 +430,8 @@
   }
   if (FLAG_trace_turbo_graph || FLAG_trace_turbo_scheduler) {
     AllowHandleDereference allow_deref;
-    OFStream os(stdout);
+    CodeTracer::Scope tracing_scope(info->isolate()->GetCodeTracer());
+    OFStream os(tracing_scope.file());
     os << "-- Schedule --------------------------------------\n" << *schedule;
   }
 }
@@ -439,14 +444,14 @@
                                LoopAssignmentAnalysis* loop_assignment,
                                TypeHintAnalysis* type_hint_analysis,
                                SourcePositionTable* source_positions)
-      : AstGraphBuilder(local_zone, info, jsgraph, loop_assignment,
+      : AstGraphBuilder(local_zone, info, jsgraph, 1.0f, loop_assignment,
                         type_hint_analysis),
         source_positions_(source_positions),
         start_position_(info->shared_info()->start_position()) {}
 
-  bool CreateGraph(bool stack_check) {
+  bool CreateGraph() {
     SourcePositionTable::Scope pos_scope(source_positions_, start_position_);
-    return AstGraphBuilder::CreateGraph(stack_check);
+    return AstGraphBuilder::CreateGraph();
   }
 
 #define DEF_VISIT(type)                                               \
@@ -562,7 +567,7 @@
   PipelineCompilationJob(Isolate* isolate, Handle<JSFunction> function)
       // Note that the CompilationInfo is not initialized at the time we pass it
       // to the CompilationJob constructor, but it is not dereferenced there.
-      : CompilationJob(&info_, "TurboFan"),
+      : CompilationJob(isolate, &info_, "TurboFan"),
         zone_(isolate->allocator()),
         zone_pool_(isolate->allocator()),
         parse_info_(&zone_, function),
@@ -601,6 +606,9 @@
     if (FLAG_native_context_specialization) {
       info()->MarkAsNativeContextSpecializing();
     }
+    if (FLAG_turbo_inlining) {
+      info()->MarkAsInliningEnabled();
+    }
   }
   if (!info()->shared_info()->asm_function() || FLAG_turbo_asm_deoptimization) {
     info()->MarkAsDeoptimizationEnabled();
@@ -615,14 +623,6 @@
     if (!Compiler::EnsureDeoptimizationSupport(info())) return FAILED;
   }
 
-  // TODO(mstarzinger): Hack to ensure that certain call descriptors are
-  // initialized on the main thread, since it is needed off-thread by the
-  // effect control linearizer.
-  CodeFactory::CopyFastSmiOrObjectElements(info()->isolate());
-  CodeFactory::GrowFastDoubleElements(info()->isolate());
-  CodeFactory::GrowFastSmiOrObjectElements(info()->isolate());
-  CodeFactory::ToNumber(info()->isolate());
-
   linkage_ = new (&zone_) Linkage(Linkage::ComputeIncoming(&zone_, info()));
 
   if (!pipeline_.CreateGraph()) {
@@ -660,7 +660,8 @@
   explicit PipelineWasmCompilationJob(CompilationInfo* info, Graph* graph,
                                       CallDescriptor* descriptor,
                                       SourcePositionTable* source_positions)
-      : CompilationJob(info, "TurboFan", State::kReadyToExecute),
+      : CompilationJob(info->isolate(), info, "TurboFan",
+                       State::kReadyToExecute),
         zone_pool_(info->isolate()->allocator()),
         data_(&zone_pool_, info, graph, source_positions),
         pipeline_(&data_),
@@ -756,18 +757,17 @@
   static const char* phase_name() { return "graph builder"; }
 
   void Run(PipelineData* data, Zone* temp_zone) {
-    bool stack_check = !data->info()->IsStub();
     bool succeeded = false;
 
     if (data->info()->is_optimizing_from_bytecode()) {
       BytecodeGraphBuilder graph_builder(temp_zone, data->info(),
-                                         data->jsgraph());
+                                         data->jsgraph(), 1.0f);
       succeeded = graph_builder.CreateGraph();
     } else {
       AstGraphBuilderWithPositions graph_builder(
           temp_zone, data->info(), data->jsgraph(), data->loop_assignment(),
           data->type_hint_analysis(), data->source_positions());
-      succeeded = graph_builder.CreateGraph(stack_check);
+      succeeded = graph_builder.CreateGraph();
     }
 
     if (!succeeded) {
@@ -786,15 +786,19 @@
                                               data->common());
     CommonOperatorReducer common_reducer(&graph_reducer, data->graph(),
                                          data->common(), data->machine());
-    JSCallReducer call_reducer(data->jsgraph(),
-                               data->info()->is_deoptimization_enabled()
-                                   ? JSCallReducer::kDeoptimizationEnabled
-                                   : JSCallReducer::kNoFlags,
-                               data->native_context());
+    JSCallReducer::Flags call_reducer_flags = JSCallReducer::kNoFlags;
+    if (data->info()->is_bailout_on_uninitialized()) {
+      call_reducer_flags |= JSCallReducer::kBailoutOnUninitialized;
+    }
+    if (data->info()->is_deoptimization_enabled()) {
+      call_reducer_flags |= JSCallReducer::kDeoptimizationEnabled;
+    }
+    JSCallReducer call_reducer(&graph_reducer, data->jsgraph(),
+                               call_reducer_flags, data->native_context());
     JSContextSpecialization context_specialization(
         &graph_reducer, data->jsgraph(),
         data->info()->is_function_context_specializing()
-            ? data->info()->context()
+            ? handle(data->info()->context())
             : MaybeHandle<Context>());
     JSFrameSpecialization frame_specialization(data->info()->osr_frame(),
                                                data->jsgraph());
@@ -837,9 +841,7 @@
     AddReducer(data, &graph_reducer, &context_specialization);
     AddReducer(data, &graph_reducer, &intrinsic_lowering);
     AddReducer(data, &graph_reducer, &call_reducer);
-    if (!data->info()->is_optimizing_from_bytecode()) {
-      AddReducer(data, &graph_reducer, &inlining);
-    }
+    AddReducer(data, &graph_reducer, &inlining);
     graph_reducer.ReduceGraph();
   }
 };
@@ -913,7 +915,7 @@
             : MaybeHandle<LiteralsArray>();
     JSCreateLowering create_lowering(
         &graph_reducer, data->info()->dependencies(), data->jsgraph(),
-        literals_array, temp_zone);
+        literals_array, data->native_context(), temp_zone);
     JSTypedLowering::Flags typed_lowering_flags = JSTypedLowering::kNoFlags;
     if (data->info()->is_deoptimization_enabled()) {
       typed_lowering_flags |= JSTypedLowering::kDeoptimizationEnabled;
@@ -921,6 +923,12 @@
     JSTypedLowering typed_lowering(&graph_reducer, data->info()->dependencies(),
                                    typed_lowering_flags, data->jsgraph(),
                                    temp_zone);
+    TypedOptimization typed_optimization(
+        &graph_reducer, data->info()->dependencies(),
+        data->info()->is_deoptimization_enabled()
+            ? TypedOptimization::kDeoptimizationEnabled
+            : TypedOptimization::kNoFlags,
+        data->jsgraph());
     SimplifiedOperatorReducer simple_reducer(&graph_reducer, data->jsgraph());
     CheckpointElimination checkpoint_elimination(&graph_reducer);
     CommonOperatorReducer common_reducer(&graph_reducer, data->graph(),
@@ -930,6 +938,7 @@
     if (data->info()->is_deoptimization_enabled()) {
       AddReducer(data, &graph_reducer, &create_lowering);
     }
+    AddReducer(data, &graph_reducer, &typed_optimization);
     AddReducer(data, &graph_reducer, &typed_lowering);
     AddReducer(data, &graph_reducer, &simple_reducer);
     AddReducer(data, &graph_reducer, &checkpoint_elimination);
@@ -1065,14 +1074,13 @@
 };
 
 // The store-store elimination greatly benefits from doing a common operator
-// reducer just before it, to eliminate conditional deopts with a constant
-// condition.
+// reducer and dead code elimination just before it, to eliminate conditional
+// deopts with a constant condition.
 
 struct DeadCodeEliminationPhase {
-  static const char* phase_name() { return "common operator reducer"; }
+  static const char* phase_name() { return "dead code elimination"; }
 
   void Run(PipelineData* data, Zone* temp_zone) {
-    // Run the common operator reducer.
     JSGraphReducer graph_reducer(data->jsgraph(), temp_zone);
     DeadCodeElimination dead_code_elimination(&graph_reducer, data->graph(),
                                               data->common());
@@ -1225,8 +1233,17 @@
         data->schedule(), data->source_positions(), data->frame(),
         data->info()->is_source_positions_enabled()
             ? InstructionSelector::kAllSourcePositions
-            : InstructionSelector::kCallSourcePositions);
-    selector.SelectInstructions();
+            : InstructionSelector::kCallSourcePositions,
+        InstructionSelector::SupportedFeatures(),
+        FLAG_turbo_instruction_scheduling
+            ? InstructionSelector::kEnableScheduling
+            : InstructionSelector::kDisableScheduling,
+        data->info()->will_serialize()
+            ? InstructionSelector::kEnableSerialization
+            : InstructionSelector::kDisableSerialization);
+    if (!selector.SelectInstructions()) {
+      data->set_compilation_failed();
+    }
   }
 };
 
@@ -1426,7 +1443,8 @@
 
     if (FLAG_trace_turbo_graph) {  // Simple textual RPO.
       AllowHandleDereference allow_deref;
-      OFStream os(stdout);
+      CodeTracer::Scope tracing_scope(info->isolate()->GetCodeTracer());
+      OFStream os(tracing_scope.file());
       os << "-- Graph after " << phase << " -- " << std::endl;
       os << AsRPO(*graph);
     }
@@ -1459,7 +1477,8 @@
   data->BeginPhaseKind("graph creation");
 
   if (FLAG_trace_turbo) {
-    OFStream os(stdout);
+    CodeTracer::Scope tracing_scope(isolate()->GetCodeTracer());
+    OFStream os(tracing_scope.file());
     os << "---------------------------------------------------\n"
        << "Begin compiling method " << info()->GetDebugName().get()
        << " using Turbofan" << std::endl;
@@ -1585,7 +1604,7 @@
   RunPrintAndVerify("Effect and control linearized", true);
 
   Run<DeadCodeEliminationPhase>();
-  RunPrintAndVerify("Common operator reducer", true);
+  RunPrintAndVerify("Dead code elimination", true);
 
   if (FLAG_turbo_store_elimination) {
     Run<StoreStoreEliminationPhase>();
@@ -1623,6 +1642,7 @@
                                                Code::Flags flags,
                                                const char* debug_name) {
   CompilationInfo info(CStrVector(debug_name), isolate, graph->zone(), flags);
+  if (isolate->serializer_enabled()) info.PrepareForSerializing();
 
   // Construct a pipeline for scheduling and code generation.
   ZonePool zone_pool(isolate->allocator());
@@ -1717,7 +1737,7 @@
                                            InstructionSequence* sequence,
                                            bool run_verifier) {
   CompilationInfo info(ArrayVector("testing"), sequence->isolate(),
-                       sequence->zone());
+                       sequence->zone(), Code::ComputeFlags(Code::STUB));
   ZonePool zone_pool(sequence->isolate()->allocator());
   PipelineData data(&zone_pool, &info, sequence);
   PipelineImpl pipeline(&data);
@@ -1740,11 +1760,22 @@
         info(), data->graph(), data->schedule()));
   }
 
+  if (FLAG_turbo_verify_machine_graph) {
+    Zone temp_zone(data->isolate()->allocator());
+    MachineGraphVerifier::Run(data->graph(), data->schedule(), linkage,
+                              &temp_zone);
+  }
+
   data->InitializeInstructionSequence(call_descriptor);
 
   data->InitializeFrameData(call_descriptor);
   // Select and schedule instructions covering the scheduled graph.
   Run<InstructionSelectionPhase>(linkage);
+  if (data->compilation_failed()) {
+    info()->AbortOptimization(kCodeGenerationFailed);
+    data->EndPhaseKind();
+    return false;
+  }
 
   if (FLAG_trace_turbo && !data->MayHaveUnverifiableGraph()) {
     AllowHandleDereference allow_deref;
@@ -1825,7 +1856,8 @@
     json_of << data->source_position_output();
     json_of << "}";
 
-    OFStream os(stdout);
+    CodeTracer::Scope tracing_scope(isolate()->GetCodeTracer());
+    OFStream os(tracing_scope.file());
     os << "---------------------------------------------------\n"
        << "Finished compiling method " << info()->GetDebugName().get()
        << " using Turbofan" << std::endl;
@@ -1876,7 +1908,8 @@
   Run<BuildLiveRangesPhase>();
   if (FLAG_trace_turbo_graph) {
     AllowHandleDereference allow_deref;
-    OFStream os(stdout);
+    CodeTracer::Scope tracing_scope(isolate()->GetCodeTracer());
+    OFStream os(tracing_scope.file());
     os << "----- Instruction sequence before register allocation -----\n"
        << PrintableInstructionSequence({config, data->sequence()});
   }
@@ -1911,7 +1944,8 @@
 
   if (FLAG_trace_turbo_graph) {
     AllowHandleDereference allow_deref;
-    OFStream os(stdout);
+    CodeTracer::Scope tracing_scope(isolate()->GetCodeTracer());
+    OFStream os(tracing_scope.file());
     os << "----- Instruction sequence after register allocation -----\n"
        << PrintableInstructionSequence({config, data->sequence()});
   }
diff --git a/src/compiler/ppc/code-generator-ppc.cc b/src/compiler/ppc/code-generator-ppc.cc
index 9db36b4..f8f3099 100644
--- a/src/compiler/ppc/code-generator-ppc.cc
+++ b/src/compiler/ppc/code-generator-ppc.cc
@@ -4,7 +4,7 @@
 
 #include "src/compiler/code-generator.h"
 
-#include "src/ast/scopes.h"
+#include "src/compilation-info.h"
 #include "src/compiler/code-generator-impl.h"
 #include "src/compiler/gap-resolver.h"
 #include "src/compiler/node-matchers.h"
@@ -1077,9 +1077,6 @@
     case kArchDebugBreak:
       __ stop("kArchDebugBreak");
       break;
-    case kArchImpossible:
-      __ Abort(kConversionFromImpossibleValue);
-      break;
     case kArchNop:
     case kArchThrowTerminator:
       // don't emit code for nops.
@@ -1090,8 +1087,8 @@
           BuildTranslation(instr, -1, 0, OutputFrameStateCombine::Ignore());
       Deoptimizer::BailoutType bailout_type =
           Deoptimizer::BailoutType(MiscField::decode(instr->opcode()));
-      CodeGenResult result =
-          AssembleDeoptimizerCall(deopt_state_id, bailout_type);
+      CodeGenResult result = AssembleDeoptimizerCall(
+          deopt_state_id, bailout_type, current_source_position_);
       if (result != kSuccess) return result;
       break;
     }
@@ -2071,7 +2068,8 @@
 }
 
 CodeGenerator::CodeGenResult CodeGenerator::AssembleDeoptimizerCall(
-    int deoptimization_id, Deoptimizer::BailoutType bailout_type) {
+    int deoptimization_id, Deoptimizer::BailoutType bailout_type,
+    SourcePosition pos) {
   Address deopt_entry = Deoptimizer::GetDeoptimizationEntry(
       isolate(), deoptimization_id, bailout_type);
   // TODO(turbofan): We should be able to generate better code by sharing the
@@ -2080,7 +2078,7 @@
   if (deopt_entry == nullptr) return kTooManyDeoptimizationBailouts;
   DeoptimizeReason deoptimization_reason =
       GetDeoptimizationReason(deoptimization_id);
-  __ RecordDeoptReason(deoptimization_reason, 0, deoptimization_id);
+  __ RecordDeoptReason(deoptimization_reason, pos.raw(), deoptimization_id);
   __ Call(deopt_entry, RelocInfo::RUNTIME_ENTRY);
   return kSuccess;
 }
diff --git a/src/compiler/ppc/instruction-selector-ppc.cc b/src/compiler/ppc/instruction-selector-ppc.cc
index bad8ded..a2eb7b8 100644
--- a/src/compiler/ppc/instruction-selector-ppc.cc
+++ b/src/compiler/ppc/instruction-selector-ppc.cc
@@ -229,6 +229,10 @@
   }
 }
 
+void InstructionSelector::VisitProtectedLoad(Node* node) {
+  // TODO(eholk)
+  UNIMPLEMENTED();
+}
 
 void InstructionSelector::VisitStore(Node* node) {
   PPCOperandGenerator g(this);
diff --git a/src/compiler/raw-machine-assembler.cc b/src/compiler/raw-machine-assembler.cc
index ae40f55..cdf45ab 100644
--- a/src/compiler/raw-machine-assembler.cc
+++ b/src/compiler/raw-machine-assembler.cc
@@ -402,6 +402,30 @@
   return tail_call;
 }
 
+Node* RawMachineAssembler::TailCallRuntime6(Runtime::FunctionId function,
+                                            Node* arg1, Node* arg2, Node* arg3,
+                                            Node* arg4, Node* arg5, Node* arg6,
+                                            Node* context) {
+  const int kArity = 6;
+  CallDescriptor* desc = Linkage::GetRuntimeCallDescriptor(
+      zone(), function, kArity, Operator::kNoProperties,
+      CallDescriptor::kSupportsTailCalls);
+  int return_count = static_cast<int>(desc->ReturnCount());
+
+  Node* centry = HeapConstant(CEntryStub(isolate(), return_count).GetCode());
+  Node* ref = AddNode(
+      common()->ExternalConstant(ExternalReference(function, isolate())));
+  Node* arity = Int32Constant(kArity);
+
+  Node* nodes[] = {centry, arg1, arg2, arg3,  arg4,
+                   arg5,   arg6, ref,  arity, context};
+  Node* tail_call = MakeNode(common()->TailCall(desc), arraysize(nodes), nodes);
+
+  schedule()->AddTailCall(CurrentBlock(), tail_call);
+  current_block_ = nullptr;
+  return tail_call;
+}
+
 Node* RawMachineAssembler::CallCFunction0(MachineType return_type,
                                           Node* function) {
   MachineSignature::Builder builder(zone(), 1, 0);
diff --git a/src/compiler/raw-machine-assembler.h b/src/compiler/raw-machine-assembler.h
index c7d4236..cdd368c 100644
--- a/src/compiler/raw-machine-assembler.h
+++ b/src/compiler/raw-machine-assembler.h
@@ -135,6 +135,7 @@
     return AddNode(machine()->Store(StoreRepresentation(rep, write_barrier)),
                    base, index, value);
   }
+  Node* Retain(Node* value) { return AddNode(common()->Retain(), value); }
 
   // Unaligned memory operations
   Node* UnalignedLoad(MachineType rep, Node* base) {
@@ -531,9 +532,15 @@
   }
 
   // Conversions.
+  Node* BitcastTaggedToWord(Node* a) {
+    return AddNode(machine()->BitcastTaggedToWord(), a);
+  }
   Node* BitcastWordToTagged(Node* a) {
     return AddNode(machine()->BitcastWordToTagged(), a);
   }
+  Node* BitcastWordToTaggedSigned(Node* a) {
+    return AddNode(machine()->BitcastWordToTaggedSigned(), a);
+  }
   Node* TruncateFloat64ToWord32(Node* a) {
     return AddNode(machine()->TruncateFloat64ToWord32(), a);
   }
@@ -659,6 +666,9 @@
   Node* Float64InsertHighWord32(Node* a, Node* b) {
     return AddNode(machine()->Float64InsertHighWord32(), a, b);
   }
+  Node* Float64SilenceNaN(Node* a) {
+    return AddNode(machine()->Float64SilenceNaN(), a);
+  }
 
   // Stack operations.
   Node* LoadStackPointer() { return AddNode(machine()->LoadStackPointer()); }
@@ -744,6 +754,10 @@
   // Tail call to a runtime function with five arguments.
   Node* TailCallRuntime5(Runtime::FunctionId function, Node* arg1, Node* arg2,
                          Node* arg3, Node* arg4, Node* arg5, Node* context);
+  // Tail call to a runtime function with six arguments.
+  Node* TailCallRuntime6(Runtime::FunctionId function, Node* arg1, Node* arg2,
+                         Node* arg3, Node* arg4, Node* arg5, Node* arg6,
+                         Node* context);
 
   // ===========================================================================
   // The following utility methods deal with control flow, hence might switch
diff --git a/src/compiler/redundancy-elimination.cc b/src/compiler/redundancy-elimination.cc
index c671fc2..6dcf2bf 100644
--- a/src/compiler/redundancy-elimination.cc
+++ b/src/compiler/redundancy-elimination.cc
@@ -19,12 +19,12 @@
   switch (node->opcode()) {
     case IrOpcode::kCheckBounds:
     case IrOpcode::kCheckFloat64Hole:
+    case IrOpcode::kCheckHeapObject:
     case IrOpcode::kCheckIf:
     case IrOpcode::kCheckNumber:
+    case IrOpcode::kCheckSmi:
     case IrOpcode::kCheckString:
     case IrOpcode::kCheckTaggedHole:
-    case IrOpcode::kCheckTaggedPointer:
-    case IrOpcode::kCheckTaggedSigned:
     case IrOpcode::kCheckedFloat64ToInt32:
     case IrOpcode::kCheckedInt32Add:
     case IrOpcode::kCheckedInt32Sub:
diff --git a/src/compiler/register-allocator-verifier.h b/src/compiler/register-allocator-verifier.h
index 2db8af5..9a605d6 100644
--- a/src/compiler/register-allocator-verifier.h
+++ b/src/compiler/register-allocator-verifier.h
@@ -5,7 +5,7 @@
 #ifndef V8_REGISTER_ALLOCATOR_VERIFIER_H_
 #define V8_REGISTER_ALLOCATOR_VERIFIER_H_
 
-#include "src/zone-containers.h"
+#include "src/zone/zone-containers.h"
 
 namespace v8 {
 namespace internal {
diff --git a/src/compiler/register-allocator.cc b/src/compiler/register-allocator.cc
index 5b55b02..efcdcb4 100644
--- a/src/compiler/register-allocator.cc
+++ b/src/compiler/register-allocator.cc
@@ -1041,6 +1041,8 @@
 
   TopLevel()->UpdateParentForAllChildren(TopLevel());
   TopLevel()->UpdateSpillRangePostMerge(other);
+  TopLevel()->set_has_slot_use(TopLevel()->has_slot_use() ||
+                               other->has_slot_use());
 
 #if DEBUG
   Verify();
@@ -1113,9 +1115,9 @@
       first_interval_ = interval;
     } else {
       // Order of instruction's processing (see ProcessInstructions) guarantees
-      // that each new use interval either precedes or intersects with
-      // last added interval.
-      DCHECK(start < first_interval_->end());
+      // that each new use interval either precedes, intersects with or touches
+      // the last added interval.
+      DCHECK(start <= first_interval_->end());
       first_interval_->set_start(Min(start, first_interval_->start()));
       first_interval_->set_end(Max(end, first_interval_->end()));
     }
@@ -2383,17 +2385,15 @@
   return ret;
 }
 
-
-void RegisterAllocator::SplitAndSpillRangesDefinedByMemoryOperand(
-    bool operands_only) {
+void RegisterAllocator::SplitAndSpillRangesDefinedByMemoryOperand() {
   size_t initial_range_count = data()->live_ranges().size();
   for (size_t i = 0; i < initial_range_count; ++i) {
     TopLevelLiveRange* range = data()->live_ranges()[i];
     if (!CanProcessRange(range)) continue;
-    if (range->HasNoSpillType() || (operands_only && range->HasSpillRange())) {
+    if (range->HasNoSpillType() ||
+        (range->HasSpillRange() && !range->has_slot_use())) {
       continue;
     }
-
     LifetimePosition start = range->Start();
     TRACE("Live range %d:%d is defined by a spill operand.\n",
           range->TopLevel()->vreg(), range->relative_id());
@@ -2571,8 +2571,7 @@
   DCHECK(active_live_ranges().empty());
   DCHECK(inactive_live_ranges().empty());
 
-  SplitAndSpillRangesDefinedByMemoryOperand(code()->VirtualRegisterCount() <=
-                                            num_allocatable_registers());
+  SplitAndSpillRangesDefinedByMemoryOperand();
 
   for (TopLevelLiveRange* range : data()->live_ranges()) {
     if (!CanProcessRange(range)) continue;
@@ -3273,8 +3272,8 @@
         spill_operand = range->GetSpillRangeOperand();
       }
       DCHECK(spill_operand.IsStackSlot());
-      DCHECK_EQ(MachineRepresentation::kTagged,
-                AllocatedOperand::cast(spill_operand).representation());
+      DCHECK(CanBeTaggedPointer(
+          AllocatedOperand::cast(spill_operand).representation()));
     }
 
     LiveRange* cur = range;
@@ -3336,8 +3335,8 @@
             safe_point);
         InstructionOperand operand = cur->GetAssignedOperand();
         DCHECK(!operand.IsStackSlot());
-        DCHECK_EQ(MachineRepresentation::kTagged,
-                  AllocatedOperand::cast(operand).representation());
+        DCHECK(CanBeTaggedPointer(
+            AllocatedOperand::cast(operand).representation()));
         map->RecordReference(AllocatedOperand::cast(operand));
       }
     }
diff --git a/src/compiler/register-allocator.h b/src/compiler/register-allocator.h
index 6bfc6c4..2089ea2 100644
--- a/src/compiler/register-allocator.h
+++ b/src/compiler/register-allocator.h
@@ -8,7 +8,7 @@
 #include "src/compiler/instruction.h"
 #include "src/ostreams.h"
 #include "src/register-configuration.h"
-#include "src/zone-containers.h"
+#include "src/zone/zone-containers.h"
 
 namespace v8 {
 namespace internal {
@@ -965,7 +965,7 @@
 
   // Find the optimal split for ranges defined by a memory operand, e.g.
   // constants or function parameters passed on the stack.
-  void SplitAndSpillRangesDefinedByMemoryOperand(bool operands_only);
+  void SplitAndSpillRangesDefinedByMemoryOperand();
 
   // Split the given range at the given position.
   // If range starts at or after the given position then the
diff --git a/src/compiler/representation-change.cc b/src/compiler/representation-change.cc
index 5427bdb..22d809b 100644
--- a/src/compiler/representation-change.cc
+++ b/src/compiler/representation-change.cc
@@ -142,10 +142,17 @@
 
   switch (use_info.representation()) {
     case MachineRepresentation::kTaggedSigned:
+      DCHECK(use_info.type_check() == TypeCheckKind::kNone ||
+             use_info.type_check() == TypeCheckKind::kSignedSmall);
+      return GetTaggedSignedRepresentationFor(node, output_rep, output_type,
+                                              use_node, use_info);
     case MachineRepresentation::kTaggedPointer:
+      DCHECK(use_info.type_check() == TypeCheckKind::kNone);
+      return GetTaggedPointerRepresentationFor(node, output_rep, output_type);
     case MachineRepresentation::kTagged:
       DCHECK(use_info.type_check() == TypeCheckKind::kNone);
-      return GetTaggedRepresentationFor(node, output_rep, output_type);
+      return GetTaggedRepresentationFor(node, output_rep, output_type,
+                                        use_info.truncation());
     case MachineRepresentation::kFloat32:
       DCHECK(use_info.type_check() == TypeCheckKind::kNone);
       return GetFloat32RepresentationFor(node, output_rep, output_type,
@@ -174,10 +181,132 @@
   return nullptr;
 }
 
-Node* RepresentationChanger::GetTaggedRepresentationFor(
+Node* RepresentationChanger::GetTaggedSignedRepresentationFor(
+    Node* node, MachineRepresentation output_rep, Type* output_type,
+    Node* use_node, UseInfo use_info) {
+  // Eagerly fold representation changes for constants.
+  switch (node->opcode()) {
+    case IrOpcode::kNumberConstant:
+      if (output_type->Is(Type::SignedSmall())) {
+        return node;
+      }
+      break;
+    default:
+      break;
+  }
+  // Select the correct X -> Tagged operator.
+  const Operator* op;
+  if (output_type->Is(Type::None())) {
+    // This is an impossible value; it should not be used at runtime.
+    // We just provide a dummy value here.
+    return jsgraph()->Constant(0);
+  } else if (IsWord(output_rep)) {
+    if (output_type->Is(Type::Signed31())) {
+      op = simplified()->ChangeInt31ToTaggedSigned();
+    } else if (output_type->Is(Type::Signed32())) {
+      if (SmiValuesAre32Bits()) {
+        op = simplified()->ChangeInt32ToTagged();
+      } else if (use_info.type_check() == TypeCheckKind::kSignedSmall) {
+        op = simplified()->CheckedInt32ToTaggedSigned();
+      } else {
+        return TypeError(node, output_rep, output_type,
+                         MachineRepresentation::kTaggedSigned);
+      }
+    } else if (output_type->Is(Type::Unsigned32()) &&
+               use_info.type_check() == TypeCheckKind::kSignedSmall) {
+      op = simplified()->CheckedUint32ToTaggedSigned();
+    } else {
+      return TypeError(node, output_rep, output_type,
+                       MachineRepresentation::kTaggedSigned);
+    }
+  } else if (output_rep == MachineRepresentation::kFloat64) {
+    if (output_type->Is(Type::Signed31())) {
+      // float64 -> int32 -> tagged signed
+      node = InsertChangeFloat64ToInt32(node);
+      op = simplified()->ChangeInt31ToTaggedSigned();
+    } else if (output_type->Is(Type::Signed32())) {
+      // float64 -> int32 -> tagged signed
+      node = InsertChangeFloat64ToInt32(node);
+      if (SmiValuesAre32Bits()) {
+        op = simplified()->ChangeInt32ToTagged();
+      } else if (use_info.type_check() == TypeCheckKind::kSignedSmall) {
+        op = simplified()->CheckedInt32ToTaggedSigned();
+      } else {
+        return TypeError(node, output_rep, output_type,
+                         MachineRepresentation::kTaggedSigned);
+      }
+    } else if (output_type->Is(Type::Unsigned32()) &&
+               use_info.type_check() == TypeCheckKind::kSignedSmall) {
+      // float64 -> uint32 -> tagged signed
+      node = InsertChangeFloat64ToUint32(node);
+      op = simplified()->CheckedUint32ToTaggedSigned();
+    } else if (use_info.type_check() == TypeCheckKind::kSignedSmall) {
+      op = simplified()->CheckedFloat64ToInt32(
+          output_type->Maybe(Type::MinusZero())
+              ? CheckForMinusZeroMode::kCheckForMinusZero
+              : CheckForMinusZeroMode::kDontCheckForMinusZero);
+      node = InsertConversion(node, op, use_node);
+      if (SmiValuesAre32Bits()) {
+        op = simplified()->ChangeInt32ToTagged();
+      } else {
+        op = simplified()->CheckedInt32ToTaggedSigned();
+      }
+    } else {
+      return TypeError(node, output_rep, output_type,
+                       MachineRepresentation::kTaggedSigned);
+    }
+  } else if (CanBeTaggedPointer(output_rep) &&
+             use_info.type_check() == TypeCheckKind::kSignedSmall) {
+    op = simplified()->CheckedTaggedToTaggedSigned();
+  } else if (output_rep == MachineRepresentation::kBit &&
+             use_info.type_check() == TypeCheckKind::kSignedSmall) {
+    // TODO(turbofan): Consider adding a Bailout operator that just deopts.
+    // Also use that for MachineRepresentation::kPointer case above.
+    node = InsertChangeBitToTagged(node);
+    op = simplified()->CheckedTaggedToTaggedSigned();
+  } else {
+    return TypeError(node, output_rep, output_type,
+                     MachineRepresentation::kTaggedSigned);
+  }
+  return InsertConversion(node, op, use_node);
+}
+
+Node* RepresentationChanger::GetTaggedPointerRepresentationFor(
     Node* node, MachineRepresentation output_rep, Type* output_type) {
   // Eagerly fold representation changes for constants.
   switch (node->opcode()) {
+    case IrOpcode::kHeapConstant:
+      return node;  // No change necessary.
+    case IrOpcode::kInt32Constant:
+      if (output_type->Is(Type::Boolean())) {
+        return OpParameter<int32_t>(node) == 0 ? jsgraph()->FalseConstant()
+                                               : jsgraph()->TrueConstant();
+      } else {
+        return TypeError(node, output_rep, output_type,
+                         MachineRepresentation::kTaggedPointer);
+      }
+    case IrOpcode::kFloat64Constant:
+    case IrOpcode::kFloat32Constant:
+      return TypeError(node, output_rep, output_type,
+                       MachineRepresentation::kTaggedPointer);
+    default:
+      break;
+  }
+  // Select the correct X -> Tagged operator.
+  if (output_type->Is(Type::None())) {
+    // This is an impossible value; it should not be used at runtime.
+    // We just provide a dummy value here.
+    return jsgraph()->TheHoleConstant();
+  }
+  return TypeError(node, output_rep, output_type,
+                   MachineRepresentation::kTaggedPointer);
+}
+
+Node* RepresentationChanger::GetTaggedRepresentationFor(
+    Node* node, MachineRepresentation output_rep, Type* output_type,
+    Truncation truncation) {
+  // Eagerly fold representation changes for constants.
+  switch (node->opcode()) {
     case IrOpcode::kNumberConstant:
     case IrOpcode::kHeapConstant:
       return node;  // No change necessary.
@@ -202,12 +331,17 @@
     default:
       break;
   }
+  if (output_rep == MachineRepresentation::kTaggedSigned ||
+      output_rep == MachineRepresentation::kTaggedPointer) {
+    // This is a no-op.
+    return node;
+  }
   // Select the correct X -> Tagged operator.
   const Operator* op;
-  if (output_rep == MachineRepresentation::kNone) {
-    // We should only asisgn this representation if the type is empty.
-    CHECK(!output_type->IsInhabited());
-    op = machine()->ImpossibleToTagged();
+  if (output_type->Is(Type::None())) {
+    // This is an impossible value; it should not be used at runtime.
+    // We just provide a dummy value here.
+    return jsgraph()->TheHoleConstant();
   } else if (output_rep == MachineRepresentation::kBit) {
     if (output_type->Is(Type::Boolean())) {
       op = simplified()->ChangeBitToTagged();
@@ -220,7 +354,10 @@
       op = simplified()->ChangeInt31ToTaggedSigned();
     } else if (output_type->Is(Type::Signed32())) {
       op = simplified()->ChangeInt32ToTagged();
-    } else if (output_type->Is(Type::Unsigned32())) {
+    } else if (output_type->Is(Type::Unsigned32()) ||
+               truncation.IsUsedAsWord32()) {
+      // Either the output is uint32 or the uses only care about the
+      // low 32 bits (so we can pick uint32 safely).
       op = simplified()->ChangeUint32ToTagged();
     } else {
       return TypeError(node, output_rep, output_type,
@@ -229,10 +366,7 @@
   } else if (output_rep ==
              MachineRepresentation::kFloat32) {  // float32 -> float64 -> tagged
     node = InsertChangeFloat32ToFloat64(node);
-    op = simplified()->ChangeFloat64ToTagged(
-        output_type->Maybe(Type::MinusZero())
-            ? CheckForMinusZeroMode::kCheckForMinusZero
-            : CheckForMinusZeroMode::kDontCheckForMinusZero);
+    op = simplified()->ChangeFloat64ToTagged();
   } else if (output_rep == MachineRepresentation::kFloat64) {
     if (output_type->Is(Type::Signed31())) {  // float64 -> int32 -> tagged
       node = InsertChangeFloat64ToInt32(node);
@@ -246,10 +380,7 @@
       node = InsertChangeFloat64ToUint32(node);
       op = simplified()->ChangeUint32ToTagged();
     } else {
-      op = simplified()->ChangeFloat64ToTagged(
-          output_type->Maybe(Type::MinusZero())
-              ? CheckForMinusZeroMode::kCheckForMinusZero
-              : CheckForMinusZeroMode::kDontCheckForMinusZero);
+      op = simplified()->ChangeFloat64ToTagged();
     }
   } else {
     return TypeError(node, output_rep, output_type,
@@ -283,10 +414,10 @@
   }
   // Select the correct X -> Float32 operator.
   const Operator* op = nullptr;
-  if (output_rep == MachineRepresentation::kNone) {
-    // We should only use kNone representation if the type is empty.
-    CHECK(!output_type->IsInhabited());
-    op = machine()->ImpossibleToFloat32();
+  if (output_type->Is(Type::None())) {
+    // This is an impossible value; it should not be used at runtime.
+    // We just provide a dummy value here.
+    return jsgraph()->Float32Constant(0.0f);
   } else if (IsWord(output_rep)) {
     if (output_type->Is(Type::Signed32())) {
       // int32 -> float64 -> float32
@@ -303,7 +434,8 @@
       node = jsgraph()->graph()->NewNode(op, node);
       op = machine()->TruncateFloat64ToFloat32();
     }
-  } else if (output_rep == MachineRepresentation::kTagged) {
+  } else if (output_rep == MachineRepresentation::kTagged ||
+             output_rep == MachineRepresentation::kTaggedPointer) {
     if (output_type->Is(Type::NumberOrOddball())) {
       // tagged -> float64 -> float32
       if (output_type->Is(Type::Number())) {
@@ -352,10 +484,10 @@
   }
   // Select the correct X -> Float64 operator.
   const Operator* op = nullptr;
-  if (output_rep == MachineRepresentation::kNone) {
-    // We should only use kNone representation if the type is empty.
-    CHECK(!output_type->IsInhabited());
-    op = machine()->ImpossibleToFloat64();
+  if (output_type->Is(Type::None())) {
+    // This is an impossible value; it should not be used at runtime.
+    // We just provide a dummy value here.
+    return jsgraph()->Float64Constant(0.0);
   } else if (IsWord(output_rep)) {
     if (output_type->Is(Type::Signed32())) {
       op = machine()->ChangeInt32ToFloat64();
@@ -367,11 +499,14 @@
     }
   } else if (output_rep == MachineRepresentation::kBit) {
     op = machine()->ChangeUint32ToFloat64();
-  } else if (output_rep == MachineRepresentation::kTagged) {
+  } else if (output_rep == MachineRepresentation::kTagged ||
+             output_rep == MachineRepresentation::kTaggedSigned ||
+             output_rep == MachineRepresentation::kTaggedPointer) {
     if (output_type->Is(Type::Undefined())) {
       return jsgraph()->Float64Constant(
           std::numeric_limits<double>::quiet_NaN());
-    } else if (output_type->Is(Type::TaggedSigned())) {
+
+    } else if (output_rep == MachineRepresentation::kTaggedSigned) {
       node = InsertChangeTaggedSignedToInt32(node);
       op = machine()->ChangeInt32ToFloat64();
     } else if (output_type->Is(Type::Number())) {
@@ -435,10 +570,10 @@
 
   // Select the correct X -> Word32 operator.
   const Operator* op = nullptr;
-  if (output_rep == MachineRepresentation::kNone) {
-    // We should only use kNone representation if the type is empty.
-    CHECK(!output_type->IsInhabited());
-    op = machine()->ImpossibleToWord32();
+  if (output_type->Is(Type::None())) {
+    // This is an impossible value; it should not be used at runtime.
+    // We just provide a dummy value here.
+    return jsgraph()->Int32Constant(0);
   } else if (output_rep == MachineRepresentation::kBit) {
     return node;  // Sloppy comparison -> word32
   } else if (output_rep == MachineRepresentation::kFloat64) {
@@ -470,10 +605,19 @@
               ? CheckForMinusZeroMode::kCheckForMinusZero
               : CheckForMinusZeroMode::kDontCheckForMinusZero);
     }
-  } else if (output_rep == MachineRepresentation::kTagged) {
-    if (output_type->Is(Type::TaggedSigned())) {
+  } else if (output_rep == MachineRepresentation::kTaggedSigned) {
+    if (output_type->Is(Type::Signed32())) {
       op = simplified()->ChangeTaggedSignedToInt32();
-    } else if (output_type->Is(Type::Unsigned32())) {
+    } else if (use_info.truncation().IsUsedAsWord32()) {
+      if (use_info.type_check() != TypeCheckKind::kNone) {
+        op = simplified()->CheckedTruncateTaggedToWord32();
+      } else {
+        op = simplified()->TruncateTaggedToWord32();
+      }
+    }
+  } else if (output_rep == MachineRepresentation::kTagged ||
+             output_rep == MachineRepresentation::kTaggedPointer) {
+    if (output_type->Is(Type::Unsigned32())) {
       op = simplified()->ChangeTaggedToUint32();
     } else if (output_type->Is(Type::Signed32())) {
       op = simplified()->ChangeTaggedToInt32();
@@ -541,22 +685,43 @@
   switch (node->opcode()) {
     case IrOpcode::kHeapConstant: {
       Handle<HeapObject> value = OpParameter<Handle<HeapObject>>(node);
-      DCHECK(value.is_identical_to(factory()->true_value()) ||
-             value.is_identical_to(factory()->false_value()));
-      return jsgraph()->Int32Constant(
-          value.is_identical_to(factory()->true_value()) ? 1 : 0);
+      return jsgraph()->Int32Constant(value->BooleanValue() ? 1 : 0);
     }
     default:
       break;
   }
   // Select the correct X -> Bit operator.
   const Operator* op;
-  if (output_rep == MachineRepresentation::kNone) {
-    // We should only use kNone representation if the type is empty.
-    CHECK(!output_type->IsInhabited());
-    op = machine()->ImpossibleToBit();
-  } else if (output_rep == MachineRepresentation::kTagged) {
-    op = simplified()->ChangeTaggedToBit();
+  if (output_type->Is(Type::None())) {
+    // This is an impossible value; it should not be used at runtime.
+    // We just provide a dummy value here.
+    return jsgraph()->Int32Constant(0);
+  } else if (output_rep == MachineRepresentation::kTagged ||
+             output_rep == MachineRepresentation::kTaggedPointer) {
+    if (output_type->Is(Type::BooleanOrNullOrUndefined())) {
+      // true is the only trueish Oddball.
+      op = simplified()->ChangeTaggedToBit();
+    } else {
+      op = simplified()->TruncateTaggedToBit();
+    }
+  } else if (output_rep == MachineRepresentation::kTaggedSigned) {
+    node = jsgraph()->graph()->NewNode(machine()->WordEqual(), node,
+                                       jsgraph()->ZeroConstant());
+    return jsgraph()->graph()->NewNode(machine()->Word32Equal(), node,
+                                       jsgraph()->Int32Constant(0));
+  } else if (IsWord(output_rep)) {
+    node = jsgraph()->graph()->NewNode(machine()->Word32Equal(), node,
+                                       jsgraph()->Int32Constant(0));
+    return jsgraph()->graph()->NewNode(machine()->Word32Equal(), node,
+                                       jsgraph()->Int32Constant(0));
+  } else if (output_rep == MachineRepresentation::kFloat32) {
+    node = jsgraph()->graph()->NewNode(machine()->Float32Abs(), node);
+    return jsgraph()->graph()->NewNode(machine()->Float32LessThan(),
+                                       jsgraph()->Float32Constant(0.0), node);
+  } else if (output_rep == MachineRepresentation::kFloat64) {
+    node = jsgraph()->graph()->NewNode(machine()->Float64Abs(), node);
+    return jsgraph()->graph()->NewNode(machine()->Float64LessThan(),
+                                       jsgraph()->Float64Constant(0.0), node);
   } else {
     return TypeError(node, output_rep, output_type,
                      MachineRepresentation::kBit);
@@ -566,10 +731,10 @@
 
 Node* RepresentationChanger::GetWord64RepresentationFor(
     Node* node, MachineRepresentation output_rep, Type* output_type) {
-  if (output_rep == MachineRepresentation::kNone) {
-    // We should only use kNone representation if the type is empty.
-    CHECK(!output_type->IsInhabited());
-    return jsgraph()->graph()->NewNode(machine()->ImpossibleToFloat64(), node);
+  if (output_type->Is(Type::None())) {
+    // This is an impossible value; it should not be used at runtime.
+    // We just provide a dummy value here.
+    return jsgraph()->Int64Constant(0);
   } else if (output_rep == MachineRepresentation::kBit) {
     return node;  // Sloppy comparison -> word64
   }
@@ -787,7 +952,7 @@
   if (!testing_type_errors_) {
     std::ostringstream out_str;
     out_str << output_rep << " (";
-    output_type->PrintTo(out_str, Type::SEMANTIC_DIM);
+    output_type->PrintTo(out_str);
     out_str << ")";
 
     std::ostringstream use_str;
@@ -802,6 +967,9 @@
   return node;
 }
 
+Node* RepresentationChanger::InsertChangeBitToTagged(Node* node) {
+  return jsgraph()->graph()->NewNode(simplified()->ChangeBitToTagged(), node);
+}
 
 Node* RepresentationChanger::InsertChangeFloat32ToFloat64(Node* node) {
   return jsgraph()->graph()->NewNode(machine()->ChangeFloat32ToFloat64(), node);
diff --git a/src/compiler/representation-change.h b/src/compiler/representation-change.h
index fac3280..f27108e 100644
--- a/src/compiler/representation-change.h
+++ b/src/compiler/representation-change.h
@@ -29,6 +29,9 @@
 
   // Queries.
   bool IsUnused() const { return kind_ == TruncationKind::kNone; }
+  bool IsUsedAsBool() const {
+    return LessGeneral(kind_, TruncationKind::kBool);
+  }
   bool IsUsedAsWord32() const {
     return LessGeneral(kind_, TruncationKind::kWord32);
   }
@@ -139,8 +142,18 @@
   static UseInfo AnyTagged() {
     return UseInfo(MachineRepresentation::kTagged, Truncation::Any());
   }
+  static UseInfo TaggedSigned() {
+    return UseInfo(MachineRepresentation::kTaggedSigned, Truncation::Any());
+  }
+  static UseInfo TaggedPointer() {
+    return UseInfo(MachineRepresentation::kTaggedPointer, Truncation::Any());
+  }
 
   // Possibly deoptimizing conversions.
+  static UseInfo CheckedSignedSmallAsTaggedSigned() {
+    return UseInfo(MachineRepresentation::kTaggedSigned, Truncation::Any(),
+                   TypeCheckKind::kSignedSmall);
+  }
   static UseInfo CheckedSignedSmallAsWord32() {
     return UseInfo(MachineRepresentation::kWord32, Truncation::Any(),
                    TypeCheckKind::kSignedSmall);
@@ -232,8 +245,15 @@
   bool testing_type_errors_;  // If {true}, don't abort on a type error.
   bool type_error_;           // Set when a type error is detected.
 
+  Node* GetTaggedSignedRepresentationFor(Node* node,
+                                         MachineRepresentation output_rep,
+                                         Type* output_type, Node* use_node,
+                                         UseInfo use_info);
+  Node* GetTaggedPointerRepresentationFor(Node* node,
+                                          MachineRepresentation output_rep,
+                                          Type* output_type);
   Node* GetTaggedRepresentationFor(Node* node, MachineRepresentation output_rep,
-                                   Type* output_type);
+                                   Type* output_type, Truncation truncation);
   Node* GetFloat32RepresentationFor(Node* node,
                                     MachineRepresentation output_rep,
                                     Type* output_type, Truncation truncation);
@@ -251,6 +271,7 @@
   Node* TypeError(Node* node, MachineRepresentation output_rep,
                   Type* output_type, MachineRepresentation use);
   Node* MakeTruncatedInt32Constant(double value);
+  Node* InsertChangeBitToTagged(Node* node);
   Node* InsertChangeFloat32ToFloat64(Node* node);
   Node* InsertChangeFloat64ToInt32(Node* node);
   Node* InsertChangeFloat64ToUint32(Node* node);
diff --git a/src/compiler/s390/code-generator-s390.cc b/src/compiler/s390/code-generator-s390.cc
index e69a7ac..284c3fc 100644
--- a/src/compiler/s390/code-generator-s390.cc
+++ b/src/compiler/s390/code-generator-s390.cc
@@ -4,7 +4,7 @@
 
 #include "src/compiler/code-generator.h"
 
-#include "src/ast/scopes.h"
+#include "src/compilation-info.h"
 #include "src/compiler/code-generator-impl.h"
 #include "src/compiler/gap-resolver.h"
 #include "src/compiler/node-matchers.h"
@@ -980,9 +980,6 @@
     case kArchDebugBreak:
       __ stop("kArchDebugBreak");
       break;
-    case kArchImpossible:
-      __ Abort(kConversionFromImpossibleValue);
-      break;
     case kArchNop:
     case kArchThrowTerminator:
       // don't emit code for nops.
@@ -992,8 +989,8 @@
           BuildTranslation(instr, -1, 0, OutputFrameStateCombine::Ignore());
       Deoptimizer::BailoutType bailout_type =
           Deoptimizer::BailoutType(MiscField::decode(instr->opcode()));
-      CodeGenResult result =
-          AssembleDeoptimizerCall(deopt_state_id, bailout_type);
+      CodeGenResult result = AssembleDeoptimizerCall(
+          deopt_state_id, bailout_type, current_source_position_);
       if (result != kSuccess) return result;
       break;
     }
@@ -2195,7 +2192,8 @@
 }
 
 CodeGenerator::CodeGenResult CodeGenerator::AssembleDeoptimizerCall(
-    int deoptimization_id, Deoptimizer::BailoutType bailout_type) {
+    int deoptimization_id, Deoptimizer::BailoutType bailout_type,
+    SourcePosition pos) {
   Address deopt_entry = Deoptimizer::GetDeoptimizationEntry(
       isolate(), deoptimization_id, bailout_type);
   // TODO(turbofan): We should be able to generate better code by sharing the
@@ -2204,7 +2202,7 @@
   if (deopt_entry == nullptr) return kTooManyDeoptimizationBailouts;
   DeoptimizeReason deoptimization_reason =
       GetDeoptimizationReason(deoptimization_id);
-  __ RecordDeoptReason(deoptimization_reason, 0, deoptimization_id);
+  __ RecordDeoptReason(deoptimization_reason, pos.raw(), deoptimization_id);
   __ Call(deopt_entry, RelocInfo::RUNTIME_ENTRY);
   return kSuccess;
 }
diff --git a/src/compiler/s390/instruction-selector-s390.cc b/src/compiler/s390/instruction-selector-s390.cc
index 6fc8a4d..f1aa332 100644
--- a/src/compiler/s390/instruction-selector-s390.cc
+++ b/src/compiler/s390/instruction-selector-s390.cc
@@ -327,6 +327,11 @@
   Emit(code, 1, outputs, input_count, inputs);
 }
 
+void InstructionSelector::VisitProtectedLoad(Node* node) {
+  // TODO(eholk)
+  UNIMPLEMENTED();
+}
+
 void InstructionSelector::VisitStore(Node* node) {
   S390OperandGenerator g(this);
   Node* base = node->InputAt(0);
@@ -1099,7 +1104,7 @@
   Node* right = m.right().node();
   if (g.CanBeImmediate(right, kInt32Imm) &&
       base::bits::IsPowerOfTwo64(g.GetImmediate(right))) {
-    int power = 31 - base::bits::CountLeadingZeros64(g.GetImmediate(right));
+    int power = 63 - base::bits::CountLeadingZeros64(g.GetImmediate(right));
     Emit(kS390_ShiftLeft64, g.DefineSameAsFirst(node), g.UseRegister(left),
          g.UseImmediate(power));
     return;
diff --git a/src/compiler/schedule.h b/src/compiler/schedule.h
index 74ba835..4fc0d0a 100644
--- a/src/compiler/schedule.h
+++ b/src/compiler/schedule.h
@@ -7,7 +7,7 @@
 
 #include <iosfwd>
 
-#include "src/zone-containers.h"
+#include "src/zone/zone-containers.h"
 
 namespace v8 {
 namespace internal {
diff --git a/src/compiler/scheduler.cc b/src/compiler/scheduler.cc
index 58c01cc..b4e74d9 100644
--- a/src/compiler/scheduler.cc
+++ b/src/compiler/scheduler.cc
@@ -11,10 +11,10 @@
 #include "src/compiler/common-operator.h"
 #include "src/compiler/control-equivalence.h"
 #include "src/compiler/graph.h"
-#include "src/compiler/node.h"
 #include "src/compiler/node-marker.h"
 #include "src/compiler/node-properties.h"
-#include "src/zone-containers.h"
+#include "src/compiler/node.h"
+#include "src/zone/zone-containers.h"
 
 namespace v8 {
 namespace internal {
diff --git a/src/compiler/scheduler.h b/src/compiler/scheduler.h
index 269c271..416ba5c 100644
--- a/src/compiler/scheduler.h
+++ b/src/compiler/scheduler.h
@@ -10,7 +10,7 @@
 #include "src/compiler/opcodes.h"
 #include "src/compiler/schedule.h"
 #include "src/compiler/zone-pool.h"
-#include "src/zone-containers.h"
+#include "src/zone/zone-containers.h"
 
 namespace v8 {
 namespace internal {
diff --git a/src/compiler/simplified-lowering.cc b/src/compiler/simplified-lowering.cc
index de64de3..97aacd6 100644
--- a/src/compiler/simplified-lowering.cc
+++ b/src/compiler/simplified-lowering.cc
@@ -20,9 +20,9 @@
 #include "src/compiler/representation-change.h"
 #include "src/compiler/simplified-operator.h"
 #include "src/compiler/source-position.h"
+#include "src/compiler/type-cache.h"
 #include "src/conversions-inl.h"
 #include "src/objects.h"
-#include "src/type-cache.h"
 
 namespace v8 {
 namespace internal {
@@ -311,6 +311,9 @@
       bool updated = UpdateFeedbackType(node);
       TRACE(" visit #%d: %s\n", node->id(), node->op()->mnemonic());
       VisitNode(node, info->truncation(), nullptr);
+      TRACE("  ==> output ");
+      PrintOutputInfo(info);
+      TRACE("\n");
       if (updated) {
         for (Node* const user : node->uses()) {
           if (GetInfo(user)->visited()) {
@@ -330,6 +333,9 @@
       bool updated = UpdateFeedbackType(node);
       TRACE(" visit #%d: %s\n", node->id(), node->op()->mnemonic());
       VisitNode(node, info->truncation(), nullptr);
+      TRACE("  ==> output ");
+      PrintOutputInfo(info);
+      TRACE("\n");
       if (updated) {
         for (Node* const user : node->uses()) {
           if (GetInfo(user)->visited()) {
@@ -534,9 +540,6 @@
       TRACE(" visit #%d: %s (trunc: %s)\n", node->id(), node->op()->mnemonic(),
             info->truncation().description());
       VisitNode(node, info->truncation(), nullptr);
-      TRACE("  ==> output ");
-      PrintOutputInfo(info);
-      TRACE("\n");
     }
   }
 
@@ -804,41 +807,10 @@
     VisitBinop(node, UseInfo::TruncatingFloat64(),
                MachineRepresentation::kFloat64);
   }
-  void VisitInt32Binop(Node* node) {
-    VisitBinop(node, UseInfo::TruncatingWord32(),
-               MachineRepresentation::kWord32);
-  }
   void VisitWord32TruncatingBinop(Node* node) {
     VisitBinop(node, UseInfo::TruncatingWord32(),
                MachineRepresentation::kWord32);
   }
-  void VisitUint32Binop(Node* node) {
-    VisitBinop(node, UseInfo::TruncatingWord32(),
-               MachineRepresentation::kWord32);
-  }
-  void VisitInt64Binop(Node* node) {
-    VisitBinop(node, UseInfo::TruncatingWord64(),
-               MachineRepresentation::kWord64);
-  }
-  void VisitUint64Binop(Node* node) {
-    VisitBinop(node, UseInfo::TruncatingWord64(),
-               MachineRepresentation::kWord64);
-  }
-  void VisitFloat64Cmp(Node* node) {
-    VisitBinop(node, UseInfo::TruncatingFloat64(), MachineRepresentation::kBit);
-  }
-  void VisitInt32Cmp(Node* node) {
-    VisitBinop(node, UseInfo::TruncatingWord32(), MachineRepresentation::kBit);
-  }
-  void VisitUint32Cmp(Node* node) {
-    VisitBinop(node, UseInfo::TruncatingWord32(), MachineRepresentation::kBit);
-  }
-  void VisitInt64Cmp(Node* node) {
-    VisitBinop(node, UseInfo::TruncatingWord64(), MachineRepresentation::kBit);
-  }
-  void VisitUint64Cmp(Node* node) {
-    VisitBinop(node, UseInfo::TruncatingWord64(), MachineRepresentation::kBit);
-  }
 
   // Infer representation for phi-like nodes.
   // The {node} parameter is only used to decide on the int64 representation.
@@ -875,11 +847,13 @@
       bool is_word64 = GetInfo(node->InputAt(0))->representation() ==
                        MachineRepresentation::kWord64;
 #ifdef DEBUG
-      // Check that all the inputs agree on being Word64.
-      DCHECK_EQ(IrOpcode::kPhi, node->opcode());  // This only works for phis.
-      for (int i = 1; i < node->op()->ValueInputCount(); i++) {
-        DCHECK_EQ(is_word64, GetInfo(node->InputAt(i))->representation() ==
-                                 MachineRepresentation::kWord64);
+      if (node->opcode() != IrOpcode::kTypeGuard) {
+        // Check that all the inputs agree on being Word64.
+        DCHECK_EQ(IrOpcode::kPhi, node->opcode());  // This only works for phis.
+        for (int i = 1; i < node->op()->ValueInputCount(); i++) {
+          DCHECK_EQ(is_word64, GetInfo(node->InputAt(i))->representation() ==
+                                   MachineRepresentation::kWord64);
+        }
       }
 #endif
       return is_word64 ? MachineRepresentation::kWord64
@@ -937,6 +911,21 @@
     }
   }
 
+  void VisitObjectIs(Node* node, Type* type, SimplifiedLowering* lowering) {
+    Type* const input_type = TypeOf(node->InputAt(0));
+    if (input_type->Is(type)) {
+      VisitUnop(node, UseInfo::None(), MachineRepresentation::kBit);
+      if (lower()) {
+        DeferReplacement(node, lowering->jsgraph()->Int32Constant(1));
+      }
+    } else {
+      VisitUnop(node, UseInfo::AnyTagged(), MachineRepresentation::kBit);
+      if (lower() && !input_type->Maybe(type)) {
+        DeferReplacement(node, lowering->jsgraph()->Int32Constant(0));
+      }
+    }
+  }
+
   void VisitCall(Node* node, SimplifiedLowering* lowering) {
     const CallDescriptor* desc = CallDescriptorOf(node->op());
     int params = static_cast<int>(desc->ParameterCount());
@@ -986,8 +975,11 @@
       for (int i = 0; i < node->InputCount(); i++) {
         Node* input = node->InputAt(i);
         NodeInfo* input_info = GetInfo(input);
-        MachineType machine_type(input_info->representation(),
-                                 DeoptValueSemanticOf(TypeOf(input)));
+        Type* input_type = TypeOf(input);
+        MachineRepresentation rep = input_type->IsInhabited()
+                                        ? input_info->representation()
+                                        : MachineRepresentation::kNone;
+        MachineType machine_type(rep, DeoptValueSemanticOf(input_type));
         DCHECK(machine_type.representation() !=
                    MachineRepresentation::kWord32 ||
                machine_type.semantic() == MachineSemantic::kInt32 ||
@@ -1023,12 +1015,12 @@
   WriteBarrierKind WriteBarrierKindFor(
       BaseTaggedness base_taggedness,
       MachineRepresentation field_representation, Type* field_type,
-      Node* value) {
+      MachineRepresentation value_representation, Node* value) {
     if (base_taggedness == kTaggedBase &&
-        field_representation == MachineRepresentation::kTagged) {
+        CanBeTaggedPointer(field_representation)) {
       Type* value_type = NodeProperties::GetType(value);
-      if (field_type->Is(Type::TaggedSigned()) ||
-          value_type->Is(Type::TaggedSigned())) {
+      if (field_representation == MachineRepresentation::kTaggedSigned ||
+          value_representation == MachineRepresentation::kTaggedSigned) {
         // Write barriers are only for stores of heap objects.
         return kNoWriteBarrier;
       }
@@ -1054,8 +1046,8 @@
           return kMapWriteBarrier;
         }
       }
-      if (field_type->Is(Type::TaggedPointer()) ||
-          value_type->Is(Type::TaggedPointer())) {
+      if (field_representation == MachineRepresentation::kTaggedPointer ||
+          value_representation == MachineRepresentation::kTaggedPointer) {
         // Write barriers for heap objects are cheaper.
         return kPointerWriteBarrier;
       }
@@ -1076,13 +1068,14 @@
   WriteBarrierKind WriteBarrierKindFor(
       BaseTaggedness base_taggedness,
       MachineRepresentation field_representation, int field_offset,
-      Type* field_type, Node* value) {
+      Type* field_type, MachineRepresentation value_representation,
+      Node* value) {
     if (base_taggedness == kTaggedBase &&
         field_offset == HeapObject::kMapOffset) {
       return kMapWriteBarrier;
     }
     return WriteBarrierKindFor(base_taggedness, field_representation,
-                               field_type, value);
+                               field_type, value_representation, value);
   }
 
   Graph* graph() const { return jsgraph_->graph(); }
@@ -1169,6 +1162,110 @@
     return;
   }
 
+  void VisitSpeculativeNumberModulus(Node* node, Truncation truncation,
+                                     SimplifiedLowering* lowering) {
+    // ToNumber(x) can throw if x is either a Receiver or a Symbol, so we
+    // can only eliminate an unused speculative number operation if we know
+    // that the inputs are PlainPrimitive, which excludes everything that's
+    // might have side effects or throws during a ToNumber conversion.
+    if (BothInputsAre(node, Type::PlainPrimitive())) {
+      if (truncation.IsUnused()) return VisitUnused(node);
+    }
+    if (BothInputsAre(node, Type::Unsigned32OrMinusZeroOrNaN()) &&
+        (truncation.IsUsedAsWord32() ||
+         NodeProperties::GetType(node)->Is(Type::Unsigned32()))) {
+      // => unsigned Uint32Mod
+      VisitWord32TruncatingBinop(node);
+      if (lower()) DeferReplacement(node, lowering->Uint32Mod(node));
+      return;
+    }
+    if (BothInputsAre(node, Type::Signed32OrMinusZeroOrNaN()) &&
+        (truncation.IsUsedAsWord32() ||
+         NodeProperties::GetType(node)->Is(Type::Signed32()))) {
+      // => signed Int32Mod
+      VisitWord32TruncatingBinop(node);
+      if (lower()) DeferReplacement(node, lowering->Int32Mod(node));
+      return;
+    }
+
+    // Try to use type feedback.
+    NumberOperationHint hint = NumberOperationHintOf(node->op());
+
+    // Handle the case when no uint32 checks on inputs are necessary
+    // (but an overflow check is needed on the output).
+    if (BothInputsAreUnsigned32(node)) {
+      if (hint == NumberOperationHint::kSignedSmall ||
+          hint == NumberOperationHint::kSigned32) {
+        VisitBinop(node, UseInfo::TruncatingWord32(),
+                   MachineRepresentation::kWord32, Type::Unsigned32());
+        if (lower()) ChangeToUint32OverflowOp(node);
+        return;
+      }
+    }
+
+    // Handle the case when no int32 checks on inputs are necessary
+    // (but an overflow check is needed on the output).
+    if (BothInputsAre(node, Type::Signed32())) {
+      // If both the inputs the feedback are int32, use the overflow op.
+      if (hint == NumberOperationHint::kSignedSmall ||
+          hint == NumberOperationHint::kSigned32) {
+        VisitBinop(node, UseInfo::TruncatingWord32(),
+                   MachineRepresentation::kWord32, Type::Signed32());
+        if (lower()) ChangeToInt32OverflowOp(node);
+        return;
+      }
+    }
+
+    if (hint == NumberOperationHint::kSignedSmall ||
+        hint == NumberOperationHint::kSigned32) {
+      // If the result is truncated, we only need to check the inputs.
+      if (truncation.IsUsedAsWord32()) {
+        VisitBinop(node, CheckedUseInfoAsWord32FromHint(hint),
+                   MachineRepresentation::kWord32);
+        if (lower()) DeferReplacement(node, lowering->Int32Mod(node));
+      } else if (BothInputsAre(node, Type::Unsigned32OrMinusZeroOrNaN())) {
+        VisitBinop(node, CheckedUseInfoAsWord32FromHint(hint),
+                   MachineRepresentation::kWord32, Type::Unsigned32());
+        if (lower()) DeferReplacement(node, lowering->Uint32Mod(node));
+      } else {
+        VisitBinop(node, CheckedUseInfoAsWord32FromHint(hint),
+                   MachineRepresentation::kWord32, Type::Signed32());
+        if (lower()) ChangeToInt32OverflowOp(node);
+      }
+      return;
+    }
+
+    if (TypeOf(node->InputAt(0))->Is(Type::Unsigned32()) &&
+        TypeOf(node->InputAt(1))->Is(Type::Unsigned32()) &&
+        (truncation.IsUsedAsWord32() ||
+         NodeProperties::GetType(node)->Is(Type::Unsigned32()))) {
+      // We can only promise Float64 truncation here, as the decision is
+      // based on the feedback types of the inputs.
+      VisitBinop(node,
+                 UseInfo(MachineRepresentation::kWord32, Truncation::Float64()),
+                 MachineRepresentation::kWord32, Type::Number());
+      if (lower()) DeferReplacement(node, lowering->Uint32Mod(node));
+      return;
+    }
+    if (TypeOf(node->InputAt(0))->Is(Type::Signed32()) &&
+        TypeOf(node->InputAt(1))->Is(Type::Signed32()) &&
+        (truncation.IsUsedAsWord32() ||
+         NodeProperties::GetType(node)->Is(Type::Signed32()))) {
+      // We can only promise Float64 truncation here, as the decision is
+      // based on the feedback types of the inputs.
+      VisitBinop(node,
+                 UseInfo(MachineRepresentation::kWord32, Truncation::Float64()),
+                 MachineRepresentation::kWord32, Type::Number());
+      if (lower()) DeferReplacement(node, lowering->Int32Mod(node));
+      return;
+    }
+    // default case => Float64Mod
+    VisitBinop(node, UseInfo::CheckedNumberOrOddballAsFloat64(),
+               MachineRepresentation::kFloat64, Type::Number());
+    if (lower()) ChangeToPureOp(node, Float64Op(node));
+    return;
+  }
+
   // Dispatching routine for visiting the node {node} with the usage {use}.
   // Depending on the operator, propagate new usage info to the inputs.
   void VisitNode(Node* node, Truncation truncation,
@@ -1195,22 +1292,13 @@
         // tho Start doesn't really produce a value, we have to say Tagged
         // here, otherwise the input conversion will fail.
         return VisitLeaf(node, MachineRepresentation::kTagged);
-      case IrOpcode::kDead:
-        return VisitLeaf(node, MachineRepresentation::kNone);
-      case IrOpcode::kParameter: {
+      case IrOpcode::kParameter:
         // TODO(titzer): use representation from linkage.
-        ProcessInput(node, 0, UseInfo::None());
-        SetOutput(node, MachineRepresentation::kTagged);
-        return;
-      }
+        return VisitUnop(node, UseInfo::None(), MachineRepresentation::kTagged);
       case IrOpcode::kInt32Constant:
         return VisitLeaf(node, MachineRepresentation::kWord32);
       case IrOpcode::kInt64Constant:
         return VisitLeaf(node, MachineRepresentation::kWord64);
-      case IrOpcode::kFloat32Constant:
-        return VisitLeaf(node, MachineRepresentation::kFloat32);
-      case IrOpcode::kFloat64Constant:
-        return VisitLeaf(node, MachineRepresentation::kFloat64);
       case IrOpcode::kExternalConstant:
         return VisitLeaf(node, MachineType::PointerRepresentation());
       case IrOpcode::kNumberConstant:
@@ -1218,12 +1306,6 @@
       case IrOpcode::kHeapConstant:
         return VisitLeaf(node, MachineRepresentation::kTagged);
 
-      case IrOpcode::kDeoptimizeIf:
-      case IrOpcode::kDeoptimizeUnless:
-        ProcessInput(node, 0, UseInfo::Bool());
-        ProcessInput(node, 1, UseInfo::AnyTagged());
-        ProcessRemainingInputs(node, 2);
-        return;
       case IrOpcode::kBranch:
         ProcessInput(node, 0, UseInfo::Bool());
         EnqueueInput(node, NodeProperties::FirstControlIndex(node));
@@ -1242,6 +1324,18 @@
       //------------------------------------------------------------------
       // JavaScript operators.
       //------------------------------------------------------------------
+      case IrOpcode::kJSToBoolean: {
+        if (truncation.IsUsedAsBool()) {
+          ProcessInput(node, 0, UseInfo::Bool());
+          ProcessInput(node, 1, UseInfo::None());
+          SetOutput(node, MachineRepresentation::kBit);
+          if (lower()) DeferReplacement(node, node->InputAt(0));
+        } else {
+          VisitInputs(node);
+          SetOutput(node, MachineRepresentation::kTagged);
+        }
+        return;
+      }
       case IrOpcode::kJSToNumber: {
         VisitInputs(node);
         // TODO(bmeurer): Optimize somewhat based on input type?
@@ -1268,6 +1362,8 @@
             node->AppendInput(jsgraph_->zone(), jsgraph_->Int32Constant(0));
             NodeProperties::ChangeOp(node, lowering->machine()->Word32Equal());
           } else {
+            DCHECK_EQ(input_info->representation(),
+                      MachineRepresentation::kTagged);
             // BooleanNot(x: kRepTagged) => WordEqual(x, #false)
             node->AppendInput(jsgraph_->zone(), jsgraph_->FalseConstant());
             NodeProperties::ChangeOp(node, lowering->machine()->WordEqual());
@@ -1289,7 +1385,8 @@
              rhs_type->Is(Type::Unsigned32OrMinusZeroOrNaN()) &&
              OneInputCannotBe(node, type_cache_.kZeroish))) {
           // => unsigned Int32Cmp
-          VisitUint32Cmp(node);
+          VisitBinop(node, UseInfo::TruncatingWord32(),
+                     MachineRepresentation::kBit);
           if (lower()) NodeProperties::ChangeOp(node, Uint32Op(node));
           return;
         }
@@ -1299,12 +1396,14 @@
              rhs_type->Is(Type::Signed32OrMinusZeroOrNaN()) &&
              OneInputCannotBe(node, type_cache_.kZeroish))) {
           // => signed Int32Cmp
-          VisitInt32Cmp(node);
+          VisitBinop(node, UseInfo::TruncatingWord32(),
+                     MachineRepresentation::kBit);
           if (lower()) NodeProperties::ChangeOp(node, Int32Op(node));
           return;
         }
         // => Float64Cmp
-        VisitFloat64Cmp(node);
+        VisitBinop(node, UseInfo::TruncatingFloat64(),
+                   MachineRepresentation::kBit);
         if (lower()) NodeProperties::ChangeOp(node, Float64Op(node));
         return;
       }
@@ -1314,16 +1413,19 @@
         if (TypeOf(node->InputAt(0))->Is(Type::Unsigned32()) &&
             TypeOf(node->InputAt(1))->Is(Type::Unsigned32())) {
           // => unsigned Int32Cmp
-          VisitUint32Cmp(node);
+          VisitBinop(node, UseInfo::TruncatingWord32(),
+                     MachineRepresentation::kBit);
           if (lower()) NodeProperties::ChangeOp(node, Uint32Op(node));
         } else if (TypeOf(node->InputAt(0))->Is(Type::Signed32()) &&
                    TypeOf(node->InputAt(1))->Is(Type::Signed32())) {
           // => signed Int32Cmp
-          VisitInt32Cmp(node);
+          VisitBinop(node, UseInfo::TruncatingWord32(),
+                     MachineRepresentation::kBit);
           if (lower()) NodeProperties::ChangeOp(node, Int32Op(node));
         } else {
           // => Float64Cmp
-          VisitFloat64Cmp(node);
+          VisitBinop(node, UseInfo::TruncatingFloat64(),
+                     MachineRepresentation::kBit);
           if (lower()) NodeProperties::ChangeOp(node, Float64Op(node));
         }
         return;
@@ -1347,13 +1449,15 @@
         if (TypeOf(node->InputAt(0))->Is(Type::Unsigned32()) &&
             TypeOf(node->InputAt(1))->Is(Type::Unsigned32())) {
           // => unsigned Int32Cmp
-          VisitUint32Cmp(node);
+          VisitBinop(node, UseInfo::TruncatingWord32(),
+                     MachineRepresentation::kBit);
           if (lower()) ChangeToPureOp(node, Uint32Op(node));
           return;
         } else if (TypeOf(node->InputAt(0))->Is(Type::Signed32()) &&
                    TypeOf(node->InputAt(1))->Is(Type::Signed32())) {
           // => signed Int32Cmp
-          VisitInt32Cmp(node);
+          VisitBinop(node, UseInfo::TruncatingWord32(),
+                     MachineRepresentation::kBit);
           if (lower()) ChangeToPureOp(node, Int32Op(node));
           return;
         }
@@ -1490,10 +1594,10 @@
         }
         if (BothInputsAreSigned32(node)) {
           if (NodeProperties::GetType(node)->Is(Type::Signed32())) {
-          // => signed Int32Div
-          VisitInt32Binop(node);
-          if (lower()) DeferReplacement(node, lowering->Int32Div(node));
-          return;
+            // => signed Int32Div
+            VisitWord32TruncatingBinop(node);
+            if (lower()) DeferReplacement(node, lowering->Int32Div(node));
+            return;
           }
           if (truncation.IsUsedAsWord32()) {
             // => signed Int32Div
@@ -1562,7 +1666,7 @@
         if (BothInputsAreSigned32(node)) {
           if (NodeProperties::GetType(node)->Is(Type::Signed32())) {
             // => signed Int32Div
-            VisitInt32Binop(node);
+            VisitWord32TruncatingBinop(node);
             if (lower()) DeferReplacement(node, lowering->Int32Div(node));
             return;
           }
@@ -1574,116 +1678,12 @@
           }
         }
         // Number x Number => Float64Div
-        if (BothInputsAre(node, Type::NumberOrUndefined())) {
-          VisitFloat64Binop(node);
-          if (lower()) ChangeToPureOp(node, Float64Op(node));
-          return;
-        }
-        // Checked float64 x float64 => float64
-        DCHECK_EQ(IrOpcode::kSpeculativeNumberDivide, node->opcode());
-        VisitBinop(node, UseInfo::CheckedNumberOrOddballAsFloat64(),
-                   MachineRepresentation::kFloat64, Type::Number());
+        VisitFloat64Binop(node);
         if (lower()) ChangeToPureOp(node, Float64Op(node));
         return;
       }
-      case IrOpcode::kSpeculativeNumberModulus: {
-        // ToNumber(x) can throw if x is either a Receiver or a Symbol, so we
-        // can only eliminate an unused speculative number operation if we know
-        // that the inputs are PlainPrimitive, which excludes everything that's
-        // might have side effects or throws during a ToNumber conversion.
-        if (BothInputsAre(node, Type::PlainPrimitive())) {
-          if (truncation.IsUnused()) return VisitUnused(node);
-        }
-        if (BothInputsAre(node, Type::Unsigned32OrMinusZeroOrNaN()) &&
-            (truncation.IsUsedAsWord32() ||
-             NodeProperties::GetType(node)->Is(Type::Unsigned32()))) {
-          // => unsigned Uint32Mod
-          VisitWord32TruncatingBinop(node);
-          if (lower()) DeferReplacement(node, lowering->Uint32Mod(node));
-          return;
-        }
-        if (BothInputsAre(node, Type::Signed32OrMinusZeroOrNaN()) &&
-            (truncation.IsUsedAsWord32() ||
-             NodeProperties::GetType(node)->Is(Type::Signed32()))) {
-          // => signed Int32Mod
-          VisitWord32TruncatingBinop(node);
-          if (lower()) DeferReplacement(node, lowering->Int32Mod(node));
-          return;
-        }
-
-        // Try to use type feedback.
-        NumberOperationHint hint = NumberOperationHintOf(node->op());
-
-        // Handle the case when no uint32 checks on inputs are necessary
-        // (but an overflow check is needed on the output).
-        if (BothInputsAreUnsigned32(node)) {
-          if (hint == NumberOperationHint::kSignedSmall ||
-              hint == NumberOperationHint::kSigned32) {
-            VisitBinop(node, UseInfo::TruncatingWord32(),
-                       MachineRepresentation::kWord32, Type::Unsigned32());
-            if (lower()) ChangeToUint32OverflowOp(node);
-            return;
-          }
-        }
-
-        // Handle the case when no int32 checks on inputs are necessary
-        // (but an overflow check is needed on the output).
-        if (BothInputsAre(node, Type::Signed32())) {
-          // If both the inputs the feedback are int32, use the overflow op.
-          if (hint == NumberOperationHint::kSignedSmall ||
-              hint == NumberOperationHint::kSigned32) {
-            VisitBinop(node, UseInfo::TruncatingWord32(),
-                       MachineRepresentation::kWord32, Type::Signed32());
-            if (lower()) ChangeToInt32OverflowOp(node);
-            return;
-          }
-        }
-
-        if (hint == NumberOperationHint::kSignedSmall ||
-            hint == NumberOperationHint::kSigned32) {
-          // If the result is truncated, we only need to check the inputs.
-          if (truncation.IsUsedAsWord32()) {
-            VisitBinop(node, CheckedUseInfoAsWord32FromHint(hint),
-                       MachineRepresentation::kWord32);
-            if (lower()) DeferReplacement(node, lowering->Int32Mod(node));
-          } else {
-            VisitBinop(node, CheckedUseInfoAsWord32FromHint(hint),
-                       MachineRepresentation::kWord32, Type::Signed32());
-            if (lower()) ChangeToInt32OverflowOp(node);
-          }
-          return;
-        }
-
-        if (TypeOf(node->InputAt(0))->Is(Type::Unsigned32()) &&
-            TypeOf(node->InputAt(1))->Is(Type::Unsigned32()) &&
-            (truncation.IsUsedAsWord32() ||
-             NodeProperties::GetType(node)->Is(Type::Unsigned32()))) {
-          // We can only promise Float64 truncation here, as the decision is
-          // based on the feedback types of the inputs.
-          VisitBinop(node, UseInfo(MachineRepresentation::kWord32,
-                                   Truncation::Float64()),
-                     MachineRepresentation::kWord32);
-          if (lower()) DeferReplacement(node, lowering->Uint32Mod(node));
-          return;
-        }
-        if (TypeOf(node->InputAt(0))->Is(Type::Signed32()) &&
-            TypeOf(node->InputAt(1))->Is(Type::Signed32()) &&
-            (truncation.IsUsedAsWord32() ||
-             NodeProperties::GetType(node)->Is(Type::Signed32()))) {
-          // We can only promise Float64 truncation here, as the decision is
-          // based on the feedback types of the inputs.
-          VisitBinop(node, UseInfo(MachineRepresentation::kWord32,
-                                   Truncation::Float64()),
-                     MachineRepresentation::kWord32);
-          if (lower()) DeferReplacement(node, lowering->Int32Mod(node));
-          return;
-        }
-        // default case => Float64Mod
-        VisitBinop(node, UseInfo::CheckedNumberOrOddballAsFloat64(),
-                   MachineRepresentation::kFloat64, Type::Number());
-        if (lower()) ChangeToPureOp(node, Float64Op(node));
-        return;
-      }
+      case IrOpcode::kSpeculativeNumberModulus:
+        return VisitSpeculativeNumberModulus(node, truncation, lowering);
       case IrOpcode::kNumberModulus: {
         if (BothInputsAre(node, Type::Unsigned32OrMinusZeroOrNaN()) &&
             (truncation.IsUsedAsWord32() ||
@@ -1733,7 +1733,7 @@
       case IrOpcode::kNumberBitwiseOr:
       case IrOpcode::kNumberBitwiseXor:
       case IrOpcode::kNumberBitwiseAnd: {
-        VisitInt32Binop(node);
+        VisitWord32TruncatingBinop(node);
         if (lower()) NodeProperties::ChangeOp(node, Int32Op(node));
         return;
       }
@@ -1895,13 +1895,13 @@
       case IrOpcode::kNumberMax: {
         // TODO(turbofan): We should consider feedback types here as well.
         if (BothInputsAreUnsigned32(node)) {
-          VisitUint32Binop(node);
+          VisitWord32TruncatingBinop(node);
           if (lower()) {
             lowering->DoMax(node, lowering->machine()->Uint32LessThan(),
                             MachineRepresentation::kWord32);
           }
         } else if (BothInputsAreSigned32(node)) {
-          VisitInt32Binop(node);
+          VisitWord32TruncatingBinop(node);
           if (lower()) {
             lowering->DoMax(node, lowering->machine()->Int32LessThan(),
                             MachineRepresentation::kWord32);
@@ -1921,13 +1921,13 @@
       case IrOpcode::kNumberMin: {
         // TODO(turbofan): We should consider feedback types here as well.
         if (BothInputsAreUnsigned32(node)) {
-          VisitUint32Binop(node);
+          VisitWord32TruncatingBinop(node);
           if (lower()) {
             lowering->DoMin(node, lowering->machine()->Uint32LessThan(),
                             MachineRepresentation::kWord32);
           }
         } else if (BothInputsAreSigned32(node)) {
-          VisitInt32Binop(node);
+          VisitWord32TruncatingBinop(node);
           if (lower()) {
             lowering->DoMin(node, lowering->machine()->Int32LessThan(),
                             MachineRepresentation::kWord32);
@@ -2002,6 +2002,23 @@
         if (lower()) NodeProperties::ChangeOp(node, Float64Op(node));
         return;
       }
+      case IrOpcode::kNumberToBoolean: {
+        Type* const input_type = TypeOf(node->InputAt(0));
+        if (input_type->Is(Type::Integral32())) {
+          VisitUnop(node, UseInfo::TruncatingWord32(),
+                    MachineRepresentation::kBit);
+          if (lower()) lowering->DoIntegral32ToBit(node);
+        } else if (input_type->Is(Type::OrderedNumber())) {
+          VisitUnop(node, UseInfo::TruncatingFloat64(),
+                    MachineRepresentation::kBit);
+          if (lower()) lowering->DoOrderedNumberToBit(node);
+        } else {
+          VisitUnop(node, UseInfo::TruncatingFloat64(),
+                    MachineRepresentation::kBit);
+          if (lower()) lowering->DoNumberToBit(node);
+        }
+        return;
+      }
       case IrOpcode::kNumberToInt32: {
         // Just change representation if necessary.
         VisitUnop(node, UseInfo::TruncatingWord32(),
@@ -2023,62 +2040,11 @@
         }
         return;
       }
-      case IrOpcode::kStringEqual: {
-        VisitBinop(node, UseInfo::AnyTagged(), MachineRepresentation::kTagged);
-        if (lower()) {
-          // StringEqual(x, y) => Call(StringEqualStub, x, y, no-context)
-          Operator::Properties properties =
-              Operator::kCommutative | Operator::kEliminatable;
-          Callable callable = CodeFactory::StringEqual(jsgraph_->isolate());
-          CallDescriptor::Flags flags = CallDescriptor::kNoFlags;
-          CallDescriptor* desc = Linkage::GetStubCallDescriptor(
-              jsgraph_->isolate(), jsgraph_->zone(), callable.descriptor(), 0,
-              flags, properties);
-          node->InsertInput(jsgraph_->zone(), 0,
-                            jsgraph_->HeapConstant(callable.code()));
-          node->AppendInput(jsgraph_->zone(), jsgraph_->NoContextConstant());
-          node->AppendInput(jsgraph_->zone(), jsgraph_->graph()->start());
-          NodeProperties::ChangeOp(node, jsgraph_->common()->Call(desc));
-        }
-        return;
-      }
-      case IrOpcode::kStringLessThan: {
-        VisitBinop(node, UseInfo::AnyTagged(), MachineRepresentation::kTagged);
-        if (lower()) {
-          // StringLessThan(x, y) => Call(StringLessThanStub, x, y, no-context)
-          Operator::Properties properties = Operator::kEliminatable;
-          Callable callable = CodeFactory::StringLessThan(jsgraph_->isolate());
-          CallDescriptor::Flags flags = CallDescriptor::kNoFlags;
-          CallDescriptor* desc = Linkage::GetStubCallDescriptor(
-              jsgraph_->isolate(), jsgraph_->zone(), callable.descriptor(), 0,
-              flags, properties);
-          node->InsertInput(jsgraph_->zone(), 0,
-                            jsgraph_->HeapConstant(callable.code()));
-          node->AppendInput(jsgraph_->zone(), jsgraph_->NoContextConstant());
-          node->AppendInput(jsgraph_->zone(), jsgraph_->graph()->start());
-          NodeProperties::ChangeOp(node, jsgraph_->common()->Call(desc));
-        }
-        return;
-      }
+      case IrOpcode::kStringEqual:
+      case IrOpcode::kStringLessThan:
       case IrOpcode::kStringLessThanOrEqual: {
-        VisitBinop(node, UseInfo::AnyTagged(), MachineRepresentation::kTagged);
-        if (lower()) {
-          // StringLessThanOrEqual(x, y)
-          //   => Call(StringLessThanOrEqualStub, x, y, no-context)
-          Operator::Properties properties = Operator::kEliminatable;
-          Callable callable =
-              CodeFactory::StringLessThanOrEqual(jsgraph_->isolate());
-          CallDescriptor::Flags flags = CallDescriptor::kNoFlags;
-          CallDescriptor* desc = Linkage::GetStubCallDescriptor(
-              jsgraph_->isolate(), jsgraph_->zone(), callable.descriptor(), 0,
-              flags, properties);
-          node->InsertInput(jsgraph_->zone(), 0,
-                            jsgraph_->HeapConstant(callable.code()));
-          node->AppendInput(jsgraph_->zone(), jsgraph_->NoContextConstant());
-          node->AppendInput(jsgraph_->zone(), jsgraph_->graph()->start());
-          NodeProperties::ChangeOp(node, jsgraph_->common()->Call(desc));
-        }
-        return;
+        return VisitBinop(node, UseInfo::AnyTagged(),
+                          MachineRepresentation::kTagged);
       }
       case IrOpcode::kStringCharCodeAt: {
         VisitBinop(node, UseInfo::AnyTagged(), UseInfo::TruncatingWord32(),
@@ -2090,23 +2056,36 @@
                   MachineRepresentation::kTagged);
         return;
       }
+      case IrOpcode::kStringFromCodePoint: {
+        VisitUnop(node, UseInfo::TruncatingWord32(),
+                  MachineRepresentation::kTagged);
+        return;
+      }
 
       case IrOpcode::kCheckBounds: {
         Type* index_type = TypeOf(node->InputAt(0));
+        Type* length_type = TypeOf(node->InputAt(1));
         if (index_type->Is(Type::Unsigned32())) {
           VisitBinop(node, UseInfo::TruncatingWord32(),
                      MachineRepresentation::kWord32);
+          if (lower() && index_type->Max() < length_type->Min()) {
+            // The bounds check is redundant if we already know that
+            // the index is within the bounds of [0.0, length[.
+            DeferReplacement(node, node->InputAt(0));
+          }
         } else {
           VisitBinop(node, UseInfo::CheckedSigned32AsWord32(),
                      UseInfo::TruncatingWord32(),
                      MachineRepresentation::kWord32);
         }
-        if (lower()) {
-          // The bounds check is redundant if we already know that
-          // the index is within the bounds of [0.0, length[.
-          if (index_type->Is(NodeProperties::GetType(node))) {
-            DeferReplacement(node, node->InputAt(0));
-          }
+        return;
+      }
+      case IrOpcode::kCheckHeapObject: {
+        if (InputCannotBe(node, Type::SignedSmall())) {
+          VisitUnop(node, UseInfo::AnyTagged(), MachineRepresentation::kTagged);
+          if (lower()) DeferReplacement(node, node->InputAt(0));
+        } else {
+          VisitUnop(node, UseInfo::AnyTagged(), MachineRepresentation::kTagged);
         }
         return;
       }
@@ -2135,28 +2114,20 @@
         }
         return;
       }
-      case IrOpcode::kCheckString: {
-        if (InputIs(node, Type::String())) {
-          VisitUnop(node, UseInfo::AnyTagged(), MachineRepresentation::kTagged);
-          if (lower()) DeferReplacement(node, node->InputAt(0));
-        } else {
-          VisitUnop(node, UseInfo::AnyTagged(), MachineRepresentation::kTagged);
-        }
-        return;
-      }
-      case IrOpcode::kCheckTaggedPointer: {
-        if (InputCannotBe(node, Type::SignedSmall())) {
-          VisitUnop(node, UseInfo::AnyTagged(), MachineRepresentation::kTagged);
-          if (lower()) DeferReplacement(node, node->InputAt(0));
-        } else {
-          VisitUnop(node, UseInfo::AnyTagged(), MachineRepresentation::kTagged);
-        }
-        return;
-      }
-      case IrOpcode::kCheckTaggedSigned: {
+      case IrOpcode::kCheckSmi: {
         if (SmiValuesAre32Bits() && truncation.IsUsedAsWord32()) {
           VisitUnop(node, UseInfo::CheckedSignedSmallAsWord32(),
                     MachineRepresentation::kWord32);
+        } else {
+          VisitUnop(node, UseInfo::CheckedSignedSmallAsTaggedSigned(),
+                    MachineRepresentation::kTaggedSigned);
+        }
+        if (lower()) DeferReplacement(node, node->InputAt(0));
+        return;
+      }
+      case IrOpcode::kCheckString: {
+        if (InputIs(node, Type::String())) {
+          VisitUnop(node, UseInfo::AnyTagged(), MachineRepresentation::kTagged);
           if (lower()) DeferReplacement(node, node->InputAt(0));
         } else {
           VisitUnop(node, UseInfo::AnyTagged(), MachineRepresentation::kTagged);
@@ -2175,15 +2146,16 @@
         FieldAccess access = FieldAccessOf(node->op());
         MachineRepresentation const representation =
             access.machine_type.representation();
-        // TODO(bmeurer): Introduce an appropriate tagged-signed machine rep.
         VisitUnop(node, UseInfoForBasePointer(access), representation);
         return;
       }
       case IrOpcode::kStoreField: {
         FieldAccess access = FieldAccessOf(node->op());
+        NodeInfo* input_info = GetInfo(node->InputAt(1));
         WriteBarrierKind write_barrier_kind = WriteBarrierKindFor(
             access.base_is_tagged, access.machine_type.representation(),
-            access.offset, access.type, node->InputAt(1));
+            access.offset, access.type, input_info->representation(),
+            node->InputAt(1));
         ProcessInput(node, 0, UseInfoForBasePointer(access));
         ProcessInput(node, 1, TruncatingUseInfoFromRepresentation(
                                   access.machine_type.representation()));
@@ -2255,9 +2227,10 @@
       }
       case IrOpcode::kStoreElement: {
         ElementAccess access = ElementAccessOf(node->op());
+        NodeInfo* input_info = GetInfo(node->InputAt(2));
         WriteBarrierKind write_barrier_kind = WriteBarrierKindFor(
             access.base_is_tagged, access.machine_type.representation(),
-            access.type, node->InputAt(2));
+            access.type, input_info->representation(), node->InputAt(2));
         ProcessInput(node, 0, UseInfoForBasePointer(access));  // base
         ProcessInput(node, 1, UseInfo::TruncatingWord32());    // index
         ProcessInput(node, 2,
@@ -2336,14 +2309,34 @@
         }
         return;
       }
-      case IrOpcode::kObjectIsCallable:
-      case IrOpcode::kObjectIsNumber:
-      case IrOpcode::kObjectIsReceiver:
-      case IrOpcode::kObjectIsSmi:
-      case IrOpcode::kObjectIsString:
+      case IrOpcode::kObjectIsCallable: {
+        // TODO(turbofan): Add Type::Callable to optimize this?
+        VisitUnop(node, UseInfo::AnyTagged(), MachineRepresentation::kBit);
+        return;
+      }
+      case IrOpcode::kObjectIsNumber: {
+        VisitObjectIs(node, Type::Number(), lowering);
+        return;
+      }
+      case IrOpcode::kObjectIsReceiver: {
+        VisitObjectIs(node, Type::Receiver(), lowering);
+        return;
+      }
+      case IrOpcode::kObjectIsSmi: {
+        // TODO(turbofan): Optimize based on input representation.
+        VisitUnop(node, UseInfo::AnyTagged(), MachineRepresentation::kBit);
+        return;
+      }
+      case IrOpcode::kObjectIsString: {
+        VisitObjectIs(node, Type::String(), lowering);
+        return;
+      }
       case IrOpcode::kObjectIsUndetectable: {
-        ProcessInput(node, 0, UseInfo::AnyTagged());
-        SetOutput(node, MachineRepresentation::kBit);
+        VisitObjectIs(node, Type::Undetectable(), lowering);
+        return;
+      }
+      case IrOpcode::kArrayBufferWasNeutered: {
+        VisitUnop(node, UseInfo::AnyTagged(), MachineRepresentation::kBit);
         return;
       }
       case IrOpcode::kCheckFloat64Hole: {
@@ -2403,158 +2396,11 @@
         return;
       }
 
-      //------------------------------------------------------------------
-      // Machine-level operators.
-      //------------------------------------------------------------------
-      case IrOpcode::kLoad: {
-        // TODO(jarin) Eventually, we should get rid of all machine stores
-        // from the high-level phases, then this becomes UNREACHABLE.
-        LoadRepresentation rep = LoadRepresentationOf(node->op());
-        ProcessInput(node, 0, UseInfo::AnyTagged());   // tagged pointer
-        ProcessInput(node, 1, UseInfo::PointerInt());  // index
-        ProcessRemainingInputs(node, 2);
-        return SetOutput(node, rep.representation());
-      }
-      case IrOpcode::kStore: {
-        // TODO(jarin) Eventually, we should get rid of all machine stores
-        // from the high-level phases, then this becomes UNREACHABLE.
-        StoreRepresentation rep = StoreRepresentationOf(node->op());
-        ProcessInput(node, 0, UseInfo::AnyTagged());   // tagged pointer
-        ProcessInput(node, 1, UseInfo::PointerInt());  // index
-        ProcessInput(node, 2,
-                     TruncatingUseInfoFromRepresentation(rep.representation()));
-        ProcessRemainingInputs(node, 3);
-        return SetOutput(node, MachineRepresentation::kNone);
-      }
-      case IrOpcode::kWord32Shr:
-        // We output unsigned int32 for shift right because JavaScript.
-        return VisitBinop(node, UseInfo::TruncatingWord32(),
-                          MachineRepresentation::kWord32);
-      case IrOpcode::kWord32And:
-      case IrOpcode::kWord32Or:
-      case IrOpcode::kWord32Xor:
-      case IrOpcode::kWord32Shl:
-      case IrOpcode::kWord32Sar:
-        // We use signed int32 as the output type for these word32 operations,
-        // though the machine bits are the same for either signed or unsigned,
-        // because JavaScript considers the result from these operations signed.
-        return VisitBinop(node, UseInfo::TruncatingWord32(),
-                          MachineRepresentation::kWord32);
-      case IrOpcode::kWord32Equal:
-        return VisitBinop(node, UseInfo::TruncatingWord32(),
-                          MachineRepresentation::kBit);
-
-      case IrOpcode::kWord32Clz:
-        return VisitUnop(node, UseInfo::TruncatingWord32(),
-                         MachineRepresentation::kWord32);
-
-      case IrOpcode::kInt32Add:
-      case IrOpcode::kInt32Sub:
-      case IrOpcode::kInt32Mul:
-      case IrOpcode::kInt32MulHigh:
-      case IrOpcode::kInt32Div:
-      case IrOpcode::kInt32Mod:
-        return VisitInt32Binop(node);
-      case IrOpcode::kUint32Div:
-      case IrOpcode::kUint32Mod:
-      case IrOpcode::kUint32MulHigh:
-        return VisitUint32Binop(node);
-      case IrOpcode::kInt32LessThan:
-      case IrOpcode::kInt32LessThanOrEqual:
-        return VisitInt32Cmp(node);
-
-      case IrOpcode::kUint32LessThan:
-      case IrOpcode::kUint32LessThanOrEqual:
-        return VisitUint32Cmp(node);
-
-      case IrOpcode::kInt64Add:
-      case IrOpcode::kInt64Sub:
-      case IrOpcode::kInt64Mul:
-      case IrOpcode::kInt64Div:
-      case IrOpcode::kInt64Mod:
-        return VisitInt64Binop(node);
-      case IrOpcode::kInt64LessThan:
-      case IrOpcode::kInt64LessThanOrEqual:
-        return VisitInt64Cmp(node);
-
-      case IrOpcode::kUint64LessThan:
-        return VisitUint64Cmp(node);
-
-      case IrOpcode::kUint64Div:
-      case IrOpcode::kUint64Mod:
-        return VisitUint64Binop(node);
-
-      case IrOpcode::kWord64And:
-      case IrOpcode::kWord64Or:
-      case IrOpcode::kWord64Xor:
-      case IrOpcode::kWord64Shl:
-      case IrOpcode::kWord64Shr:
-      case IrOpcode::kWord64Sar:
-        return VisitBinop(node, UseInfo::TruncatingWord64(),
-                          MachineRepresentation::kWord64);
-      case IrOpcode::kWord64Equal:
-        return VisitBinop(node, UseInfo::TruncatingWord64(),
-                          MachineRepresentation::kBit);
-
-      case IrOpcode::kChangeInt32ToInt64:
-        return VisitUnop(node, UseInfo::TruncatingWord32(),
-                         MachineRepresentation::kWord64);
-      case IrOpcode::kChangeUint32ToUint64:
-        return VisitUnop(node, UseInfo::TruncatingWord32(),
-                         MachineRepresentation::kWord64);
-      case IrOpcode::kTruncateFloat64ToFloat32:
-        return VisitUnop(node, UseInfo::TruncatingFloat64(),
-                         MachineRepresentation::kFloat32);
-      case IrOpcode::kTruncateFloat64ToWord32:
-        return VisitUnop(node, UseInfo::TruncatingFloat64(),
-                         MachineRepresentation::kWord32);
-
-      case IrOpcode::kChangeInt32ToFloat64:
-        return VisitUnop(node, UseInfo::TruncatingWord32(),
-                         MachineRepresentation::kFloat64);
-      case IrOpcode::kChangeUint32ToFloat64:
-        return VisitUnop(node, UseInfo::TruncatingWord32(),
-                         MachineRepresentation::kFloat64);
-      case IrOpcode::kFloat64Add:
-      case IrOpcode::kFloat64Sub:
-      case IrOpcode::kFloat64Mul:
-      case IrOpcode::kFloat64Div:
-      case IrOpcode::kFloat64Mod:
-      case IrOpcode::kFloat64Min:
-        return VisitFloat64Binop(node);
-      case IrOpcode::kFloat64Abs:
-      case IrOpcode::kFloat64Sqrt:
-      case IrOpcode::kFloat64RoundDown:
-      case IrOpcode::kFloat64RoundTruncate:
-      case IrOpcode::kFloat64RoundTiesAway:
-      case IrOpcode::kFloat64RoundUp:
-        return VisitUnop(node, UseInfo::TruncatingFloat64(),
-                         MachineRepresentation::kFloat64);
-      case IrOpcode::kFloat64SilenceNaN:
-        return VisitUnop(node, UseInfo::TruncatingFloat64(),
-                         MachineRepresentation::kFloat64);
-      case IrOpcode::kFloat64Equal:
-      case IrOpcode::kFloat64LessThan:
-      case IrOpcode::kFloat64LessThanOrEqual:
-        return VisitFloat64Cmp(node);
-      case IrOpcode::kFloat64ExtractLowWord32:
-      case IrOpcode::kFloat64ExtractHighWord32:
-        return VisitUnop(node, UseInfo::TruncatingFloat64(),
-                         MachineRepresentation::kWord32);
-      case IrOpcode::kFloat64InsertLowWord32:
-      case IrOpcode::kFloat64InsertHighWord32:
-        return VisitBinop(node, UseInfo::TruncatingFloat64(),
-                          UseInfo::TruncatingWord32(),
-                          MachineRepresentation::kFloat64);
       case IrOpcode::kNumberSilenceNaN:
         VisitUnop(node, UseInfo::TruncatingFloat64(),
                   MachineRepresentation::kFloat64);
         if (lower()) NodeProperties::ChangeOp(node, Float64Op(node));
         return;
-      case IrOpcode::kLoadStackPointer:
-      case IrOpcode::kLoadFramePointer:
-      case IrOpcode::kLoadParentFramePointer:
-        return VisitLeaf(node, MachineType::PointerRepresentation());
       case IrOpcode::kStateValues:
         return VisitStateValues(node);
       case IrOpcode::kTypeGuard: {
@@ -2563,37 +2409,55 @@
         // the sigma's type.
         MachineRepresentation output =
             GetOutputInfoForPhi(node, TypeOf(node->InputAt(0)), truncation);
-
         VisitUnop(node, UseInfo(output, truncation), output);
         if (lower()) DeferReplacement(node, node->InputAt(0));
         return;
       }
 
-      // The following opcodes are not produced before representation
-      // inference runs, so we do not have any real test coverage.
-      // Simply fail here.
-      case IrOpcode::kChangeFloat64ToInt32:
-      case IrOpcode::kChangeFloat64ToUint32:
-      case IrOpcode::kTruncateInt64ToInt32:
-      case IrOpcode::kChangeFloat32ToFloat64:
-      case IrOpcode::kCheckedInt32Add:
-      case IrOpcode::kCheckedInt32Sub:
-      case IrOpcode::kCheckedUint32ToInt32:
-      case IrOpcode::kCheckedFloat64ToInt32:
-      case IrOpcode::kCheckedTaggedToInt32:
-      case IrOpcode::kCheckedTaggedToFloat64:
-      case IrOpcode::kPlainPrimitiveToWord32:
-      case IrOpcode::kPlainPrimitiveToFloat64:
-      case IrOpcode::kLoopExit:
-      case IrOpcode::kLoopExitValue:
-      case IrOpcode::kLoopExitEffect:
-        FATAL("Representation inference: unsupported opcodes.");
-        break;
-
-      default:
+      // Operators with all inputs tagged and no or tagged output have uniform
+      // handling.
+      case IrOpcode::kEnd:
+      case IrOpcode::kReturn:
+      case IrOpcode::kIfSuccess:
+      case IrOpcode::kIfException:
+      case IrOpcode::kIfTrue:
+      case IrOpcode::kIfFalse:
+      case IrOpcode::kDeoptimize:
+      case IrOpcode::kEffectPhi:
+      case IrOpcode::kTerminate:
+      case IrOpcode::kFrameState:
+      case IrOpcode::kCheckpoint:
+      case IrOpcode::kLoop:
+      case IrOpcode::kMerge:
+      case IrOpcode::kThrow:
+      case IrOpcode::kBeginRegion:
+      case IrOpcode::kFinishRegion:
+      case IrOpcode::kOsrValue:
+      case IrOpcode::kProjection:
+      case IrOpcode::kObjectState:
+// All JavaScript operators except JSToNumber have uniform handling.
+#define OPCODE_CASE(name) case IrOpcode::k##name:
+        JS_SIMPLE_BINOP_LIST(OPCODE_CASE)
+        JS_OTHER_UNOP_LIST(OPCODE_CASE)
+        JS_OBJECT_OP_LIST(OPCODE_CASE)
+        JS_CONTEXT_OP_LIST(OPCODE_CASE)
+        JS_OTHER_OP_LIST(OPCODE_CASE)
+#undef OPCODE_CASE
+      case IrOpcode::kJSToInteger:
+      case IrOpcode::kJSToLength:
+      case IrOpcode::kJSToName:
+      case IrOpcode::kJSToObject:
+      case IrOpcode::kJSToString:
         VisitInputs(node);
         // Assume the output is tagged.
         return SetOutput(node, MachineRepresentation::kTagged);
+
+      default:
+        V8_Fatal(
+            __FILE__, __LINE__,
+            "Representation inference: unsupported opcode %i (%s), node #%i\n.",
+            node->opcode(), node->op()->mnemonic(), node->id());
+        break;
     }
     UNREACHABLE();
   }
@@ -2977,7 +2841,7 @@
       graph()->NewNode(
           common()->Select(MachineRepresentation::kFloat64),
           graph()->NewNode(machine()->Float64LessThan(), zero, input), one,
-          zero));
+          input));
 }
 
 Node* SimplifiedLowering::Int32Abs(Node* const node) {
@@ -3307,6 +3171,34 @@
   NodeProperties::ChangeOp(node, common()->Call(desc));
 }
 
+void SimplifiedLowering::DoIntegral32ToBit(Node* node) {
+  Node* const input = node->InputAt(0);
+  Node* const zero = jsgraph()->Int32Constant(0);
+  Operator const* const op = machine()->Word32Equal();
+
+  node->ReplaceInput(0, graph()->NewNode(op, input, zero));
+  node->AppendInput(graph()->zone(), zero);
+  NodeProperties::ChangeOp(node, op);
+}
+
+void SimplifiedLowering::DoOrderedNumberToBit(Node* node) {
+  Node* const input = node->InputAt(0);
+
+  node->ReplaceInput(0, graph()->NewNode(machine()->Float64Equal(), input,
+                                         jsgraph()->Float64Constant(0.0)));
+  node->AppendInput(graph()->zone(), jsgraph()->Int32Constant(0));
+  NodeProperties::ChangeOp(node, machine()->Word32Equal());
+}
+
+void SimplifiedLowering::DoNumberToBit(Node* node) {
+  Node* const input = node->InputAt(0);
+
+  node->ReplaceInput(0, jsgraph()->Float64Constant(0.0));
+  node->AppendInput(graph()->zone(),
+                    graph()->NewNode(machine()->Float64Abs(), input));
+  NodeProperties::ChangeOp(node, machine()->Float64LessThan());
+}
+
 Node* SimplifiedLowering::ToNumberCode() {
   if (!to_number_code_.is_set()) {
     Callable callable = CodeFactory::ToNumber(isolate());
diff --git a/src/compiler/simplified-lowering.h b/src/compiler/simplified-lowering.h
index 18c7331..9e2a499 100644
--- a/src/compiler/simplified-lowering.h
+++ b/src/compiler/simplified-lowering.h
@@ -12,17 +12,13 @@
 
 namespace v8 {
 namespace internal {
-
-// Forward declarations.
-class TypeCache;
-
-
 namespace compiler {
 
 // Forward declarations.
 class RepresentationChanger;
 class RepresentationSelector;
 class SourcePositionTable;
+class TypeCache;
 
 class SimplifiedLowering final {
  public:
@@ -45,6 +41,9 @@
   void DoStoreBuffer(Node* node);
   void DoShift(Node* node, Operator const* op, Type* rhs_type);
   void DoStringToNumber(Node* node);
+  void DoIntegral32ToBit(Node* node);
+  void DoOrderedNumberToBit(Node* node);
+  void DoNumberToBit(Node* node);
 
  private:
   JSGraph* const jsgraph_;
diff --git a/src/compiler/simplified-operator-reducer.cc b/src/compiler/simplified-operator-reducer.cc
index d8bd1e0..d172adc 100644
--- a/src/compiler/simplified-operator-reducer.cc
+++ b/src/compiler/simplified-operator-reducer.cc
@@ -9,8 +9,8 @@
 #include "src/compiler/node-matchers.h"
 #include "src/compiler/operator-properties.h"
 #include "src/compiler/simplified-operator.h"
+#include "src/compiler/type-cache.h"
 #include "src/conversions-inl.h"
-#include "src/type-cache.h"
 
 namespace v8 {
 namespace internal {
@@ -126,6 +126,14 @@
       }
       break;
     }
+    case IrOpcode::kCheckedTaggedSignedToInt32: {
+      NodeMatcher m(node->InputAt(0));
+      if (m.IsConvertTaggedHoleToUndefined()) {
+        node->ReplaceInput(0, m.InputAt(0));
+        return Changed(node);
+      }
+      break;
+    }
     case IrOpcode::kCheckIf: {
       HeapObjectMatcher m(node->InputAt(0));
       if (m.Is(factory()->true_value())) {
@@ -142,22 +150,30 @@
       }
       break;
     }
-    case IrOpcode::kCheckTaggedPointer: {
+    case IrOpcode::kCheckHeapObject: {
       Node* const input = node->InputAt(0);
       if (DecideObjectIsSmi(input) == Decision::kFalse) {
         ReplaceWithValue(node, input);
         return Replace(input);
       }
+      NodeMatcher m(input);
+      if (m.IsCheckHeapObject()) {
+        ReplaceWithValue(node, input);
+        return Replace(input);
+      }
       break;
     }
-    case IrOpcode::kCheckTaggedSigned: {
+    case IrOpcode::kCheckSmi: {
       Node* const input = node->InputAt(0);
       if (DecideObjectIsSmi(input) == Decision::kTrue) {
         ReplaceWithValue(node, input);
         return Replace(input);
       }
       NodeMatcher m(input);
-      if (m.IsConvertTaggedHoleToUndefined()) {
+      if (m.IsCheckSmi()) {
+        ReplaceWithValue(node, input);
+        return Replace(input);
+      } else if (m.IsConvertTaggedHoleToUndefined()) {
         node->ReplaceInput(0, m.InputAt(0));
         return Changed(node);
       }
diff --git a/src/compiler/simplified-operator.cc b/src/compiler/simplified-operator.cc
index cf0c3de..400db97 100644
--- a/src/compiler/simplified-operator.cc
+++ b/src/compiler/simplified-operator.cc
@@ -7,7 +7,7 @@
 #include "src/base/lazy-instance.h"
 #include "src/compiler/opcodes.h"
 #include "src/compiler/operator.h"
-#include "src/types.h"
+#include "src/compiler/types.h"
 
 namespace v8 {
 namespace internal {
@@ -208,8 +208,7 @@
 }
 
 CheckForMinusZeroMode CheckMinusZeroModeOf(const Operator* op) {
-  DCHECK(op->opcode() == IrOpcode::kChangeFloat64ToTagged ||
-         op->opcode() == IrOpcode::kCheckedInt32Mul ||
+  DCHECK(op->opcode() == IrOpcode::kCheckedInt32Mul ||
          op->opcode() == IrOpcode::kCheckedFloat64ToInt32 ||
          op->opcode() == IrOpcode::kCheckedTaggedToInt32);
   return OpParameter<CheckForMinusZeroMode>(op);
@@ -332,6 +331,16 @@
   return OpParameter<NumberOperationHint>(op);
 }
 
+PretenureFlag PretenureFlagOf(const Operator* op) {
+  DCHECK_EQ(IrOpcode::kAllocate, op->opcode());
+  return OpParameter<PretenureFlag>(op);
+}
+
+UnicodeEncoding UnicodeEncodingOf(const Operator* op) {
+  DCHECK(op->opcode() == IrOpcode::kStringFromCodePoint);
+  return OpParameter<UnicodeEncoding>(op);
+}
+
 #define PURE_OP_LIST(V)                                          \
   V(BooleanNot, Operator::kNoProperties, 1, 0)                   \
   V(NumberEqual, Operator::kCommutative, 2, 0)                   \
@@ -381,6 +390,7 @@
   V(NumberTan, Operator::kNoProperties, 1, 0)                    \
   V(NumberTanh, Operator::kNoProperties, 1, 0)                   \
   V(NumberTrunc, Operator::kNoProperties, 1, 0)                  \
+  V(NumberToBoolean, Operator::kNoProperties, 1, 0)              \
   V(NumberToInt32, Operator::kNoProperties, 1, 0)                \
   V(NumberToUint32, Operator::kNoProperties, 1, 0)               \
   V(NumberSilenceNaN, Operator::kNoProperties, 1, 0)             \
@@ -393,11 +403,13 @@
   V(ChangeTaggedToInt32, Operator::kNoProperties, 1, 0)          \
   V(ChangeTaggedToUint32, Operator::kNoProperties, 1, 0)         \
   V(ChangeTaggedToFloat64, Operator::kNoProperties, 1, 0)        \
+  V(ChangeFloat64ToTagged, Operator::kNoProperties, 1, 0)        \
   V(ChangeInt31ToTaggedSigned, Operator::kNoProperties, 1, 0)    \
   V(ChangeInt32ToTagged, Operator::kNoProperties, 1, 0)          \
   V(ChangeUint32ToTagged, Operator::kNoProperties, 1, 0)         \
   V(ChangeTaggedToBit, Operator::kNoProperties, 1, 0)            \
   V(ChangeBitToTagged, Operator::kNoProperties, 1, 0)            \
+  V(TruncateTaggedToBit, Operator::kNoProperties, 1, 0)          \
   V(TruncateTaggedToWord32, Operator::kNoProperties, 1, 0)       \
   V(TruncateTaggedToFloat64, Operator::kNoProperties, 1, 0)      \
   V(ObjectIsCallable, Operator::kNoProperties, 1, 0)             \
@@ -418,22 +430,25 @@
   V(SpeculativeNumberLessThan)                \
   V(SpeculativeNumberLessThanOrEqual)
 
-#define CHECKED_OP_LIST(V)            \
-  V(CheckBounds, 2, 1)                \
-  V(CheckIf, 1, 0)                    \
-  V(CheckNumber, 1, 1)                \
-  V(CheckString, 1, 1)                \
-  V(CheckTaggedHole, 1, 1)            \
-  V(CheckTaggedPointer, 1, 1)         \
-  V(CheckTaggedSigned, 1, 1)          \
-  V(CheckedInt32Add, 2, 1)            \
-  V(CheckedInt32Sub, 2, 1)            \
-  V(CheckedInt32Div, 2, 1)            \
-  V(CheckedInt32Mod, 2, 1)            \
-  V(CheckedUint32Div, 2, 1)           \
-  V(CheckedUint32Mod, 2, 1)           \
-  V(CheckedUint32ToInt32, 1, 1)       \
-  V(CheckedTaggedSignedToInt32, 1, 1) \
+#define CHECKED_OP_LIST(V)             \
+  V(CheckBounds, 2, 1)                 \
+  V(CheckHeapObject, 1, 1)             \
+  V(CheckIf, 1, 0)                     \
+  V(CheckNumber, 1, 1)                 \
+  V(CheckSmi, 1, 1)                    \
+  V(CheckString, 1, 1)                 \
+  V(CheckTaggedHole, 1, 1)             \
+  V(CheckedInt32Add, 2, 1)             \
+  V(CheckedInt32Sub, 2, 1)             \
+  V(CheckedInt32Div, 2, 1)             \
+  V(CheckedInt32Mod, 2, 1)             \
+  V(CheckedUint32Div, 2, 1)            \
+  V(CheckedUint32Mod, 2, 1)            \
+  V(CheckedUint32ToInt32, 1, 1)        \
+  V(CheckedUint32ToTaggedSigned, 1, 1) \
+  V(CheckedInt32ToTaggedSigned, 1, 1)  \
+  V(CheckedTaggedSignedToInt32, 1, 1)  \
+  V(CheckedTaggedToTaggedSigned, 1, 1) \
   V(CheckedTruncateTaggedToWord32, 1, 1)
 
 struct SimplifiedOperatorGlobalCache final {
@@ -458,18 +473,24 @@
   CHECKED_OP_LIST(CHECKED)
 #undef CHECKED
 
-  template <CheckForMinusZeroMode kMode>
-  struct ChangeFloat64ToTaggedOperator final
-      : public Operator1<CheckForMinusZeroMode> {
-    ChangeFloat64ToTaggedOperator()
-        : Operator1<CheckForMinusZeroMode>(
-              IrOpcode::kChangeFloat64ToTagged, Operator::kPure,
-              "ChangeFloat64ToTagged", 1, 0, 0, 1, 0, 0, kMode) {}
+  template <UnicodeEncoding kEncoding>
+  struct StringFromCodePointOperator final : public Operator1<UnicodeEncoding> {
+    StringFromCodePointOperator()
+        : Operator1<UnicodeEncoding>(IrOpcode::kStringFromCodePoint,
+                                     Operator::kPure, "StringFromCodePoint", 1,
+                                     0, 0, 1, 0, 0, kEncoding) {}
   };
-  ChangeFloat64ToTaggedOperator<CheckForMinusZeroMode::kCheckForMinusZero>
-      kChangeFloat64ToTaggedCheckForMinusZeroOperator;
-  ChangeFloat64ToTaggedOperator<CheckForMinusZeroMode::kDontCheckForMinusZero>
-      kChangeFloat64ToTaggedDontCheckForMinusZeroOperator;
+  StringFromCodePointOperator<UnicodeEncoding::UTF16>
+      kStringFromCodePointOperatorUTF16;
+  StringFromCodePointOperator<UnicodeEncoding::UTF32>
+      kStringFromCodePointOperatorUTF32;
+
+  struct ArrayBufferWasNeuteredOperator final : public Operator {
+    ArrayBufferWasNeuteredOperator()
+        : Operator(IrOpcode::kArrayBufferWasNeutered, Operator::kEliminatable,
+                   "ArrayBufferWasNeutered", 1, 1, 1, 1, 1, 0) {}
+  };
+  ArrayBufferWasNeuteredOperator kArrayBufferWasNeutered;
 
   template <CheckForMinusZeroMode kMode>
   struct CheckedInt32MulOperator final
@@ -614,20 +635,9 @@
   const Operator* SimplifiedOperatorBuilder::Name() { return &cache_.k##Name; }
 PURE_OP_LIST(GET_FROM_CACHE)
 CHECKED_OP_LIST(GET_FROM_CACHE)
+GET_FROM_CACHE(ArrayBufferWasNeutered)
 #undef GET_FROM_CACHE
 
-const Operator* SimplifiedOperatorBuilder::ChangeFloat64ToTagged(
-    CheckForMinusZeroMode mode) {
-  switch (mode) {
-    case CheckForMinusZeroMode::kCheckForMinusZero:
-      return &cache_.kChangeFloat64ToTaggedCheckForMinusZeroOperator;
-    case CheckForMinusZeroMode::kDontCheckForMinusZero:
-      return &cache_.kChangeFloat64ToTaggedDontCheckForMinusZeroOperator;
-  }
-  UNREACHABLE();
-  return nullptr;
-}
-
 const Operator* SimplifiedOperatorBuilder::CheckedInt32Mul(
     CheckForMinusZeroMode mode) {
   switch (mode) {
@@ -761,6 +771,18 @@
   return nullptr;
 }
 
+const Operator* SimplifiedOperatorBuilder::StringFromCodePoint(
+    UnicodeEncoding encoding) {
+  switch (encoding) {
+    case UnicodeEncoding::UTF16:
+      return &cache_.kStringFromCodePointOperatorUTF16;
+    case UnicodeEncoding::UTF32:
+      return &cache_.kStringFromCodePointOperatorUTF32;
+  }
+  UNREACHABLE();
+  return nullptr;
+}
+
 #define SPECULATIVE_NUMBER_BINOP(Name)                                        \
   const Operator* SimplifiedOperatorBuilder::Name(NumberOperationHint hint) { \
     switch (hint) {                                                           \
diff --git a/src/compiler/simplified-operator.h b/src/compiler/simplified-operator.h
index 5e7fa75..a904391 100644
--- a/src/compiler/simplified-operator.h
+++ b/src/compiler/simplified-operator.h
@@ -8,6 +8,7 @@
 #include <iosfwd>
 
 #include "src/compiler/operator.h"
+#include "src/compiler/types.h"
 #include "src/handles.h"
 #include "src/machine-type.h"
 #include "src/objects.h"
@@ -16,10 +17,8 @@
 namespace internal {
 
 // Forward declarations.
-class Type;
 class Zone;
 
-
 namespace compiler {
 
 // Forward declarations.
@@ -184,6 +183,10 @@
 NumberOperationHint NumberOperationHintOf(const Operator* op)
     WARN_UNUSED_RESULT;
 
+PretenureFlag PretenureFlagOf(const Operator* op) WARN_UNUSED_RESULT;
+
+UnicodeEncoding UnicodeEncodingOf(const Operator*) WARN_UNUSED_RESULT;
+
 // Interface for building simplified operators, which represent the
 // medium-level operations of V8, including adding numbers, allocating objects,
 // indexing into objects and arrays, etc.
@@ -259,6 +262,7 @@
   const Operator* NumberTan();
   const Operator* NumberTanh();
   const Operator* NumberTrunc();
+  const Operator* NumberToBoolean();
   const Operator* NumberToInt32();
   const Operator* NumberToUint32();
 
@@ -287,6 +291,7 @@
   const Operator* StringLessThanOrEqual();
   const Operator* StringCharCodeAt();
   const Operator* StringFromCharCode();
+  const Operator* StringFromCodePoint(UnicodeEncoding encoding);
 
   const Operator* PlainPrimitiveToNumber();
   const Operator* PlainPrimitiveToWord32();
@@ -299,19 +304,21 @@
   const Operator* ChangeInt31ToTaggedSigned();
   const Operator* ChangeInt32ToTagged();
   const Operator* ChangeUint32ToTagged();
-  const Operator* ChangeFloat64ToTagged(CheckForMinusZeroMode);
+  const Operator* ChangeFloat64ToTagged();
   const Operator* ChangeTaggedToBit();
   const Operator* ChangeBitToTagged();
   const Operator* TruncateTaggedToWord32();
   const Operator* TruncateTaggedToFloat64();
+  const Operator* TruncateTaggedToBit();
 
   const Operator* CheckIf();
   const Operator* CheckBounds();
   const Operator* CheckMaps(int map_input_count);
+
+  const Operator* CheckHeapObject();
   const Operator* CheckNumber();
+  const Operator* CheckSmi();
   const Operator* CheckString();
-  const Operator* CheckTaggedPointer();
-  const Operator* CheckTaggedSigned();
 
   const Operator* CheckedInt32Add();
   const Operator* CheckedInt32Sub();
@@ -320,11 +327,14 @@
   const Operator* CheckedUint32Div();
   const Operator* CheckedUint32Mod();
   const Operator* CheckedInt32Mul(CheckForMinusZeroMode);
+  const Operator* CheckedInt32ToTaggedSigned();
   const Operator* CheckedUint32ToInt32();
+  const Operator* CheckedUint32ToTaggedSigned();
   const Operator* CheckedFloat64ToInt32(CheckForMinusZeroMode);
   const Operator* CheckedTaggedSignedToInt32();
   const Operator* CheckedTaggedToInt32(CheckForMinusZeroMode);
   const Operator* CheckedTaggedToFloat64(CheckTaggedInputMode);
+  const Operator* CheckedTaggedToTaggedSigned();
   const Operator* CheckedTruncateTaggedToWord32();
 
   const Operator* CheckFloat64Hole(CheckFloat64HoleMode);
@@ -338,6 +348,9 @@
   const Operator* ObjectIsString();
   const Operator* ObjectIsUndetectable();
 
+  // array-buffer-was-neutered buffer
+  const Operator* ArrayBufferWasNeutered();
+
   // ensure-writable-fast-elements object, elements
   const Operator* EnsureWritableFastElements();
 
diff --git a/src/compiler/state-values-utils.h b/src/compiler/state-values-utils.h
index 79550bd..704f5f6 100644
--- a/src/compiler/state-values-utils.h
+++ b/src/compiler/state-values-utils.h
@@ -55,7 +55,7 @@
   Zone* zone() { return graph()->zone(); }
 
   JSGraph* js_graph_;
-  ZoneHashMap hash_map_;
+  CustomMatcherZoneHashMap hash_map_;
   ZoneVector<NodeVector*> working_space_;  // One working space per level.
   Node* empty_state_values_;
 };
diff --git a/src/compiler/store-store-elimination.cc b/src/compiler/store-store-elimination.cc
index 98904b0..196cb0d 100644
--- a/src/compiler/store-store-elimination.cc
+++ b/src/compiler/store-store-elimination.cc
@@ -72,9 +72,7 @@
 
 namespace {
 
-// 16 bits was chosen fairly arbitrarily; it seems enough now. 8 bits is too
-// few.
-typedef uint16_t StoreOffset;
+typedef uint32_t StoreOffset;
 
 struct UnobservableStore {
   NodeId id_;
@@ -171,11 +169,11 @@
   const UnobservablesSet unobservables_visited_empty_;
 };
 
-// To safely cast an offset from a FieldAccess, which has a wider range
-// (namely int).
+// To safely cast an offset from a FieldAccess, which has a potentially wider
+// range (namely int).
 StoreOffset ToOffset(int offset) {
-  CHECK(0 <= offset && offset < (1 << 8 * sizeof(StoreOffset)));
-  return (StoreOffset)offset;
+  CHECK(0 <= offset);
+  return static_cast<StoreOffset>(offset);
 }
 
 StoreOffset ToOffset(const FieldAccess& access) {
@@ -405,11 +403,9 @@
     // Mark effect inputs for visiting.
     for (int i = 0; i < node->op()->EffectInputCount(); i++) {
       Node* input = NodeProperties::GetEffectInput(node, i);
-      if (!HasBeenVisited(input)) {
-        TRACE("    marking #%d:%s for revisit", input->id(),
-              input->op()->mnemonic());
-        MarkForRevisit(input);
-      }
+      TRACE("    marking #%d:%s for revisit", input->id(),
+            input->op()->mnemonic());
+      MarkForRevisit(input);
     }
   }
 }
diff --git a/src/compiler/store-store-elimination.h b/src/compiler/store-store-elimination.h
index 07ae2c2..cda7591 100644
--- a/src/compiler/store-store-elimination.h
+++ b/src/compiler/store-store-elimination.h
@@ -7,7 +7,7 @@
 
 #include "src/compiler/common-operator.h"
 #include "src/compiler/js-graph.h"
-#include "src/zone-containers.h"
+#include "src/zone/zone-containers.h"
 
 namespace v8 {
 namespace internal {
diff --git a/src/type-cache.cc b/src/compiler/type-cache.cc
similarity index 84%
rename from src/type-cache.cc
rename to src/compiler/type-cache.cc
index d05aaa1..cd80dc3 100644
--- a/src/type-cache.cc
+++ b/src/compiler/type-cache.cc
@@ -2,12 +2,13 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
-#include "src/type-cache.h"
+#include "src/compiler/type-cache.h"
 
 #include "src/base/lazy-instance.h"
 
 namespace v8 {
 namespace internal {
+namespace compiler {
 
 namespace {
 
@@ -15,9 +16,9 @@
 
 }  // namespace
 
-
 // static
 TypeCache const& TypeCache::Get() { return kCache.Get(); }
 
+}  // namespace compiler
 }  // namespace internal
 }  // namespace v8
diff --git a/src/compiler/type-cache.h b/src/compiler/type-cache.h
new file mode 100644
index 0000000..aa51dac
--- /dev/null
+++ b/src/compiler/type-cache.h
@@ -0,0 +1,157 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_TYPE_CACHE_H_
+#define V8_COMPILER_TYPE_CACHE_H_
+
+#include "src/compiler/types.h"
+#include "src/date.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+class TypeCache final {
+ private:
+  // This has to be first for the initialization magic to work.
+  AccountingAllocator allocator;
+  Zone zone_;
+
+ public:
+  static TypeCache const& Get();
+
+  TypeCache() : zone_(&allocator) {}
+
+  Type* const kInt8 = CreateRange<int8_t>();
+  Type* const kUint8 = CreateRange<uint8_t>();
+  Type* const kUint8Clamped = kUint8;
+  Type* const kInt16 = CreateRange<int16_t>();
+  Type* const kUint16 = CreateRange<uint16_t>();
+  Type* const kInt32 = Type::Signed32();
+  Type* const kUint32 = Type::Unsigned32();
+  Type* const kFloat32 = Type::Number();
+  Type* const kFloat64 = Type::Number();
+
+  Type* const kSmi = Type::SignedSmall();
+  Type* const kHoleySmi = Type::Union(kSmi, Type::Hole(), zone());
+  Type* const kHeapNumber = Type::Number();
+
+  Type* const kSingletonZero = CreateRange(0.0, 0.0);
+  Type* const kSingletonOne = CreateRange(1.0, 1.0);
+  Type* const kSingletonTen = CreateRange(10.0, 10.0);
+  Type* const kSingletonMinusOne = CreateRange(-1.0, -1.0);
+  Type* const kZeroOrUndefined =
+      Type::Union(kSingletonZero, Type::Undefined(), zone());
+  Type* const kTenOrUndefined =
+      Type::Union(kSingletonTen, Type::Undefined(), zone());
+  Type* const kMinusOneOrZero = CreateRange(-1.0, 0.0);
+  Type* const kMinusOneToOneOrMinusZeroOrNaN = Type::Union(
+      Type::Union(CreateRange(-1.0, 1.0), Type::MinusZero(), zone()),
+      Type::NaN(), zone());
+  Type* const kZeroOrOne = CreateRange(0.0, 1.0);
+  Type* const kZeroOrOneOrNaN = Type::Union(kZeroOrOne, Type::NaN(), zone());
+  Type* const kZeroToThirtyOne = CreateRange(0.0, 31.0);
+  Type* const kZeroToThirtyTwo = CreateRange(0.0, 32.0);
+  Type* const kZeroish =
+      Type::Union(kSingletonZero, Type::MinusZeroOrNaN(), zone());
+  Type* const kInteger = CreateRange(-V8_INFINITY, V8_INFINITY);
+  Type* const kIntegerOrMinusZero =
+      Type::Union(kInteger, Type::MinusZero(), zone());
+  Type* const kIntegerOrMinusZeroOrNaN =
+      Type::Union(kIntegerOrMinusZero, Type::NaN(), zone());
+  Type* const kPositiveInteger = CreateRange(0.0, V8_INFINITY);
+  Type* const kPositiveIntegerOrMinusZero =
+      Type::Union(kPositiveInteger, Type::MinusZero(), zone());
+  Type* const kPositiveIntegerOrMinusZeroOrNaN =
+      Type::Union(kPositiveIntegerOrMinusZero, Type::NaN(), zone());
+
+  Type* const kAdditiveSafeInteger =
+      CreateRange(-4503599627370496.0, 4503599627370496.0);
+  Type* const kSafeInteger = CreateRange(-kMaxSafeInteger, kMaxSafeInteger);
+  Type* const kAdditiveSafeIntegerOrMinusZero =
+      Type::Union(kAdditiveSafeInteger, Type::MinusZero(), zone());
+  Type* const kSafeIntegerOrMinusZero =
+      Type::Union(kSafeInteger, Type::MinusZero(), zone());
+  Type* const kPositiveSafeInteger = CreateRange(0.0, kMaxSafeInteger);
+
+  // The FixedArray::length property always contains a smi in the range
+  // [0, FixedArray::kMaxLength].
+  Type* const kFixedArrayLengthType = CreateRange(0.0, FixedArray::kMaxLength);
+
+  // The FixedDoubleArray::length property always contains a smi in the range
+  // [0, FixedDoubleArray::kMaxLength].
+  Type* const kFixedDoubleArrayLengthType =
+      CreateRange(0.0, FixedDoubleArray::kMaxLength);
+
+  // The JSArray::length property always contains a tagged number in the range
+  // [0, kMaxUInt32].
+  Type* const kJSArrayLengthType = Type::Unsigned32();
+
+  // The JSTypedArray::length property always contains a tagged number in the range
+  // [0, kMaxSmiValue].
+  Type* const kJSTypedArrayLengthType = Type::UnsignedSmall();
+
+  // The String::length property always contains a smi in the range
+  // [0, String::kMaxLength].
+  Type* const kStringLengthType = CreateRange(0.0, String::kMaxLength);
+
+  // The JSDate::day property always contains a tagged number in the range
+  // [1, 31] or NaN.
+  Type* const kJSDateDayType =
+      Type::Union(CreateRange(1, 31.0), Type::NaN(), zone());
+
+  // The JSDate::hour property always contains a tagged number in the range
+  // [0, 23] or NaN.
+  Type* const kJSDateHourType =
+      Type::Union(CreateRange(0, 23.0), Type::NaN(), zone());
+
+  // The JSDate::minute property always contains a tagged number in the range
+  // [0, 59] or NaN.
+  Type* const kJSDateMinuteType =
+      Type::Union(CreateRange(0, 59.0), Type::NaN(), zone());
+
+  // The JSDate::month property always contains a tagged number in the range
+  // [0, 11] or NaN.
+  Type* const kJSDateMonthType =
+      Type::Union(CreateRange(0, 11.0), Type::NaN(), zone());
+
+  // The JSDate::second property always contains a tagged number in the range
+  // [0, 59] or NaN.
+  Type* const kJSDateSecondType = kJSDateMinuteType;
+
+  // The JSDate::value property always contains a tagged number in the range
+  // [-kMaxTimeInMs, kMaxTimeInMs] or NaN.
+  Type* const kJSDateValueType = Type::Union(
+      CreateRange(-DateCache::kMaxTimeInMs, DateCache::kMaxTimeInMs),
+      Type::NaN(), zone());
+
+  // The JSDate::weekday property always contains a tagged number in the range
+  // [0, 6] or NaN.
+  Type* const kJSDateWeekdayType =
+      Type::Union(CreateRange(0, 6.0), Type::NaN(), zone());
+
+  // The JSDate::year property always contains a tagged number in the signed
+  // small range or NaN.
+  Type* const kJSDateYearType =
+      Type::Union(Type::SignedSmall(), Type::NaN(), zone());
+
+ private:
+  template <typename T>
+  Type* CreateRange() {
+    return CreateRange(std::numeric_limits<T>::min(),
+                       std::numeric_limits<T>::max());
+  }
+
+  Type* CreateRange(double min, double max) {
+    return Type::Range(min, max, zone());
+  }
+
+  Zone* zone() { return &zone_; }
+};
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_COMPILER_TYPE_CACHE_H_
diff --git a/src/compiler/type-hint-analyzer.cc b/src/compiler/type-hint-analyzer.cc
index 8e7a0f3..a668a48 100644
--- a/src/compiler/type-hint-analyzer.cc
+++ b/src/compiler/type-hint-analyzer.cc
@@ -6,8 +6,8 @@
 
 #include "src/assembler.h"
 #include "src/code-stubs.h"
-#include "src/compiler/type-hints.h"
 #include "src/ic/ic-state.h"
+#include "src/type-hints.h"
 
 namespace v8 {
 namespace internal {
@@ -15,17 +15,21 @@
 
 namespace {
 
-BinaryOperationHint ToBinaryOperationHint(BinaryOpICState::Kind kind) {
+BinaryOperationHint ToBinaryOperationHint(Token::Value op,
+                                          BinaryOpICState::Kind kind) {
   switch (kind) {
     case BinaryOpICState::NONE:
       return BinaryOperationHint::kNone;
     case BinaryOpICState::SMI:
       return BinaryOperationHint::kSignedSmall;
     case BinaryOpICState::INT32:
-      return BinaryOperationHint::kSigned32;
+      return (Token::IsTruncatingBinaryOp(op) && SmiValuesAre31Bits())
+                 ? BinaryOperationHint::kNumberOrOddball
+                 : BinaryOperationHint::kSigned32;
     case BinaryOpICState::NUMBER:
       return BinaryOperationHint::kNumberOrOddball;
     case BinaryOpICState::STRING:
+      return BinaryOperationHint::kString;
     case BinaryOpICState::GENERIC:
       return BinaryOperationHint::kAny;
   }
@@ -66,7 +70,7 @@
   Handle<Code> code = i->second;
   DCHECK_EQ(Code::BINARY_OP_IC, code->kind());
   BinaryOpICState state(code->GetIsolate(), code->extra_ic_state());
-  *hint = ToBinaryOperationHint(state.kind());
+  *hint = ToBinaryOperationHint(state.op(), state.kind());
   return true;
 }
 
@@ -132,20 +136,6 @@
   return new (zone()) TypeHintAnalysis(infos, zone());
 }
 
-// Helper function to transform the feedback to BinaryOperationHint.
-BinaryOperationHint BinaryOperationHintFromFeedback(int type_feedback) {
-  switch (type_feedback) {
-    case BinaryOperationFeedback::kSignedSmall:
-      return BinaryOperationHint::kSignedSmall;
-    case BinaryOperationFeedback::kNumber:
-      return BinaryOperationHint::kNumberOrOddball;
-    case BinaryOperationFeedback::kAny:
-    default:
-      return BinaryOperationHint::kAny;
-  }
-  UNREACHABLE();
-  return BinaryOperationHint::kNone;
-}
 
 }  // namespace compiler
 }  // namespace internal
diff --git a/src/compiler/type-hint-analyzer.h b/src/compiler/type-hint-analyzer.h
index e48938a..354f894 100644
--- a/src/compiler/type-hint-analyzer.h
+++ b/src/compiler/type-hint-analyzer.h
@@ -5,9 +5,9 @@
 #ifndef V8_COMPILER_TYPE_HINT_ANALYZER_H_
 #define V8_COMPILER_TYPE_HINT_ANALYZER_H_
 
-#include "src/compiler/type-hints.h"
 #include "src/handles.h"
-#include "src/zone-containers.h"
+#include "src/type-hints.h"
+#include "src/zone/zone-containers.h"
 
 namespace v8 {
 namespace internal {
@@ -50,8 +50,6 @@
   DISALLOW_COPY_AND_ASSIGN(TypeHintAnalyzer);
 };
 
-BinaryOperationHint BinaryOperationHintFromFeedback(int type_feedback);
-
 }  // namespace compiler
 }  // namespace internal
 }  // namespace v8
diff --git a/src/compiler/typed-optimization.cc b/src/compiler/typed-optimization.cc
new file mode 100644
index 0000000..c5e8648
--- /dev/null
+++ b/src/compiler/typed-optimization.cc
@@ -0,0 +1,253 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/typed-optimization.h"
+
+#include "src/compilation-dependencies.h"
+#include "src/compiler/js-graph.h"
+#include "src/compiler/node-properties.h"
+#include "src/compiler/simplified-operator.h"
+#include "src/compiler/type-cache.h"
+#include "src/isolate-inl.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+TypedOptimization::TypedOptimization(Editor* editor,
+                                     CompilationDependencies* dependencies,
+                                     Flags flags, JSGraph* jsgraph)
+    : AdvancedReducer(editor),
+      dependencies_(dependencies),
+      flags_(flags),
+      jsgraph_(jsgraph),
+      true_type_(Type::Constant(factory()->true_value(), graph()->zone())),
+      false_type_(Type::Constant(factory()->false_value(), graph()->zone())),
+      type_cache_(TypeCache::Get()) {}
+
+TypedOptimization::~TypedOptimization() {}
+
+Reduction TypedOptimization::Reduce(Node* node) {
+  // Check if the output type is a singleton.  In that case we already know the
+  // result value and can simply replace the node if it's eliminable.
+  if (!NodeProperties::IsConstant(node) && NodeProperties::IsTyped(node) &&
+      node->op()->HasProperty(Operator::kEliminatable)) {
+    // TODO(v8:5303): We must not eliminate FinishRegion here. This special
+    // case can be removed once we have separate operators for value and
+    // effect regions.
+    if (node->opcode() == IrOpcode::kFinishRegion) return NoChange();
+    // We can only constant-fold nodes here, that are known to not cause any
+    // side-effect, may it be a JavaScript observable side-effect or a possible
+    // eager deoptimization exit (i.e. {node} has an operator that doesn't have
+    // the Operator::kNoDeopt property).
+    Type* upper = NodeProperties::GetType(node);
+    if (upper->IsInhabited()) {
+      if (upper->IsConstant()) {
+        Node* replacement = jsgraph()->Constant(upper->AsConstant()->Value());
+        ReplaceWithValue(node, replacement);
+        return Changed(replacement);
+      } else if (upper->Is(Type::MinusZero())) {
+        Node* replacement = jsgraph()->Constant(factory()->minus_zero_value());
+        ReplaceWithValue(node, replacement);
+        return Changed(replacement);
+      } else if (upper->Is(Type::NaN())) {
+        Node* replacement = jsgraph()->NaNConstant();
+        ReplaceWithValue(node, replacement);
+        return Changed(replacement);
+      } else if (upper->Is(Type::Null())) {
+        Node* replacement = jsgraph()->NullConstant();
+        ReplaceWithValue(node, replacement);
+        return Changed(replacement);
+      } else if (upper->Is(Type::PlainNumber()) &&
+                 upper->Min() == upper->Max()) {
+        Node* replacement = jsgraph()->Constant(upper->Min());
+        ReplaceWithValue(node, replacement);
+        return Changed(replacement);
+      } else if (upper->Is(Type::Undefined())) {
+        Node* replacement = jsgraph()->UndefinedConstant();
+        ReplaceWithValue(node, replacement);
+        return Changed(replacement);
+      }
+    }
+  }
+  switch (node->opcode()) {
+    case IrOpcode::kCheckMaps:
+      return ReduceCheckMaps(node);
+    case IrOpcode::kCheckString:
+      return ReduceCheckString(node);
+    case IrOpcode::kLoadField:
+      return ReduceLoadField(node);
+    case IrOpcode::kNumberCeil:
+    case IrOpcode::kNumberFloor:
+    case IrOpcode::kNumberRound:
+    case IrOpcode::kNumberTrunc:
+      return ReduceNumberRoundop(node);
+    case IrOpcode::kPhi:
+      return ReducePhi(node);
+    case IrOpcode::kSelect:
+      return ReduceSelect(node);
+    default:
+      break;
+  }
+  return NoChange();
+}
+
+namespace {
+
+MaybeHandle<Map> GetStableMapFromObjectType(Type* object_type) {
+  if (object_type->IsConstant() &&
+      object_type->AsConstant()->Value()->IsHeapObject()) {
+    Handle<Map> object_map(
+        Handle<HeapObject>::cast(object_type->AsConstant()->Value())->map());
+    if (object_map->is_stable()) return object_map;
+  }
+  return MaybeHandle<Map>();
+}
+
+}  // namespace
+
+Reduction TypedOptimization::ReduceCheckMaps(Node* node) {
+  // The CheckMaps(o, ...map...) can be eliminated if map is stable,
+  // o has type Constant(object) and map == object->map, and either
+  //  (1) map cannot transition further, or
+  //  (2) we can add a code dependency on the stability of map
+  //      (to guard the Constant type information).
+  Node* const object = NodeProperties::GetValueInput(node, 0);
+  Type* const object_type = NodeProperties::GetType(object);
+  Node* const effect = NodeProperties::GetEffectInput(node);
+  Handle<Map> object_map;
+  if (GetStableMapFromObjectType(object_type).ToHandle(&object_map)) {
+    for (int i = 1; i < node->op()->ValueInputCount(); ++i) {
+      Node* const map = NodeProperties::GetValueInput(node, i);
+      Type* const map_type = NodeProperties::GetType(map);
+      if (map_type->IsConstant() &&
+          map_type->AsConstant()->Value().is_identical_to(object_map)) {
+        if (object_map->CanTransition()) {
+          dependencies()->AssumeMapStable(object_map);
+        }
+        return Replace(effect);
+      }
+    }
+  }
+  return NoChange();
+}
+
+Reduction TypedOptimization::ReduceCheckString(Node* node) {
+  Node* const input = NodeProperties::GetValueInput(node, 0);
+  Type* const input_type = NodeProperties::GetType(input);
+  if (input_type->Is(Type::String())) {
+    ReplaceWithValue(node, input);
+    return Replace(input);
+  }
+  return NoChange();
+}
+
+Reduction TypedOptimization::ReduceLoadField(Node* node) {
+  Node* const object = NodeProperties::GetValueInput(node, 0);
+  Type* const object_type = NodeProperties::GetType(object);
+  FieldAccess const& access = FieldAccessOf(node->op());
+  if (access.base_is_tagged == kTaggedBase &&
+      access.offset == HeapObject::kMapOffset) {
+    // We can replace LoadField[Map](o) with map if is stable, and
+    // o has type Constant(object) and map == object->map, and either
+    //  (1) map cannot transition further, or
+    //  (2) deoptimization is enabled and we can add a code dependency on the
+    //      stability of map (to guard the Constant type information).
+    Handle<Map> object_map;
+    if (GetStableMapFromObjectType(object_type).ToHandle(&object_map)) {
+      if (object_map->CanTransition()) {
+        if (flags() & kDeoptimizationEnabled) {
+          dependencies()->AssumeMapStable(object_map);
+        } else {
+          return NoChange();
+        }
+      }
+      Node* const value = jsgraph()->HeapConstant(object_map);
+      ReplaceWithValue(node, value);
+      return Replace(value);
+    }
+  }
+  return NoChange();
+}
+
+Reduction TypedOptimization::ReduceNumberRoundop(Node* node) {
+  Node* const input = NodeProperties::GetValueInput(node, 0);
+  Type* const input_type = NodeProperties::GetType(input);
+  if (input_type->Is(type_cache_.kIntegerOrMinusZeroOrNaN)) {
+    return Replace(input);
+  }
+  return NoChange();
+}
+
+Reduction TypedOptimization::ReducePhi(Node* node) {
+  // Try to narrow the type of the Phi {node}, which might be more precise now
+  // after lowering based on types, i.e. a SpeculativeNumberAdd has a more
+  // precise type than the JSAdd that was in the graph when the Typer was run.
+  DCHECK_EQ(IrOpcode::kPhi, node->opcode());
+  int arity = node->op()->ValueInputCount();
+  Type* type = NodeProperties::GetType(node->InputAt(0));
+  for (int i = 1; i < arity; ++i) {
+    type = Type::Union(type, NodeProperties::GetType(node->InputAt(i)),
+                       graph()->zone());
+  }
+  Type* const node_type = NodeProperties::GetType(node);
+  if (!node_type->Is(type)) {
+    type = Type::Intersect(node_type, type, graph()->zone());
+    NodeProperties::SetType(node, type);
+    return Changed(node);
+  }
+  return NoChange();
+}
+
+Reduction TypedOptimization::ReduceSelect(Node* node) {
+  DCHECK_EQ(IrOpcode::kSelect, node->opcode());
+  Node* const condition = NodeProperties::GetValueInput(node, 0);
+  Type* const condition_type = NodeProperties::GetType(condition);
+  Node* const vtrue = NodeProperties::GetValueInput(node, 1);
+  Type* const vtrue_type = NodeProperties::GetType(vtrue);
+  Node* const vfalse = NodeProperties::GetValueInput(node, 2);
+  Type* const vfalse_type = NodeProperties::GetType(vfalse);
+  if (condition_type->Is(true_type_)) {
+    // Select(condition:true, vtrue, vfalse) => vtrue
+    return Replace(vtrue);
+  }
+  if (condition_type->Is(false_type_)) {
+    // Select(condition:false, vtrue, vfalse) => vfalse
+    return Replace(vfalse);
+  }
+  if (vtrue_type->Is(true_type_) && vfalse_type->Is(false_type_)) {
+    // Select(condition, vtrue:true, vfalse:false) => condition
+    return Replace(condition);
+  }
+  if (vtrue_type->Is(false_type_) && vfalse_type->Is(true_type_)) {
+    // Select(condition, vtrue:false, vfalse:true) => BooleanNot(condition)
+    node->TrimInputCount(1);
+    NodeProperties::ChangeOp(node, simplified()->BooleanNot());
+    return Changed(node);
+  }
+  // Try to narrow the type of the Select {node}, which might be more precise
+  // now after lowering based on types.
+  Type* type = Type::Union(vtrue_type, vfalse_type, graph()->zone());
+  Type* const node_type = NodeProperties::GetType(node);
+  if (!node_type->Is(type)) {
+    type = Type::Intersect(node_type, type, graph()->zone());
+    NodeProperties::SetType(node, type);
+    return Changed(node);
+  }
+  return NoChange();
+}
+
+Factory* TypedOptimization::factory() const { return isolate()->factory(); }
+
+Graph* TypedOptimization::graph() const { return jsgraph()->graph(); }
+
+Isolate* TypedOptimization::isolate() const { return jsgraph()->isolate(); }
+
+SimplifiedOperatorBuilder* TypedOptimization::simplified() const {
+  return jsgraph()->simplified();
+}
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
diff --git a/src/compiler/typed-optimization.h b/src/compiler/typed-optimization.h
new file mode 100644
index 0000000..54d780c
--- /dev/null
+++ b/src/compiler/typed-optimization.h
@@ -0,0 +1,73 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_TYPED_OPTIMIZATION_H_
+#define V8_COMPILER_TYPED_OPTIMIZATION_H_
+
+#include "src/base/flags.h"
+#include "src/compiler/graph-reducer.h"
+
+namespace v8 {
+namespace internal {
+
+// Forward declarations.
+class CompilationDependencies;
+class Factory;
+class Isolate;
+
+namespace compiler {
+
+// Forward declarations.
+class JSGraph;
+class SimplifiedOperatorBuilder;
+class TypeCache;
+
+class TypedOptimization final : public AdvancedReducer {
+ public:
+  // Flags that control the mode of operation.
+  enum Flag {
+    kNoFlags = 0u,
+    kDeoptimizationEnabled = 1u << 0,
+  };
+  typedef base::Flags<Flag> Flags;
+
+  TypedOptimization(Editor* editor, CompilationDependencies* dependencies,
+                    Flags flags, JSGraph* jsgraph);
+  ~TypedOptimization();
+
+  Reduction Reduce(Node* node) final;
+
+ private:
+  Reduction ReduceCheckMaps(Node* node);
+  Reduction ReduceCheckString(Node* node);
+  Reduction ReduceLoadField(Node* node);
+  Reduction ReduceNumberRoundop(Node* node);
+  Reduction ReducePhi(Node* node);
+  Reduction ReduceSelect(Node* node);
+
+  CompilationDependencies* dependencies() const { return dependencies_; }
+  Factory* factory() const;
+  Flags flags() const { return flags_; }
+  Graph* graph() const;
+  Isolate* isolate() const;
+  JSGraph* jsgraph() const { return jsgraph_; }
+  SimplifiedOperatorBuilder* simplified() const;
+
+  CompilationDependencies* const dependencies_;
+  Flags const flags_;
+  JSGraph* const jsgraph_;
+  Type* const true_type_;
+  Type* const false_type_;
+  TypeCache const& type_cache_;
+
+  DISALLOW_COPY_AND_ASSIGN(TypedOptimization);
+};
+
+DEFINE_OPERATORS_FOR_FLAGS(TypedOptimization::Flags)
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_COMPILER_TYPED_OPTIMIZATION_H_
diff --git a/src/compiler/typer.cc b/src/compiler/typer.cc
index 0d07053..ec1197b 100644
--- a/src/compiler/typer.cc
+++ b/src/compiler/typer.cc
@@ -16,8 +16,8 @@
 #include "src/compiler/node.h"
 #include "src/compiler/operation-typer.h"
 #include "src/compiler/simplified-operator.h"
+#include "src/compiler/type-cache.h"
 #include "src/objects-inl.h"
-#include "src/type-cache.h"
 
 namespace v8 {
 namespace internal {
@@ -88,8 +88,6 @@
       COMMON_OP_LIST(DECLARE_CASE)
       SIMPLIFIED_COMPARE_BINOP_LIST(DECLARE_CASE)
       SIMPLIFIED_OTHER_OP_LIST(DECLARE_CASE)
-      MACHINE_OP_LIST(DECLARE_CASE)
-      MACHINE_SIMD_OP_LIST(DECLARE_CASE)
       JS_SIMPLE_UNOP_LIST(DECLARE_CASE)
       JS_OBJECT_OP_LIST(DECLARE_CASE)
       JS_CONTEXT_OP_LIST(DECLARE_CASE)
@@ -131,6 +129,8 @@
       DECLARE_CASE(End)
       SIMPLIFIED_CHANGE_OP_LIST(DECLARE_CASE)
       SIMPLIFIED_CHECKED_OP_LIST(DECLARE_CASE)
+      MACHINE_SIMD_OP_LIST(DECLARE_CASE)
+      MACHINE_OP_LIST(DECLARE_CASE)
 #undef DECLARE_CASE
       break;
     }
@@ -151,8 +151,6 @@
       COMMON_OP_LIST(DECLARE_CASE)
       SIMPLIFIED_COMPARE_BINOP_LIST(DECLARE_CASE)
       SIMPLIFIED_OTHER_OP_LIST(DECLARE_CASE)
-      MACHINE_OP_LIST(DECLARE_CASE)
-      MACHINE_SIMD_OP_LIST(DECLARE_CASE)
       JS_SIMPLE_UNOP_LIST(DECLARE_CASE)
       JS_OBJECT_OP_LIST(DECLARE_CASE)
       JS_CONTEXT_OP_LIST(DECLARE_CASE)
@@ -194,6 +192,8 @@
       DECLARE_CASE(End)
       SIMPLIFIED_CHANGE_OP_LIST(DECLARE_CASE)
       SIMPLIFIED_CHECKED_OP_LIST(DECLARE_CASE)
+      MACHINE_SIMD_OP_LIST(DECLARE_CASE)
+      MACHINE_OP_LIST(DECLARE_CASE)
 #undef DECLARE_CASE
       break;
     }
@@ -214,8 +214,6 @@
   COMMON_OP_LIST(DECLARE_METHOD)
   SIMPLIFIED_COMPARE_BINOP_LIST(DECLARE_METHOD)
   SIMPLIFIED_OTHER_OP_LIST(DECLARE_METHOD)
-  MACHINE_OP_LIST(DECLARE_METHOD)
-  MACHINE_SIMD_OP_LIST(DECLARE_METHOD)
   JS_OP_LIST(DECLARE_METHOD)
 #undef DECLARE_METHOD
 
@@ -229,7 +227,6 @@
     return TypeOrNone(operand_node);
   }
 
-  Type* WrapContextTypeForInput(Node* node);
   Type* Weaken(Node* node, Type* current_type, Type* previous_type);
 
   Zone* zone() { return typer_->zone(); }
@@ -298,6 +295,7 @@
 
   static Type* ReferenceEqualTyper(Type*, Type*, Typer*);
   static Type* StringFromCharCodeTyper(Type*, Typer*);
+  static Type* StringFromCodePointTyper(Type*, Typer*);
 
   Reduction UpdateType(Node* node, Type* current) {
     if (NodeProperties::IsTyped(node)) {
@@ -426,8 +424,8 @@
   if (type->Is(Type::Boolean())) return type;
   if (type->Is(t->falsish_)) return t->singleton_false_;
   if (type->Is(t->truish_)) return t->singleton_true_;
-  if (type->Is(Type::PlainNumber()) && (type->Max() < 0 || 0 < type->Min())) {
-    return t->singleton_true_;  // Ruled out nan, -0 and +0.
+  if (type->Is(Type::Number())) {
+    return t->operation_typer()->NumberToBoolean(type);
   }
   return Type::Boolean();
 }
@@ -519,8 +517,7 @@
 
 
 Type* Typer::Visitor::ObjectIsSmi(Type* type, Typer* t) {
-  if (type->Is(Type::TaggedSigned())) return t->singleton_true_;
-  if (type->Is(Type::TaggedPointer())) return t->singleton_false_;
+  if (!type->Maybe(Type::SignedSmall())) return t->singleton_false_;
   return Type::Boolean();
 }
 
@@ -554,11 +551,15 @@
 
 Type* Typer::Visitor::TypeOsrValue(Node* node) { return Type::Any(); }
 
+Type* Typer::Visitor::TypeRetain(Node* node) {
+  UNREACHABLE();
+  return nullptr;
+}
 
 Type* Typer::Visitor::TypeInt32Constant(Node* node) {
   double number = OpParameter<int32_t>(node);
   return Type::Intersect(Type::Range(number, number, zone()),
-                         Type::UntaggedIntegral32(), zone());
+                         Type::Integral32(), zone());
 }
 
 
@@ -567,24 +568,25 @@
   return Type::Internal();  // TODO(rossberg): Add int64 bitset type?
 }
 
-// TODO(gdeepti) : Fix this to do something meaningful.
 Type* Typer::Visitor::TypeRelocatableInt32Constant(Node* node) {
-  return Type::Internal();
+  UNREACHABLE();
+  return nullptr;
 }
 
 Type* Typer::Visitor::TypeRelocatableInt64Constant(Node* node) {
-  return Type::Internal();
+  UNREACHABLE();
+  return nullptr;
 }
 
 Type* Typer::Visitor::TypeFloat32Constant(Node* node) {
-  return Type::Intersect(Type::Of(OpParameter<float>(node), zone()),
-                         Type::UntaggedFloat32(), zone());
+  UNREACHABLE();
+  return nullptr;
 }
 
 
 Type* Typer::Visitor::TypeFloat64Constant(Node* node) {
-  return Type::Intersect(Type::Of(OpParameter<double>(node), zone()),
-                         Type::UntaggedFloat64(), zone());
+  UNREACHABLE();
+  return nullptr;
 }
 
 
@@ -633,16 +635,22 @@
   // do not apply and we cannot do anything).
   if (!initial_type->Is(typer_->cache_.kInteger) ||
       !increment_type->Is(typer_->cache_.kInteger)) {
-    // Fallback to normal phi typing.
-    Type* type = Operand(node, 0);
-    for (int i = 1; i < arity; ++i) {
+    // Fallback to normal phi typing, but ensure monotonicity.
+    // (Unfortunately, without baking in the previous type, monotonicity might
+    // be violated because we might not yet have retyped the incrementing
+    // operation even though the increment's type might have already been reflected
+    // in the induction variable phi.)
+    Type* type = NodeProperties::IsTyped(node) ? NodeProperties::GetType(node)
+                                               : Type::None();
+    for (int i = 0; i < arity; ++i) {
       type = Type::Union(type, Operand(node, i), zone());
     }
     return type;
   }
   // If we do not have enough type information for the initial value or
   // the increment, just return the initial value's type.
-  if (!initial_type->IsInhabited() || !increment_type->IsInhabited()) {
+  if (!initial_type->IsInhabited() ||
+      increment_type->Is(typer_->cache_.kSingletonZero)) {
     return initial_type;
   }
 
@@ -1219,16 +1227,24 @@
 
 Type* Typer::Visitor::TypeJSInstanceOf(Node* node) { return Type::Boolean(); }
 
+Type* Typer::Visitor::TypeJSOrdinaryHasInstance(Node* node) {
+  return Type::Boolean();
+}
+
 // JS context operators.
 
 
 Type* Typer::Visitor::TypeJSLoadContext(Node* node) {
   ContextAccess const& access = ContextAccessOf(node->op());
-  if (access.index() == Context::EXTENSION_INDEX) {
-    return Type::TaggedPointer();
+  switch (access.index()) {
+    case Context::PREVIOUS_INDEX:
+    case Context::NATIVE_CONTEXT_INDEX:
+      return Type::OtherInternal();
+    case Context::CLOSURE_INDEX:
+      return Type::Function();
+    default:
+      return Type::Any();
   }
-  // Since contexts are mutable, we just return the top.
-  return Type::Any();
 }
 
 
@@ -1238,42 +1254,26 @@
 }
 
 
-Type* Typer::Visitor::WrapContextTypeForInput(Node* node) {
-  Type* outer = TypeOrNone(NodeProperties::GetContextInput(node));
-  if (outer->Is(Type::None())) {
-    return Type::None();
-  } else {
-    DCHECK(outer->Maybe(Type::OtherInternal()));
-    return Type::Context(outer, zone());
-  }
-}
-
-
 Type* Typer::Visitor::TypeJSCreateFunctionContext(Node* node) {
-  return WrapContextTypeForInput(node);
+  return Type::OtherInternal();
 }
 
-
 Type* Typer::Visitor::TypeJSCreateCatchContext(Node* node) {
-  return WrapContextTypeForInput(node);
+  return Type::OtherInternal();
 }
 
-
 Type* Typer::Visitor::TypeJSCreateWithContext(Node* node) {
-  return WrapContextTypeForInput(node);
+  return Type::OtherInternal();
 }
 
-
 Type* Typer::Visitor::TypeJSCreateBlockContext(Node* node) {
-  return WrapContextTypeForInput(node);
+  return Type::OtherInternal();
 }
 
-
 Type* Typer::Visitor::TypeJSCreateScriptContext(Node* node) {
-  return WrapContextTypeForInput(node);
+  return Type::OtherInternal();
 }
 
-
 // JS other operators.
 
 
@@ -1283,16 +1283,13 @@
 
 
 Type* Typer::Visitor::JSCallFunctionTyper(Type* fun, Typer* t) {
-  if (fun->IsFunction()) {
-    return fun->AsFunction()->Result();
-  }
   if (fun->IsConstant() && fun->AsConstant()->Value()->IsJSFunction()) {
     Handle<JSFunction> function =
         Handle<JSFunction>::cast(fun->AsConstant()->Value());
     if (function->shared()->HasBuiltinFunctionId()) {
       switch (function->shared()->builtin_function_id()) {
         case kMathRandom:
-          return Type::OrderedNumber();
+          return Type::PlainNumber();
         case kMathFloor:
         case kMathCeil:
         case kMathRound:
@@ -1321,7 +1318,7 @@
         case kMathTan:
           return Type::Number();
         case kMathSign:
-          return t->cache_.kMinusOneToOne;
+          return t->cache_.kMinusOneToOneOrMinusZeroOrNaN;
         // Binary math functions.
         case kMathAtan2:
         case kMathPow:
@@ -1332,7 +1329,32 @@
           return Type::Signed32();
         case kMathClz32:
           return t->cache_.kZeroToThirtyTwo;
+        // Date functions.
+        case kDateGetDate:
+          return t->cache_.kJSDateDayType;
+        case kDateGetDay:
+          return t->cache_.kJSDateWeekdayType;
+        case kDateGetFullYear:
+          return t->cache_.kJSDateYearType;
+        case kDateGetHours:
+          return t->cache_.kJSDateHourType;
+        case kDateGetMilliseconds:
+          return Type::Union(Type::Range(0.0, 999.0, t->zone()), Type::NaN(),
+                             t->zone());
+        case kDateGetMinutes:
+          return t->cache_.kJSDateMinuteType;
+        case kDateGetMonth:
+          return t->cache_.kJSDateMonthType;
+        case kDateGetSeconds:
+          return t->cache_.kJSDateSecondType;
+        case kDateGetTime:
+          return t->cache_.kJSDateValueType;
         // Number functions.
+        case kNumberIsFinite:
+        case kNumberIsInteger:
+        case kNumberIsNaN:
+        case kNumberIsSafeInteger:
+          return Type::Boolean();
         case kNumberParseInt:
           return t->cache_.kIntegerOrMinusZeroOrNaN;
         case kNumberToString:
@@ -1348,15 +1370,25 @@
         case kStringToLowerCase:
         case kStringToUpperCase:
           return Type::String();
+
+        case kStringIteratorNext:
+          return Type::OtherObject();
+
         // Array functions.
         case kArrayIndexOf:
         case kArrayLastIndexOf:
           return Type::Range(-1, kMaxSafeInteger, t->zone());
         case kArrayPush:
           return t->cache_.kPositiveSafeInteger;
+
         // Object functions.
         case kObjectHasOwnProperty:
           return Type::Boolean();
+
+        // Function functions.
+        case kFunctionHasInstance:
+          return Type::Boolean();
+
         // Global functions.
         case kGlobalDecodeURI:
         case kGlobalDecodeURIComponent:
@@ -1365,6 +1397,9 @@
         case kGlobalEscape:
         case kGlobalUnescape:
           return Type::String();
+        case kGlobalIsFinite:
+        case kGlobalIsNaN:
+          return Type::Boolean();
         default:
           break;
       }
@@ -1432,21 +1467,13 @@
 
 Type* Typer::Visitor::TypeJSForInPrepare(Node* node) {
   STATIC_ASSERT(Map::EnumLengthBits::kMax <= FixedArray::kMaxLength);
-  Factory* const f = isolate()->factory();
-  Type* const cache_type = Type::Union(
-      typer_->cache_.kSmi, Type::Class(f->meta_map(), zone()), zone());
-  Type* const cache_array = Type::Class(f->fixed_array_map(), zone());
+  Type* const cache_type =
+      Type::Union(typer_->cache_.kSmi, Type::OtherInternal(), zone());
+  Type* const cache_array = Type::OtherInternal();
   Type* const cache_length = typer_->cache_.kFixedArrayLengthType;
   return Type::Tuple(cache_type, cache_array, cache_length, zone());
 }
 
-Type* Typer::Visitor::TypeJSForInDone(Node* node) { return Type::Boolean(); }
-
-Type* Typer::Visitor::TypeJSForInStep(Node* node) {
-  STATIC_ASSERT(Map::EnumLengthBits::kMax <= FixedArray::kMaxLength);
-  return Type::Range(1, FixedArray::kMaxLength + 1, zone());
-}
-
 
 Type* Typer::Visitor::TypeJSLoadMessage(Node* node) { return Type::Any(); }
 
@@ -1541,6 +1568,19 @@
   return Type::String();
 }
 
+Type* Typer::Visitor::StringFromCodePointTyper(Type* type, Typer* t) {
+  type = NumberToUint32(ToNumber(type, t), t);
+  Factory* f = t->isolate()->factory();
+  double min = type->Min();
+  double max = type->Max();
+  if (min == max) {
+    uint32_t code = static_cast<uint32_t>(min) & String::kMaxUtf16CodeUnitU;
+    Handle<String> string = f->LookupSingleCharacterStringFromCode(code);
+    return Type::Constant(string, t->zone());
+  }
+  return Type::String();
+}
+
 Type* Typer::Visitor::TypeStringCharCodeAt(Node* node) {
   // TODO(bmeurer): We could do better here based on inputs.
   return Type::Range(0, kMaxUInt16, zone());
@@ -1550,17 +1590,31 @@
   return TypeUnaryOp(node, StringFromCharCodeTyper);
 }
 
+Type* Typer::Visitor::TypeStringFromCodePoint(Node* node) {
+  return TypeUnaryOp(node, StringFromCodePointTyper);
+}
+
 Type* Typer::Visitor::TypeCheckBounds(Node* node) {
   Type* index = Operand(node, 0);
   Type* length = Operand(node, 1);
   index = Type::Intersect(index, Type::Integral32(), zone());
   if (!index->IsInhabited() || !length->IsInhabited()) return Type::None();
   double min = std::max(index->Min(), 0.0);
-  double max = std::min(index->Max(), length->Min() - 1);
+  double max = std::min(index->Max(), length->Max() - 1);
   if (max < min) return Type::None();
   return Type::Range(min, max, zone());
 }
 
+Type* Typer::Visitor::TypeCheckHeapObject(Node* node) {
+  Type* type = Operand(node, 0);
+  return type;
+}
+
+Type* Typer::Visitor::TypeCheckIf(Node* node) {
+  UNREACHABLE();
+  return nullptr;
+}
+
 Type* Typer::Visitor::TypeCheckMaps(Node* node) {
   UNREACHABLE();
   return nullptr;
@@ -1571,26 +1625,16 @@
   return Type::Intersect(arg, Type::Number(), zone());
 }
 
+Type* Typer::Visitor::TypeCheckSmi(Node* node) {
+  Type* arg = Operand(node, 0);
+  return Type::Intersect(arg, Type::SignedSmall(), zone());
+}
+
 Type* Typer::Visitor::TypeCheckString(Node* node) {
   Type* arg = Operand(node, 0);
   return Type::Intersect(arg, Type::String(), zone());
 }
 
-Type* Typer::Visitor::TypeCheckIf(Node* node) {
-  UNREACHABLE();
-  return nullptr;
-}
-
-Type* Typer::Visitor::TypeCheckTaggedPointer(Node* node) {
-  Type* arg = Operand(node, 0);
-  return Type::Intersect(arg, Type::TaggedPointer(), zone());
-}
-
-Type* Typer::Visitor::TypeCheckTaggedSigned(Node* node) {
-  Type* arg = Operand(node, 0);
-  return Type::Intersect(arg, typer_->cache_.kSmi, zone());
-}
-
 Type* Typer::Visitor::TypeCheckFloat64Hole(Node* node) {
   Type* type = Operand(node, 0);
   return type;
@@ -1612,7 +1656,7 @@
   return type;
 }
 
-Type* Typer::Visitor::TypeAllocate(Node* node) { return Type::TaggedPointer(); }
+Type* Typer::Visitor::TypeAllocate(Node* node) { return Type::Any(); }
 
 Type* Typer::Visitor::TypeLoadField(Node* node) {
   return FieldAccessOf(node->op()).type;
@@ -1697,652 +1741,13 @@
   return TypeUnaryOp(node, ObjectIsUndetectable);
 }
 
-
-// Machine operators.
-
-Type* Typer::Visitor::TypeDebugBreak(Node* node) { return Type::None(); }
-
-Type* Typer::Visitor::TypeComment(Node* node) { return Type::None(); }
-
-Type* Typer::Visitor::TypeRetain(Node* node) {
-  UNREACHABLE();
-  return nullptr;
-}
-
-Type* Typer::Visitor::TypeUnsafePointerAdd(Node* node) { return Type::None(); }
-
-Type* Typer::Visitor::TypeLoad(Node* node) { return Type::Any(); }
-
-Type* Typer::Visitor::TypeStackSlot(Node* node) { return Type::Any(); }
-
-Type* Typer::Visitor::TypeStore(Node* node) {
-  UNREACHABLE();
-  return nullptr;
-}
-
-
-Type* Typer::Visitor::TypeWord32And(Node* node) { return Type::Integral32(); }
-
-
-Type* Typer::Visitor::TypeWord32Or(Node* node) { return Type::Integral32(); }
-
-
-Type* Typer::Visitor::TypeWord32Xor(Node* node) { return Type::Integral32(); }
-
-
-Type* Typer::Visitor::TypeWord32Shl(Node* node) { return Type::Integral32(); }
-
-
-Type* Typer::Visitor::TypeWord32Shr(Node* node) { return Type::Integral32(); }
-
-
-Type* Typer::Visitor::TypeWord32Sar(Node* node) { return Type::Integral32(); }
-
-
-Type* Typer::Visitor::TypeWord32Ror(Node* node) { return Type::Integral32(); }
-
-
-Type* Typer::Visitor::TypeWord32Equal(Node* node) { return Type::Boolean(); }
-
-
-Type* Typer::Visitor::TypeWord32Clz(Node* node) { return Type::Integral32(); }
-
-
-Type* Typer::Visitor::TypeWord32Ctz(Node* node) { return Type::Integral32(); }
-
-
-Type* Typer::Visitor::TypeWord32ReverseBits(Node* node) {
-  return Type::Integral32();
-}
-
-Type* Typer::Visitor::TypeWord32ReverseBytes(Node* node) {
-  return Type::Integral32();
-}
-
-Type* Typer::Visitor::TypeWord32Popcnt(Node* node) {
-  return Type::Integral32();
-}
-
-
-Type* Typer::Visitor::TypeWord64And(Node* node) { return Type::Internal(); }
-
-
-Type* Typer::Visitor::TypeWord64Or(Node* node) { return Type::Internal(); }
-
-
-Type* Typer::Visitor::TypeWord64Xor(Node* node) { return Type::Internal(); }
-
-
-Type* Typer::Visitor::TypeWord64Shl(Node* node) { return Type::Internal(); }
-
-
-Type* Typer::Visitor::TypeWord64Shr(Node* node) { return Type::Internal(); }
-
-
-Type* Typer::Visitor::TypeWord64Sar(Node* node) { return Type::Internal(); }
-
-
-Type* Typer::Visitor::TypeWord64Ror(Node* node) { return Type::Internal(); }
-
-
-Type* Typer::Visitor::TypeWord64Clz(Node* node) { return Type::Internal(); }
-
-
-Type* Typer::Visitor::TypeWord64Ctz(Node* node) { return Type::Internal(); }
-
-
-Type* Typer::Visitor::TypeWord64ReverseBits(Node* node) {
-  return Type::Internal();
-}
-
-Type* Typer::Visitor::TypeWord64ReverseBytes(Node* node) {
-  return Type::Internal();
-}
-
-Type* Typer::Visitor::TypeWord64Popcnt(Node* node) { return Type::Internal(); }
-
-
-Type* Typer::Visitor::TypeWord64Equal(Node* node) { return Type::Boolean(); }
-
-
-Type* Typer::Visitor::TypeInt32Add(Node* node) { return Type::Integral32(); }
-
-
-Type* Typer::Visitor::TypeInt32AddWithOverflow(Node* node) {
-  return Type::Internal();
-}
-
-
-Type* Typer::Visitor::TypeInt32Sub(Node* node) { return Type::Integral32(); }
-
-
-Type* Typer::Visitor::TypeInt32SubWithOverflow(Node* node) {
-  return Type::Internal();
-}
-
-
-Type* Typer::Visitor::TypeInt32Mul(Node* node) { return Type::Integral32(); }
-
-Type* Typer::Visitor::TypeInt32MulWithOverflow(Node* node) {
-  return Type::Internal();
-}
-
-Type* Typer::Visitor::TypeInt32MulHigh(Node* node) { return Type::Signed32(); }
-
-
-Type* Typer::Visitor::TypeInt32Div(Node* node) { return Type::Integral32(); }
-
-
-Type* Typer::Visitor::TypeInt32Mod(Node* node) { return Type::Integral32(); }
-
-
-Type* Typer::Visitor::TypeInt32LessThan(Node* node) { return Type::Boolean(); }
-
-
-Type* Typer::Visitor::TypeInt32LessThanOrEqual(Node* node) {
+Type* Typer::Visitor::TypeArrayBufferWasNeutered(Node* node) {
   return Type::Boolean();
 }
 
-
-Type* Typer::Visitor::TypeUint32Div(Node* node) { return Type::Unsigned32(); }
-
-
-Type* Typer::Visitor::TypeUint32LessThan(Node* node) { return Type::Boolean(); }
-
-
-Type* Typer::Visitor::TypeUint32LessThanOrEqual(Node* node) {
-  return Type::Boolean();
-}
-
-
-Type* Typer::Visitor::TypeUint32Mod(Node* node) { return Type::Unsigned32(); }
-
-
-Type* Typer::Visitor::TypeUint32MulHigh(Node* node) {
-  return Type::Unsigned32();
-}
-
-
-Type* Typer::Visitor::TypeInt64Add(Node* node) { return Type::Internal(); }
-
-
-Type* Typer::Visitor::TypeInt64AddWithOverflow(Node* node) {
-  return Type::Internal();
-}
-
-
-Type* Typer::Visitor::TypeInt64Sub(Node* node) { return Type::Internal(); }
-
-
-Type* Typer::Visitor::TypeInt64SubWithOverflow(Node* node) {
-  return Type::Internal();
-}
-
-
-Type* Typer::Visitor::TypeInt64Mul(Node* node) { return Type::Internal(); }
-
-Type* Typer::Visitor::TypeInt64Div(Node* node) { return Type::Internal(); }
-
-
-Type* Typer::Visitor::TypeInt64Mod(Node* node) { return Type::Internal(); }
-
-
-Type* Typer::Visitor::TypeInt64LessThan(Node* node) { return Type::Boolean(); }
-
-
-Type* Typer::Visitor::TypeInt64LessThanOrEqual(Node* node) {
-  return Type::Boolean();
-}
-
-
-Type* Typer::Visitor::TypeUint64Div(Node* node) { return Type::Internal(); }
-
-
-Type* Typer::Visitor::TypeUint64LessThan(Node* node) { return Type::Boolean(); }
-
-
-Type* Typer::Visitor::TypeUint64LessThanOrEqual(Node* node) {
-  return Type::Boolean();
-}
-
-
-Type* Typer::Visitor::TypeUint64Mod(Node* node) { return Type::Internal(); }
-
-Type* Typer::Visitor::TypeBitcastWordToTagged(Node* node) {
-  return Type::TaggedPointer();
-}
-
-Type* Typer::Visitor::TypeChangeFloat32ToFloat64(Node* node) {
-  return Type::Intersect(Type::Number(), Type::UntaggedFloat64(), zone());
-}
-
-
-Type* Typer::Visitor::TypeChangeFloat64ToInt32(Node* node) {
-  return Type::Intersect(Type::Signed32(), Type::UntaggedIntegral32(), zone());
-}
-
-Type* Typer::Visitor::TypeChangeFloat64ToUint32(Node* node) {
-  return Type::Intersect(Type::Unsigned32(), Type::UntaggedIntegral32(),
-                         zone());
-}
-
-Type* Typer::Visitor::TypeTruncateFloat64ToUint32(Node* node) {
-  return Type::Intersect(Type::Unsigned32(), Type::UntaggedIntegral32(),
-                         zone());
-}
-
-Type* Typer::Visitor::TypeTruncateFloat32ToInt32(Node* node) {
-  return Type::Intersect(Type::Signed32(), Type::UntaggedIntegral32(), zone());
-}
-
-
-Type* Typer::Visitor::TypeTruncateFloat32ToUint32(Node* node) {
-  return Type::Intersect(Type::Unsigned32(), Type::UntaggedIntegral32(),
-                         zone());
-}
-
-
-Type* Typer::Visitor::TypeTryTruncateFloat32ToInt64(Node* node) {
-  return Type::Internal();
-}
-
-
-Type* Typer::Visitor::TypeTryTruncateFloat64ToInt64(Node* node) {
-  return Type::Internal();
-}
-
-
-Type* Typer::Visitor::TypeTryTruncateFloat32ToUint64(Node* node) {
-  return Type::Internal();
-}
-
-
-Type* Typer::Visitor::TypeTryTruncateFloat64ToUint64(Node* node) {
-  return Type::Internal();
-}
-
-
-Type* Typer::Visitor::TypeChangeInt32ToFloat64(Node* node) {
-  return Type::Intersect(Type::Signed32(), Type::UntaggedFloat64(), zone());
-}
-
-Type* Typer::Visitor::TypeFloat64SilenceNaN(Node* node) {
-  return Type::UntaggedFloat64();
-}
-
-Type* Typer::Visitor::TypeChangeInt32ToInt64(Node* node) {
-  return Type::Internal();
-}
-
-Type* Typer::Visitor::TypeChangeUint32ToFloat64(Node* node) {
-  return Type::Intersect(Type::Unsigned32(), Type::UntaggedFloat64(), zone());
-}
-
-Type* Typer::Visitor::TypeChangeUint32ToUint64(Node* node) {
-  return Type::Internal();
-}
-
-Type* Typer::Visitor::TypeImpossibleToWord32(Node* node) {
-  return Type::None();
-}
-
-Type* Typer::Visitor::TypeImpossibleToWord64(Node* node) {
-  return Type::None();
-}
-
-Type* Typer::Visitor::TypeImpossibleToFloat32(Node* node) {
-  return Type::None();
-}
-
-Type* Typer::Visitor::TypeImpossibleToFloat64(Node* node) {
-  return Type::None();
-}
-
-Type* Typer::Visitor::TypeImpossibleToTagged(Node* node) {
-  return Type::None();
-}
-
-Type* Typer::Visitor::TypeImpossibleToBit(Node* node) { return Type::None(); }
-
-Type* Typer::Visitor::TypeTruncateFloat64ToFloat32(Node* node) {
-  return Type::Intersect(Type::Number(), Type::UntaggedFloat32(), zone());
-}
-
-Type* Typer::Visitor::TypeTruncateFloat64ToWord32(Node* node) {
-  return Type::Intersect(Type::Integral32(), Type::UntaggedIntegral32(),
-                         zone());
-}
-
-Type* Typer::Visitor::TypeTruncateInt64ToInt32(Node* node) {
-  return Type::Intersect(Type::Signed32(), Type::UntaggedIntegral32(), zone());
-}
-
-Type* Typer::Visitor::TypeRoundFloat64ToInt32(Node* node) {
-  return Type::Intersect(Type::Signed32(), Type::UntaggedIntegral32(), zone());
-}
-
-Type* Typer::Visitor::TypeRoundInt32ToFloat32(Node* node) {
-  return Type::Intersect(Type::PlainNumber(), Type::UntaggedFloat32(), zone());
-}
-
-
-Type* Typer::Visitor::TypeRoundInt64ToFloat32(Node* node) {
-  return Type::Intersect(Type::PlainNumber(), Type::UntaggedFloat32(), zone());
-}
-
-
-Type* Typer::Visitor::TypeRoundInt64ToFloat64(Node* node) {
-  return Type::Intersect(Type::PlainNumber(), Type::UntaggedFloat64(), zone());
-}
-
-
-Type* Typer::Visitor::TypeRoundUint32ToFloat32(Node* node) {
-  return Type::Intersect(Type::PlainNumber(), Type::UntaggedFloat32(), zone());
-}
-
-
-Type* Typer::Visitor::TypeRoundUint64ToFloat32(Node* node) {
-  return Type::Intersect(Type::PlainNumber(), Type::UntaggedFloat32(), zone());
-}
-
-
-Type* Typer::Visitor::TypeRoundUint64ToFloat64(Node* node) {
-  return Type::Intersect(Type::PlainNumber(), Type::UntaggedFloat64(), zone());
-}
-
-
-Type* Typer::Visitor::TypeBitcastFloat32ToInt32(Node* node) {
-  return Type::Number();
-}
-
-
-Type* Typer::Visitor::TypeBitcastFloat64ToInt64(Node* node) {
-  return Type::Number();
-}
-
-
-Type* Typer::Visitor::TypeBitcastInt32ToFloat32(Node* node) {
-  return Type::Number();
-}
-
-
-Type* Typer::Visitor::TypeBitcastInt64ToFloat64(Node* node) {
-  return Type::Number();
-}
-
-
-Type* Typer::Visitor::TypeFloat32Add(Node* node) { return Type::Number(); }
-
-
-Type* Typer::Visitor::TypeFloat32Sub(Node* node) { return Type::Number(); }
-
-Type* Typer::Visitor::TypeFloat32Neg(Node* node) { return Type::Number(); }
-
-Type* Typer::Visitor::TypeFloat32Mul(Node* node) { return Type::Number(); }
-
-
-Type* Typer::Visitor::TypeFloat32Div(Node* node) { return Type::Number(); }
-
-
-Type* Typer::Visitor::TypeFloat32Abs(Node* node) {
-  // TODO(turbofan): We should be able to infer a better type here.
-  return Type::Number();
-}
-
-
-Type* Typer::Visitor::TypeFloat32Sqrt(Node* node) { return Type::Number(); }
-
-
-Type* Typer::Visitor::TypeFloat32Equal(Node* node) { return Type::Boolean(); }
-
-
-Type* Typer::Visitor::TypeFloat32LessThan(Node* node) {
-  return Type::Boolean();
-}
-
-
-Type* Typer::Visitor::TypeFloat32LessThanOrEqual(Node* node) {
-  return Type::Boolean();
-}
-
-Type* Typer::Visitor::TypeFloat32Max(Node* node) { return Type::Number(); }
-
-Type* Typer::Visitor::TypeFloat32Min(Node* node) { return Type::Number(); }
-
-Type* Typer::Visitor::TypeFloat64Add(Node* node) { return Type::Number(); }
-
-
-Type* Typer::Visitor::TypeFloat64Sub(Node* node) { return Type::Number(); }
-
-Type* Typer::Visitor::TypeFloat64Neg(Node* node) { return Type::Number(); }
-
-Type* Typer::Visitor::TypeFloat64Mul(Node* node) { return Type::Number(); }
-
-
-Type* Typer::Visitor::TypeFloat64Div(Node* node) { return Type::Number(); }
-
-
-Type* Typer::Visitor::TypeFloat64Mod(Node* node) { return Type::Number(); }
-
-
-Type* Typer::Visitor::TypeFloat64Max(Node* node) { return Type::Number(); }
-
-
-Type* Typer::Visitor::TypeFloat64Min(Node* node) { return Type::Number(); }
-
-
-Type* Typer::Visitor::TypeFloat64Abs(Node* node) {
-  // TODO(turbofan): We should be able to infer a better type here.
-  return Type::Number();
-}
-
-Type* Typer::Visitor::TypeFloat64Acos(Node* node) { return Type::Number(); }
-
-Type* Typer::Visitor::TypeFloat64Acosh(Node* node) { return Type::Number(); }
-
-Type* Typer::Visitor::TypeFloat64Asin(Node* node) { return Type::Number(); }
-
-Type* Typer::Visitor::TypeFloat64Asinh(Node* node) { return Type::Number(); }
-
-Type* Typer::Visitor::TypeFloat64Atan(Node* node) { return Type::Number(); }
-
-Type* Typer::Visitor::TypeFloat64Atanh(Node* node) { return Type::Number(); }
-
-Type* Typer::Visitor::TypeFloat64Atan2(Node* node) { return Type::Number(); }
-
-Type* Typer::Visitor::TypeFloat64Cbrt(Node* node) { return Type::Number(); }
-
-Type* Typer::Visitor::TypeFloat64Cos(Node* node) { return Type::Number(); }
-
-Type* Typer::Visitor::TypeFloat64Cosh(Node* node) { return Type::Number(); }
-
-Type* Typer::Visitor::TypeFloat64Exp(Node* node) { return Type::Number(); }
-
-Type* Typer::Visitor::TypeFloat64Expm1(Node* node) { return Type::Number(); }
-
-Type* Typer::Visitor::TypeFloat64Log(Node* node) { return Type::Number(); }
-
-Type* Typer::Visitor::TypeFloat64Log1p(Node* node) { return Type::Number(); }
-
-Type* Typer::Visitor::TypeFloat64Log10(Node* node) { return Type::Number(); }
-
-Type* Typer::Visitor::TypeFloat64Log2(Node* node) { return Type::Number(); }
-
-Type* Typer::Visitor::TypeFloat64Pow(Node* node) { return Type::Number(); }
-
-Type* Typer::Visitor::TypeFloat64Sin(Node* node) { return Type::Number(); }
-
-Type* Typer::Visitor::TypeFloat64Sinh(Node* node) { return Type::Number(); }
-
-Type* Typer::Visitor::TypeFloat64Sqrt(Node* node) { return Type::Number(); }
-
-Type* Typer::Visitor::TypeFloat64Tan(Node* node) { return Type::Number(); }
-
-Type* Typer::Visitor::TypeFloat64Tanh(Node* node) { return Type::Number(); }
-
-Type* Typer::Visitor::TypeFloat64Equal(Node* node) { return Type::Boolean(); }
-
-
-Type* Typer::Visitor::TypeFloat64LessThan(Node* node) {
-  return Type::Boolean();
-}
-
-
-Type* Typer::Visitor::TypeFloat64LessThanOrEqual(Node* node) {
-  return Type::Boolean();
-}
-
-
-Type* Typer::Visitor::TypeFloat32RoundDown(Node* node) {
-  // TODO(sigurds): We could have a tighter bound here.
-  return Type::Number();
-}
-
-
-Type* Typer::Visitor::TypeFloat64RoundDown(Node* node) {
-  // TODO(sigurds): We could have a tighter bound here.
-  return Type::Number();
-}
-
-
-Type* Typer::Visitor::TypeFloat32RoundUp(Node* node) {
-  // TODO(sigurds): We could have a tighter bound here.
-  return Type::Number();
-}
-
-
-Type* Typer::Visitor::TypeFloat64RoundUp(Node* node) {
-  // TODO(sigurds): We could have a tighter bound here.
-  return Type::Number();
-}
-
-
-Type* Typer::Visitor::TypeFloat32RoundTruncate(Node* node) {
-  // TODO(sigurds): We could have a tighter bound here.
-  return Type::Number();
-}
-
-
-Type* Typer::Visitor::TypeFloat64RoundTruncate(Node* node) {
-  // TODO(sigurds): We could have a tighter bound here.
-  return Type::Number();
-}
-
-
-Type* Typer::Visitor::TypeFloat64RoundTiesAway(Node* node) {
-  // TODO(sigurds): We could have a tighter bound here.
-  return Type::Number();
-}
-
-
-Type* Typer::Visitor::TypeFloat32RoundTiesEven(Node* node) {
-  // TODO(sigurds): We could have a tighter bound here.
-  return Type::Number();
-}
-
-
-Type* Typer::Visitor::TypeFloat64RoundTiesEven(Node* node) {
-  // TODO(sigurds): We could have a tighter bound here.
-  return Type::Number();
-}
-
-
-Type* Typer::Visitor::TypeFloat64ExtractLowWord32(Node* node) {
-  return Type::Signed32();
-}
-
-
-Type* Typer::Visitor::TypeFloat64ExtractHighWord32(Node* node) {
-  return Type::Signed32();
-}
-
-
-Type* Typer::Visitor::TypeFloat64InsertLowWord32(Node* node) {
-  return Type::Number();
-}
-
-
-Type* Typer::Visitor::TypeFloat64InsertHighWord32(Node* node) {
-  return Type::Number();
-}
-
-
-Type* Typer::Visitor::TypeLoadStackPointer(Node* node) {
-  return Type::Internal();
-}
-
-
-Type* Typer::Visitor::TypeLoadFramePointer(Node* node) {
-  return Type::Internal();
-}
-
-Type* Typer::Visitor::TypeLoadParentFramePointer(Node* node) {
-  return Type::Internal();
-}
-
-Type* Typer::Visitor::TypeUnalignedLoad(Node* node) { return Type::Any(); }
-
-Type* Typer::Visitor::TypeUnalignedStore(Node* node) {
-  UNREACHABLE();
-  return nullptr;
-}
-
-Type* Typer::Visitor::TypeCheckedLoad(Node* node) { return Type::Any(); }
-
-Type* Typer::Visitor::TypeCheckedStore(Node* node) {
-  UNREACHABLE();
-  return nullptr;
-}
-
-Type* Typer::Visitor::TypeAtomicLoad(Node* node) { return Type::Any(); }
-
-Type* Typer::Visitor::TypeAtomicStore(Node* node) {
-  UNREACHABLE();
-  return nullptr;
-}
-
-Type* Typer::Visitor::TypeInt32PairAdd(Node* node) { return Type::Internal(); }
-
-Type* Typer::Visitor::TypeInt32PairSub(Node* node) { return Type::Internal(); }
-
-Type* Typer::Visitor::TypeInt32PairMul(Node* node) { return Type::Internal(); }
-
-Type* Typer::Visitor::TypeWord32PairShl(Node* node) { return Type::Internal(); }
-
-Type* Typer::Visitor::TypeWord32PairShr(Node* node) { return Type::Internal(); }
-
-Type* Typer::Visitor::TypeWord32PairSar(Node* node) { return Type::Internal(); }
-
-// SIMD type methods.
-
-#define SIMD_RETURN_SIMD(Name) \
-  Type* Typer::Visitor::Type##Name(Node* node) { return Type::Simd(); }
-MACHINE_SIMD_RETURN_SIMD_OP_LIST(SIMD_RETURN_SIMD)
-MACHINE_SIMD_GENERIC_OP_LIST(SIMD_RETURN_SIMD)
-#undef SIMD_RETURN_SIMD
-
-#define SIMD_RETURN_NUM(Name) \
-  Type* Typer::Visitor::Type##Name(Node* node) { return Type::Number(); }
-MACHINE_SIMD_RETURN_NUM_OP_LIST(SIMD_RETURN_NUM)
-#undef SIMD_RETURN_NUM
-
-#define SIMD_RETURN_BOOL(Name) \
-  Type* Typer::Visitor::Type##Name(Node* node) { return Type::Boolean(); }
-MACHINE_SIMD_RETURN_BOOL_OP_LIST(SIMD_RETURN_BOOL)
-#undef SIMD_RETURN_BOOL
-
 // Heap constants.
 
 Type* Typer::Visitor::TypeConstant(Handle<Object> value) {
-  if (value->IsJSTypedArray()) {
-    switch (JSTypedArray::cast(*value)->type()) {
-#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype, size) \
-  case kExternal##Type##Array:                          \
-    return typer_->cache_.k##Type##Array;
-      TYPED_ARRAYS(TYPED_ARRAY_CASE)
-#undef TYPED_ARRAY_CASE
-    }
-  }
   if (Type::IsInteger(*value)) {
     return Type::Range(value->Number(), value->Number(), zone());
   }
diff --git a/src/compiler/typer.h b/src/compiler/typer.h
index d4d5744..875b483 100644
--- a/src/compiler/typer.h
+++ b/src/compiler/typer.h
@@ -7,18 +7,13 @@
 
 #include "src/compiler/graph.h"
 #include "src/compiler/operation-typer.h"
-#include "src/types.h"
 
 namespace v8 {
 namespace internal {
-
-// Forward declarations.
-class TypeCache;
-
 namespace compiler {
 
+// Forward declarations.
 class LoopVariableOptimizer;
-class OperationTyper;
 
 class Typer {
  public:
diff --git a/src/types.cc b/src/compiler/types.cc
similarity index 65%
copy from src/types.cc
copy to src/compiler/types.cc
index c978dac..43d2f80 100644
--- a/src/types.cc
+++ b/src/compiler/types.cc
@@ -4,14 +4,14 @@
 
 #include <iomanip>
 
-#include "src/types.h"
+#include "src/compiler/types.h"
 
 #include "src/handles-inl.h"
 #include "src/ostreams.h"
 
 namespace v8 {
 namespace internal {
-
+namespace compiler {
 
 // NOTE: If code is marked as being a "shortcut", this means that removing
 // the code won't affect the semantics of the surrounding function definition.
@@ -58,23 +58,21 @@
 
 bool Type::Contains(RangeType* lhs, ConstantType* rhs) {
   DisallowHeapAllocation no_allocation;
-  return IsInteger(*rhs->Value()) &&
-         lhs->Min() <= rhs->Value()->Number() &&
+  return IsInteger(*rhs->Value()) && lhs->Min() <= rhs->Value()->Number() &&
          rhs->Value()->Number() <= lhs->Max();
 }
 
 bool Type::Contains(RangeType* range, i::Object* val) {
   DisallowHeapAllocation no_allocation;
-  return IsInteger(val) &&
-         range->Min() <= val->Number() && val->Number() <= range->Max();
+  return IsInteger(val) && range->Min() <= val->Number() &&
+         val->Number() <= range->Max();
 }
 
-
 // -----------------------------------------------------------------------------
 // Min and Max computation.
 
 double Type::Min() {
-  DCHECK(this->SemanticIs(Number()));
+  DCHECK(this->Is(Number()));
   if (this->IsBitset()) return BitsetType::Min(this->AsBitset());
   if (this->IsUnion()) {
     double min = +V8_INFINITY;
@@ -90,7 +88,7 @@
 }
 
 double Type::Max() {
-  DCHECK(this->SemanticIs(Number()));
+  DCHECK(this->Is(Number()));
   if (this->IsBitset()) return BitsetType::Max(this->AsBitset());
   if (this->IsUnion()) {
     double max = -V8_INFINITY;
@@ -105,11 +103,9 @@
   return 0;
 }
 
-
 // -----------------------------------------------------------------------------
 // Glb and lub computation.
 
-
 // The largest bitset subsumed by this type.
 Type::bitset BitsetType::Glb(Type* type) {
   DisallowHeapAllocation no_allocation;
@@ -119,17 +115,16 @@
   } else if (type->IsUnion()) {
     SLOW_DCHECK(type->AsUnion()->Wellformed());
     return type->AsUnion()->Get(0)->BitsetGlb() |
-           SEMANTIC(type->AsUnion()->Get(1)->BitsetGlb());  // Shortcut.
+           type->AsUnion()->Get(1)->BitsetGlb();  // Shortcut.
   } else if (type->IsRange()) {
-    bitset glb = SEMANTIC(
-        BitsetType::Glb(type->AsRange()->Min(), type->AsRange()->Max()));
-    return glb | REPRESENTATION(type->BitsetLub());
+    bitset glb =
+        BitsetType::Glb(type->AsRange()->Min(), type->AsRange()->Max());
+    return glb;
   } else {
-    return type->Representation();
+    return kNone;
   }
 }
 
-
 // The smallest bitset subsuming this type, possibly not a proper one.
 Type::bitset BitsetType::Lub(Type* type) {
   DisallowHeapAllocation no_allocation;
@@ -140,16 +135,12 @@
     int bitset = type->AsUnion()->Get(0)->BitsetLub();
     for (int i = 0, n = type->AsUnion()->Length(); i < n; ++i) {
       // Other elements only contribute their semantic part.
-      bitset |= SEMANTIC(type->AsUnion()->Get(i)->BitsetLub());
+      bitset |= type->AsUnion()->Get(i)->BitsetLub();
     }
     return bitset;
   }
-  if (type->IsClass()) return type->AsClass()->Lub();
   if (type->IsConstant()) return type->AsConstant()->Lub();
   if (type->IsRange()) return type->AsRange()->Lub();
-  if (type->IsContext()) return kOtherInternal & kTaggedPointer;
-  if (type->IsArray()) return kOtherObject;
-  if (type->IsFunction()) return kFunction;
   if (type->IsTuple()) return kOtherInternal;
   UNREACHABLE();
   return kNone;
@@ -194,10 +185,10 @@
              map == heap->arguments_marker_map() ||
              map == heap->optimized_out_map() ||
              map == heap->stale_register_map());
-      return kOtherInternal & kTaggedPointer;
+      return kOtherInternal;
     }
     case HEAP_NUMBER_TYPE:
-      return kNumber & kTaggedPointer;
+      return kNumber;
     case SIMD128_VALUE_TYPE:
       return kSimd;
     case JS_OBJECT_TYPE:
@@ -214,7 +205,6 @@
     case JS_DATE_TYPE:
     case JS_CONTEXT_EXTENSION_OBJECT_TYPE:
     case JS_GENERATOR_OBJECT_TYPE:
-    case JS_MODULE_TYPE:
     case JS_ARRAY_BUFFER_TYPE:
     case JS_ARRAY_TYPE:
     case JS_REGEXP_TYPE:  // TODO(rossberg): there should be a RegExp type.
@@ -224,6 +214,7 @@
     case JS_MAP_TYPE:
     case JS_SET_ITERATOR_TYPE:
     case JS_MAP_ITERATOR_TYPE:
+    case JS_STRING_ITERATOR_TYPE:
     case JS_WEAK_MAP_TYPE:
     case JS_WEAK_SET_TYPE:
     case JS_PROMISE_TYPE:
@@ -250,10 +241,11 @@
     case SCRIPT_TYPE:
     case CODE_TYPE:
     case PROPERTY_CELL_TYPE:
-      return kOtherInternal & kTaggedPointer;
+    case MODULE_TYPE:
+      return kOtherInternal;
 
     // Remaining instance types are unsupported for now. If any of them do
-    // require bit set types, they should get kOtherInternal & kTaggedPointer.
+    // require bit set types, they should get kOtherInternal.
     case MUTABLE_HEAP_NUMBER_TYPE:
     case FREE_SPACE_TYPE:
 #define FIXED_TYPED_ARRAY_CASE(Type, type, TYPE, ctype, size) \
@@ -273,12 +265,13 @@
     case TYPE_FEEDBACK_INFO_TYPE:
     case ALIASED_ARGUMENTS_ENTRY_TYPE:
     case BOX_TYPE:
+    case PROMISE_CONTAINER_TYPE:
     case DEBUG_INFO_TYPE:
     case BREAK_POINT_INFO_TYPE:
     case CELL_TYPE:
     case WEAK_CELL_TYPE:
     case PROTOTYPE_INFO_TYPE:
-    case SLOPPY_BLOCK_WITH_EVAL_CONTEXT_EXTENSION_TYPE:
+    case CONTEXT_EXTENSION_TYPE:
       UNREACHABLE();
       return kNone;
   }
@@ -289,8 +282,7 @@
 Type::bitset BitsetType::Lub(i::Object* value) {
   DisallowHeapAllocation no_allocation;
   if (value->IsNumber()) {
-    return Lub(value->Number()) &
-        (value->IsSmi() ? kTaggedSigned : kTaggedPointer);
+    return Lub(value->Number());
   }
   return Lub(i::HeapObject::cast(value)->map());
 }
@@ -303,7 +295,6 @@
   return kOtherNumber;
 }
 
-
 // Minimum values of plain numeric bitsets.
 const BitsetType::Boundary BitsetType::BoundariesArray[] = {
     {kOtherNumber, kPlainNumber, -V8_INFINITY},
@@ -324,12 +315,11 @@
 
 Type::bitset BitsetType::ExpandInternals(Type::bitset bits) {
   DisallowHeapAllocation no_allocation;
-  if (!(bits & SEMANTIC(kPlainNumber))) return bits;  // Shortcut.
+  if (!(bits & kPlainNumber)) return bits;  // Shortcut.
   const Boundary* boundaries = Boundaries();
   for (size_t i = 0; i < BoundariesSize(); ++i) {
     DCHECK(BitsetType::Is(boundaries[i].internal, boundaries[i].external));
-    if (bits & SEMANTIC(boundaries[i].internal))
-      bits |= SEMANTIC(boundaries[i].external);
+    if (bits & boundaries[i].internal) bits |= boundaries[i].external;
   }
   return bits;
 }
@@ -341,16 +331,14 @@
 
   for (size_t i = 1; i < BoundariesSize(); ++i) {
     if (min < mins[i].min) {
-      lub |= mins[i-1].internal;
+      lub |= mins[i - 1].internal;
       if (max < mins[i].min) return lub;
     }
   }
   return lub | mins[BoundariesSize() - 1].internal;
 }
 
-Type::bitset BitsetType::NumberBits(bitset bits) {
-  return SEMANTIC(bits & kPlainNumber);
-}
+Type::bitset BitsetType::NumberBits(bitset bits) { return bits & kPlainNumber; }
 
 Type::bitset BitsetType::Glb(double min, double max) {
   DisallowHeapAllocation no_allocation;
@@ -368,16 +356,16 @@
   }
   // OtherNumber also contains float numbers, so it can never be
   // in the greatest lower bound.
-  return glb & ~(SEMANTIC(kOtherNumber));
+  return glb & ~(kOtherNumber);
 }
 
 double BitsetType::Min(bitset bits) {
   DisallowHeapAllocation no_allocation;
-  DCHECK(Is(SEMANTIC(bits), kNumber));
+  DCHECK(Is(bits, kNumber));
   const Boundary* mins = Boundaries();
-  bool mz = SEMANTIC(bits & kMinusZero);
+  bool mz = bits & kMinusZero;
   for (size_t i = 0; i < BoundariesSize(); ++i) {
-    if (Is(SEMANTIC(mins[i].internal), bits)) {
+    if (Is(mins[i].internal, bits)) {
       return mz ? std::min(0.0, mins[i].min) : mins[i].min;
     }
   }
@@ -387,57 +375,29 @@
 
 double BitsetType::Max(bitset bits) {
   DisallowHeapAllocation no_allocation;
-  DCHECK(Is(SEMANTIC(bits), kNumber));
+  DCHECK(Is(bits, kNumber));
   const Boundary* mins = Boundaries();
-  bool mz = SEMANTIC(bits & kMinusZero);
-  if (BitsetType::Is(SEMANTIC(mins[BoundariesSize() - 1].internal), bits)) {
+  bool mz = bits & kMinusZero;
+  if (BitsetType::Is(mins[BoundariesSize() - 1].internal, bits)) {
     return +V8_INFINITY;
   }
   for (size_t i = BoundariesSize() - 1; i-- > 0;) {
-    if (Is(SEMANTIC(mins[i].internal), bits)) {
-      return mz ?
-          std::max(0.0, mins[i+1].min - 1) : mins[i+1].min - 1;
+    if (Is(mins[i].internal, bits)) {
+      return mz ? std::max(0.0, mins[i + 1].min - 1) : mins[i + 1].min - 1;
     }
   }
   if (mz) return 0;
   return std::numeric_limits<double>::quiet_NaN();
 }
 
-
 // -----------------------------------------------------------------------------
 // Predicates.
 
 bool Type::SimplyEquals(Type* that) {
   DisallowHeapAllocation no_allocation;
-  if (this->IsClass()) {
-    return that->IsClass()
-        && *this->AsClass()->Map() == *that->AsClass()->Map();
-  }
   if (this->IsConstant()) {
-    return that->IsConstant()
-        && *this->AsConstant()->Value() == *that->AsConstant()->Value();
-  }
-  if (this->IsContext()) {
-    return that->IsContext()
-        && this->AsContext()->Outer()->Equals(that->AsContext()->Outer());
-  }
-  if (this->IsArray()) {
-    return that->IsArray()
-        && this->AsArray()->Element()->Equals(that->AsArray()->Element());
-  }
-  if (this->IsFunction()) {
-    if (!that->IsFunction()) return false;
-    FunctionType* this_fun = this->AsFunction();
-    FunctionType* that_fun = that->AsFunction();
-    if (this_fun->Arity() != that_fun->Arity() ||
-        !this_fun->Result()->Equals(that_fun->Result()) ||
-        !this_fun->Receiver()->Equals(that_fun->Receiver())) {
-      return false;
-    }
-    for (int i = 0, n = this_fun->Arity(); i < n; ++i) {
-      if (!this_fun->Parameter(i)->Equals(that_fun->Parameter(i))) return false;
-    }
-    return true;
+    return that->IsConstant() &&
+           *this->AsConstant()->Value() == *that->AsConstant()->Value();
   }
   if (this->IsTuple()) {
     if (!that->IsTuple()) return false;
@@ -455,11 +415,6 @@
   return false;
 }
 
-Type::bitset Type::Representation() {
-  return REPRESENTATION(this->BitsetLub());
-}
-
-
 // Check if [this] <= [that].
 bool Type::SlowIs(Type* that) {
   DisallowHeapAllocation no_allocation;
@@ -473,34 +428,10 @@
     return BitsetType::Is(this->AsBitset(), that->BitsetGlb());
   }
 
-  // Check the representations.
-  if (!BitsetType::Is(Representation(), that->Representation())) {
-    return false;
-  }
-
-  // Check the semantic part.
-  return SemanticIs(that);
-}
-
-
-// Check if SEMANTIC([this]) <= SEMANTIC([that]). The result of the method
-// should be independent of the representation axis of the types.
-bool Type::SemanticIs(Type* that) {
-  DisallowHeapAllocation no_allocation;
-
-  if (this == that) return true;
-
-  if (that->IsBitset()) {
-    return BitsetType::Is(SEMANTIC(this->BitsetLub()), that->AsBitset());
-  }
-  if (this->IsBitset()) {
-    return BitsetType::Is(SEMANTIC(this->AsBitset()), that->BitsetGlb());
-  }
-
   // (T1 \/ ... \/ Tn) <= T  if  (T1 <= T) /\ ... /\ (Tn <= T)
   if (this->IsUnion()) {
     for (int i = 0, n = this->AsUnion()->Length(); i < n; ++i) {
-      if (!this->AsUnion()->Get(i)->SemanticIs(that)) return false;
+      if (!this->AsUnion()->Get(i)->Is(that)) return false;
     }
     return true;
   }
@@ -508,7 +439,7 @@
   // T <= (T1 \/ ... \/ Tn)  if  (T <= T1) \/ ... \/ (T <= Tn)
   if (that->IsUnion()) {
     for (int i = 0, n = that->AsUnion()->Length(); i < n; ++i) {
-      if (this->SemanticIs(that->AsUnion()->Get(i))) return true;
+      if (this->Is(that->AsUnion()->Get(i))) return true;
       if (i > 1 && this->IsRange()) return false;  // Shortcut.
     }
     return false;
@@ -524,72 +455,17 @@
   return this->SimplyEquals(that);
 }
 
-// Most precise _current_ type of a value (usually its class).
-Type* Type::NowOf(i::Object* value, Zone* zone) {
-  if (value->IsSmi() ||
-      i::HeapObject::cast(value)->map()->instance_type() == HEAP_NUMBER_TYPE) {
-    return Of(value, zone);
-  }
-  return Class(i::handle(i::HeapObject::cast(value)->map()), zone);
-}
-
-bool Type::NowContains(i::Object* value) {
-  DisallowHeapAllocation no_allocation;
-  if (this->IsAny()) return true;
-  if (value->IsHeapObject()) {
-    i::Map* map = i::HeapObject::cast(value)->map();
-    for (Iterator<i::Map> it = this->Classes(); !it.Done(); it.Advance()) {
-      if (*it.Current() == map) return true;
-    }
-  }
-  return this->Contains(value);
-}
-
-bool Type::NowIs(Type* that) {
-  DisallowHeapAllocation no_allocation;
-
-  // TODO(rossberg): this is incorrect for
-  //   Union(Constant(V), T)->NowIs(Class(M))
-  // but fuzzing does not cover that!
-  if (this->IsConstant()) {
-    i::Object* object = *this->AsConstant()->Value();
-    if (object->IsHeapObject()) {
-      i::Map* map = i::HeapObject::cast(object)->map();
-      for (Iterator<i::Map> it = that->Classes(); !it.Done(); it.Advance()) {
-        if (*it.Current() == map) return true;
-      }
-    }
-  }
-  return this->Is(that);
-}
-
-
-// Check if [this] contains only (currently) stable classes.
-bool Type::NowStable() {
-  DisallowHeapAllocation no_allocation;
-  return !this->IsClass() || this->AsClass()->Map()->is_stable();
-}
-
-
 // Check if [this] and [that] overlap.
 bool Type::Maybe(Type* that) {
   DisallowHeapAllocation no_allocation;
 
-  // Take care of the representation part (and also approximate
-  // the semantic part).
   if (!BitsetType::IsInhabited(this->BitsetLub() & that->BitsetLub()))
     return false;
 
-  return SemanticMaybe(that);
-}
-
-bool Type::SemanticMaybe(Type* that) {
-  DisallowHeapAllocation no_allocation;
-
   // (T1 \/ ... \/ Tn) overlaps T  if  (T1 overlaps T) \/ ... \/ (Tn overlaps T)
   if (this->IsUnion()) {
     for (int i = 0, n = this->AsUnion()->Length(); i < n; ++i) {
-      if (this->AsUnion()->Get(i)->SemanticMaybe(that)) return true;
+      if (this->AsUnion()->Get(i)->Maybe(that)) return true;
     }
     return false;
   }
@@ -597,18 +473,13 @@
   // T overlaps (T1 \/ ... \/ Tn)  if  (T overlaps T1) \/ ... \/ (T overlaps Tn)
   if (that->IsUnion()) {
     for (int i = 0, n = that->AsUnion()->Length(); i < n; ++i) {
-      if (this->SemanticMaybe(that->AsUnion()->Get(i))) return true;
+      if (this->Maybe(that->AsUnion()->Get(i))) return true;
     }
     return false;
   }
 
-  if (!BitsetType::SemanticIsInhabited(this->BitsetLub() & that->BitsetLub()))
-    return false;
-
   if (this->IsBitset() && that->IsBitset()) return true;
 
-  if (this->IsClass() != that->IsClass()) return true;
-
   if (this->IsRange()) {
     if (that->IsConstant()) {
       return Contains(this->AsRange(), that->AsConstant());
@@ -627,7 +498,7 @@
     }
   }
   if (that->IsRange()) {
-    return that->SemanticMaybe(this);  // This case is handled above.
+    return that->Maybe(this);  // This case is handled above.
   }
 
   if (this->IsBitset() || that->IsBitset()) return true;
@@ -635,7 +506,6 @@
   return this->SimplyEquals(that);
 }
 
-
 // Return the range in [this], or [NULL].
 Type* Type::GetRange() {
   DisallowHeapAllocation no_allocation;
@@ -646,18 +516,6 @@
   return NULL;
 }
 
-bool Type::Contains(i::Object* value) {
-  DisallowHeapAllocation no_allocation;
-  for (Iterator<i::Object> it = this->Constants(); !it.Done(); it.Advance()) {
-    if (*it.Current() == value) return true;
-  }
-  if (IsInteger(value)) {
-    Type* range = this->GetRange();
-    if (range != NULL && Contains(range->AsRange(), value)) return true;
-  }
-  return BitsetType::New(BitsetType::Lub(value))->Is(this);
-}
-
 bool UnionType::Wellformed() {
   DisallowHeapAllocation no_allocation;
   // This checks the invariants of the union representation:
@@ -668,7 +526,7 @@
   // 5. No element (except the bitset) is a subtype of any other.
   // 6. If there is a range, then the bitset type does not contain
   //    plain number bits.
-  DCHECK(this->Length() >= 2);  // (1)
+  DCHECK(this->Length() >= 2);       // (1)
   DCHECK(this->Get(0)->IsBitset());  // (2a)
 
   for (int i = 0; i < this->Length(); ++i) {
@@ -676,8 +534,7 @@
     if (i != 1) DCHECK(!this->Get(i)->IsRange());   // (3)
     DCHECK(!this->Get(i)->IsUnion());               // (4)
     for (int j = 0; j < this->Length(); ++j) {
-      if (i != j && i != 0)
-        DCHECK(!this->Get(i)->SemanticIs(this->Get(j)));  // (5)
+      if (i != j && i != 0) DCHECK(!this->Get(i)->Is(this->Get(j)));  // (5)
     }
   }
   DCHECK(!this->Get(1)->IsRange() ||
@@ -686,15 +543,12 @@
   return true;
 }
 
-
 // -----------------------------------------------------------------------------
 // Union and intersection
 
-
 static bool AddIsSafe(int x, int y) {
-  return x >= 0 ?
-      y <= std::numeric_limits<int>::max() - x :
-      y >= std::numeric_limits<int>::min() - x;
+  return x >= 0 ? y <= std::numeric_limits<int>::max() - x
+                : y >= std::numeric_limits<int>::min() - x;
 }
 
 Type* Type::Intersect(Type* type1, Type* type2, Zone* zone) {
@@ -713,25 +567,15 @@
 
   // Slow case: create union.
 
-  // Figure out the representation of the result first.
-  // The rest of the method should not change this representation and
-  // it should not make any decisions based on representations (i.e.,
-  // it should only use the semantic part of types).
-  const bitset representation =
-      type1->Representation() & type2->Representation();
-
   // Semantic subtyping check - this is needed for consistency with the
-  // semi-fast case above - we should behave the same way regardless of
-  // representations. Intersection with a universal bitset should only update
-  // the representations.
-  if (type1->SemanticIs(type2)) {
+  // semi-fast case above.
+  if (type1->Is(type2)) {
     type2 = Any();
-  } else if (type2->SemanticIs(type1)) {
+  } else if (type2->Is(type1)) {
     type1 = Any();
   }
 
-  bitset bits =
-      SEMANTIC(type1->BitsetGlb() & type2->BitsetGlb()) | representation;
+  bitset bits = type1->BitsetGlb() & type2->BitsetGlb();
   int size1 = type1->IsUnion() ? type1->AsUnion()->Length() : 1;
   int size2 = type2->IsUnion() ? type2->AsUnion()->Length() : 1;
   if (!AddIsSafe(size1, size2)) return Any();
@@ -751,8 +595,7 @@
   // If the range is not empty, then insert it into the union and
   // remove the number bits from the bitset.
   if (!lims.IsEmpty()) {
-    size = UpdateRange(RangeType::New(lims, representation, zone), result, size,
-                       zone);
+    size = UpdateRange(RangeType::New(lims, zone), result, size, zone);
 
     // Remove the number bits.
     bitset number_bits = BitsetType::NumberBits(bits);
@@ -772,8 +615,8 @@
   }
 
   // Remove any components that just got subsumed.
-  for (int i = 2; i < size; ) {
-    if (result->Get(i)->SemanticIs(range)) {
+  for (int i = 2; i < size;) {
+    if (result->Get(i)->Is(range)) {
       result->Set(i, result->Get(--size));
     } else {
       ++i;
@@ -817,7 +660,7 @@
     return size;
   }
 
-  if (!BitsetType::SemanticIsInhabited(lhs->BitsetLub() & rhs->BitsetLub())) {
+  if (!BitsetType::IsInhabited(lhs->BitsetLub() & rhs->BitsetLub())) {
     return size;
   }
 
@@ -830,10 +673,6 @@
       }
       return size;
     }
-    if (rhs->IsClass()) {
-      *lims =
-          RangeType::Limits::Union(RangeType::Limits(lhs->AsRange()), *lims);
-    }
     if (rhs->IsConstant() && Contains(lhs->AsRange(), rhs->AsConstant())) {
       return AddToUnion(rhs, result, size, zone);
     }
@@ -853,16 +692,12 @@
   if (lhs->IsBitset() || rhs->IsBitset()) {
     return AddToUnion(lhs->IsBitset() ? rhs : lhs, result, size, zone);
   }
-  if (lhs->IsClass() != rhs->IsClass()) {
-    return AddToUnion(lhs->IsClass() ? rhs : lhs, result, size, zone);
-  }
   if (lhs->SimplyEquals(rhs)) {
     return AddToUnion(lhs, result, size, zone);
   }
   return size;
 }
 
-
 // Make sure that we produce a well-formed range and bitset:
 // If the range is non-empty, the number bits in the bitset should be
 // clear. Moreover, if we have a canonical range (such as Signed32),
@@ -877,7 +712,7 @@
 
   // If the range is semantically contained within the bitset, return None and
   // leave the bitset untouched.
-  bitset range_lub = SEMANTIC(range->BitsetLub());
+  bitset range_lub = range->BitsetLub();
   if (BitsetType::Is(range_lub, *bits)) {
     return None();
   }
@@ -905,7 +740,7 @@
   if (bitset_max > range_max) {
     range_max = bitset_max;
   }
-  return RangeType::New(range_min, range_max, BitsetType::kNone, zone);
+  return RangeType::New(range_min, range_max, zone);
 }
 
 Type* Type::Union(Type* type1, Type* type2, Zone* zone) {
@@ -922,13 +757,6 @@
   if (type1->Is(type2)) return type2;
   if (type2->Is(type1)) return type1;
 
-  // Figure out the representation of the result.
-  // The rest of the method should not change this representation and
-  // it should not make any decisions based on representations (i.e.,
-  // it should only use the semantic part of types).
-  const bitset representation =
-      type1->Representation() | type2->Representation();
-
   // Slow case: create union.
   int size1 = type1->IsUnion() ? type1->AsUnion()->Length() : 1;
   int size2 = type2->IsUnion() ? type2->AsUnion()->Length() : 1;
@@ -941,7 +769,7 @@
   size = 0;
 
   // Compute the new bitset.
-  bitset new_bitset = SEMANTIC(type1->BitsetGlb() | type2->BitsetGlb());
+  bitset new_bitset = type1->BitsetGlb() | type2->BitsetGlb();
 
   // Deal with ranges.
   Type* range = None();
@@ -951,14 +779,13 @@
     RangeType::Limits lims =
         RangeType::Limits::Union(RangeType::Limits(range1->AsRange()),
                                  RangeType::Limits(range2->AsRange()));
-    Type* union_range = RangeType::New(lims, representation, zone);
+    Type* union_range = RangeType::New(lims, zone);
     range = NormalizeRangeAndBitset(union_range, &new_bitset, zone);
   } else if (range1 != NULL) {
     range = NormalizeRangeAndBitset(range1, &new_bitset, zone);
   } else if (range2 != NULL) {
     range = NormalizeRangeAndBitset(range2, &new_bitset, zone);
   }
-  new_bitset = SEMANTIC(new_bitset) | representation;
   Type* bits = BitsetType::New(new_bitset);
   result->Set(size++, bits);
   if (!range->IsNone()) result->Set(size++, range);
@@ -968,7 +795,6 @@
   return NormalizeUnion(result_type, size, zone);
 }
 
-
 // Add [type] to [result] unless [type] is bitset, range, or already subsumed.
 // Return new size of [result].
 int Type::AddToUnion(Type* type, UnionType* result, int size, Zone* zone) {
@@ -980,7 +806,7 @@
     return size;
   }
   for (int i = 0; i < size; ++i) {
-    if (type->SemanticIs(result->Get(i))) return size;
+    if (type->Is(result->Get(i))) return size;
   }
   result->Set(size++, type);
   return size;
@@ -996,15 +822,10 @@
   }
   bitset bits = unioned->Get(0)->AsBitset();
   // If the union only consists of a range, we can get rid of the union.
-  if (size == 2 && SEMANTIC(bits) == BitsetType::kNone) {
-    bitset representation = REPRESENTATION(bits);
-    if (representation == unioned->Get(1)->Representation()) {
-      return unioned->Get(1);
-    }
+  if (size == 2 && bits == BitsetType::kNone) {
     if (unioned->Get(1)->IsRange()) {
       return RangeType::New(unioned->Get(1)->AsRange()->Min(),
-                            unioned->Get(1)->AsRange()->Max(),
-                            unioned->Get(0)->AsBitset(), zone);
+                            unioned->Get(1)->AsRange()->Max(), zone);
     }
   }
   unioned->Shrink(size);
@@ -1012,40 +833,9 @@
   return union_type;
 }
 
-
-// -----------------------------------------------------------------------------
-// Component extraction
-
-// static
-Type* Type::Representation(Type* t, Zone* zone) {
-  return BitsetType::New(t->Representation());
-}
-
-
-// static
-Type* Type::Semantic(Type* t, Zone* zone) {
-  return Intersect(t, BitsetType::New(BitsetType::kSemantic), zone);
-}
-
-
 // -----------------------------------------------------------------------------
 // Iteration.
 
-int Type::NumClasses() {
-  DisallowHeapAllocation no_allocation;
-  if (this->IsClass()) {
-    return 1;
-  } else if (this->IsUnion()) {
-    int result = 0;
-    for (int i = 0, n = this->AsUnion()->Length(); i < n; ++i) {
-      if (this->AsUnion()->Get(i)->IsClass()) ++result;
-    }
-    return result;
-  } else {
-    return 0;
-  }
-}
-
 int Type::NumConstants() {
   DisallowHeapAllocation no_allocation;
   if (this->IsConstant()) {
@@ -1061,78 +851,17 @@
   }
 }
 
-template <class T>
-Type* Type::Iterator<T>::get_type() {
-  DCHECK(!Done());
-  return type_->IsUnion() ? type_->AsUnion()->Get(index_) : type_;
-}
-
-
-// C++ cannot specialise nested templates, so we have to go through this
-// contortion with an auxiliary template to simulate it.
-template <class T>
-struct TypeImplIteratorAux {
-  static bool matches(Type* type);
-  static i::Handle<T> current(Type* type);
-};
-
-template <>
-struct TypeImplIteratorAux<i::Map> {
-  static bool matches(Type* type) { return type->IsClass(); }
-  static i::Handle<i::Map> current(Type* type) {
-    return type->AsClass()->Map();
-  }
-};
-
-template <>
-struct TypeImplIteratorAux<i::Object> {
-  static bool matches(Type* type) { return type->IsConstant(); }
-  static i::Handle<i::Object> current(Type* type) {
-    return type->AsConstant()->Value();
-  }
-};
-
-template <class T>
-bool Type::Iterator<T>::matches(Type* type) {
-  return TypeImplIteratorAux<T>::matches(type);
-}
-
-template <class T>
-i::Handle<T> Type::Iterator<T>::Current() {
-  return TypeImplIteratorAux<T>::current(get_type());
-}
-
-template <class T>
-void Type::Iterator<T>::Advance() {
-  DisallowHeapAllocation no_allocation;
-  ++index_;
-  if (type_->IsUnion()) {
-    for (int n = type_->AsUnion()->Length(); index_ < n; ++index_) {
-      if (matches(type_->AsUnion()->Get(index_))) return;
-    }
-  } else if (index_ == 0 && matches(type_)) {
-    return;
-  }
-  index_ = -1;
-}
-
-
 // -----------------------------------------------------------------------------
 // Printing.
 
 const char* BitsetType::Name(bitset bits) {
   switch (bits) {
-    case REPRESENTATION(kAny): return "Any";
-    #define RETURN_NAMED_REPRESENTATION_TYPE(type, value) \
-    case REPRESENTATION(k##type): return #type;
-    REPRESENTATION_BITSET_TYPE_LIST(RETURN_NAMED_REPRESENTATION_TYPE)
-    #undef RETURN_NAMED_REPRESENTATION_TYPE
-
-    #define RETURN_NAMED_SEMANTIC_TYPE(type, value) \
-    case SEMANTIC(k##type): return #type;
-    SEMANTIC_BITSET_TYPE_LIST(RETURN_NAMED_SEMANTIC_TYPE)
-    INTERNAL_BITSET_TYPE_LIST(RETURN_NAMED_SEMANTIC_TYPE)
-    #undef RETURN_NAMED_SEMANTIC_TYPE
+#define RETURN_NAMED_TYPE(type, value) \
+  case k##type:                        \
+    return #type;
+    PROPER_BITSET_TYPE_LIST(RETURN_NAMED_TYPE)
+    INTERNAL_BITSET_TYPE_LIST(RETURN_NAMED_TYPE)
+#undef RETURN_NAMED_TYPE
 
     default:
       return NULL;
@@ -1150,13 +879,9 @@
 
   // clang-format off
   static const bitset named_bitsets[] = {
-#define BITSET_CONSTANT(type, value) REPRESENTATION(k##type),
-    REPRESENTATION_BITSET_TYPE_LIST(BITSET_CONSTANT)
-#undef BITSET_CONSTANT
-
-#define BITSET_CONSTANT(type, value) SEMANTIC(k##type),
+#define BITSET_CONSTANT(type, value) k##type,
     INTERNAL_BITSET_TYPE_LIST(BITSET_CONSTANT)
-    SEMANTIC_BITSET_TYPE_LIST(BITSET_CONSTANT)
+    PROPER_BITSET_TYPE_LIST(BITSET_CONSTANT)
 #undef BITSET_CONSTANT
   };
   // clang-format on
@@ -1176,71 +901,40 @@
   os << ")";
 }
 
-void Type::PrintTo(std::ostream& os, PrintDimension dim) {
+void Type::PrintTo(std::ostream& os) {
   DisallowHeapAllocation no_allocation;
-  if (dim != REPRESENTATION_DIM) {
-    if (this->IsBitset()) {
-      BitsetType::Print(os, SEMANTIC(this->AsBitset()));
-    } else if (this->IsClass()) {
-      os << "Class(" << static_cast<void*>(*this->AsClass()->Map()) << " < ";
-      BitsetType::New(BitsetType::Lub(this))->PrintTo(os, dim);
-      os << ")";
-    } else if (this->IsConstant()) {
-      os << "Constant(" << Brief(*this->AsConstant()->Value()) << ")";
-    } else if (this->IsRange()) {
-      std::ostream::fmtflags saved_flags = os.setf(std::ios::fixed);
-      std::streamsize saved_precision = os.precision(0);
-      os << "Range(" << this->AsRange()->Min() << ", " << this->AsRange()->Max()
-         << ")";
-      os.flags(saved_flags);
-      os.precision(saved_precision);
-    } else if (this->IsContext()) {
-      os << "Context(";
-      this->AsContext()->Outer()->PrintTo(os, dim);
-      os << ")";
-    } else if (this->IsUnion()) {
-      os << "(";
-      for (int i = 0, n = this->AsUnion()->Length(); i < n; ++i) {
-        Type* type_i = this->AsUnion()->Get(i);
-        if (i > 0) os << " | ";
-        type_i->PrintTo(os, dim);
-      }
-      os << ")";
-    } else if (this->IsArray()) {
-      os << "Array(";
-      AsArray()->Element()->PrintTo(os, dim);
-      os << ")";
-    } else if (this->IsFunction()) {
-      if (!this->AsFunction()->Receiver()->IsAny()) {
-        this->AsFunction()->Receiver()->PrintTo(os, dim);
-        os << ".";
-      }
-      os << "(";
-      for (int i = 0; i < this->AsFunction()->Arity(); ++i) {
-        if (i > 0) os << ", ";
-        this->AsFunction()->Parameter(i)->PrintTo(os, dim);
-      }
-      os << ")->";
-      this->AsFunction()->Result()->PrintTo(os, dim);
-    } else if (this->IsTuple()) {
-      os << "<";
-      for (int i = 0, n = this->AsTuple()->Arity(); i < n; ++i) {
-        Type* type_i = this->AsTuple()->Element(i);
-        if (i > 0) os << ", ";
-        type_i->PrintTo(os, dim);
-      }
-      os << ">";
-    } else {
-      UNREACHABLE();
+  if (this->IsBitset()) {
+    BitsetType::Print(os, this->AsBitset());
+  } else if (this->IsConstant()) {
+    os << "Constant(" << Brief(*this->AsConstant()->Value()) << ")";
+  } else if (this->IsRange()) {
+    std::ostream::fmtflags saved_flags = os.setf(std::ios::fixed);
+    std::streamsize saved_precision = os.precision(0);
+    os << "Range(" << this->AsRange()->Min() << ", " << this->AsRange()->Max()
+       << ")";
+    os.flags(saved_flags);
+    os.precision(saved_precision);
+  } else if (this->IsUnion()) {
+    os << "(";
+    for (int i = 0, n = this->AsUnion()->Length(); i < n; ++i) {
+      Type* type_i = this->AsUnion()->Get(i);
+      if (i > 0) os << " | ";
+      type_i->PrintTo(os);
     }
-  }
-  if (dim == BOTH_DIMS) os << "/";
-  if (dim != SEMANTIC_DIM) {
-    BitsetType::Print(os, REPRESENTATION(this->BitsetLub()));
+    os << ")";
+  } else if (this->IsTuple()) {
+    os << "<";
+    for (int i = 0, n = this->AsTuple()->Arity(); i < n; ++i) {
+      Type* type_i = this->AsTuple()->Element(i);
+      if (i > 0) os << ", ";
+      type_i->PrintTo(os);
+    }
+    os << ">";
+  } else {
+    UNREACHABLE();
   }
 }
 
-
 #ifdef DEBUG
 void Type::Print() {
   OFStream os(stdout);
@@ -1262,18 +956,6 @@
   return i::SmiValuesAre31Bits() ? kUnsigned30 : kUnsigned31;
 }
 
-#define CONSTRUCT_SIMD_TYPE(NAME, Name, name, lane_count, lane_type) \
-  Type* Type::Name(Isolate* isolate, Zone* zone) {                   \
-    return Class(i::handle(isolate->heap()->name##_map()), zone);    \
-  }
-SIMD128_TYPES(CONSTRUCT_SIMD_TYPE)
-#undef CONSTRUCT_SIMD_TYPE
-
-// -----------------------------------------------------------------------------
-// Instantiations.
-
-template class Type::Iterator<i::Map>;
-template class Type::Iterator<i::Object>;
-
+}  // namespace compiler
 }  // namespace internal
 }  // namespace v8
diff --git a/src/compiler/types.h b/src/compiler/types.h
new file mode 100644
index 0000000..ef5bec3
--- /dev/null
+++ b/src/compiler/types.h
@@ -0,0 +1,607 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_TYPES_H_
+#define V8_COMPILER_TYPES_H_
+
+#include "src/conversions.h"
+#include "src/handles.h"
+#include "src/objects.h"
+#include "src/ostreams.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+// SUMMARY
+//
+// A simple type system for compiler-internal use. It is based entirely on
+// union types, and all subtyping hence amounts to set inclusion. Besides the
+// obvious primitive types and some predefined unions, the type language also
+// can express class types (a.k.a. specific maps) and singleton types (i.e.,
+// concrete constants).
+//
+// The following equations and inequations hold:
+//
+//   None <= T
+//   T <= Any
+//
+//   Number = Signed32 \/ Unsigned32 \/ Double
+//   Smi <= Signed32
+//   Name = String \/ Symbol
+//   UniqueName = InternalizedString \/ Symbol
+//   InternalizedString < String
+//
+//   Receiver = Object \/ Proxy
+//   OtherUndetectable < Object
+//   DetectableReceiver = Receiver - OtherUndetectable
+//
+//   Constant(x) < T  iff instance_type(map(x)) < T
+//
+//
+// RANGE TYPES
+//
+// A range type represents a continuous integer interval by its minimum and
+// maximum value.  Either value may be an infinity, in which case that infinity
+// itself is also included in the range.   A range never contains NaN or -0.
+//
+// If a value v happens to be an integer n, then Constant(v) is considered a
+// subtype of Range(n, n) (and therefore also a subtype of any larger range).
+// In order to avoid large unions, however, it is usually a good idea to use
+// Range rather than Constant.
+//
+//
+// PREDICATES
+//
+// There are two main functions for testing types:
+//
+//   T1->Is(T2)     -- tests whether T1 is included in T2 (i.e., T1 <= T2)
+//   T1->Maybe(T2)  -- tests whether T1 and T2 overlap (i.e., T1 /\ T2 =/= 0)
+//
+// Typically, the former is to be used to select representations (e.g., via
+// T->Is(SignedSmall())), and the latter to check whether a specific case needs
+// handling (e.g., via T->Maybe(Number())).
+//
+// There is no functionality to discover whether a type is a leaf in the
+// lattice. That is intentional. It should always be possible to refine the
+// lattice (e.g., splitting up number types further) without invalidating any
+// existing assumptions or tests.
+// Consequently, do not normally use Equals for type tests, always use Is!
+//
+// The NowIs operator implements state-sensitive subtying, as described above.
+// Any compilation decision based on such temporary properties requires runtime
+// guarding!
+//
+//
+// PROPERTIES
+//
+// Various formal properties hold for constructors, operators, and predicates
+// over types. For example, constructors are injective and subtyping is a
+// complete partial order.
+//
+// See test/cctest/test-types.cc for a comprehensive executable specification,
+// especially with respect to the properties of the more exotic 'temporal'
+// constructors and predicates (those prefixed 'Now').
+//
+//
+// IMPLEMENTATION
+//
+// Internally, all 'primitive' types, and their unions, are represented as
+// bitsets. Bit 0 is reserved for tagging. Only structured types require
+// allocation.
+
+// -----------------------------------------------------------------------------
+// Values for bitset types
+
+// clang-format off
+
+#define INTERNAL_BITSET_TYPE_LIST(V)                                      \
+  V(OtherUnsigned31, 1u << 1)  \
+  V(OtherUnsigned32, 1u << 2)  \
+  V(OtherSigned32,   1u << 3)  \
+  V(OtherNumber,     1u << 4)  \
+
+#define PROPER_BITSET_TYPE_LIST(V) \
+  V(None,                0u)        \
+  V(Negative31,          1u << 5)   \
+  V(Null,                1u << 6)   \
+  V(Undefined,           1u << 7)   \
+  V(Boolean,             1u << 8)   \
+  V(Unsigned30,          1u << 9)   \
+  V(MinusZero,           1u << 10)  \
+  V(NaN,                 1u << 11)  \
+  V(Symbol,              1u << 12)  \
+  V(InternalizedString,  1u << 13)  \
+  V(OtherString,         1u << 14)  \
+  V(Simd,                1u << 15)  \
+  V(OtherObject,         1u << 17)  \
+  V(OtherUndetectable,   1u << 16)  \
+  V(Proxy,               1u << 18)  \
+  V(Function,            1u << 19)  \
+  V(Hole,                1u << 20)  \
+  V(OtherInternal,       1u << 21)  \
+  \
+  V(Signed31,                   kUnsigned30 | kNegative31) \
+  V(Signed32,                   kSigned31 | kOtherUnsigned31 | kOtherSigned32) \
+  V(Signed32OrMinusZero,        kSigned32 | kMinusZero) \
+  V(Signed32OrMinusZeroOrNaN,   kSigned32 | kMinusZero | kNaN) \
+  V(Negative32,                 kNegative31 | kOtherSigned32) \
+  V(Unsigned31,                 kUnsigned30 | kOtherUnsigned31) \
+  V(Unsigned32,                 kUnsigned30 | kOtherUnsigned31 | \
+                                kOtherUnsigned32) \
+  V(Unsigned32OrMinusZero,      kUnsigned32 | kMinusZero) \
+  V(Unsigned32OrMinusZeroOrNaN, kUnsigned32 | kMinusZero | kNaN) \
+  V(Integral32,                 kSigned32 | kUnsigned32) \
+  V(PlainNumber,                kIntegral32 | kOtherNumber) \
+  V(OrderedNumber,              kPlainNumber | kMinusZero) \
+  V(MinusZeroOrNaN,             kMinusZero | kNaN) \
+  V(Number,                     kOrderedNumber | kNaN) \
+  V(String,                     kInternalizedString | kOtherString) \
+  V(UniqueName,                 kSymbol | kInternalizedString) \
+  V(Name,                       kSymbol | kString) \
+  V(BooleanOrNumber,            kBoolean | kNumber) \
+  V(BooleanOrNullOrNumber,      kBooleanOrNumber | kNull) \
+  V(BooleanOrNullOrUndefined,   kBoolean | kNull | kUndefined) \
+  V(NullOrNumber,               kNull | kNumber) \
+  V(NullOrUndefined,            kNull | kUndefined) \
+  V(Undetectable,               kNullOrUndefined | kOtherUndetectable) \
+  V(NumberOrOddball,            kNumber | kNullOrUndefined | kBoolean | kHole) \
+  V(NumberOrSimdOrString,       kNumber | kSimd | kString) \
+  V(NumberOrString,             kNumber | kString) \
+  V(NumberOrUndefined,          kNumber | kUndefined) \
+  V(PlainPrimitive,             kNumberOrString | kBoolean | kNullOrUndefined) \
+  V(Primitive,                  kSymbol | kSimd | kPlainPrimitive) \
+  V(DetectableReceiver,         kFunction | kOtherObject | kProxy) \
+  V(Object,                     kFunction | kOtherObject | kOtherUndetectable) \
+  V(Receiver,                   kObject | kProxy) \
+  V(StringOrReceiver,           kString | kReceiver) \
+  V(Unique,                     kBoolean | kUniqueName | kNull | kUndefined | \
+                                kReceiver) \
+  V(Internal,                   kHole | kOtherInternal) \
+  V(NonInternal,                kPrimitive | kReceiver) \
+  V(NonNumber,                  kUnique | kString | kInternal) \
+  V(Any,                        0xfffffffeu)
+
+// clang-format on
+
+/*
+ * The following diagrams show how integers (in the mathematical sense) are
+ * divided among the different atomic numerical types.
+ *
+ *   ON    OS32     N31     U30     OU31    OU32     ON
+ * ______[_______[_______[_______[_______[_______[_______
+ *     -2^31   -2^30     0      2^30    2^31    2^32
+ *
+ * E.g., OtherUnsigned32 (OU32) covers all integers from 2^31 to 2^32-1.
+ *
+ * Some of the atomic numerical bitsets are internal only (see
+ * INTERNAL_BITSET_TYPE_LIST).  To a types user, they should only occur in
+ * union with certain other bitsets.  For instance, OtherNumber should only
+ * occur as part of PlainNumber.
+ */
+
+#define BITSET_TYPE_LIST(V)    \
+  INTERNAL_BITSET_TYPE_LIST(V) \
+  PROPER_BITSET_TYPE_LIST(V)
+
+class Type;
+
+// -----------------------------------------------------------------------------
+// Bitset types (internal).
+
+class BitsetType {
+ public:
+  typedef uint32_t bitset;  // Internal
+
+  enum : uint32_t {
+#define DECLARE_TYPE(type, value) k##type = (value),
+    BITSET_TYPE_LIST(DECLARE_TYPE)
+#undef DECLARE_TYPE
+        kUnusedEOL = 0
+  };
+
+  static bitset SignedSmall();
+  static bitset UnsignedSmall();
+
+  bitset Bitset() {
+    return static_cast<bitset>(reinterpret_cast<uintptr_t>(this) ^ 1u);
+  }
+
+  static bool IsInhabited(bitset bits) { return bits != kNone; }
+
+  static bool Is(bitset bits1, bitset bits2) {
+    return (bits1 | bits2) == bits2;
+  }
+
+  static double Min(bitset);
+  static double Max(bitset);
+
+  static bitset Glb(Type* type);  // greatest lower bound that's a bitset
+  static bitset Glb(double min, double max);
+  static bitset Lub(Type* type);  // least upper bound that's a bitset
+  static bitset Lub(i::Map* map);
+  static bitset Lub(i::Object* value);
+  static bitset Lub(double value);
+  static bitset Lub(double min, double max);
+  static bitset ExpandInternals(bitset bits);
+
+  static const char* Name(bitset);
+  static void Print(std::ostream& os, bitset);  // NOLINT
+#ifdef DEBUG
+  static void Print(bitset);
+#endif
+
+  static bitset NumberBits(bitset bits);
+
+  static bool IsBitset(Type* type) {
+    return reinterpret_cast<uintptr_t>(type) & 1;
+  }
+
+  static Type* NewForTesting(bitset bits) { return New(bits); }
+
+ private:
+  friend class Type;
+
+  static Type* New(bitset bits) {
+    return reinterpret_cast<Type*>(static_cast<uintptr_t>(bits | 1u));
+  }
+
+  struct Boundary {
+    bitset internal;
+    bitset external;
+    double min;
+  };
+  static const Boundary BoundariesArray[];
+  static inline const Boundary* Boundaries();
+  static inline size_t BoundariesSize();
+};
+
+// -----------------------------------------------------------------------------
+// Superclass for non-bitset types (internal).
+class TypeBase {
+ protected:
+  friend class Type;
+
+  enum Kind { kConstant, kTuple, kUnion, kRange };
+
+  Kind kind() const { return kind_; }
+  explicit TypeBase(Kind kind) : kind_(kind) {}
+
+  static bool IsKind(Type* type, Kind kind) {
+    if (BitsetType::IsBitset(type)) return false;
+    TypeBase* base = reinterpret_cast<TypeBase*>(type);
+    return base->kind() == kind;
+  }
+
+  // The hacky conversion to/from Type*.
+  static Type* AsType(TypeBase* type) { return reinterpret_cast<Type*>(type); }
+  static TypeBase* FromType(Type* type) {
+    return reinterpret_cast<TypeBase*>(type);
+  }
+
+ private:
+  Kind kind_;
+};
+
+// -----------------------------------------------------------------------------
+// Constant types.
+
+class ConstantType : public TypeBase {
+ public:
+  i::Handle<i::Object> Value() { return object_; }
+
+ private:
+  friend class Type;
+  friend class BitsetType;
+
+  static Type* New(i::Handle<i::Object> value, Zone* zone) {
+    BitsetType::bitset bitset = BitsetType::Lub(*value);
+    return AsType(new (zone->New(sizeof(ConstantType)))
+                      ConstantType(bitset, value));
+  }
+
+  static ConstantType* cast(Type* type) {
+    DCHECK(IsKind(type, kConstant));
+    return static_cast<ConstantType*>(FromType(type));
+  }
+
+  ConstantType(BitsetType::bitset bitset, i::Handle<i::Object> object)
+      : TypeBase(kConstant), bitset_(bitset), object_(object) {}
+
+  BitsetType::bitset Lub() { return bitset_; }
+
+  BitsetType::bitset bitset_;
+  Handle<i::Object> object_;
+};
+// TODO(neis): Also cache value if numerical.
+
+// -----------------------------------------------------------------------------
+// Range types.
+
+class RangeType : public TypeBase {
+ public:
+  struct Limits {
+    double min;
+    double max;
+    Limits(double min, double max) : min(min), max(max) {}
+    explicit Limits(RangeType* range) : min(range->Min()), max(range->Max()) {}
+    bool IsEmpty();
+    static Limits Empty() { return Limits(1, 0); }
+    static Limits Intersect(Limits lhs, Limits rhs);
+    static Limits Union(Limits lhs, Limits rhs);
+  };
+
+  double Min() { return limits_.min; }
+  double Max() { return limits_.max; }
+
+ private:
+  friend class Type;
+  friend class BitsetType;
+  friend class UnionType;
+
+  static Type* New(double min, double max, Zone* zone) {
+    return New(Limits(min, max), zone);
+  }
+
+  static bool IsInteger(double x) {
+    return nearbyint(x) == x && !i::IsMinusZero(x);  // Allows for infinities.
+  }
+
+  static Type* New(Limits lim, Zone* zone) {
+    DCHECK(IsInteger(lim.min) && IsInteger(lim.max));
+    DCHECK(lim.min <= lim.max);
+    BitsetType::bitset bits = BitsetType::Lub(lim.min, lim.max);
+
+    return AsType(new (zone->New(sizeof(RangeType))) RangeType(bits, lim));
+  }
+
+  static RangeType* cast(Type* type) {
+    DCHECK(IsKind(type, kRange));
+    return static_cast<RangeType*>(FromType(type));
+  }
+
+  RangeType(BitsetType::bitset bitset, Limits limits)
+      : TypeBase(kRange), bitset_(bitset), limits_(limits) {}
+
+  BitsetType::bitset Lub() { return bitset_; }
+
+  BitsetType::bitset bitset_;
+  Limits limits_;
+};
+
+// -----------------------------------------------------------------------------
+// Superclass for types with variable number of type fields.
+class StructuralType : public TypeBase {
+ public:
+  int LengthForTesting() { return Length(); }
+
+ protected:
+  friend class Type;
+
+  int Length() { return length_; }
+
+  Type* Get(int i) {
+    DCHECK(0 <= i && i < this->Length());
+    return elements_[i];
+  }
+
+  void Set(int i, Type* type) {
+    DCHECK(0 <= i && i < this->Length());
+    elements_[i] = type;
+  }
+
+  void Shrink(int length) {
+    DCHECK(2 <= length && length <= this->Length());
+    length_ = length;
+  }
+
+  StructuralType(Kind kind, int length, i::Zone* zone)
+      : TypeBase(kind), length_(length) {
+    elements_ = reinterpret_cast<Type**>(zone->New(sizeof(Type*) * length));
+  }
+
+ private:
+  int length_;
+  Type** elements_;
+};
+
+// -----------------------------------------------------------------------------
+// Tuple types.
+
+class TupleType : public StructuralType {
+ public:
+  int Arity() { return this->Length(); }
+  Type* Element(int i) { return this->Get(i); }
+
+  void InitElement(int i, Type* type) { this->Set(i, type); }
+
+ private:
+  friend class Type;
+
+  TupleType(int length, Zone* zone) : StructuralType(kTuple, length, zone) {}
+
+  static Type* New(int length, Zone* zone) {
+    return AsType(new (zone->New(sizeof(TupleType))) TupleType(length, zone));
+  }
+
+  static TupleType* cast(Type* type) {
+    DCHECK(IsKind(type, kTuple));
+    return static_cast<TupleType*>(FromType(type));
+  }
+};
+
+// -----------------------------------------------------------------------------
+// Union types (internal).
+// A union is a structured type with the following invariants:
+// - its length is at least 2
+// - at most one field is a bitset, and it must go into index 0
+// - no field is a union
+// - no field is a subtype of any other field
+class UnionType : public StructuralType {
+ private:
+  friend Type;
+  friend BitsetType;
+
+  UnionType(int length, Zone* zone) : StructuralType(kUnion, length, zone) {}
+
+  static Type* New(int length, Zone* zone) {
+    return AsType(new (zone->New(sizeof(UnionType))) UnionType(length, zone));
+  }
+
+  static UnionType* cast(Type* type) {
+    DCHECK(IsKind(type, kUnion));
+    return static_cast<UnionType*>(FromType(type));
+  }
+
+  bool Wellformed();
+};
+
+class Type {
+ public:
+  typedef BitsetType::bitset bitset;  // Internal
+
+// Constructors.
+#define DEFINE_TYPE_CONSTRUCTOR(type, value) \
+  static Type* type() { return BitsetType::New(BitsetType::k##type); }
+  PROPER_BITSET_TYPE_LIST(DEFINE_TYPE_CONSTRUCTOR)
+#undef DEFINE_TYPE_CONSTRUCTOR
+
+  static Type* SignedSmall() {
+    return BitsetType::New(BitsetType::SignedSmall());
+  }
+  static Type* UnsignedSmall() {
+    return BitsetType::New(BitsetType::UnsignedSmall());
+  }
+
+  static Type* Constant(i::Handle<i::Object> value, Zone* zone) {
+    return ConstantType::New(value, zone);
+  }
+  static Type* Range(double min, double max, Zone* zone) {
+    return RangeType::New(min, max, zone);
+  }
+  static Type* Tuple(Type* first, Type* second, Type* third, Zone* zone) {
+    Type* tuple = TupleType::New(3, zone);
+    tuple->AsTuple()->InitElement(0, first);
+    tuple->AsTuple()->InitElement(1, second);
+    tuple->AsTuple()->InitElement(2, third);
+    return tuple;
+  }
+
+  static Type* Union(Type* type1, Type* type2, Zone* zone);
+  static Type* Intersect(Type* type1, Type* type2, Zone* zone);
+
+  static Type* Of(double value, Zone* zone) {
+    return BitsetType::New(BitsetType::ExpandInternals(BitsetType::Lub(value)));
+  }
+  static Type* Of(i::Object* value, Zone* zone) {
+    return BitsetType::New(BitsetType::ExpandInternals(BitsetType::Lub(value)));
+  }
+  static Type* Of(i::Handle<i::Object> value, Zone* zone) {
+    return Of(*value, zone);
+  }
+
+  static Type* For(i::Map* map) {
+    return BitsetType::New(BitsetType::ExpandInternals(BitsetType::Lub(map)));
+  }
+  static Type* For(i::Handle<i::Map> map) { return For(*map); }
+
+  // Predicates.
+  bool IsInhabited() { return BitsetType::IsInhabited(this->BitsetLub()); }
+
+  bool Is(Type* that) { return this == that || this->SlowIs(that); }
+  bool Maybe(Type* that);
+  bool Equals(Type* that) { return this->Is(that) && that->Is(this); }
+
+  // Inspection.
+  bool IsRange() { return IsKind(TypeBase::kRange); }
+  bool IsConstant() { return IsKind(TypeBase::kConstant); }
+  bool IsTuple() { return IsKind(TypeBase::kTuple); }
+
+  ConstantType* AsConstant() { return ConstantType::cast(this); }
+  RangeType* AsRange() { return RangeType::cast(this); }
+  TupleType* AsTuple() { return TupleType::cast(this); }
+
+  // Minimum and maximum of a numeric type.
+  // These functions do not distinguish between -0 and +0.  If the type equals
+  // kNaN, they return NaN; otherwise kNaN is ignored.  Only call these
+  // functions on subtypes of Number.
+  double Min();
+  double Max();
+
+  // Extracts a range from the type: if the type is a range or a union
+  // containing a range, that range is returned; otherwise, NULL is returned.
+  Type* GetRange();
+
+  static bool IsInteger(i::Object* x);
+  static bool IsInteger(double x) {
+    return nearbyint(x) == x && !i::IsMinusZero(x);  // Allows for infinities.
+  }
+
+  int NumConstants();
+
+  // Printing.
+
+  void PrintTo(std::ostream& os);
+
+#ifdef DEBUG
+  void Print();
+#endif
+
+  // Helpers for testing.
+  bool IsBitsetForTesting() { return IsBitset(); }
+  bool IsUnionForTesting() { return IsUnion(); }
+  bitset AsBitsetForTesting() { return AsBitset(); }
+  UnionType* AsUnionForTesting() { return AsUnion(); }
+
+ private:
+  // Friends.
+  template <class>
+  friend class Iterator;
+  friend BitsetType;
+  friend UnionType;
+
+  // Internal inspection.
+  bool IsKind(TypeBase::Kind kind) { return TypeBase::IsKind(this, kind); }
+
+  bool IsNone() { return this == None(); }
+  bool IsAny() { return this == Any(); }
+  bool IsBitset() { return BitsetType::IsBitset(this); }
+  bool IsUnion() { return IsKind(TypeBase::kUnion); }
+
+  bitset AsBitset() {
+    DCHECK(this->IsBitset());
+    return reinterpret_cast<BitsetType*>(this)->Bitset();
+  }
+  UnionType* AsUnion() { return UnionType::cast(this); }
+
+  bitset BitsetGlb() { return BitsetType::Glb(this); }
+  bitset BitsetLub() { return BitsetType::Lub(this); }
+
+  bool SlowIs(Type* that);
+
+  static bool Overlap(RangeType* lhs, RangeType* rhs);
+  static bool Contains(RangeType* lhs, RangeType* rhs);
+  static bool Contains(RangeType* range, ConstantType* constant);
+  static bool Contains(RangeType* range, i::Object* val);
+
+  static int UpdateRange(Type* type, UnionType* result, int size, Zone* zone);
+
+  static RangeType::Limits IntersectRangeAndBitset(Type* range, Type* bits,
+                                                   Zone* zone);
+  static RangeType::Limits ToLimits(bitset bits, Zone* zone);
+
+  bool SimplyEquals(Type* that);
+
+  static int AddToUnion(Type* type, UnionType* result, int size, Zone* zone);
+  static int IntersectAux(Type* type, Type* other, UnionType* result, int size,
+                          RangeType::Limits* limits, Zone* zone);
+  static Type* NormalizeUnion(Type* unioned, int size, Zone* zone);
+  static Type* NormalizeRangeAndBitset(Type* range, bitset* bits, Zone* zone);
+};
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_COMPILER_TYPES_H_
diff --git a/src/compiler/verifier.cc b/src/compiler/verifier.cc
index eb42b39..b9faeee 100644
--- a/src/compiler/verifier.cc
+++ b/src/compiler/verifier.cc
@@ -48,7 +48,7 @@
       FATAL(str.str().c_str());
     }
   }
-  void CheckUpperIs(Node* node, Type* type) {
+  void CheckTypeIs(Node* node, Type* type) {
     if (typing == TYPED && !NodeProperties::GetType(node)->Is(type)) {
       std::ostringstream str;
       str << "TypeError: node #" << node->id() << ":" << *node->op()
@@ -59,7 +59,7 @@
       FATAL(str.str().c_str());
     }
   }
-  void CheckUpperMaybe(Node* node, Type* type) {
+  void CheckTypeMaybe(Node* node, Type* type) {
     if (typing == TYPED && !NodeProperties::GetType(node)->Maybe(type)) {
       std::ostringstream str;
       str << "TypeError: node #" << node->id() << ":" << *node->op()
@@ -181,7 +181,7 @@
       CHECK_EQ(0, input_count);
       // Type is a tuple.
       // TODO(rossberg): Multiple outputs are currently typed as Internal.
-      CheckUpperIs(node, Type::Internal());
+      CheckTypeIs(node, Type::Internal());
       break;
     case IrOpcode::kEnd:
       // End has no outputs.
@@ -230,7 +230,7 @@
       Node* input = NodeProperties::GetControlInput(node, 0);
       CHECK(!input->op()->HasProperty(Operator::kNoThrow));
       // Type can be anything.
-      CheckUpperIs(node, Type::Any());
+      CheckTypeIs(node, Type::Any());
       break;
     }
     case IrOpcode::kSwitch: {
@@ -330,21 +330,21 @@
       CHECK_LE(-1, index);
       CHECK_LT(index + 1, start->op()->ValueOutputCount());
       // Type can be anything.
-      CheckUpperIs(node, Type::Any());
+      CheckTypeIs(node, Type::Any());
       break;
     }
     case IrOpcode::kInt32Constant:  // TODO(rossberg): rename Word32Constant?
       // Constants have no inputs.
       CHECK_EQ(0, input_count);
       // Type is a 32 bit integer, signed or unsigned.
-      CheckUpperIs(node, Type::Integral32());
+      CheckTypeIs(node, Type::Integral32());
       break;
     case IrOpcode::kInt64Constant:
       // Constants have no inputs.
       CHECK_EQ(0, input_count);
       // Type is internal.
       // TODO(rossberg): Introduce proper Int64 type.
-      CheckUpperIs(node, Type::Internal());
+      CheckTypeIs(node, Type::Internal());
       break;
     case IrOpcode::kFloat32Constant:
     case IrOpcode::kFloat64Constant:
@@ -352,7 +352,7 @@
       // Constants have no inputs.
       CHECK_EQ(0, input_count);
       // Type is a number.
-      CheckUpperIs(node, Type::Number());
+      CheckTypeIs(node, Type::Number());
       break;
     case IrOpcode::kRelocatableInt32Constant:
     case IrOpcode::kRelocatableInt64Constant:
@@ -361,21 +361,19 @@
     case IrOpcode::kHeapConstant:
       // Constants have no inputs.
       CHECK_EQ(0, input_count);
-      // Type can be anything represented as a heap pointer.
-      CheckUpperIs(node, Type::TaggedPointer());
       break;
     case IrOpcode::kExternalConstant:
       // Constants have no inputs.
       CHECK_EQ(0, input_count);
       // Type is considered internal.
-      CheckUpperIs(node, Type::Internal());
+      CheckTypeIs(node, Type::Internal());
       break;
     case IrOpcode::kOsrValue:
       // OSR values have a value and a control input.
       CHECK_EQ(1, control_count);
       CHECK_EQ(1, input_count);
       // Type is merged from other values in the graph and could be any.
-      CheckUpperIs(node, Type::Any());
+      CheckTypeIs(node, Type::Any());
       break;
     case IrOpcode::kProjection: {
       // Projection has an input that produces enough values.
@@ -385,7 +383,7 @@
       // Type can be anything.
       // TODO(rossberg): Introduce tuple types for this.
       // TODO(titzer): Convince rossberg not to.
-      CheckUpperIs(node, Type::Any());
+      CheckTypeIs(node, Type::Any());
       break;
     }
     case IrOpcode::kSelect: {
@@ -495,7 +493,7 @@
     case IrOpcode::kJSLessThanOrEqual:
     case IrOpcode::kJSGreaterThanOrEqual:
       // Type is Boolean.
-      CheckUpperIs(node, Type::Boolean());
+      CheckTypeIs(node, Type::Boolean());
       break;
 
     case IrOpcode::kJSBitwiseOr:
@@ -505,80 +503,80 @@
     case IrOpcode::kJSShiftRight:
     case IrOpcode::kJSShiftRightLogical:
       // Type is 32 bit integral.
-      CheckUpperIs(node, Type::Integral32());
+      CheckTypeIs(node, Type::Integral32());
       break;
     case IrOpcode::kJSAdd:
       // Type is Number or String.
-      CheckUpperIs(node, Type::NumberOrString());
+      CheckTypeIs(node, Type::NumberOrString());
       break;
     case IrOpcode::kJSSubtract:
     case IrOpcode::kJSMultiply:
     case IrOpcode::kJSDivide:
     case IrOpcode::kJSModulus:
       // Type is Number.
-      CheckUpperIs(node, Type::Number());
+      CheckTypeIs(node, Type::Number());
       break;
 
     case IrOpcode::kJSToBoolean:
       // Type is Boolean.
-      CheckUpperIs(node, Type::Boolean());
+      CheckTypeIs(node, Type::Boolean());
       break;
     case IrOpcode::kJSToInteger:
       // Type is OrderedNumber.
-      CheckUpperIs(node, Type::OrderedNumber());
+      CheckTypeIs(node, Type::OrderedNumber());
       break;
     case IrOpcode::kJSToLength:
       // Type is OrderedNumber.
-      CheckUpperIs(node, Type::OrderedNumber());
+      CheckTypeIs(node, Type::OrderedNumber());
       break;
     case IrOpcode::kJSToName:
       // Type is Name.
-      CheckUpperIs(node, Type::Name());
+      CheckTypeIs(node, Type::Name());
       break;
     case IrOpcode::kJSToNumber:
       // Type is Number.
-      CheckUpperIs(node, Type::Number());
+      CheckTypeIs(node, Type::Number());
       break;
     case IrOpcode::kJSToString:
       // Type is String.
-      CheckUpperIs(node, Type::String());
+      CheckTypeIs(node, Type::String());
       break;
     case IrOpcode::kJSToObject:
       // Type is Receiver.
-      CheckUpperIs(node, Type::Receiver());
+      CheckTypeIs(node, Type::Receiver());
       break;
 
     case IrOpcode::kJSCreate:
       // Type is Object.
-      CheckUpperIs(node, Type::Object());
+      CheckTypeIs(node, Type::Object());
       break;
     case IrOpcode::kJSCreateArguments:
       // Type is OtherObject.
-      CheckUpperIs(node, Type::OtherObject());
+      CheckTypeIs(node, Type::OtherObject());
       break;
     case IrOpcode::kJSCreateArray:
       // Type is OtherObject.
-      CheckUpperIs(node, Type::OtherObject());
+      CheckTypeIs(node, Type::OtherObject());
       break;
     case IrOpcode::kJSCreateClosure:
       // Type is Function.
-      CheckUpperIs(node, Type::Function());
+      CheckTypeIs(node, Type::Function());
       break;
     case IrOpcode::kJSCreateIterResultObject:
       // Type is OtherObject.
-      CheckUpperIs(node, Type::OtherObject());
+      CheckTypeIs(node, Type::OtherObject());
       break;
     case IrOpcode::kJSCreateLiteralArray:
     case IrOpcode::kJSCreateLiteralObject:
     case IrOpcode::kJSCreateLiteralRegExp:
       // Type is OtherObject.
-      CheckUpperIs(node, Type::OtherObject());
+      CheckTypeIs(node, Type::OtherObject());
       break;
     case IrOpcode::kJSLoadProperty:
     case IrOpcode::kJSLoadNamed:
     case IrOpcode::kJSLoadGlobal:
       // Type can be anything.
-      CheckUpperIs(node, Type::Any());
+      CheckTypeIs(node, Type::Any());
       break;
     case IrOpcode::kJSStoreProperty:
     case IrOpcode::kJSStoreNamed:
@@ -589,17 +587,18 @@
     case IrOpcode::kJSDeleteProperty:
     case IrOpcode::kJSHasProperty:
     case IrOpcode::kJSInstanceOf:
+    case IrOpcode::kJSOrdinaryHasInstance:
       // Type is Boolean.
-      CheckUpperIs(node, Type::Boolean());
+      CheckTypeIs(node, Type::Boolean());
       break;
     case IrOpcode::kJSTypeOf:
       // Type is String.
-      CheckUpperIs(node, Type::String());
+      CheckTypeIs(node, Type::String());
       break;
 
     case IrOpcode::kJSLoadContext:
       // Type can be anything.
-      CheckUpperIs(node, Type::Any());
+      CheckTypeIs(node, Type::Any());
       break;
     case IrOpcode::kJSStoreContext:
       // Type is empty.
@@ -612,44 +611,31 @@
     case IrOpcode::kJSCreateScriptContext: {
       // Type is Context, and operand is Internal.
       Node* context = NodeProperties::GetContextInput(node);
-      // TODO(rossberg): This should really be Is(Internal), but the typer
-      // currently can't do backwards propagation.
-      CheckUpperMaybe(context, Type::Internal());
-      if (typing == TYPED) CHECK(NodeProperties::GetType(node)->IsContext());
+      // TODO(bmeurer): This should say CheckTypeIs, but we don't have type
+      // OtherInternal on certain contexts, i.e. those from OsrValue inputs.
+      CheckTypeMaybe(context, Type::OtherInternal());
+      CheckTypeIs(node, Type::OtherInternal());
       break;
     }
 
     case IrOpcode::kJSCallConstruct:
     case IrOpcode::kJSConvertReceiver:
       // Type is Receiver.
-      CheckUpperIs(node, Type::Receiver());
+      CheckTypeIs(node, Type::Receiver());
       break;
     case IrOpcode::kJSCallFunction:
     case IrOpcode::kJSCallRuntime:
       // Type can be anything.
-      CheckUpperIs(node, Type::Any());
+      CheckTypeIs(node, Type::Any());
       break;
 
     case IrOpcode::kJSForInPrepare: {
       // TODO(bmeurer): What are the constraints on thse?
-      CheckUpperIs(node, Type::Any());
-      break;
-    }
-    case IrOpcode::kJSForInDone: {
-      // TODO(bmeurer): OSR breaks this invariant, although the node is not user
-      // visible, so we know it is safe (fullcodegen has an unsigned smi there).
-      // CheckValueInputIs(node, 0, Type::UnsignedSmall());
+      CheckTypeIs(node, Type::Any());
       break;
     }
     case IrOpcode::kJSForInNext: {
-      CheckUpperIs(node, Type::Union(Type::Name(), Type::Undefined(), zone));
-      break;
-    }
-    case IrOpcode::kJSForInStep: {
-      // TODO(bmeurer): OSR breaks this invariant, although the node is not user
-      // visible, so we know it is safe (fullcodegen has an unsigned smi there).
-      // CheckValueInputIs(node, 0, Type::UnsignedSmall());
-      CheckUpperIs(node, Type::UnsignedSmall());
+      CheckTypeIs(node, Type::Union(Type::Name(), Type::Undefined(), zone));
       break;
     }
 
@@ -662,11 +648,11 @@
       break;
 
     case IrOpcode::kJSGeneratorRestoreContinuation:
-      CheckUpperIs(node, Type::SignedSmall());
+      CheckTypeIs(node, Type::SignedSmall());
       break;
 
     case IrOpcode::kJSGeneratorRestoreRegister:
-      CheckUpperIs(node, Type::Any());
+      CheckTypeIs(node, Type::Any());
       break;
 
     case IrOpcode::kJSStackCheck:
@@ -686,32 +672,32 @@
     case IrOpcode::kBooleanNot:
       // Boolean -> Boolean
       CheckValueInputIs(node, 0, Type::Boolean());
-      CheckUpperIs(node, Type::Boolean());
+      CheckTypeIs(node, Type::Boolean());
       break;
     case IrOpcode::kNumberEqual:
       // (Number, Number) -> Boolean
       CheckValueInputIs(node, 0, Type::Number());
       CheckValueInputIs(node, 1, Type::Number());
-      CheckUpperIs(node, Type::Boolean());
+      CheckTypeIs(node, Type::Boolean());
       break;
     case IrOpcode::kNumberLessThan:
     case IrOpcode::kNumberLessThanOrEqual:
       // (Number, Number) -> Boolean
       CheckValueInputIs(node, 0, Type::Number());
       CheckValueInputIs(node, 1, Type::Number());
-      CheckUpperIs(node, Type::Boolean());
+      CheckTypeIs(node, Type::Boolean());
       break;
     case IrOpcode::kSpeculativeNumberAdd:
     case IrOpcode::kSpeculativeNumberSubtract:
     case IrOpcode::kSpeculativeNumberMultiply:
     case IrOpcode::kSpeculativeNumberDivide:
     case IrOpcode::kSpeculativeNumberModulus:
-      CheckUpperIs(node, Type::Number());
+      CheckTypeIs(node, Type::Number());
       break;
     case IrOpcode::kSpeculativeNumberEqual:
     case IrOpcode::kSpeculativeNumberLessThan:
     case IrOpcode::kSpeculativeNumberLessThanOrEqual:
-      CheckUpperIs(node, Type::Boolean());
+      CheckTypeIs(node, Type::Boolean());
       break;
     case IrOpcode::kNumberAdd:
     case IrOpcode::kNumberSubtract:
@@ -720,13 +706,13 @@
       // (Number, Number) -> Number
       CheckValueInputIs(node, 0, Type::Number());
       CheckValueInputIs(node, 1, Type::Number());
-      CheckUpperIs(node, Type::Number());
+      CheckTypeIs(node, Type::Number());
       break;
     case IrOpcode::kNumberModulus:
       // (Number, Number) -> Number
       CheckValueInputIs(node, 0, Type::Number());
       CheckValueInputIs(node, 1, Type::Number());
-      CheckUpperIs(node, Type::Number());
+      CheckTypeIs(node, Type::Number());
       break;
     case IrOpcode::kNumberBitwiseOr:
     case IrOpcode::kNumberBitwiseXor:
@@ -734,43 +720,43 @@
       // (Signed32, Signed32) -> Signed32
       CheckValueInputIs(node, 0, Type::Signed32());
       CheckValueInputIs(node, 1, Type::Signed32());
-      CheckUpperIs(node, Type::Signed32());
+      CheckTypeIs(node, Type::Signed32());
       break;
     case IrOpcode::kSpeculativeNumberBitwiseOr:
     case IrOpcode::kSpeculativeNumberBitwiseXor:
     case IrOpcode::kSpeculativeNumberBitwiseAnd:
-      CheckUpperIs(node, Type::Signed32());
+      CheckTypeIs(node, Type::Signed32());
       break;
     case IrOpcode::kNumberShiftLeft:
     case IrOpcode::kNumberShiftRight:
       // (Signed32, Unsigned32) -> Signed32
       CheckValueInputIs(node, 0, Type::Signed32());
       CheckValueInputIs(node, 1, Type::Unsigned32());
-      CheckUpperIs(node, Type::Signed32());
+      CheckTypeIs(node, Type::Signed32());
       break;
     case IrOpcode::kSpeculativeNumberShiftLeft:
     case IrOpcode::kSpeculativeNumberShiftRight:
-      CheckUpperIs(node, Type::Signed32());
+      CheckTypeIs(node, Type::Signed32());
       break;
     case IrOpcode::kNumberShiftRightLogical:
       // (Unsigned32, Unsigned32) -> Unsigned32
       CheckValueInputIs(node, 0, Type::Unsigned32());
       CheckValueInputIs(node, 1, Type::Unsigned32());
-      CheckUpperIs(node, Type::Unsigned32());
+      CheckTypeIs(node, Type::Unsigned32());
       break;
     case IrOpcode::kSpeculativeNumberShiftRightLogical:
-      CheckUpperIs(node, Type::Unsigned32());
+      CheckTypeIs(node, Type::Unsigned32());
       break;
     case IrOpcode::kNumberImul:
       // (Unsigned32, Unsigned32) -> Signed32
       CheckValueInputIs(node, 0, Type::Unsigned32());
       CheckValueInputIs(node, 1, Type::Unsigned32());
-      CheckUpperIs(node, Type::Signed32());
+      CheckTypeIs(node, Type::Signed32());
       break;
     case IrOpcode::kNumberClz32:
       // Unsigned32 -> Unsigned32
       CheckValueInputIs(node, 0, Type::Unsigned32());
-      CheckUpperIs(node, Type::Unsigned32());
+      CheckTypeIs(node, Type::Unsigned32());
       break;
     case IrOpcode::kNumberAtan2:
     case IrOpcode::kNumberMax:
@@ -779,7 +765,7 @@
       // (Number, Number) -> Number
       CheckValueInputIs(node, 0, Type::Number());
       CheckValueInputIs(node, 1, Type::Number());
-      CheckUpperIs(node, Type::Number());
+      CheckTypeIs(node, Type::Number());
       break;
     case IrOpcode::kNumberAbs:
     case IrOpcode::kNumberCeil:
@@ -810,32 +796,37 @@
     case IrOpcode::kNumberTrunc:
       // Number -> Number
       CheckValueInputIs(node, 0, Type::Number());
-      CheckUpperIs(node, Type::Number());
+      CheckTypeIs(node, Type::Number());
+      break;
+    case IrOpcode::kNumberToBoolean:
+      // Number -> Boolean
+      CheckValueInputIs(node, 0, Type::Number());
+      CheckTypeIs(node, Type::Boolean());
       break;
     case IrOpcode::kNumberToInt32:
       // Number -> Signed32
       CheckValueInputIs(node, 0, Type::Number());
-      CheckUpperIs(node, Type::Signed32());
+      CheckTypeIs(node, Type::Signed32());
       break;
     case IrOpcode::kNumberToUint32:
       // Number -> Unsigned32
       CheckValueInputIs(node, 0, Type::Number());
-      CheckUpperIs(node, Type::Unsigned32());
+      CheckTypeIs(node, Type::Unsigned32());
       break;
     case IrOpcode::kPlainPrimitiveToNumber:
       // PlainPrimitive -> Number
       CheckValueInputIs(node, 0, Type::PlainPrimitive());
-      CheckUpperIs(node, Type::Number());
+      CheckTypeIs(node, Type::Number());
       break;
     case IrOpcode::kPlainPrimitiveToWord32:
       // PlainPrimitive -> Integral32
       CheckValueInputIs(node, 0, Type::PlainPrimitive());
-      CheckUpperIs(node, Type::Integral32());
+      CheckTypeIs(node, Type::Integral32());
       break;
     case IrOpcode::kPlainPrimitiveToFloat64:
       // PlainPrimitive -> Number
       CheckValueInputIs(node, 0, Type::PlainPrimitive());
-      CheckUpperIs(node, Type::Number());
+      CheckTypeIs(node, Type::Number());
       break;
     case IrOpcode::kStringEqual:
     case IrOpcode::kStringLessThan:
@@ -843,23 +834,28 @@
       // (String, String) -> Boolean
       CheckValueInputIs(node, 0, Type::String());
       CheckValueInputIs(node, 1, Type::String());
-      CheckUpperIs(node, Type::Boolean());
+      CheckTypeIs(node, Type::Boolean());
       break;
     case IrOpcode::kStringCharCodeAt:
       // (String, Unsigned32) -> UnsignedSmall
       CheckValueInputIs(node, 0, Type::String());
       CheckValueInputIs(node, 1, Type::Unsigned32());
-      CheckUpperIs(node, Type::UnsignedSmall());
+      CheckTypeIs(node, Type::UnsignedSmall());
       break;
     case IrOpcode::kStringFromCharCode:
       // Number -> String
       CheckValueInputIs(node, 0, Type::Number());
-      CheckUpperIs(node, Type::String());
+      CheckTypeIs(node, Type::String());
+      break;
+    case IrOpcode::kStringFromCodePoint:
+      // (Unsigned32) -> String
+      CheckValueInputIs(node, 0, Type::Number());
+      CheckTypeIs(node, Type::String());
       break;
     case IrOpcode::kReferenceEqual: {
       // (Unique, Any) -> Boolean  and
       // (Any, Unique) -> Boolean
-      CheckUpperIs(node, Type::Boolean());
+      CheckTypeIs(node, Type::Boolean());
       break;
     }
     case IrOpcode::kObjectIsCallable:
@@ -868,24 +864,24 @@
     case IrOpcode::kObjectIsSmi:
     case IrOpcode::kObjectIsString:
     case IrOpcode::kObjectIsUndetectable:
+    case IrOpcode::kArrayBufferWasNeutered:
       CheckValueInputIs(node, 0, Type::Any());
-      CheckUpperIs(node, Type::Boolean());
+      CheckTypeIs(node, Type::Boolean());
       break;
     case IrOpcode::kAllocate:
       CheckValueInputIs(node, 0, Type::PlainNumber());
-      CheckUpperIs(node, Type::TaggedPointer());
       break;
     case IrOpcode::kEnsureWritableFastElements:
       CheckValueInputIs(node, 0, Type::Any());
       CheckValueInputIs(node, 1, Type::Internal());
-      CheckUpperIs(node, Type::Internal());
+      CheckTypeIs(node, Type::Internal());
       break;
     case IrOpcode::kMaybeGrowFastElements:
       CheckValueInputIs(node, 0, Type::Any());
       CheckValueInputIs(node, 1, Type::Internal());
       CheckValueInputIs(node, 2, Type::Unsigned31());
       CheckValueInputIs(node, 3, Type::Unsigned31());
-      CheckUpperIs(node, Type::Internal());
+      CheckTypeIs(node, Type::Internal());
       break;
     case IrOpcode::kTransitionElementsKind:
       CheckValueInputIs(node, 0, Type::Any());
@@ -900,7 +896,7 @@
       // Type* from = Type::Intersect(Type::Signed32(), Type::Tagged());
       // Type* to = Type::Intersect(Type::Signed32(), Type::UntaggedInt32());
       // CheckValueInputIs(node, 0, from));
-      // CheckUpperIs(node, to));
+      // CheckTypeIs(node, to));
       break;
     }
     case IrOpcode::kChangeTaggedToInt32: {
@@ -909,7 +905,7 @@
       // Type* from = Type::Intersect(Type::Signed32(), Type::Tagged());
       // Type* to = Type::Intersect(Type::Signed32(), Type::UntaggedInt32());
       // CheckValueInputIs(node, 0, from));
-      // CheckUpperIs(node, to));
+      // CheckTypeIs(node, to));
       break;
     }
     case IrOpcode::kChangeTaggedToUint32: {
@@ -918,7 +914,7 @@
       // Type* from = Type::Intersect(Type::Unsigned32(), Type::Tagged());
       // Type* to =Type::Intersect(Type::Unsigned32(), Type::UntaggedInt32());
       // CheckValueInputIs(node, 0, from));
-      // CheckUpperIs(node, to));
+      // CheckTypeIs(node, to));
       break;
     }
     case IrOpcode::kChangeTaggedToFloat64: {
@@ -927,7 +923,7 @@
       // Type* from = Type::Intersect(Type::Number(), Type::Tagged());
       // Type* to = Type::Intersect(Type::Number(), Type::UntaggedFloat64());
       // CheckValueInputIs(node, 0, from));
-      // CheckUpperIs(node, to));
+      // CheckTypeIs(node, to));
       break;
     }
     case IrOpcode::kTruncateTaggedToFloat64: {
@@ -937,7 +933,7 @@
       // Type::Tagged());
       // Type* to = Type::Intersect(Type::Number(), Type::UntaggedFloat64());
       // CheckValueInputIs(node, 0, from));
-      // CheckUpperIs(node, to));
+      // CheckTypeIs(node, to));
       break;
     }
     case IrOpcode::kChangeInt31ToTaggedSigned: {
@@ -946,7 +942,7 @@
       // Type* from =Type::Intersect(Type::Signed31(), Type::UntaggedInt32());
       // Type* to = Type::Intersect(Type::Signed31(), Type::Tagged());
       // CheckValueInputIs(node, 0, from));
-      // CheckUpperIs(node, to));
+      // CheckTypeIs(node, to));
       break;
     }
     case IrOpcode::kChangeInt32ToTagged: {
@@ -955,7 +951,7 @@
       // Type* from =Type::Intersect(Type::Signed32(), Type::UntaggedInt32());
       // Type* to = Type::Intersect(Type::Signed32(), Type::Tagged());
       // CheckValueInputIs(node, 0, from));
-      // CheckUpperIs(node, to));
+      // CheckTypeIs(node, to));
       break;
     }
     case IrOpcode::kChangeUint32ToTagged: {
@@ -964,7 +960,7 @@
       // Type* from=Type::Intersect(Type::Unsigned32(),Type::UntaggedInt32());
       // Type* to = Type::Intersect(Type::Unsigned32(), Type::Tagged());
       // CheckValueInputIs(node, 0, from));
-      // CheckUpperIs(node, to));
+      // CheckTypeIs(node, to));
       break;
     }
     case IrOpcode::kChangeFloat64ToTagged: {
@@ -973,7 +969,7 @@
       // Type* from =Type::Intersect(Type::Number(), Type::UntaggedFloat64());
       // Type* to = Type::Intersect(Type::Number(), Type::Tagged());
       // CheckValueInputIs(node, 0, from));
-      // CheckUpperIs(node, to));
+      // CheckTypeIs(node, to));
       break;
     }
     case IrOpcode::kChangeTaggedToBit: {
@@ -982,7 +978,7 @@
       // Type* from = Type::Intersect(Type::Boolean(), Type::TaggedPtr());
       // Type* to = Type::Intersect(Type::Boolean(), Type::UntaggedInt1());
       // CheckValueInputIs(node, 0, from));
-      // CheckUpperIs(node, to));
+      // CheckTypeIs(node, to));
       break;
     }
     case IrOpcode::kChangeBitToTagged: {
@@ -991,7 +987,7 @@
       // Type* from = Type::Intersect(Type::Boolean(), Type::UntaggedInt1());
       // Type* to = Type::Intersect(Type::Boolean(), Type::TaggedPtr());
       // CheckValueInputIs(node, 0, from));
-      // CheckUpperIs(node, to));
+      // CheckTypeIs(node, to));
       break;
     }
     case IrOpcode::kTruncateTaggedToWord32: {
@@ -1000,21 +996,23 @@
       // Type* from = Type::Intersect(Type::Number(), Type::Tagged());
       // Type* to = Type::Intersect(Type::Number(), Type::UntaggedInt32());
       // CheckValueInputIs(node, 0, from));
-      // CheckUpperIs(node, to));
+      // CheckTypeIs(node, to));
       break;
     }
-    case IrOpcode::kImpossibleToWord32:
-    case IrOpcode::kImpossibleToWord64:
-    case IrOpcode::kImpossibleToFloat32:
-    case IrOpcode::kImpossibleToFloat64:
-    case IrOpcode::kImpossibleToTagged:
-    case IrOpcode::kImpossibleToBit:
+    case IrOpcode::kTruncateTaggedToBit:
       break;
 
     case IrOpcode::kCheckBounds:
       CheckValueInputIs(node, 0, Type::Any());
       CheckValueInputIs(node, 1, Type::Unsigned31());
-      CheckUpperIs(node, Type::Unsigned31());
+      CheckTypeIs(node, Type::Unsigned31());
+      break;
+    case IrOpcode::kCheckHeapObject:
+      CheckValueInputIs(node, 0, Type::Any());
+      break;
+    case IrOpcode::kCheckIf:
+      CheckValueInputIs(node, 0, Type::Boolean());
+      CheckNotTyped(node);
       break;
     case IrOpcode::kCheckMaps:
       // (Any, Internal, ..., Internal) -> Any
@@ -1026,23 +1024,14 @@
       break;
     case IrOpcode::kCheckNumber:
       CheckValueInputIs(node, 0, Type::Any());
-      CheckUpperIs(node, Type::Number());
+      CheckTypeIs(node, Type::Number());
+      break;
+    case IrOpcode::kCheckSmi:
+      CheckValueInputIs(node, 0, Type::Any());
       break;
     case IrOpcode::kCheckString:
       CheckValueInputIs(node, 0, Type::Any());
-      CheckUpperIs(node, Type::String());
-      break;
-    case IrOpcode::kCheckIf:
-      CheckValueInputIs(node, 0, Type::Boolean());
-      CheckNotTyped(node);
-      break;
-    case IrOpcode::kCheckTaggedSigned:
-      CheckValueInputIs(node, 0, Type::Any());
-      CheckUpperIs(node, Type::TaggedSigned());
-      break;
-    case IrOpcode::kCheckTaggedPointer:
-      CheckValueInputIs(node, 0, Type::Any());
-      CheckUpperIs(node, Type::TaggedPointer());
+      CheckTypeIs(node, Type::String());
       break;
 
     case IrOpcode::kCheckedInt32Add:
@@ -1052,32 +1041,35 @@
     case IrOpcode::kCheckedUint32Div:
     case IrOpcode::kCheckedUint32Mod:
     case IrOpcode::kCheckedInt32Mul:
+    case IrOpcode::kCheckedInt32ToTaggedSigned:
     case IrOpcode::kCheckedUint32ToInt32:
+    case IrOpcode::kCheckedUint32ToTaggedSigned:
     case IrOpcode::kCheckedFloat64ToInt32:
     case IrOpcode::kCheckedTaggedSignedToInt32:
     case IrOpcode::kCheckedTaggedToInt32:
     case IrOpcode::kCheckedTaggedToFloat64:
+    case IrOpcode::kCheckedTaggedToTaggedSigned:
     case IrOpcode::kCheckedTruncateTaggedToWord32:
       break;
 
     case IrOpcode::kCheckFloat64Hole:
       CheckValueInputIs(node, 0, Type::Number());
-      CheckUpperIs(node, Type::Number());
+      CheckTypeIs(node, Type::Number());
       break;
     case IrOpcode::kCheckTaggedHole:
       CheckValueInputIs(node, 0, Type::Any());
-      CheckUpperIs(node, Type::NonInternal());
+      CheckTypeIs(node, Type::NonInternal());
       break;
     case IrOpcode::kConvertTaggedHoleToUndefined:
       CheckValueInputIs(node, 0, Type::Any());
-      CheckUpperIs(node, Type::NonInternal());
+      CheckTypeIs(node, Type::NonInternal());
       break;
 
     case IrOpcode::kLoadField:
       // Object -> fieldtype
       // TODO(rossberg): activate once machine ops are typed.
       // CheckValueInputIs(node, 0, Type::Object());
-      // CheckUpperIs(node, FieldAccessOf(node->op()).type));
+      // CheckTypeIs(node, FieldAccessOf(node->op()).type));
       break;
     case IrOpcode::kLoadBuffer:
       break;
@@ -1085,7 +1077,7 @@
       // Object -> elementtype
       // TODO(rossberg): activate once machine ops are typed.
       // CheckValueInputIs(node, 0, Type::Object());
-      // CheckUpperIs(node, ElementAccessOf(node->op()).type));
+      // CheckTypeIs(node, ElementAccessOf(node->op()).type));
       break;
     case IrOpcode::kLoadTypedElement:
       break;
@@ -1110,15 +1102,16 @@
       break;
     case IrOpcode::kNumberSilenceNaN:
       CheckValueInputIs(node, 0, Type::Number());
-      CheckUpperIs(node, Type::Number());
+      CheckTypeIs(node, Type::Number());
       break;
     case IrOpcode::kTypeGuard:
-      CheckUpperIs(node, TypeGuardTypeOf(node->op()));
+      CheckTypeIs(node, TypeGuardTypeOf(node->op()));
       break;
 
     // Machine operators
     // -----------------------
     case IrOpcode::kLoad:
+    case IrOpcode::kProtectedLoad:
     case IrOpcode::kStore:
     case IrOpcode::kStackSlot:
     case IrOpcode::kWord32And:
@@ -1245,7 +1238,9 @@
     case IrOpcode::kBitcastFloat64ToInt64:
     case IrOpcode::kBitcastInt32ToFloat32:
     case IrOpcode::kBitcastInt64ToFloat64:
+    case IrOpcode::kBitcastTaggedToWord:
     case IrOpcode::kBitcastWordToTagged:
+    case IrOpcode::kBitcastWordToTaggedSigned:
     case IrOpcode::kChangeInt32ToInt64:
     case IrOpcode::kChangeUint32ToUint64:
     case IrOpcode::kChangeInt32ToFloat64:
diff --git a/src/compiler/wasm-compiler.cc b/src/compiler/wasm-compiler.cc
index e92a434..b003e99 100644
--- a/src/compiler/wasm-compiler.cc
+++ b/src/compiler/wasm-compiler.cc
@@ -189,26 +189,29 @@
 
   Node* GetTrapValue(wasm::FunctionSig* sig) {
     if (sig->return_count() > 0) {
-      switch (sig->GetReturn()) {
-        case wasm::kAstI32:
-          return jsgraph()->Int32Constant(0xdeadbeef);
-        case wasm::kAstI64:
-          return jsgraph()->Int64Constant(0xdeadbeefdeadbeef);
-        case wasm::kAstF32:
-          return jsgraph()->Float32Constant(bit_cast<float>(0xdeadbeef));
-        case wasm::kAstF64:
-          return jsgraph()->Float64Constant(
-              bit_cast<double>(0xdeadbeefdeadbeef));
-          break;
-        default:
-          UNREACHABLE();
-          return nullptr;
-      }
+      return GetTrapValue(sig->GetReturn());
     } else {
       return jsgraph()->Int32Constant(0xdeadbeef);
     }
   }
 
+  Node* GetTrapValue(wasm::LocalType type) {
+    switch (type) {
+      case wasm::kAstI32:
+        return jsgraph()->Int32Constant(0xdeadbeef);
+      case wasm::kAstI64:
+        return jsgraph()->Int64Constant(0xdeadbeefdeadbeef);
+      case wasm::kAstF32:
+        return jsgraph()->Float32Constant(bit_cast<float>(0xdeadbeef));
+      case wasm::kAstF64:
+        return jsgraph()->Float64Constant(bit_cast<double>(0xdeadbeefdeadbeef));
+        break;
+      default:
+        UNREACHABLE();
+        return nullptr;
+    }
+  }
+
  private:
   WasmGraphBuilder* builder_;
   JSGraph* jsgraph_;
@@ -334,6 +337,19 @@
          NodeProperties::GetControlInput(phi) == merge;
 }
 
+bool WasmGraphBuilder::ThrowsException(Node* node, Node** if_success,
+                                       Node** if_exception) {
+  if (node->op()->HasProperty(compiler::Operator::kNoThrow)) {
+    return false;
+  }
+
+  *if_success = graph()->NewNode(jsgraph()->common()->IfSuccess(), node);
+  *if_exception =
+      graph()->NewNode(jsgraph()->common()->IfException(), node, node);
+
+  return true;
+}
+
 void WasmGraphBuilder::AppendToMerge(Node* merge, Node* from) {
   DCHECK(IrOpcode::IsMergeOpcode(merge->opcode()));
   merge->AppendInput(jsgraph()->zone(), from);
@@ -932,8 +948,6 @@
       return BuildI64UConvertF32(input, position);
     case wasm::kExprI64UConvertF64:
       return BuildI64UConvertF64(input, position);
-    case wasm::kExprGrowMemory:
-      return BuildGrowMemory(input);
     case wasm::kExprI32AsmjsLoadMem8S:
       return BuildAsmjsLoadMem(MachineType::Int8(), input);
     case wasm::kExprI32AsmjsLoadMem8U:
@@ -995,16 +1009,11 @@
   DCHECK_NOT_NULL(*control_);
   DCHECK_NOT_NULL(*effect_);
 
-  if (count == 0) {
-    // Handle a return of void.
-    vals[0] = jsgraph()->Int32Constant(0);
-    count = 1;
-  }
-
   Node** buf = Realloc(vals, count, count + 2);
   buf[count] = *effect_;
   buf[count + 1] = *control_;
-  Node* ret = graph()->NewNode(jsgraph()->common()->Return(), count + 2, vals);
+  Node* ret =
+      graph()->NewNode(jsgraph()->common()->Return(count), count + 2, vals);
 
   MergeControlToEnd(jsgraph(), ret);
   return ret;
@@ -1667,14 +1676,21 @@
   return load;
 }
 
-Node* WasmGraphBuilder::BuildGrowMemory(Node* input) {
+Node* WasmGraphBuilder::GrowMemory(Node* input) {
+  Diamond check_input_range(
+      graph(), jsgraph()->common(),
+      graph()->NewNode(
+          jsgraph()->machine()->Uint32LessThanOrEqual(), input,
+          jsgraph()->Uint32Constant(wasm::WasmModule::kMaxMemPages)),
+      BranchHint::kTrue);
+
+  check_input_range.Chain(*control_);
+
   Runtime::FunctionId function_id = Runtime::kWasmGrowMemory;
   const Runtime::Function* function = Runtime::FunctionForId(function_id);
   CallDescriptor* desc = Linkage::GetRuntimeCallDescriptor(
       jsgraph()->zone(), function_id, function->nargs, Operator::kNoThrow,
       CallDescriptor::kNoFlags);
-  Node** control_ptr = control_;
-  Node** effect_ptr = effect_;
   wasm::ModuleEnv* module = module_;
   input = BuildChangeUint32ToSmi(input);
   Node* inputs[] = {
@@ -1683,13 +1699,86 @@
           ExternalReference(function_id, jsgraph()->isolate())),  // ref
       jsgraph()->Int32Constant(function->nargs),                  // arity
       jsgraph()->HeapConstant(module->instance->context),         // context
-      *effect_ptr,
-      *control_ptr};
-  Node* node = graph()->NewNode(jsgraph()->common()->Call(desc),
+      *effect_,
+      check_input_range.if_true};
+  Node* call = graph()->NewNode(jsgraph()->common()->Call(desc),
                                 static_cast<int>(arraysize(inputs)), inputs);
-  *effect_ptr = node;
-  node = BuildChangeSmiToInt32(node);
-  return node;
+
+  Node* result = BuildChangeSmiToInt32(call);
+
+  result = check_input_range.Phi(MachineRepresentation::kWord32, result,
+                                 jsgraph()->Int32Constant(-1));
+  *effect_ = graph()->NewNode(jsgraph()->common()->EffectPhi(2), call, *effect_,
+                              check_input_range.merge);
+  *control_ = check_input_range.merge;
+  return result;
+}
+
+Node* WasmGraphBuilder::Throw(Node* input) {
+  MachineOperatorBuilder* machine = jsgraph()->machine();
+
+  // Pass the thrown value as two SMIs:
+  //
+  // upper = static_cast<uint32_t>(input) >> 16;
+  // lower = input & 0xFFFF;
+  //
+  // This is needed because we can't safely call BuildChangeInt32ToTagged from
+  // this method.
+  //
+  // TODO(wasm): figure out how to properly pass this to the runtime function.
+  Node* upper = BuildChangeInt32ToSmi(
+      graph()->NewNode(machine->Word32Shr(), input, Int32Constant(16)));
+  Node* lower = BuildChangeInt32ToSmi(
+      graph()->NewNode(machine->Word32And(), input, Int32Constant(0xFFFFu)));
+
+  Node* parameters[] = {lower, upper};  // thrown value
+  return BuildCallToRuntime(Runtime::kWasmThrow, jsgraph(),
+                            module_->instance->context, parameters,
+                            arraysize(parameters), effect_, *control_);
+}
+
+Node* WasmGraphBuilder::Catch(Node* input, wasm::WasmCodePosition position) {
+  CommonOperatorBuilder* common = jsgraph()->common();
+
+  Node* parameters[] = {input};  // caught value
+  Node* value =
+      BuildCallToRuntime(Runtime::kWasmGetCaughtExceptionValue, jsgraph(),
+                         module_->instance->context, parameters,
+                         arraysize(parameters), effect_, *control_);
+
+  Node* is_smi;
+  Node* is_heap;
+  Branch(BuildTestNotSmi(value), &is_heap, &is_smi);
+
+  // is_smi
+  Node* smi_i32 = BuildChangeSmiToInt32(value);
+  Node* is_smi_effect = *effect_;
+
+  // is_heap
+  *control_ = is_heap;
+  Node* heap_f64 = BuildLoadHeapNumberValue(value, is_heap);
+
+  // *control_ needs to point to the current control dependency (is_heap) in
+  // case BuildI32SConvertF64 needs to insert nodes that depend on the "current"
+  // control node.
+  Node* heap_i32 = BuildI32SConvertF64(heap_f64, position);
+  // *control_ contains the control node that should be used when merging the
+  // result for the catch clause. It may be different than *control_ because
+  // BuildI32SConvertF64 may introduce a new control node (used for trapping if
+  // heap_f64 cannot be converted to an i32.
+  is_heap = *control_;
+  Node* is_heap_effect = *effect_;
+
+  Node* merge = graph()->NewNode(common->Merge(2), is_heap, is_smi);
+  Node* effect_merge = graph()->NewNode(common->EffectPhi(2), is_heap_effect,
+                                        is_smi_effect, merge);
+
+  Node* value_i32 = graph()->NewNode(
+      common->Phi(MachineRepresentation::kWord32, 2), heap_i32, smi_i32, merge);
+
+  *control_ = merge;
+  *effect_ = effect_merge;
+  return value_i32;
 }
 
 Node* WasmGraphBuilder::BuildI32DivS(Node* left, Node* right,
@@ -1961,6 +2050,7 @@
 }
 
 Node* WasmGraphBuilder::BuildWasmCall(wasm::FunctionSig* sig, Node** args,
+                                      Node*** rets,
                                       wasm::WasmCodePosition position) {
   const size_t params = sig->parameter_count();
   const size_t extra = 2;  // effect and control inputs.
@@ -1980,32 +2070,37 @@
   SetSourcePosition(call, position);
 
   *effect_ = call;
+  size_t ret_count = sig->return_count();
+  if (ret_count == 0) return call;  // No return value.
+
+  *rets = Buffer(ret_count);
+  if (ret_count == 1) {
+    // Only a single return value.
+    (*rets)[0] = call;
+  } else {
+    // Create projections for all return values.
+    for (size_t i = 0; i < ret_count; i++) {
+      (*rets)[i] = graph()->NewNode(jsgraph()->common()->Projection(i), call,
+                                    graph()->start());
+    }
+  }
   return call;
 }
 
-Node* WasmGraphBuilder::CallDirect(uint32_t index, Node** args,
+Node* WasmGraphBuilder::CallDirect(uint32_t index, Node** args, Node*** rets,
                                    wasm::WasmCodePosition position) {
   DCHECK_NULL(args[0]);
 
   // Add code object as constant.
-  args[0] = HeapConstant(module_->GetCodeOrPlaceholder(index));
+  Handle<Code> code = module_->GetFunctionCode(index);
+  DCHECK(!code.is_null());
+  args[0] = HeapConstant(code);
   wasm::FunctionSig* sig = module_->GetFunctionSignature(index);
 
-  return BuildWasmCall(sig, args, position);
+  return BuildWasmCall(sig, args, rets, position);
 }
 
-Node* WasmGraphBuilder::CallImport(uint32_t index, Node** args,
-                                   wasm::WasmCodePosition position) {
-  DCHECK_NULL(args[0]);
-
-  // Add code object as constant.
-  args[0] = HeapConstant(module_->GetImportCode(index));
-  wasm::FunctionSig* sig = module_->GetImportSignature(index);
-
-  return BuildWasmCall(sig, args, position);
-}
-
-Node* WasmGraphBuilder::CallIndirect(uint32_t index, Node** args,
+Node* WasmGraphBuilder::CallIndirect(uint32_t index, Node** args, Node*** rets,
                                      wasm::WasmCodePosition position) {
   DCHECK_NOT_NULL(args[0]);
   DCHECK(module_ && module_->instance);
@@ -2020,6 +2115,7 @@
   // Bounds check the index.
   uint32_t table_size =
       module_->IsValidTable(0) ? module_->GetTable(0)->max_size : 0;
+  wasm::FunctionSig* sig = module_->GetSignature(index);
   if (table_size > 0) {
     // Bounds check against the table size.
     Node* size = Uint32Constant(table_size);
@@ -2028,7 +2124,11 @@
   } else {
     // No function table. Generate a trap and return a constant.
     trap_->AddTrapIfFalse(wasm::kTrapFuncInvalid, Int32Constant(0), position);
-    return trap_->GetTrapValue(module_->GetSignature(index));
+    (*rets) = Buffer(sig->return_count());
+    for (size_t i = 0; i < sig->return_count(); i++) {
+      (*rets)[i] = trap_->GetTrapValue(sig->GetReturn(i));
+    }
+    return trap_->GetTrapValue(sig);
   }
   Node* table = FunctionTable(0);
 
@@ -2062,8 +2162,7 @@
       *effect_, *control_);
 
   args[0] = load_code;
-  wasm::FunctionSig* sig = module_->GetSignature(index);
-  return BuildWasmCall(sig, args, position);
+  return BuildWasmCall(sig, args, rets, position);
 }
 
 Node* WasmGraphBuilder::BuildI32Rol(Node* left, Node* right) {
@@ -2197,11 +2296,11 @@
     case wasm::kAstI32:
       return BuildChangeInt32ToTagged(node);
     case wasm::kAstI64:
-      DCHECK(module_ && !module_->instance->context.is_null());
-      // Throw a TypeError.
+      // Throw a TypeError. The native context is good enough here because we
+      // only throw a TypeError.
       return BuildCallToRuntime(Runtime::kWasmThrowTypeError, jsgraph(),
-                                module_->instance->context, nullptr, 0, effect_,
-                                *control_);
+                                jsgraph()->isolate()->native_context(), nullptr,
+                                0, effect_, *control_);
     case wasm::kAstF32:
       node = graph()->NewNode(jsgraph()->machine()->ChangeFloat32ToFloat64(),
                               node);
@@ -2359,15 +2458,11 @@
       break;
     }
     case wasm::kAstI64:
-      // TODO(titzer): JS->i64 has no good solution right now. Using 32 bits.
-      num = graph()->NewNode(jsgraph()->machine()->TruncateFloat64ToWord32(),
-                             num);
-      if (jsgraph()->machine()->Is64()) {
-        // We cannot change an int32 to an int64 on a 32 bit platform. Instead
-        // we will split the parameter node later.
-        num = graph()->NewNode(jsgraph()->machine()->ChangeInt32ToInt64(), num);
-      }
-      break;
+      // Throw a TypeError. The native context is good enough here because we
+      // only throw a TypeError.
+      return BuildCallToRuntime(Runtime::kWasmThrowTypeError, jsgraph(),
+                                jsgraph()->isolate()->native_context(), nullptr,
+                                0, effect_, *control_);
     case wasm::kAstF32:
       num = graph()->NewNode(jsgraph()->machine()->TruncateFloat64ToFloat32(),
                              num);
@@ -2528,6 +2623,23 @@
   MergeControlToEnd(jsgraph(), ret);
 }
 
+int WasmGraphBuilder::AddParameterNodes(Node** args, int pos, int param_count,
+                                        wasm::FunctionSig* sig) {
+  // Convert WASM numbers to JS values.
+  int param_index = 0;
+  for (int i = 0; i < param_count; ++i) {
+    Node* param = graph()->NewNode(
+        jsgraph()->common()->Parameter(param_index++), graph()->start());
+    args[pos++] = ToJS(param, sig->GetParam(i));
+    if (jsgraph()->machine()->Is32() && sig->GetParam(i) == wasm::kAstI64) {
+      // On 32 bit platforms we have to skip the high word of int64
+      // parameters.
+      param_index++;
+    }
+  }
+  return pos;
+}
+
 void WasmGraphBuilder::BuildWasmToJSWrapper(Handle<JSReceiver> target,
                                             wasm::FunctionSig* sig) {
   DCHECK(target->IsCallable());
@@ -2548,18 +2660,14 @@
   *control_ = start;
   Node** args = Buffer(wasm_count + 7);
 
-  // The default context of the target.
-  Handle<Context> target_context = isolate->native_context();
+  Node* call;
+  bool direct_call = false;
 
-  // Optimization: check if the target is a JSFunction with the right arity so
-  // that we can call it directly.
-  bool call_direct = false;
-  int pos = 0;
   if (target->IsJSFunction()) {
     Handle<JSFunction> function = Handle<JSFunction>::cast(target);
     if (function->shared()->internal_formal_parameter_count() == wasm_count) {
-      call_direct = true;
-
+      direct_call = true;
+      int pos = 0;
       args[pos++] = jsgraph()->Constant(target);  // target callable.
       // Receiver.
       if (is_sloppy(function->shared()->language_mode()) &&
@@ -2574,13 +2682,22 @@
       desc = Linkage::GetJSCallDescriptor(
           graph()->zone(), false, wasm_count + 1, CallDescriptor::kNoFlags);
 
-      // For a direct call we have to use the context of the JSFunction.
-      target_context = handle(function->context());
+      // Convert WASM numbers to JS values.
+      pos = AddParameterNodes(args, pos, wasm_count, sig);
+
+      args[pos++] = jsgraph()->UndefinedConstant();        // new target
+      args[pos++] = jsgraph()->Int32Constant(wasm_count);  // argument count
+      args[pos++] = HeapConstant(handle(function->context()));
+      args[pos++] = *effect_;
+      args[pos++] = *control_;
+
+      call = graph()->NewNode(jsgraph()->common()->Call(desc), pos, args);
     }
   }
 
   // We cannot call the target directly, we have to use the Call builtin.
-  if (!call_direct) {
+  if (!direct_call) {
+    int pos = 0;
     Callable callable = CodeFactory::Call(isolate);
     args[pos++] = jsgraph()->HeapConstant(callable.code());
     args[pos++] = jsgraph()->Constant(target);           // target callable
@@ -2591,31 +2708,22 @@
     desc = Linkage::GetStubCallDescriptor(isolate, graph()->zone(),
                                           callable.descriptor(), wasm_count + 1,
                                           CallDescriptor::kNoFlags);
+
+    // Convert WASM numbers to JS values.
+    pos = AddParameterNodes(args, pos, wasm_count, sig);
+
+    // The native_context is sufficient here, because all kinds of callables
+    // which depend on the context provide their own context. The context here
+    // is only needed if the target is a constructor to throw a TypeError, if
+    // the target is a native function, or if the target is a callable JSObject,
+    // which can only be constructed by the runtime.
+    args[pos++] = HeapConstant(isolate->native_context());
+    args[pos++] = *effect_;
+    args[pos++] = *control_;
+
+    call = graph()->NewNode(jsgraph()->common()->Call(desc), pos, args);
   }
 
-  // Convert WASM numbers to JS values.
-  int param_index = 0;
-  for (int i = 0; i < wasm_count; ++i) {
-    Node* param =
-        graph()->NewNode(jsgraph()->common()->Parameter(param_index++), start);
-    args[pos++] = ToJS(param, sig->GetParam(i));
-    if (jsgraph()->machine()->Is32() && sig->GetParam(i) == wasm::kAstI64) {
-      // On 32 bit platforms we have to skip the high word of int64 parameters.
-      param_index++;
-    }
-  }
-
-  if (call_direct) {
-    args[pos++] = jsgraph()->UndefinedConstant();  // new target
-    args[pos++] = jsgraph()->Int32Constant(wasm_count);  // argument count
-  }
-
-  args[pos++] = HeapConstant(target_context);
-  args[pos++] = *effect_;
-  args[pos++] = *control_;
-
-  Node* call = graph()->NewNode(jsgraph()->common()->Call(desc), pos, args);
-
   // Convert the return value back.
   Node* ret;
   Node* val =
@@ -2650,6 +2758,30 @@
   }
 }
 
+Node* WasmGraphBuilder::CurrentMemoryPages() {
+  Runtime::FunctionId function_id = Runtime::kWasmMemorySize;
+  const Runtime::Function* function = Runtime::FunctionForId(function_id);
+  CallDescriptor* desc = Linkage::GetRuntimeCallDescriptor(
+      jsgraph()->zone(), function_id, function->nargs, Operator::kNoThrow,
+      CallDescriptor::kNoFlags);
+  wasm::ModuleEnv* module = module_;
+  Node* inputs[] = {
+      jsgraph()->CEntryStubConstant(function->result_size),  // C entry
+      jsgraph()->ExternalConstant(
+          ExternalReference(function_id, jsgraph()->isolate())),  // ref
+      jsgraph()->Int32Constant(function->nargs),                  // arity
+      jsgraph()->HeapConstant(module->instance->context),         // context
+      *effect_,
+      *control_};
+  Node* call = graph()->NewNode(jsgraph()->common()->Call(desc),
+                                static_cast<int>(arraysize(inputs)), inputs);
+
+  Node* result = BuildChangeSmiToInt32(call);
+
+  *effect_ = call;
+  return result;
+}
+
 Node* WasmGraphBuilder::MemSize(uint32_t offset) {
   DCHECK(module_ && module_->instance);
   uint32_t size = static_cast<uint32_t>(module_->instance->mem_size);
@@ -2715,19 +2847,34 @@
 
   // Check against the effective size.
   size_t effective_size;
-  if (offset >= size || (static_cast<uint64_t>(offset) + memsize) > size) {
+  if (size == 0) {
     effective_size = 0;
+  } else if (offset >= size ||
+             (static_cast<uint64_t>(offset) + memsize) > size) {
+    // Two checks are needed in the case where the offset is statically
+    // out of bounds: one to check that the offset itself is in bounds, and a
+    // second for offset + index, so that the code can be patched correctly
+    // on relocation.
+    effective_size = size - memsize + 1;
+    Node* cond = graph()->NewNode(jsgraph()->machine()->Uint32LessThan(),
+                                  jsgraph()->IntPtrConstant(offset),
+                                  jsgraph()->RelocatableInt32Constant(
+                                      static_cast<uint32_t>(effective_size),
+                                      RelocInfo::WASM_MEMORY_SIZE_REFERENCE));
+    trap_->AddTrapIfFalse(wasm::kTrapMemOutOfBounds, cond, position);
+    DCHECK(offset >= effective_size);
+    effective_size = offset - effective_size;
   } else {
     effective_size = size - offset - memsize + 1;
-  }
-  CHECK(effective_size <= kMaxUInt32);
+    CHECK(effective_size <= kMaxUInt32);
 
-  Uint32Matcher m(index);
-  if (m.HasValue()) {
-    uint32_t value = m.Value();
-    if (value < effective_size) {
-      // The bounds check will always succeed.
-      return;
+    Uint32Matcher m(index);
+    if (m.HasValue()) {
+      uint32_t value = m.Value();
+      if (value < effective_size) {
+        // The bounds check will always succeed.
+        return;
+      }
     }
   }
 
@@ -2746,15 +2893,26 @@
   Node* load;
 
   // WASM semantics throw on OOB. Introduce explicit bounds check.
-  BoundsCheckMem(memtype, index, offset, position);
+  if (!FLAG_wasm_trap_handler) {
+    BoundsCheckMem(memtype, index, offset, position);
+  }
   bool aligned = static_cast<int>(alignment) >=
                  ElementSizeLog2Of(memtype.representation());
 
   if (aligned ||
       jsgraph()->machine()->UnalignedLoadSupported(memtype, alignment)) {
-    load = graph()->NewNode(jsgraph()->machine()->Load(memtype),
-                            MemBuffer(offset), index, *effect_, *control_);
+    if (FLAG_wasm_trap_handler) {
+      Node* context = HeapConstant(module_->instance->context);
+      Node* position_node = jsgraph()->Int32Constant(position);
+      load = graph()->NewNode(jsgraph()->machine()->ProtectedLoad(memtype),
+                              MemBuffer(offset), index, context, position_node,
+                              *effect_, *control_);
+    } else {
+      load = graph()->NewNode(jsgraph()->machine()->Load(memtype),
+                              MemBuffer(offset), index, *effect_, *control_);
+    }
   } else {
+    DCHECK(!FLAG_wasm_trap_handler);
     load = graph()->NewNode(jsgraph()->machine()->UnalignedLoad(memtype),
                             MemBuffer(offset), index, *effect_, *control_);
   }
@@ -2866,15 +3024,31 @@
     source_position_table_->SetSourcePosition(node, pos);
 }
 
+Node* WasmGraphBuilder::DefaultS128Value() {
+  // TODO(gdeepti): Introduce Simd128Constant to common-operator.h and use
+  // instead of creating a SIMD Value.
+  return graph()->NewNode(jsgraph()->machine()->CreateInt32x4(),
+                          Int32Constant(0), Int32Constant(0), Int32Constant(0),
+                          Int32Constant(0));
+}
+
 Node* WasmGraphBuilder::SimdOp(wasm::WasmOpcode opcode,
                                const NodeVector& inputs) {
   switch (opcode) {
-    case wasm::kExprI32x4ExtractLane:
-      return graph()->NewNode(jsgraph()->machine()->Int32x4ExtractLane(),
-                              inputs[0], inputs[1]);
     case wasm::kExprI32x4Splat:
-      return graph()->NewNode(jsgraph()->machine()->Int32x4ExtractLane(),
-                              inputs[0], inputs[0], inputs[0], inputs[0]);
+      return graph()->NewNode(jsgraph()->machine()->CreateInt32x4(), inputs[0],
+                              inputs[0], inputs[0], inputs[0]);
+    default:
+      return graph()->NewNode(UnsupportedOpcode(opcode), nullptr);
+  }
+}
+
+Node* WasmGraphBuilder::SimdExtractLane(wasm::WasmOpcode opcode, uint8_t lane,
+                                        Node* input) {
+  switch (opcode) {
+    case wasm::kExprI32x4ExtractLane:
+      return graph()->NewNode(jsgraph()->machine()->Int32x4ExtractLane(), input,
+                              Int32Constant(lane));
     default:
       return graph()->NewNode(UnsupportedOpcode(opcode), nullptr);
   }
diff --git a/src/compiler/wasm-compiler.h b/src/compiler/wasm-compiler.h
index 487ddcb..c980a87 100644
--- a/src/compiler/wasm-compiler.h
+++ b/src/compiler/wasm-compiler.h
@@ -9,10 +9,11 @@
 
 // Clients of this interface shouldn't depend on lots of compiler internals.
 // Do not include anything from src/compiler here!
+#include "src/compilation-info.h"
 #include "src/compiler.h"
 #include "src/wasm/wasm-opcodes.h"
 #include "src/wasm/wasm-result.h"
-#include "src/zone.h"
+#include "src/zone/zone.h"
 
 namespace v8 {
 namespace internal {
@@ -132,8 +133,12 @@
               wasm::WasmCodePosition position = wasm::kNoCodePosition);
   Node* Unop(wasm::WasmOpcode opcode, Node* input,
              wasm::WasmCodePosition position = wasm::kNoCodePosition);
+  Node* GrowMemory(Node* input);
+  Node* Throw(Node* input);
+  Node* Catch(Node* input, wasm::WasmCodePosition position);
   unsigned InputCount(Node* node);
   bool IsPhiWithMerge(Node* phi, Node* merge);
+  bool ThrowsException(Node* node, Node** if_success, Node** if_exception);
   void AppendToMerge(Node* merge, Node* from);
   void AppendToPhi(Node* phi, Node* from);
 
@@ -150,12 +155,11 @@
   Node* ReturnVoid();
   Node* Unreachable(wasm::WasmCodePosition position);
 
-  Node* CallDirect(uint32_t index, Node** args,
+  Node* CallDirect(uint32_t index, Node** args, Node*** rets,
                    wasm::WasmCodePosition position);
-  Node* CallImport(uint32_t index, Node** args,
-                   wasm::WasmCodePosition position);
-  Node* CallIndirect(uint32_t index, Node** args,
+  Node* CallIndirect(uint32_t index, Node** args, Node*** rets,
                      wasm::WasmCodePosition position);
+
   void BuildJSToWasmWrapper(Handle<Code> wasm_code, wasm::FunctionSig* sig);
   void BuildWasmToJSWrapper(Handle<JSReceiver> target, wasm::FunctionSig* sig);
 
@@ -167,7 +171,7 @@
   //-----------------------------------------------------------------------
   // Operations that concern the linear memory.
   //-----------------------------------------------------------------------
-  Node* MemSize(uint32_t offset);
+  Node* CurrentMemoryPages();
   Node* GetGlobal(uint32_t index);
   Node* SetGlobal(uint32_t index, Node* val);
   Node* LoadMem(wasm::LocalType type, MachineType memtype, Node* index,
@@ -194,7 +198,10 @@
 
   void SetSourcePosition(Node* node, wasm::WasmCodePosition position);
 
+  Node* DefaultS128Value();
+
   Node* SimdOp(wasm::WasmOpcode opcode, const NodeVector& inputs);
+  Node* SimdExtractLane(wasm::WasmOpcode opcode, uint8_t lane, Node* input);
 
  private:
   static const int kDefaultBufferSize = 16;
@@ -223,6 +230,7 @@
   Graph* graph();
 
   Node* String(const char* string);
+  Node* MemSize(uint32_t offset);
   Node* MemBuffer(uint32_t offset);
   void BoundsCheckMem(MachineType memtype, Node* index, uint32_t offset,
                       wasm::WasmCodePosition position);
@@ -234,7 +242,7 @@
   Node* MaskShiftCount64(Node* node);
 
   Node* BuildCCall(MachineSignature* sig, Node** args);
-  Node* BuildWasmCall(wasm::FunctionSig* sig, Node** args,
+  Node* BuildWasmCall(wasm::FunctionSig* sig, Node** args, Node*** rets,
                       wasm::WasmCodePosition position);
 
   Node* BuildF32CopySign(Node* left, Node* right);
@@ -301,6 +309,7 @@
 
   Node* BuildJavaScriptToNumber(Node* node, Node* context, Node* effect,
                                 Node* control);
+
   Node* BuildChangeInt32ToTagged(Node* value);
   Node* BuildChangeFloat64ToTagged(Node* value);
   Node* BuildChangeTaggedToFloat64(Node* value);
@@ -315,7 +324,6 @@
   Node* BuildAllocateHeapNumberWithValue(Node* value, Node* control);
   Node* BuildLoadHeapNumberValue(Node* value, Node* control);
   Node* BuildHeapNumberValueIndexConstant();
-  Node* BuildGrowMemory(Node* input);
 
   // Asm.js specific functionality.
   Node* BuildI32AsmjsSConvertF32(Node* input);
@@ -334,6 +342,9 @@
     if (buf != buffer) memcpy(buf, buffer, old_count * sizeof(Node*));
     return buf;
   }
+
+  int AddParameterNodes(Node** args, int pos, int param_count,
+                        wasm::FunctionSig* sig);
 };
 }  // namespace compiler
 }  // namespace internal
diff --git a/src/compiler/wasm-linkage.cc b/src/compiler/wasm-linkage.cc
index c50f643..574db1c 100644
--- a/src/compiler/wasm-linkage.cc
+++ b/src/compiler/wasm-linkage.cc
@@ -11,7 +11,7 @@
 
 #include "src/compiler/linkage.h"
 
-#include "src/zone.h"
+#include "src/zone/zone.h"
 
 namespace v8 {
 namespace internal {
@@ -131,7 +131,7 @@
 // == s390x ==================================================================
 // ===========================================================================
 #define GP_PARAM_REGISTERS r2, r3, r4, r5, r6
-#define GP_RETURN_REGISTERS r2
+#define GP_RETURN_REGISTERS r2, r3
 #define FP_PARAM_REGISTERS d0, d2, d4, d6
 #define FP_RETURN_REGISTERS d0, d2, d4, d6
 
diff --git a/src/compiler/x64/code-generator-x64.cc b/src/compiler/x64/code-generator-x64.cc
index 49a097b..4d63e9a 100644
--- a/src/compiler/x64/code-generator-x64.cc
+++ b/src/compiler/x64/code-generator-x64.cc
@@ -4,11 +4,12 @@
 
 #include "src/compiler/code-generator.h"
 
-#include "src/ast/scopes.h"
+#include "src/compilation-info.h"
 #include "src/compiler/code-generator-impl.h"
 #include "src/compiler/gap-resolver.h"
 #include "src/compiler/node-matchers.h"
 #include "src/compiler/osr.h"
+#include "src/wasm/wasm-module.h"
 #include "src/x64/assembler-x64.h"
 #include "src/x64/macro-assembler-x64.h"
 
@@ -132,6 +133,11 @@
         int32_t disp = InputInt32(NextOffset(offset));
         return Operand(index, scale, disp);
       }
+      case kMode_Root: {
+        Register base = kRootRegister;
+        int32_t disp = InputInt32(NextOffset(offset));
+        return Operand(base, disp);
+      }
       case kMode_None:
         UNREACHABLE();
         return Operand(no_reg, 0);
@@ -260,6 +266,40 @@
   RecordWriteMode const mode_;
 };
 
+class WasmOutOfLineTrap final : public OutOfLineCode {
+ public:
+  WasmOutOfLineTrap(CodeGenerator* gen, Address pc, bool frame_elided,
+                    Register context, int32_t position)
+      : OutOfLineCode(gen),
+        pc_(pc),
+        frame_elided_(frame_elided),
+        context_(context),
+        position_(position) {}
+
+  void Generate() final {
+    // TODO(eholk): record pc_ and the current pc in a table so that
+    // the signal handler can find it.
+    USE(pc_);
+
+    if (frame_elided_) {
+      __ EnterFrame(StackFrame::WASM);
+    }
+
+    wasm::TrapReason trap_id = wasm::kTrapMemOutOfBounds;
+    int trap_reason = wasm::WasmOpcodes::TrapReasonToMessageId(trap_id);
+    __ Push(Smi::FromInt(trap_reason));
+    __ Push(Smi::FromInt(position_));
+    __ Move(rsi, context_);
+    __ CallRuntime(Runtime::kThrowWasmError);
+  }
+
+ private:
+  Address pc_;
+  bool frame_elided_;
+  Register context_;
+  int32_t position_;
+};
+
 }  // namespace
 
 
@@ -866,9 +906,6 @@
     case kArchDebugBreak:
       __ int3();
       break;
-    case kArchImpossible:
-      __ Abort(kConversionFromImpossibleValue);
-      break;
     case kArchNop:
     case kArchThrowTerminator:
       // don't emit code for nops.
@@ -878,8 +915,8 @@
           BuildTranslation(instr, -1, 0, OutputFrameStateCombine::Ignore());
       Deoptimizer::BailoutType bailout_type =
           Deoptimizer::BailoutType(MiscField::decode(instr->opcode()));
-      CodeGenResult result =
-          AssembleDeoptimizerCall(deopt_state_id, bailout_type);
+      CodeGenResult result = AssembleDeoptimizerCall(
+          deopt_state_id, bailout_type, current_source_position_);
       if (result != kSuccess) return result;
       break;
     }
@@ -1422,7 +1459,7 @@
       break;
     }
     case kSSEFloat64Sqrt:
-      ASSEMBLE_SSE_UNOP(sqrtsd);
+      ASSEMBLE_SSE_UNOP(Sqrtsd);
       break;
     case kSSEFloat64Round: {
       CpuFeatureScope sse_scope(masm(), SSE4_1);
@@ -1852,6 +1889,7 @@
       break;
     }
     case kX64Movl:
+    case kX64TrapMovl:
       if (instr->HasOutput()) {
         if (instr->addressing_mode() == kMode_None) {
           if (instr->InputAt(0)->IsRegister()) {
@@ -1860,7 +1898,14 @@
             __ movl(i.OutputRegister(), i.InputOperand(0));
           }
         } else {
+          Address pc = __ pc();
           __ movl(i.OutputRegister(), i.MemoryOperand());
+
+          if (arch_opcode == kX64TrapMovl) {
+            bool frame_elided = !frame_access_state()->has_frame();
+            new (zone()) WasmOutOfLineTrap(this, pc, frame_elided,
+                                           i.InputRegister(2), i.InputInt32(3));
+          }
         }
         __ AssertZeroExtended(i.OutputRegister());
       } else {
@@ -2032,6 +2077,18 @@
       __ xchgl(i.InputRegister(index), operand);
       break;
     }
+    case kX64Int32x4Create: {
+      CpuFeatureScope sse_scope(masm(), SSE4_1);
+      XMMRegister dst = i.OutputSimd128Register();
+      __ Movd(dst, i.InputRegister(0));
+      __ shufps(dst, dst, 0x0);
+      break;
+    }
+    case kX64Int32x4ExtractLane: {
+      CpuFeatureScope sse_scope(masm(), SSE4_1);
+      __ Pextrd(i.OutputRegister(), i.InputSimd128Register(0), i.InputInt8(1));
+      break;
+    }
     case kCheckedLoadInt8:
       ASSEMBLE_CHECKED_LOAD_INTEGER(movsxbl);
       break;
@@ -2252,13 +2309,14 @@
 }
 
 CodeGenerator::CodeGenResult CodeGenerator::AssembleDeoptimizerCall(
-    int deoptimization_id, Deoptimizer::BailoutType bailout_type) {
+    int deoptimization_id, Deoptimizer::BailoutType bailout_type,
+    SourcePosition pos) {
   Address deopt_entry = Deoptimizer::GetDeoptimizationEntry(
       isolate(), deoptimization_id, bailout_type);
   if (deopt_entry == nullptr) return kTooManyDeoptimizationBailouts;
   DeoptimizeReason deoptimization_reason =
       GetDeoptimizationReason(deoptimization_id);
-  __ RecordDeoptReason(deoptimization_reason, 0, deoptimization_id);
+  __ RecordDeoptReason(deoptimization_reason, pos.raw(), deoptimization_id);
   __ call(deopt_entry, RelocInfo::RUNTIME_ENTRY);
   return kSuccess;
 }
@@ -2449,7 +2507,11 @@
             if (value == 0) {
               __ xorl(dst, dst);
             } else {
-              __ movl(dst, Immediate(value));
+              if (src.rmode() == RelocInfo::WASM_MEMORY_SIZE_REFERENCE) {
+                __ movl(dst, Immediate(value, src.rmode()));
+              } else {
+                __ movl(dst, Immediate(value));
+              }
             }
           }
           break;
diff --git a/src/compiler/x64/instruction-codes-x64.h b/src/compiler/x64/instruction-codes-x64.h
index 7ab1097..35acec0 100644
--- a/src/compiler/x64/instruction-codes-x64.h
+++ b/src/compiler/x64/instruction-codes-x64.h
@@ -128,6 +128,7 @@
   V(X64Movzxwq)                    \
   V(X64Movw)                       \
   V(X64Movl)                       \
+  V(X64TrapMovl)                   \
   V(X64Movsxlq)                    \
   V(X64Movq)                       \
   V(X64Movsd)                      \
@@ -145,7 +146,9 @@
   V(X64StackCheck)                 \
   V(X64Xchgb)                      \
   V(X64Xchgw)                      \
-  V(X64Xchgl)
+  V(X64Xchgl)                      \
+  V(X64Int32x4Create)              \
+  V(X64Int32x4ExtractLane)
 
 // Addressing modes represent the "shape" of inputs to an instruction.
 // Many instructions support multiple addressing modes. Addressing modes
@@ -177,7 +180,8 @@
   V(M1I)  /* [      %r2*1 + K] */      \
   V(M2I)  /* [      %r2*2 + K] */      \
   V(M4I)  /* [      %r2*4 + K] */      \
-  V(M8I)  /* [      %r2*8 + K] */
+  V(M8I)  /* [      %r2*8 + K] */      \
+  V(Root) /* [%root       + K] */
 
 }  // namespace compiler
 }  // namespace internal
diff --git a/src/compiler/x64/instruction-scheduler-x64.cc b/src/compiler/x64/instruction-scheduler-x64.cc
index fb4b749..4208d8a 100644
--- a/src/compiler/x64/instruction-scheduler-x64.cc
+++ b/src/compiler/x64/instruction-scheduler-x64.cc
@@ -36,10 +36,6 @@
     case kX64Imul32:
     case kX64ImulHigh32:
     case kX64UmulHigh32:
-    case kX64Idiv:
-    case kX64Idiv32:
-    case kX64Udiv:
-    case kX64Udiv32:
     case kX64Not:
     case kX64Not32:
     case kX64Neg:
@@ -127,10 +123,20 @@
     case kX64Lea:
     case kX64Dec32:
     case kX64Inc32:
+    case kX64Int32x4Create:
+    case kX64Int32x4ExtractLane:
       return (instr->addressing_mode() == kMode_None)
           ? kNoOpcodeFlags
           : kIsLoadOperation | kHasSideEffect;
 
+    case kX64Idiv:
+    case kX64Idiv32:
+    case kX64Udiv:
+    case kX64Udiv32:
+      return (instr->addressing_mode() == kMode_None)
+                 ? kMayNeedDeoptCheck
+                 : kMayNeedDeoptCheck | kIsLoadOperation | kHasSideEffect;
+
     case kX64Movsxbl:
     case kX64Movzxbl:
     case kX64Movsxbq:
@@ -149,6 +155,7 @@
       return kHasSideEffect;
 
     case kX64Movl:
+    case kX64TrapMovl:
       if (instr->HasOutput()) {
         DCHECK(instr->InputCount() >= 1);
         return instr->InputAt(0)->IsRegister() ? kNoOpcodeFlags
diff --git a/src/compiler/x64/instruction-selector-x64.cc b/src/compiler/x64/instruction-selector-x64.cc
index 798d438..9a7657e 100644
--- a/src/compiler/x64/instruction-selector-x64.cc
+++ b/src/compiler/x64/instruction-selector-x64.cc
@@ -60,8 +60,7 @@
     switch (opcode) {
       case kX64Cmp:
       case kX64Test:
-        return rep == MachineRepresentation::kWord64 ||
-               rep == MachineRepresentation::kTagged;
+        return rep == MachineRepresentation::kWord64 || IsAnyTagged(rep);
       case kX64Cmp32:
       case kX64Test32:
         return rep == MachineRepresentation::kWord32;
@@ -137,6 +136,22 @@
   AddressingMode GetEffectiveAddressMemoryOperand(Node* operand,
                                                   InstructionOperand inputs[],
                                                   size_t* input_count) {
+    if (selector()->CanAddressRelativeToRootsRegister()) {
+      LoadMatcher<ExternalReferenceMatcher> m(operand);
+      if (m.index().HasValue() && m.object().HasValue()) {
+        Address const kRootsRegisterValue =
+            kRootRegisterBias +
+            reinterpret_cast<Address>(
+                selector()->isolate()->heap()->roots_array_start());
+        ptrdiff_t const delta =
+            m.index().Value() +
+            (m.object().Value().address() - kRootsRegisterValue);
+        if (is_int32(delta)) {
+          inputs[(*input_count)++] = TempImmediate(static_cast<int32_t>(delta));
+          return kMode_Root;
+        }
+      }
+    }
     BaseWithIndexAndDisplacement64Matcher m(operand, AddressOption::kAllowAll);
     DCHECK(m.matches());
     if ((m.displacement() == nullptr || CanBeImmediate(m.displacement()))) {
@@ -155,11 +170,9 @@
   }
 };
 
+namespace {
 
-void InstructionSelector::VisitLoad(Node* node) {
-  LoadRepresentation load_rep = LoadRepresentationOf(node->op());
-  X64OperandGenerator g(this);
-
+ArchOpcode GetLoadOpcode(LoadRepresentation load_rep) {
   ArchOpcode opcode = kArchNop;
   switch (load_rep.representation()) {
     case MachineRepresentation::kFloat32:
@@ -187,9 +200,18 @@
     case MachineRepresentation::kSimd128:  // Fall through.
     case MachineRepresentation::kNone:
       UNREACHABLE();
-      return;
+      break;
   }
+  return opcode;
+}
 
+}  // namespace
+
+void InstructionSelector::VisitLoad(Node* node) {
+  LoadRepresentation load_rep = LoadRepresentationOf(node->op());
+  X64OperandGenerator g(this);
+
+  ArchOpcode opcode = GetLoadOpcode(load_rep);
   InstructionOperand outputs[1];
   outputs[0] = g.DefineAsRegister(node);
   InstructionOperand inputs[3];
@@ -200,6 +222,24 @@
   Emit(code, 1, outputs, input_count, inputs);
 }
 
+void InstructionSelector::VisitProtectedLoad(Node* node) {
+  LoadRepresentation load_rep = LoadRepresentationOf(node->op());
+  X64OperandGenerator g(this);
+
+  ArchOpcode opcode = GetLoadOpcode(load_rep);
+  InstructionOperand outputs[1];
+  outputs[0] = g.DefineAsRegister(node);
+  InstructionOperand inputs[4];
+  size_t input_count = 0;
+  AddressingMode mode =
+      g.GetEffectiveAddressMemoryOperand(node, inputs, &input_count);
+  // Add the context parameter as an input.
+  inputs[input_count++] = g.UseUniqueRegister(node->InputAt(2));
+  // Add the source position as an input
+  inputs[input_count++] = g.UseImmediate(node->InputAt(3));
+  InstructionCode code = opcode | AddressingModeField::encode(mode);
+  Emit(code, 1, outputs, input_count, inputs);
+}
 
 void InstructionSelector::VisitStore(Node* node) {
   X64OperandGenerator g(this);
@@ -212,7 +252,7 @@
   MachineRepresentation rep = store_rep.representation();
 
   if (write_barrier_kind != kNoWriteBarrier) {
-    DCHECK_EQ(MachineRepresentation::kTagged, rep);
+    DCHECK(CanBeTaggedPointer(rep));
     AddressingMode addressing_mode;
     InstructionOperand inputs[3];
     size_t input_count = 0;
@@ -701,6 +741,7 @@
           case kMode_M2I:
           case kMode_M4I:
           case kMode_M8I:
+          case kMode_Root:
             UNREACHABLE();
         }
         inputs[input_count++] = ImmediateOperand(ImmediateOperand::INLINE, 4);
@@ -1170,11 +1211,10 @@
   }
 }
 
+namespace {
 
-void InstructionSelector::VisitChangeUint32ToUint64(Node* node) {
-  X64OperandGenerator g(this);
-  Node* value = node->InputAt(0);
-  switch (value->opcode()) {
+bool ZeroExtendsWord32ToWord64(Node* node) {
+  switch (node->opcode()) {
     case IrOpcode::kWord32And:
     case IrOpcode::kWord32Or:
     case IrOpcode::kWord32Xor:
@@ -1195,14 +1235,35 @@
     case IrOpcode::kUint32LessThan:
     case IrOpcode::kUint32LessThanOrEqual:
     case IrOpcode::kUint32Mod:
-    case IrOpcode::kUint32MulHigh: {
+    case IrOpcode::kUint32MulHigh:
       // These 32-bit operations implicitly zero-extend to 64-bit on x64, so the
       // zero-extension is a no-op.
-      Emit(kArchNop, g.DefineSameAsFirst(node), g.Use(value));
-      return;
+      return true;
+    case IrOpcode::kProjection: {
+      Node* const value = node->InputAt(0);
+      switch (value->opcode()) {
+        case IrOpcode::kInt32AddWithOverflow:
+        case IrOpcode::kInt32SubWithOverflow:
+        case IrOpcode::kInt32MulWithOverflow:
+          return true;
+        default:
+          return false;
+      }
     }
     default:
-      break;
+      return false;
+  }
+}
+
+}  // namespace
+
+void InstructionSelector::VisitChangeUint32ToUint64(Node* node) {
+  X64OperandGenerator g(this);
+  Node* value = node->InputAt(0);
+  if (ZeroExtendsWord32ToWord64(value)) {
+    // These 32-bit operations implicitly zero-extend to 64-bit on x64, so the
+    // zero-extension is a no-op.
+    return EmitIdentity(node);
   }
   Emit(kX64Movl, g.DefineAsRegister(node), g.Use(value));
 }
@@ -1276,8 +1337,7 @@
         Int64BinopMatcher m(value);
         if (m.right().Is(32)) {
           if (TryMatchLoadWord64AndShiftRight(this, value, kX64Movl)) {
-            Emit(kArchNop, g.DefineSameAsFirst(node), g.Use(value));
-            return;
+            return EmitIdentity(node);
           }
           Emit(kX64Shr, g.DefineSameAsFirst(node),
                g.UseRegister(m.left().node()), g.TempImmediate(32));
@@ -2213,6 +2273,17 @@
   Emit(code, 0, static_cast<InstructionOperand*>(nullptr), input_count, inputs);
 }
 
+void InstructionSelector::VisitCreateInt32x4(Node* node) {
+  X64OperandGenerator g(this);
+  Emit(kX64Int32x4Create, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
+}
+
+void InstructionSelector::VisitInt32x4ExtractLane(Node* node) {
+  X64OperandGenerator g(this);
+  Emit(kX64Int32x4ExtractLane, g.DefineAsRegister(node),
+       g.UseRegister(node->InputAt(0)), g.UseImmediate(node->InputAt(1)));
+}
+
 // static
 MachineOperatorBuilder::Flags
 InstructionSelector::SupportedMachineOperatorFlags() {
diff --git a/src/compiler/x87/code-generator-x87.cc b/src/compiler/x87/code-generator-x87.cc
index 29e2dd7..f5e6634 100644
--- a/src/compiler/x87/code-generator-x87.cc
+++ b/src/compiler/x87/code-generator-x87.cc
@@ -4,7 +4,7 @@
 
 #include "src/compiler/code-generator.h"
 
-#include "src/ast/scopes.h"
+#include "src/compilation-info.h"
 #include "src/compiler/code-generator-impl.h"
 #include "src/compiler/gap-resolver.h"
 #include "src/compiler/node-matchers.h"
@@ -715,9 +715,6 @@
     case kArchDebugBreak:
       __ int3();
       break;
-    case kArchImpossible:
-      __ Abort(kConversionFromImpossibleValue);
-      break;
     case kArchNop:
     case kArchThrowTerminator:
       // don't emit code for nops.
@@ -746,8 +743,8 @@
 
       Deoptimizer::BailoutType bailout_type =
           Deoptimizer::BailoutType(MiscField::decode(instr->opcode()));
-      CodeGenResult result =
-          AssembleDeoptimizerCall(deopt_state_id, bailout_type);
+      CodeGenResult result = AssembleDeoptimizerCall(
+          deopt_state_id, bailout_type, current_source_position_);
       if (result != kSuccess) return result;
       break;
     }
@@ -2241,13 +2238,14 @@
 }
 
 CodeGenerator::CodeGenResult CodeGenerator::AssembleDeoptimizerCall(
-    int deoptimization_id, Deoptimizer::BailoutType bailout_type) {
+    int deoptimization_id, Deoptimizer::BailoutType bailout_type,
+    SourcePosition pos) {
   Address deopt_entry = Deoptimizer::GetDeoptimizationEntry(
       isolate(), deoptimization_id, bailout_type);
   if (deopt_entry == nullptr) return kTooManyDeoptimizationBailouts;
   DeoptimizeReason deoptimization_reason =
       GetDeoptimizationReason(deoptimization_id);
-  __ RecordDeoptReason(deoptimization_reason, 0, deoptimization_id);
+  __ RecordDeoptReason(deoptimization_reason, pos.raw(), deoptimization_id);
   __ call(deopt_entry, RelocInfo::RUNTIME_ENTRY);
   return kSuccess;
 }
diff --git a/src/compiler/x87/instruction-selector-x87.cc b/src/compiler/x87/instruction-selector-x87.cc
index 0fe6a4b..757eee9 100644
--- a/src/compiler/x87/instruction-selector-x87.cc
+++ b/src/compiler/x87/instruction-selector-x87.cc
@@ -211,6 +211,10 @@
   Emit(code, 1, outputs, input_count, inputs);
 }
 
+void InstructionSelector::VisitProtectedLoad(Node* node) {
+  // TODO(eholk): Implement protected loads for x87.
+  UNIMPLEMENTED();
+}
 
 void InstructionSelector::VisitStore(Node* node) {
   X87OperandGenerator g(this);
@@ -223,7 +227,7 @@
   MachineRepresentation rep = store_rep.representation();
 
   if (write_barrier_kind != kNoWriteBarrier) {
-    DCHECK_EQ(MachineRepresentation::kTagged, rep);
+    DCHECK(CanBeTaggedPointer(rep));
     AddressingMode addressing_mode;
     InstructionOperand inputs[3];
     size_t input_count = 0;
diff --git a/src/compiler/zone-pool.cc b/src/compiler/zone-pool.cc
index 13fec35..7681eeb 100644
--- a/src/compiler/zone-pool.cc
+++ b/src/compiler/zone-pool.cc
@@ -64,7 +64,7 @@
   }
 }
 
-ZonePool::ZonePool(base::AccountingAllocator* allocator)
+ZonePool::ZonePool(AccountingAllocator* allocator)
     : max_allocated_bytes_(0), total_deleted_bytes_(0), allocator_(allocator) {}
 
 ZonePool::~ZonePool() {
diff --git a/src/compiler/zone-pool.h b/src/compiler/zone-pool.h
index 44a649f..7a3fe75 100644
--- a/src/compiler/zone-pool.h
+++ b/src/compiler/zone-pool.h
@@ -9,7 +9,7 @@
 #include <set>
 #include <vector>
 
-#include "src/zone.h"
+#include "src/zone/zone.h"
 
 namespace v8 {
 namespace internal {
@@ -61,7 +61,7 @@
     DISALLOW_COPY_AND_ASSIGN(StatsScope);
   };
 
-  explicit ZonePool(base::AccountingAllocator* allocator);
+  explicit ZonePool(AccountingAllocator* allocator);
   ~ZonePool();
 
   size_t GetMaxAllocatedBytes();
@@ -82,7 +82,7 @@
   Stats stats_;
   size_t max_allocated_bytes_;
   size_t total_deleted_bytes_;
-  base::AccountingAllocator* allocator_;
+  AccountingAllocator* allocator_;
 
   DISALLOW_COPY_AND_ASSIGN(ZonePool);
 };
diff --git a/src/contexts.cc b/src/contexts.cc
index b3cf255..4fb3c83 100644
--- a/src/contexts.cc
+++ b/src/contexts.cc
@@ -57,15 +57,16 @@
 
 
 bool Context::is_declaration_context() {
-  if (IsFunctionContext() || IsNativeContext() || IsScriptContext()) {
+  if (IsFunctionContext() || IsNativeContext() || IsScriptContext() ||
+      IsModuleContext()) {
     return true;
   }
   if (!IsBlockContext()) return false;
   Object* ext = extension();
   // If we have the special extension, we immediately know it must be a
   // declaration scope. That's just a small performance shortcut.
-  return ext->IsSloppyBlockWithEvalContextExtension()
-      || ScopeInfo::cast(ext)->is_declaration_scope();
+  return ext->IsContextExtension() ||
+         ScopeInfo::cast(ext)->is_declaration_scope();
 }
 
 
@@ -93,36 +94,47 @@
   HeapObject* object = extension();
   if (object->IsTheHole(GetIsolate())) return nullptr;
   if (IsBlockContext()) {
-    if (!object->IsSloppyBlockWithEvalContextExtension()) return nullptr;
-    object = SloppyBlockWithEvalContextExtension::cast(object)->extension();
+    if (!object->IsContextExtension()) return nullptr;
+    object = JSObject::cast(ContextExtension::cast(object)->extension());
   }
   DCHECK(object->IsJSContextExtensionObject() ||
          (IsNativeContext() && object->IsJSGlobalObject()));
   return JSObject::cast(object);
 }
 
-
 JSReceiver* Context::extension_receiver() {
   DCHECK(IsNativeContext() || IsWithContext() ||
          IsFunctionContext() || IsBlockContext());
-  return IsWithContext() ? JSReceiver::cast(extension()) : extension_object();
+  return IsWithContext() ? JSReceiver::cast(
+                               ContextExtension::cast(extension())->extension())
+                         : extension_object();
 }
 
-
 ScopeInfo* Context::scope_info() {
-  DCHECK(IsModuleContext() || IsScriptContext() || IsBlockContext());
+  DCHECK(!IsNativeContext());
+  if (IsFunctionContext() || IsModuleContext()) {
+    return closure()->shared()->scope_info();
+  }
   HeapObject* object = extension();
-  if (object->IsSloppyBlockWithEvalContextExtension()) {
-    DCHECK(IsBlockContext());
-    object = SloppyBlockWithEvalContextExtension::cast(object)->scope_info();
+  if (object->IsContextExtension()) {
+    DCHECK(IsBlockContext() || IsCatchContext() || IsWithContext() ||
+           IsDebugEvaluateContext());
+    object = ContextExtension::cast(object)->scope_info();
   }
   return ScopeInfo::cast(object);
 }
 
+Module* Context::module() {
+  Context* current = this;
+  while (!current->IsModuleContext()) {
+    current = current->previous();
+  }
+  return Module::cast(current->extension());
+}
 
 String* Context::catch_name() {
   DCHECK(IsCatchContext());
-  return String::cast(extension());
+  return String::cast(ContextExtension::cast(extension())->extension());
 }
 
 
@@ -178,13 +190,14 @@
 
 static PropertyAttributes GetAttributesForMode(VariableMode mode) {
   DCHECK(IsDeclaredVariableMode(mode));
-  return IsImmutableVariableMode(mode) ? READ_ONLY : NONE;
+  return mode == CONST ? READ_ONLY : NONE;
 }
 
 Handle<Object> Context::Lookup(Handle<String> name, ContextLookupFlags flags,
                                int* index, PropertyAttributes* attributes,
                                InitializationFlag* init_flag,
                                VariableMode* variable_mode) {
+  DCHECK(!IsModuleContext());
   Isolate* isolate = GetIsolate();
   Handle<Context> context(this, isolate);
 
@@ -248,8 +261,14 @@
           object->IsJSContextExtensionObject()) {
         maybe = JSReceiver::GetOwnPropertyAttributes(object, name);
       } else if (context->IsWithContext()) {
-        // A with context will never bind "this".
-        if (name->Equals(*isolate->factory()->this_string())) {
+        // A with context will never bind "this", but debug-eval may look into
+        // a with context when resolving "this". Other synthetic variables such
+        // as new.target may be resolved as DYNAMIC_LOCAL due to bug v8:5405,
+        // skipping them here serves as a workaround until a more thorough
+        // fix can be applied.
+        // TODO(v8:5405): Replace this check with a DCHECK when resolution
+        // of synthetic variables does not go through this code path.
+        if (ScopeInfo::VariableIsSynthetic(*name)) {
           maybe = Just(ABSENT);
         } else {
           LookupIterator it(object, name, object);
@@ -307,10 +326,11 @@
       }
 
       // Check the slot corresponding to the intermediate context holding
-      // only the function name variable.
-      if (follow_context_chain && context->IsFunctionContext()) {
-        VariableMode mode;
-        int function_index = scope_info->FunctionContextSlotIndex(*name, &mode);
+      // only the function name variable. It's conceptually (and spec-wise)
+      // in an outer scope of the function's declaration scope.
+      if (follow_context_chain && (flags & STOP_AT_DECLARATION_SCOPE) == 0 &&
+          context->IsFunctionContext()) {
+        int function_index = scope_info->FunctionContextSlotIndex(*name);
         if (function_index >= 0) {
           if (FLAG_trace_contexts) {
             PrintF("=> found intermediate function in context slot %d\n",
@@ -318,9 +338,8 @@
           }
           *index = function_index;
           *attributes = READ_ONLY;
-          DCHECK(mode == CONST_LEGACY || mode == CONST);
           *init_flag = kCreatedInitialized;
-          *variable_mode = mode;
+          *variable_mode = CONST;
           return context;
         }
       }
@@ -339,18 +358,21 @@
       }
     } else if (context->IsDebugEvaluateContext()) {
       // Check materialized locals.
-      Object* obj = context->get(EXTENSION_INDEX);
-      if (obj->IsJSReceiver()) {
-        Handle<JSReceiver> extension(JSReceiver::cast(obj));
-        LookupIterator it(extension, name, extension);
-        Maybe<bool> found = JSReceiver::HasProperty(&it);
-        if (found.FromMaybe(false)) {
-          *attributes = NONE;
-          return extension;
+      Object* ext = context->get(EXTENSION_INDEX);
+      if (ext->IsContextExtension()) {
+        Object* obj = ContextExtension::cast(ext)->extension();
+        if (obj->IsJSReceiver()) {
+          Handle<JSReceiver> extension(JSReceiver::cast(obj));
+          LookupIterator it(extension, name, extension);
+          Maybe<bool> found = JSReceiver::HasProperty(&it);
+          if (found.FromMaybe(false)) {
+            *attributes = NONE;
+            return extension;
+          }
         }
       }
       // Check the original context, but do not follow its context chain.
-      obj = context->get(WRAPPED_CONTEXT_INDEX);
+      Object* obj = context->get(WRAPPED_CONTEXT_INDEX);
       if (obj->IsContext()) {
         Handle<Object> result =
             Context::cast(obj)->Lookup(name, DONT_FOLLOW_CHAINS, index,
@@ -387,25 +409,6 @@
 }
 
 
-void Context::InitializeGlobalSlots() {
-  DCHECK(IsScriptContext());
-  DisallowHeapAllocation no_gc;
-
-  ScopeInfo* scope_info = this->scope_info();
-
-  int context_globals = scope_info->ContextGlobalCount();
-  if (context_globals > 0) {
-    PropertyCell* empty_cell = GetHeap()->empty_property_cell();
-
-    int context_locals = scope_info->ContextLocalCount();
-    int index = Context::MIN_CONTEXT_SLOTS + context_locals;
-    for (int i = 0; i < context_globals; i++) {
-      set(index++, empty_cell);
-    }
-  }
-}
-
-
 void Context::AddOptimizedFunction(JSFunction* function) {
   DCHECK(IsNativeContext());
   Isolate* isolate = GetIsolate();
@@ -544,6 +547,17 @@
 
 #undef COMPARE_NAME
 
+#define COMPARE_NAME(index, type, name) \
+  if (strncmp(string, #name, length) == 0) return index;
+
+int Context::IntrinsicIndexForName(const unsigned char* unsigned_string,
+                                   int length) {
+  const char* string = reinterpret_cast<const char*>(unsigned_string);
+  NATIVE_CONTEXT_INTRINSIC_FUNCTIONS(COMPARE_NAME);
+  return kNotFound;
+}
+
+#undef COMPARE_NAME
 
 #ifdef DEBUG
 
diff --git a/src/contexts.h b/src/contexts.h
index d73135f..b927d05 100644
--- a/src/contexts.h
+++ b/src/contexts.h
@@ -36,6 +36,7 @@
 
 #define NATIVE_CONTEXT_INTRINSIC_FUNCTIONS(V)                           \
   V(IS_ARRAYLIKE, JSFunction, is_arraylike)                             \
+  V(GENERATOR_NEXT_INTERNAL, JSFunction, generator_next_internal)       \
   V(GET_TEMPLATE_CALL_SITE_INDEX, JSFunction, get_template_call_site)   \
   V(MAKE_ERROR_INDEX, JSFunction, make_error)                           \
   V(MAKE_RANGE_ERROR_INDEX, JSFunction, make_range_error)               \
@@ -59,44 +60,53 @@
   V(MATH_FLOOR_INDEX, JSFunction, math_floor)                           \
   V(MATH_POW_INDEX, JSFunction, math_pow)
 
-#define NATIVE_CONTEXT_IMPORTED_FIELDS(V)                                   \
-  V(ARRAY_CONCAT_INDEX, JSFunction, array_concat)                           \
-  V(ARRAY_POP_INDEX, JSFunction, array_pop)                                 \
-  V(ARRAY_PUSH_INDEX, JSFunction, array_push)                               \
-  V(ARRAY_SHIFT_INDEX, JSFunction, array_shift)                             \
-  V(ARRAY_SPLICE_INDEX, JSFunction, array_splice)                           \
-  V(ARRAY_SLICE_INDEX, JSFunction, array_slice)                             \
-  V(ARRAY_UNSHIFT_INDEX, JSFunction, array_unshift)                         \
-  V(ARRAY_VALUES_ITERATOR_INDEX, JSFunction, array_values_iterator)         \
-  V(ASYNC_FUNCTION_AWAIT_INDEX, JSFunction, async_function_await)           \
-  V(DERIVED_GET_TRAP_INDEX, JSFunction, derived_get_trap)                   \
-  V(ERROR_FUNCTION_INDEX, JSFunction, error_function)                       \
-  V(ERROR_TO_STRING, JSFunction, error_to_string)                           \
-  V(EVAL_ERROR_FUNCTION_INDEX, JSFunction, eval_error_function)             \
-  V(GLOBAL_EVAL_FUN_INDEX, JSFunction, global_eval_fun)                     \
-  V(MAP_DELETE_METHOD_INDEX, JSFunction, map_delete)                        \
-  V(MAP_GET_METHOD_INDEX, JSFunction, map_get)                              \
-  V(MAP_HAS_METHOD_INDEX, JSFunction, map_has)                              \
-  V(MAP_SET_METHOD_INDEX, JSFunction, map_set)                              \
-  V(OBJECT_VALUE_OF, JSFunction, object_value_of)                           \
-  V(OBJECT_TO_STRING, JSFunction, object_to_string)                         \
-  V(PROMISE_CATCH_INDEX, JSFunction, promise_catch)                         \
-  V(PROMISE_CREATE_INDEX, JSFunction, promise_create)                       \
-  V(PROMISE_FUNCTION_INDEX, JSFunction, promise_function)                   \
-  V(PROMISE_HAS_USER_DEFINED_REJECT_HANDLER_INDEX, JSFunction,              \
-    promise_has_user_defined_reject_handler)                                \
-  V(PROMISE_REJECT_INDEX, JSFunction, promise_reject)                       \
-  V(PROMISE_RESOLVE_INDEX, JSFunction, promise_resolve)                     \
-  V(PROMISE_CREATE_RESOLVED_INDEX, JSFunction, promise_create_resolved)     \
-  V(PROMISE_CREATE_REJECTED_INDEX, JSFunction, promise_create_rejected)     \
-  V(PROMISE_THEN_INDEX, JSFunction, promise_then)                           \
-  V(RANGE_ERROR_FUNCTION_INDEX, JSFunction, range_error_function)           \
-  V(REFERENCE_ERROR_FUNCTION_INDEX, JSFunction, reference_error_function)   \
-  V(SET_ADD_METHOD_INDEX, JSFunction, set_add)                              \
-  V(SET_DELETE_METHOD_INDEX, JSFunction, set_delete)                        \
-  V(SET_HAS_METHOD_INDEX, JSFunction, set_has)                              \
-  V(SYNTAX_ERROR_FUNCTION_INDEX, JSFunction, syntax_error_function)         \
-  V(TYPE_ERROR_FUNCTION_INDEX, JSFunction, type_error_function)             \
+#define NATIVE_CONTEXT_IMPORTED_FIELDS(V)                                 \
+  V(ARRAY_CONCAT_INDEX, JSFunction, array_concat)                         \
+  V(ARRAY_POP_INDEX, JSFunction, array_pop)                               \
+  V(ARRAY_PUSH_INDEX, JSFunction, array_push)                             \
+  V(ARRAY_SHIFT_INDEX, JSFunction, array_shift)                           \
+  V(ARRAY_SPLICE_INDEX, JSFunction, array_splice)                         \
+  V(ARRAY_SLICE_INDEX, JSFunction, array_slice)                           \
+  V(ARRAY_UNSHIFT_INDEX, JSFunction, array_unshift)                       \
+  V(ARRAY_VALUES_ITERATOR_INDEX, JSFunction, array_values_iterator)       \
+  V(ASYNC_FUNCTION_AWAIT_CAUGHT_INDEX, JSFunction,                        \
+    async_function_await_caught)                                          \
+  V(ASYNC_FUNCTION_AWAIT_UNCAUGHT_INDEX, JSFunction,                      \
+    async_function_await_uncaught)                                        \
+  V(ASYNC_FUNCTION_PROMISE_CREATE_INDEX, JSFunction,                      \
+    async_function_promise_create)                                        \
+  V(ASYNC_FUNCTION_PROMISE_RELEASE_INDEX, JSFunction,                     \
+    async_function_promise_release)                                       \
+  V(DERIVED_GET_TRAP_INDEX, JSFunction, derived_get_trap)                 \
+  V(ERROR_FUNCTION_INDEX, JSFunction, error_function)                     \
+  V(ERROR_TO_STRING, JSFunction, error_to_string)                         \
+  V(EVAL_ERROR_FUNCTION_INDEX, JSFunction, eval_error_function)           \
+  V(GLOBAL_EVAL_FUN_INDEX, JSFunction, global_eval_fun)                   \
+  V(MAP_DELETE_METHOD_INDEX, JSFunction, map_delete)                      \
+  V(MAP_GET_METHOD_INDEX, JSFunction, map_get)                            \
+  V(MAP_HAS_METHOD_INDEX, JSFunction, map_has)                            \
+  V(MAP_SET_METHOD_INDEX, JSFunction, map_set)                            \
+  V(FUNCTION_HAS_INSTANCE_INDEX, JSFunction, function_has_instance)       \
+  V(OBJECT_VALUE_OF, JSFunction, object_value_of)                         \
+  V(OBJECT_TO_STRING, JSFunction, object_to_string)                       \
+  V(PROMISE_CATCH_INDEX, JSFunction, promise_catch)                       \
+  V(PROMISE_CREATE_INDEX, JSFunction, promise_create)                     \
+  V(PROMISE_FUNCTION_INDEX, JSFunction, promise_function)                 \
+  V(PROMISE_HAS_USER_DEFINED_REJECT_HANDLER_INDEX, JSFunction,            \
+    promise_has_user_defined_reject_handler)                              \
+  V(PROMISE_REJECT_INDEX, JSFunction, promise_reject)                     \
+  V(PROMISE_RESOLVE_INDEX, JSFunction, promise_resolve)                   \
+  V(PROMISE_THEN_INDEX, JSFunction, promise_then)                         \
+  V(RANGE_ERROR_FUNCTION_INDEX, JSFunction, range_error_function)         \
+  V(REGEXP_LAST_MATCH_INFO_INDEX, JSObject, regexp_last_match_info)       \
+  V(REJECT_PROMISE_NO_DEBUG_EVENT_INDEX, JSFunction,                      \
+    reject_promise_no_debug_event)                                        \
+  V(REFERENCE_ERROR_FUNCTION_INDEX, JSFunction, reference_error_function) \
+  V(SET_ADD_METHOD_INDEX, JSFunction, set_add)                            \
+  V(SET_DELETE_METHOD_INDEX, JSFunction, set_delete)                      \
+  V(SET_HAS_METHOD_INDEX, JSFunction, set_has)                            \
+  V(SYNTAX_ERROR_FUNCTION_INDEX, JSFunction, syntax_error_function)       \
+  V(TYPE_ERROR_FUNCTION_INDEX, JSFunction, type_error_function)           \
   V(URI_ERROR_FUNCTION_INDEX, JSFunction, uri_error_function)
 
 #define NATIVE_CONTEXT_FIELDS(V)                                               \
@@ -145,6 +155,7 @@
   V(GENERATOR_OBJECT_PROTOTYPE_MAP_INDEX, Map, generator_object_prototype_map) \
   V(INITIAL_ARRAY_PROTOTYPE_INDEX, JSObject, initial_array_prototype)          \
   V(INITIAL_GENERATOR_PROTOTYPE_INDEX, JSObject, initial_generator_prototype)  \
+  V(INITIAL_ITERATOR_PROTOTYPE_INDEX, JSObject, initial_iterator_prototype)    \
   V(INITIAL_OBJECT_PROTOTYPE_INDEX, JSObject, initial_object_prototype)        \
   V(INT16_ARRAY_FUN_INDEX, JSFunction, int16_array_fun)                        \
   V(INT16X8_FUNCTION_INDEX, JSFunction, int16x8_function)                      \
@@ -204,7 +215,11 @@
   V(WASM_FUNCTION_MAP_INDEX, Map, wasm_function_map)                           \
   V(WASM_MODULE_CONSTRUCTOR_INDEX, JSFunction, wasm_module_constructor)        \
   V(WASM_INSTANCE_CONSTRUCTOR_INDEX, JSFunction, wasm_instance_constructor)    \
+  V(WASM_TABLE_CONSTRUCTOR_INDEX, JSFunction, wasm_table_constructor)          \
+  V(WASM_MEMORY_CONSTRUCTOR_INDEX, JSFunction, wasm_memory_constructor)        \
   V(WASM_MODULE_SYM_INDEX, Symbol, wasm_module_sym)                            \
+  V(WASM_TABLE_SYM_INDEX, Symbol, wasm_table_sym)                              \
+  V(WASM_MEMORY_SYM_INDEX, Symbol, wasm_memory_sym)                            \
   V(WASM_INSTANCE_SYM_INDEX, Symbol, wasm_instance_sym)                        \
   V(SLOPPY_ASYNC_FUNCTION_MAP_INDEX, Map, sloppy_async_function_map)           \
   V(SLOPPY_GENERATOR_FUNCTION_MAP_INDEX, Map, sloppy_generator_function_map)   \
@@ -227,6 +242,7 @@
   V(UINT8_ARRAY_FUN_INDEX, JSFunction, uint8_array_fun)                        \
   V(UINT8_CLAMPED_ARRAY_FUN_INDEX, JSFunction, uint8_clamped_array_fun)        \
   V(UINT8X16_FUNCTION_INDEX, JSFunction, uint8x16_function)                    \
+  V(CURRENT_MODULE_INDEX, Module, current_module)                              \
   NATIVE_CONTEXT_INTRINSIC_FUNCTIONS(V)                                        \
   NATIVE_CONTEXT_IMPORTED_FIELDS(V)
 
@@ -298,18 +314,29 @@
 //
 // [ previous  ]  A pointer to the previous context.
 //
-// [ extension ]  A pointer to an extension JSObject, or "the hole". Used to
-//                implement 'with' statements and dynamic declarations
-//                (through 'eval'). The object in a 'with' statement is
-//                stored in the extension slot of a 'with' context.
-//                Dynamically declared variables/functions are also added
-//                to lazily allocated extension object. Context::Lookup
-//                searches the extension object for properties.
-//                For script and block contexts, contains the respective
-//                ScopeInfo. For block contexts representing sloppy declaration
-//                block scopes, it may also be a struct being a
-//                SloppyBlockWithEvalContextExtension, pairing the ScopeInfo
-//                with an extension object.
+// [ extension ]  Additional data.
+//
+//                For script contexts, it contains the respective ScopeInfo.
+//
+//                For catch contexts, it contains a ContextExtension object
+//                consisting of the ScopeInfo and the name of the catch
+//                variable.
+//
+//                For module contexts, it contains the module object.
+//
+//                For block contexts, it contains either the respective
+//                ScopeInfo or a ContextExtension object consisting of the
+//                ScopeInfo and an "extension object" (see below).
+//
+//                For with contexts, it contains a ContextExtension object
+//                consisting of the ScopeInfo and an "extension object".
+//
+//                An "extension object" is used to dynamically extend a context
+//                with additional variables, namely in the implementation of the
+//                'with' construct and the 'eval' construct.  For instance,
+//                Context::Lookup also searches the extension object for
+//                properties.  (Storing the extension object is the original
+//                purpose of this context slot, hence the name.)
 //
 // [ native_context ]  A pointer to the native context.
 //
@@ -387,6 +414,10 @@
   ScopeInfo* scope_info();
   String* catch_name();
 
+  // Find the module context (assuming there is one) and return the associated
+  // module object.
+  Module* module();
+
   // Get the context where var declarations will be hoisted to, which
   // may be the context itself.
   Context* declaration_context();
@@ -400,7 +431,7 @@
   void set_global_proxy(JSObject* global);
 
   // Get the JSGlobalObject object.
-  JSGlobalObject* global_object();
+  V8_EXPORT_PRIVATE JSGlobalObject* global_object();
 
   // Get the script context by traversing the context chain.
   Context* script_context();
@@ -423,9 +454,6 @@
 
   inline bool HasSameSecurityTokenAs(Context* that);
 
-  // Initializes global variable bindings in given script context.
-  void InitializeGlobalSlots();
-
   // A native context holds a list of all functions with optimized code.
   void AddOptimizedFunction(JSFunction* function);
   void RemoveOptimizedFunction(JSFunction* function);
@@ -444,6 +472,7 @@
 
   static int ImportedFieldIndexForName(Handle<String> name);
   static int IntrinsicIndexForName(Handle<String> name);
+  static int IntrinsicIndexForName(const unsigned char* name, int length);
 
 #define NATIVE_CONTEXT_FIELD_ACCESSORS(index, type, name) \
   inline void set_##name(type* value);                    \
@@ -525,7 +554,8 @@
  private:
 #ifdef DEBUG
   // Bootstrapping-aware type checks.
-  static bool IsBootstrappingOrNativeContext(Isolate* isolate, Object* object);
+  V8_EXPORT_PRIVATE static bool IsBootstrappingOrNativeContext(Isolate* isolate,
+                                                               Object* object);
   static bool IsBootstrappingOrValidParentContext(Object* object, Context* kid);
 #endif
 
diff --git a/src/counters-inl.h b/src/counters-inl.h
index c8c06d2..303e5e3 100644
--- a/src/counters-inl.h
+++ b/src/counters-inl.h
@@ -11,10 +11,18 @@
 namespace internal {
 
 RuntimeCallTimerScope::RuntimeCallTimerScope(
+    Isolate* isolate, RuntimeCallStats::CounterId counter_id) {
+  if (V8_UNLIKELY(TRACE_EVENT_RUNTIME_CALL_STATS_TRACING_ENABLED() ||
+                  FLAG_runtime_call_stats)) {
+    Initialize(isolate, counter_id);
+  }
+}
+
+RuntimeCallTimerScope::RuntimeCallTimerScope(
     HeapObject* heap_object, RuntimeCallStats::CounterId counter_id) {
-  if (V8_UNLIKELY(FLAG_runtime_call_stats)) {
-    isolate_ = heap_object->GetIsolate();
-    RuntimeCallStats::Enter(isolate_, &timer_, counter_id);
+  if (V8_UNLIKELY(TRACE_EVENT_RUNTIME_CALL_STATS_TRACING_ENABLED() ||
+                  FLAG_runtime_call_stats)) {
+    Initialize(heap_object->GetIsolate(), counter_id);
   }
 }
 
diff --git a/src/counters.cc b/src/counters.cc
index 8a5908c..c4e8646 100644
--- a/src/counters.cc
+++ b/src/counters.cc
@@ -282,18 +282,15 @@
 }
 
 // static
-void RuntimeCallStats::Enter(Isolate* isolate, RuntimeCallTimer* timer,
+void RuntimeCallStats::Enter(RuntimeCallStats* stats, RuntimeCallTimer* timer,
                              CounterId counter_id) {
-  RuntimeCallStats* stats = isolate->counters()->runtime_call_stats();
   RuntimeCallCounter* counter = &(stats->*counter_id);
   timer->Start(counter, stats->current_timer_);
   stats->current_timer_ = timer;
 }
 
 // static
-void RuntimeCallStats::Leave(Isolate* isolate, RuntimeCallTimer* timer) {
-  RuntimeCallStats* stats = isolate->counters()->runtime_call_stats();
-
+void RuntimeCallStats::Leave(RuntimeCallStats* stats, RuntimeCallTimer* timer) {
   if (stats->current_timer_ == timer) {
     stats->current_timer_ = timer->Stop();
   } else {
@@ -307,9 +304,8 @@
 }
 
 // static
-void RuntimeCallStats::CorrectCurrentCounterId(Isolate* isolate,
+void RuntimeCallStats::CorrectCurrentCounterId(RuntimeCallStats* stats,
                                                CounterId counter_id) {
-  RuntimeCallStats* stats = isolate->counters()->runtime_call_stats();
   DCHECK_NOT_NULL(stats->current_timer_);
   RuntimeCallCounter* counter = &(stats->*counter_id);
   stats->current_timer_->counter_ = counter;
@@ -342,7 +338,9 @@
 }
 
 void RuntimeCallStats::Reset() {
-  if (!FLAG_runtime_call_stats) return;
+  if (!FLAG_runtime_call_stats &&
+      !TRACE_EVENT_RUNTIME_CALL_STATS_TRACING_ENABLED())
+    return;
 #define RESET_COUNTER(name) this->name.Reset();
   FOR_EACH_MANUAL_COUNTER(RESET_COUNTER)
 #undef RESET_COUNTER
@@ -362,6 +360,41 @@
 #define RESET_COUNTER(name) this->Handler_##name.Reset();
   FOR_EACH_HANDLER_COUNTER(RESET_COUNTER)
 #undef RESET_COUNTER
+
+  in_use_ = true;
+}
+
+std::string RuntimeCallStats::Dump() {
+  buffer_.str(std::string());
+  buffer_.clear();
+  buffer_ << "{";
+#define DUMP_COUNTER(name) \
+  if (this->name.count > 0) this->name.Dump(buffer_);
+  FOR_EACH_MANUAL_COUNTER(DUMP_COUNTER)
+#undef DUMP_COUNTER
+
+#define DUMP_COUNTER(name, nargs, result_size) \
+  if (this->Runtime_##name.count > 0) this->Runtime_##name.Dump(buffer_);
+  FOR_EACH_INTRINSIC(DUMP_COUNTER)
+#undef DUMP_COUNTER
+
+#define DUMP_COUNTER(name) \
+  if (this->Builtin_##name.count > 0) this->Builtin_##name.Dump(buffer_);
+  BUILTIN_LIST_C(DUMP_COUNTER)
+#undef DUMP_COUNTER
+
+#define DUMP_COUNTER(name) \
+  if (this->API_##name.count > 0) this->API_##name.Dump(buffer_);
+  FOR_EACH_API_COUNTER(DUMP_COUNTER)
+#undef DUMP_COUNTER
+
+#define DUMP_COUNTER(name) \
+  if (this->Handler_##name.count > 0) this->Handler_##name.Dump(buffer_);
+  FOR_EACH_HANDLER_COUNTER(DUMP_COUNTER)
+#undef DUMP_COUNTER
+  buffer_ << "\"END\":[]}";
+  in_use_ = false;
+  return buffer_.str();
 }
 
 }  // namespace internal
diff --git a/src/counters.h b/src/counters.h
index 59627f1..707ae9f 100644
--- a/src/counters.h
+++ b/src/counters.h
@@ -11,8 +11,10 @@
 #include "src/base/platform/time.h"
 #include "src/builtins/builtins.h"
 #include "src/globals.h"
+#include "src/isolate.h"
 #include "src/objects.h"
 #include "src/runtime/runtime.h"
+#include "src/tracing/trace-event.h"
 
 namespace v8 {
 namespace internal {
@@ -566,12 +568,15 @@
   V(Message_GetLineNumber)                                 \
   V(Message_GetSourceLine)                                 \
   V(Message_GetStartColumn)                                \
+  V(Module_Evaluate)                                       \
+  V(Module_Instantiate)                                    \
   V(NumberObject_New)                                      \
   V(NumberObject_NumberValue)                              \
   V(Object_CallAsConstructor)                              \
   V(Object_CallAsFunction)                                 \
   V(Object_CreateDataProperty)                             \
   V(Object_DefineOwnProperty)                              \
+  V(Object_DefineProperty)                                 \
   V(Object_Delete)                                         \
   V(Object_DeleteProperty)                                 \
   V(Object_ForceSet)                                       \
@@ -657,7 +662,10 @@
   V(UnboundScript_GetName)                                 \
   V(UnboundScript_GetSourceMappingURL)                     \
   V(UnboundScript_GetSourceURL)                            \
-  V(Value_TypeOf)
+  V(Value_TypeOf)                                          \
+  V(ValueDeserializer_ReadHeader)                          \
+  V(ValueDeserializer_ReadValue)                           \
+  V(ValueSerializer_WriteValue)
 
 #define FOR_EACH_MANUAL_COUNTER(V)                  \
   V(AccessorGetterCallback)                         \
@@ -674,13 +682,18 @@
   V(DeoptimizeCode)                                 \
   V(FunctionCallback)                               \
   V(GC)                                             \
+  V(GenericNamedPropertyDefinerCallback)            \
   V(GenericNamedPropertyDeleterCallback)            \
+  V(GenericNamedPropertyDescriptorCallback)         \
   V(GenericNamedPropertyQueryCallback)              \
   V(GenericNamedPropertySetterCallback)             \
+  V(IndexedPropertyDefinerCallback)                 \
   V(IndexedPropertyDeleterCallback)                 \
+  V(IndexedPropertyDescriptorCallback)              \
   V(IndexedPropertyGetterCallback)                  \
   V(IndexedPropertyQueryCallback)                   \
   V(IndexedPropertySetterCallback)                  \
+  V(InvokeApiInterruptCallbacks)                    \
   V(InvokeFunctionCallback)                         \
   V(JS_Execution)                                   \
   V(Map_SetPrototype)                               \
@@ -765,67 +778,52 @@
 
   // Starting measuring the time for a function. This will establish the
   // connection to the parent counter for properly calculating the own times.
-  static void Enter(Isolate* isolate, RuntimeCallTimer* timer,
+  static void Enter(RuntimeCallStats* stats, RuntimeCallTimer* timer,
                     CounterId counter_id);
 
   // Leave a scope for a measured runtime function. This will properly add
   // the time delta to the current_counter and subtract the delta from its
   // parent.
-  static void Leave(Isolate* isolate, RuntimeCallTimer* timer);
+  static void Leave(RuntimeCallStats* stats, RuntimeCallTimer* timer);
 
   // Set counter id for the innermost measurement. It can be used to refine
   // event kind when a runtime entry counter is too generic.
-  static void CorrectCurrentCounterId(Isolate* isolate, CounterId counter_id);
+  static void CorrectCurrentCounterId(RuntimeCallStats* stats,
+                                      CounterId counter_id);
 
   void Reset();
-  void Print(std::ostream& os);
+  V8_NOINLINE void Print(std::ostream& os);
+  V8_NOINLINE std::string Dump();
 
-  RuntimeCallStats() { Reset(); }
+  RuntimeCallStats() {
+    Reset();
+    in_use_ = false;
+  }
+
   RuntimeCallTimer* current_timer() { return current_timer_; }
+  bool InUse() { return in_use_; }
 
  private:
+  std::stringstream buffer_;
   // Counter to track recursive time events.
   RuntimeCallTimer* current_timer_ = NULL;
+  // Used to track nested tracing scopes.
+  bool in_use_;
 };
 
-#define TRACE_RUNTIME_CALL_STATS(isolate, counter_name) \
-  do {                                                  \
-    if (FLAG_runtime_call_stats) {                      \
-      RuntimeCallStats::CorrectCurrentCounterId(        \
-          isolate, &RuntimeCallStats::counter_name);    \
-    }                                                   \
+#define TRACE_RUNTIME_CALL_STATS(isolate, counter_name)                 \
+  do {                                                                  \
+    if (V8_UNLIKELY(TRACE_EVENT_RUNTIME_CALL_STATS_TRACING_ENABLED() || \
+                    FLAG_runtime_call_stats)) {                         \
+      RuntimeCallStats::CorrectCurrentCounterId(                        \
+          isolate->counters()->runtime_call_stats(),                    \
+          &RuntimeCallStats::counter_name);                             \
+    }                                                                   \
   } while (false)
 
 #define TRACE_HANDLER_STATS(isolate, counter_name) \
   TRACE_RUNTIME_CALL_STATS(isolate, Handler_##counter_name)
 
-// A RuntimeCallTimerScopes wraps around a RuntimeCallTimer to measure the
-// the time of C++ scope.
-class RuntimeCallTimerScope {
- public:
-  inline RuntimeCallTimerScope(Isolate* isolate,
-                               RuntimeCallStats::CounterId counter_id) {
-    if (V8_UNLIKELY(FLAG_runtime_call_stats)) {
-      isolate_ = isolate;
-      RuntimeCallStats::Enter(isolate_, &timer_, counter_id);
-    }
-  }
-  // This constructor is here just to avoid calling GetIsolate() when the
-  // stats are disabled and the isolate is not directly available.
-  inline RuntimeCallTimerScope(HeapObject* heap_object,
-                               RuntimeCallStats::CounterId counter_id);
-
-  inline ~RuntimeCallTimerScope() {
-    if (V8_UNLIKELY(FLAG_runtime_call_stats)) {
-      RuntimeCallStats::Leave(isolate_, &timer_);
-    }
-  }
-
- private:
-  Isolate* isolate_;
-  RuntimeCallTimer timer_;
-};
-
 #define HISTOGRAM_RANGE_LIST(HR)                                              \
   /* Generic range histograms */                                              \
   HR(detached_context_age_in_gc, V8.DetachedContextAgeInGC, 0, 20, 21)        \
@@ -836,6 +834,9 @@
   HR(code_cache_reject_reason, V8.CodeCacheRejectReason, 1, 6, 6)             \
   HR(errors_thrown_per_context, V8.ErrorsThrownPerContext, 0, 200, 20)        \
   HR(debug_feature_usage, V8.DebugFeatureUsage, 1, 7, 7)                      \
+  HR(incremental_marking_reason, V8.GCIncrementalMarkingReason, 0, 21, 22)    \
+  HR(mark_compact_reason, V8.GCMarkCompactReason, 0, 21, 22)                  \
+  HR(scavenge_reason, V8.GCScavengeReason, 0, 21, 22)                         \
   /* Asm/Wasm. */                                                             \
   HR(wasm_functions_per_module, V8.WasmFunctionsPerModule, 1, 10000, 51)
 
@@ -1238,6 +1239,36 @@
   DISALLOW_IMPLICIT_CONSTRUCTORS(Counters);
 };
 
+// A RuntimeCallTimerScope wraps around a RuntimeCallTimer to measure the
+// time of a C++ scope.
+class RuntimeCallTimerScope {
+ public:
+  inline RuntimeCallTimerScope(Isolate* isolate,
+                               RuntimeCallStats::CounterId counter_id);
+  // This constructor is here just to avoid calling GetIsolate() when the
+  // stats are disabled and the isolate is not directly available.
+  inline RuntimeCallTimerScope(HeapObject* heap_object,
+                               RuntimeCallStats::CounterId counter_id);
+
+  inline ~RuntimeCallTimerScope() {
+    if (V8_UNLIKELY(isolate_ != nullptr)) {
+      RuntimeCallStats::Leave(isolate_->counters()->runtime_call_stats(),
+                              &timer_);
+    }
+  }
+
+ private:
+  V8_INLINE void Initialize(Isolate* isolate,
+                            RuntimeCallStats::CounterId counter_id) {
+    isolate_ = isolate;
+    RuntimeCallStats::Enter(isolate_->counters()->runtime_call_stats(), &timer_,
+                            counter_id);
+  }
+
+  Isolate* isolate_ = nullptr;
+  RuntimeCallTimer timer_;
+};
+
 }  // namespace internal
 }  // namespace v8
 
diff --git a/src/crankshaft/arm/lithium-arm.cc b/src/crankshaft/arm/lithium-arm.cc
index 324dcfe..8c4b735 100644
--- a/src/crankshaft/arm/lithium-arm.cc
+++ b/src/crankshaft/arm/lithium-arm.cc
@@ -304,15 +304,6 @@
 }
 
 
-void LStoreNamedGeneric::PrintDataTo(StringStream* stream) {
-  object()->PrintTo(stream);
-  stream->Add(".");
-  stream->Add(String::cast(*name())->ToCString().get());
-  stream->Add(" <- ");
-  value()->PrintTo(stream);
-}
-
-
 void LLoadKeyed::PrintDataTo(StringStream* stream) {
   elements()->PrintTo(stream);
   stream->Add("[");
@@ -345,15 +336,6 @@
 }
 
 
-void LStoreKeyedGeneric::PrintDataTo(StringStream* stream) {
-  object()->PrintTo(stream);
-  stream->Add("[");
-  key()->PrintTo(stream);
-  stream->Add("] <- ");
-  value()->PrintTo(stream);
-}
-
-
 void LTransitionElementsKind::PrintDataTo(StringStream* stream) {
   object()->PrintTo(stream);
   stream->Add(" %p -> %p", *original_map(), *transitioned_map());
@@ -877,7 +859,7 @@
 
 LInstruction* LChunkBuilder::DoPrologue(HPrologue* instr) {
   LInstruction* result = new (zone()) LPrologue();
-  if (info_->scope()->num_heap_slots() > 0) {
+  if (info_->scope()->NeedsContext()) {
     result = MarkAsCall(result, instr);
   }
   return result;
@@ -1019,6 +1001,9 @@
 LInstruction* LChunkBuilder::DoCallWithDescriptor(
     HCallWithDescriptor* instr) {
   CallInterfaceDescriptor descriptor = instr->descriptor();
+  DCHECK_EQ(descriptor.GetParameterCount() +
+                LCallWithDescriptor::kImplicitRegisterParameterCount,
+            instr->OperandCount());
 
   LOperand* target = UseRegisterOrConstantAtStart(instr->target());
   ZoneList<LOperand*> ops(instr->OperandCount(), zone());
@@ -1027,15 +1012,20 @@
   // Context
   LOperand* op = UseFixed(instr->OperandAt(1), cp);
   ops.Add(op, zone());
-  // Other register parameters
-  for (int i = LCallWithDescriptor::kImplicitRegisterParameterCount;
-       i < instr->OperandCount(); i++) {
-    op =
-        UseFixed(instr->OperandAt(i),
-                 descriptor.GetRegisterParameter(
-                     i - LCallWithDescriptor::kImplicitRegisterParameterCount));
+  // Load register parameters.
+  int i = 0;
+  for (; i < descriptor.GetRegisterParameterCount(); i++) {
+    op = UseFixed(instr->OperandAt(
+                      i + LCallWithDescriptor::kImplicitRegisterParameterCount),
+                  descriptor.GetRegisterParameter(i));
     ops.Add(op, zone());
   }
+  // Push stack parameters.
+  for (; i < descriptor.GetParameterCount(); i++) {
+    op = UseAny(instr->OperandAt(
+        i + LCallWithDescriptor::kImplicitRegisterParameterCount));
+    AddInstruction(new (zone()) LPushArgument(op), instr);
+  }
 
   LCallWithDescriptor* result = new(zone()) LCallWithDescriptor(
       descriptor, ops, zone());
@@ -2180,26 +2170,6 @@
 }
 
 
-LInstruction* LChunkBuilder::DoStoreKeyedGeneric(HStoreKeyedGeneric* instr) {
-  LOperand* context = UseFixed(instr->context(), cp);
-  LOperand* obj =
-      UseFixed(instr->object(), StoreDescriptor::ReceiverRegister());
-  LOperand* key = UseFixed(instr->key(), StoreDescriptor::NameRegister());
-  LOperand* val = UseFixed(instr->value(), StoreDescriptor::ValueRegister());
-
-  DCHECK(instr->object()->representation().IsTagged());
-  DCHECK(instr->key()->representation().IsTagged());
-  DCHECK(instr->value()->representation().IsTagged());
-
-  LOperand* slot = FixedTemp(StoreWithVectorDescriptor::SlotRegister());
-  LOperand* vector = FixedTemp(StoreWithVectorDescriptor::VectorRegister());
-
-  LStoreKeyedGeneric* result =
-      new (zone()) LStoreKeyedGeneric(context, obj, key, val, slot, vector);
-  return MarkAsCall(result, instr);
-}
-
-
 LInstruction* LChunkBuilder::DoTransitionElementsKind(
     HTransitionElementsKind* instr) {
   if (IsSimpleMapChangeTransition(instr->from_kind(), instr->to_kind())) {
@@ -2276,20 +2246,6 @@
 }
 
 
-LInstruction* LChunkBuilder::DoStoreNamedGeneric(HStoreNamedGeneric* instr) {
-  LOperand* context = UseFixed(instr->context(), cp);
-  LOperand* obj =
-      UseFixed(instr->object(), StoreDescriptor::ReceiverRegister());
-  LOperand* val = UseFixed(instr->value(), StoreDescriptor::ValueRegister());
-  LOperand* slot = FixedTemp(StoreWithVectorDescriptor::SlotRegister());
-  LOperand* vector = FixedTemp(StoreWithVectorDescriptor::VectorRegister());
-
-  LStoreNamedGeneric* result =
-      new (zone()) LStoreNamedGeneric(context, obj, val, slot, vector);
-  return MarkAsCall(result, instr);
-}
-
-
 LInstruction* LChunkBuilder::DoStringAdd(HStringAdd* instr) {
   LOperand* context = UseFixed(instr->context(), cp);
   LOperand* left = UseFixed(instr->left(), r1);
diff --git a/src/crankshaft/arm/lithium-arm.h b/src/crankshaft/arm/lithium-arm.h
index 80fbe81..abdfbdd 100644
--- a/src/crankshaft/arm/lithium-arm.h
+++ b/src/crankshaft/arm/lithium-arm.h
@@ -132,9 +132,7 @@
   V(StoreCodeEntry)                          \
   V(StoreContextSlot)                        \
   V(StoreKeyed)                              \
-  V(StoreKeyedGeneric)                       \
   V(StoreNamedField)                         \
-  V(StoreNamedGeneric)                       \
   V(StringAdd)                               \
   V(StringCharCodeAt)                        \
   V(StringCharFromCode)                      \
@@ -2005,33 +2003,6 @@
 };
 
 
-class LStoreNamedGeneric final : public LTemplateInstruction<0, 3, 2> {
- public:
-  LStoreNamedGeneric(LOperand* context, LOperand* object, LOperand* value,
-                     LOperand* slot, LOperand* vector) {
-    inputs_[0] = context;
-    inputs_[1] = object;
-    inputs_[2] = value;
-    temps_[0] = slot;
-    temps_[1] = vector;
-  }
-
-  LOperand* context() { return inputs_[0]; }
-  LOperand* object() { return inputs_[1]; }
-  LOperand* value() { return inputs_[2]; }
-  LOperand* temp_slot() { return temps_[0]; }
-  LOperand* temp_vector() { return temps_[1]; }
-
-  DECLARE_CONCRETE_INSTRUCTION(StoreNamedGeneric, "store-named-generic")
-  DECLARE_HYDROGEN_ACCESSOR(StoreNamedGeneric)
-
-  void PrintDataTo(StringStream* stream) override;
-
-  Handle<Object> name() const { return hydrogen()->name(); }
-  LanguageMode language_mode() { return hydrogen()->language_mode(); }
-};
-
-
 class LStoreKeyed final : public LTemplateInstruction<0, 4, 0> {
  public:
   LStoreKeyed(LOperand* object, LOperand* key, LOperand* value,
@@ -2068,34 +2039,6 @@
 };
 
 
-class LStoreKeyedGeneric final : public LTemplateInstruction<0, 4, 2> {
- public:
-  LStoreKeyedGeneric(LOperand* context, LOperand* object, LOperand* key,
-                     LOperand* value, LOperand* slot, LOperand* vector) {
-    inputs_[0] = context;
-    inputs_[1] = object;
-    inputs_[2] = key;
-    inputs_[3] = value;
-    temps_[0] = slot;
-    temps_[1] = vector;
-  }
-
-  LOperand* context() { return inputs_[0]; }
-  LOperand* object() { return inputs_[1]; }
-  LOperand* key() { return inputs_[2]; }
-  LOperand* value() { return inputs_[3]; }
-  LOperand* temp_slot() { return temps_[0]; }
-  LOperand* temp_vector() { return temps_[1]; }
-
-  DECLARE_CONCRETE_INSTRUCTION(StoreKeyedGeneric, "store-keyed-generic")
-  DECLARE_HYDROGEN_ACCESSOR(StoreKeyedGeneric)
-
-  void PrintDataTo(StringStream* stream) override;
-
-  LanguageMode language_mode() { return hydrogen()->language_mode(); }
-};
-
-
 class LTransitionElementsKind final : public LTemplateInstruction<0, 2, 1> {
  public:
   LTransitionElementsKind(LOperand* object,
diff --git a/src/crankshaft/arm/lithium-codegen-arm.cc b/src/crankshaft/arm/lithium-codegen-arm.cc
index ee3e54b..f2cc4b4 100644
--- a/src/crankshaft/arm/lithium-codegen-arm.cc
+++ b/src/crankshaft/arm/lithium-codegen-arm.cc
@@ -152,7 +152,7 @@
   Comment(";;; Prologue begin");
 
   // Possibly allocate a local context.
-  if (info()->scope()->num_heap_slots() > 0) {
+  if (info()->scope()->NeedsContext()) {
     Comment(";;; Allocate local context");
     bool need_write_barrier = true;
     // Argument to NewContext is the function, which is in r1.
@@ -160,7 +160,7 @@
     Safepoint::DeoptMode deopt_mode = Safepoint::kNoLazyDeopt;
     if (info()->scope()->is_script_scope()) {
       __ push(r1);
-      __ Push(info()->scope()->GetScopeInfo(info()->isolate()));
+      __ Push(info()->scope()->scope_info());
       __ CallRuntime(Runtime::kNewScriptContext);
       deopt_mode = Safepoint::kLazyDeopt;
     } else {
@@ -2602,20 +2602,6 @@
 }
 
 
-template <class T>
-void LCodeGen::EmitVectorStoreICRegisters(T* instr) {
-  Register vector_register = ToRegister(instr->temp_vector());
-  Register slot_register = ToRegister(instr->temp_slot());
-
-  AllowDeferredHandleDereference vector_structure_check;
-  Handle<TypeFeedbackVector> vector = instr->hydrogen()->feedback_vector();
-  __ Move(vector_register, vector);
-  FeedbackVectorSlot slot = instr->hydrogen()->slot();
-  int index = vector->GetIndex(slot);
-  __ mov(slot_register, Operand(Smi::FromInt(index)));
-}
-
-
 void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) {
   DCHECK(ToRegister(instr->context()).is(cp));
   DCHECK(ToRegister(instr->result()).is(r0));
@@ -3860,21 +3846,6 @@
 }
 
 
-void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) {
-  DCHECK(ToRegister(instr->context()).is(cp));
-  DCHECK(ToRegister(instr->object()).is(StoreDescriptor::ReceiverRegister()));
-  DCHECK(ToRegister(instr->value()).is(StoreDescriptor::ValueRegister()));
-
-  EmitVectorStoreICRegisters<LStoreNamedGeneric>(instr);
-
-  __ mov(StoreDescriptor::NameRegister(), Operand(instr->name()));
-  Handle<Code> ic =
-      CodeFactory::StoreICInOptimizedCode(isolate(), instr->language_mode())
-          .code();
-  CallCode(ic, RelocInfo::CODE_TARGET, instr, NEVER_INLINE_TARGET_ADDRESS);
-}
-
-
 void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) {
   Condition cc = instr->hydrogen()->allow_equality() ? hi : hs;
   if (instr->index()->IsConstantOperand()) {
@@ -4071,21 +4042,6 @@
 }
 
 
-void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) {
-  DCHECK(ToRegister(instr->context()).is(cp));
-  DCHECK(ToRegister(instr->object()).is(StoreDescriptor::ReceiverRegister()));
-  DCHECK(ToRegister(instr->key()).is(StoreDescriptor::NameRegister()));
-  DCHECK(ToRegister(instr->value()).is(StoreDescriptor::ValueRegister()));
-
-  EmitVectorStoreICRegisters<LStoreKeyedGeneric>(instr);
-
-  Handle<Code> ic = CodeFactory::KeyedStoreICInOptimizedCode(
-                        isolate(), instr->language_mode())
-                        .code();
-  CallCode(ic, RelocInfo::CODE_TARGET, instr, NEVER_INLINE_TARGET_ADDRESS);
-}
-
-
 void LCodeGen::DoMaybeGrowElements(LMaybeGrowElements* instr) {
   class DeferredMaybeGrowElements final : public LDeferredCode {
    public:
@@ -5063,7 +5019,7 @@
 
   if (instr->size()->IsConstantOperand()) {
     int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
-    CHECK(size <= Page::kMaxRegularHeapObjectSize);
+    CHECK(size <= kMaxRegularHeapObjectSize);
     __ Allocate(size, result, scratch, scratch2, deferred->entry(), flags);
   } else {
     Register size = ToRegister(instr->size());
@@ -5165,7 +5121,7 @@
   }
   if (instr->size()->IsConstantOperand()) {
     int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
-    CHECK(size <= Page::kMaxRegularHeapObjectSize);
+    CHECK(size <= kMaxRegularHeapObjectSize);
     __ FastAllocate(size, result, scratch1, scratch2, flags);
   } else {
     Register size = ToRegister(instr->size());
diff --git a/src/crankshaft/arm/lithium-codegen-arm.h b/src/crankshaft/arm/lithium-codegen-arm.h
index 533f4c8..26b7fb5 100644
--- a/src/crankshaft/arm/lithium-codegen-arm.h
+++ b/src/crankshaft/arm/lithium-codegen-arm.h
@@ -311,8 +311,6 @@
 
   template <class T>
   void EmitVectorLoadICRegisters(T* instr);
-  template <class T>
-  void EmitVectorStoreICRegisters(T* instr);
 
   ZoneList<Deoptimizer::JumpTableEntry> jump_table_;
   Scope* const scope_;
diff --git a/src/crankshaft/arm64/lithium-arm64.cc b/src/crankshaft/arm64/lithium-arm64.cc
index 8067a6a..8a9ce42 100644
--- a/src/crankshaft/arm64/lithium-arm64.cc
+++ b/src/crankshaft/arm64/lithium-arm64.cc
@@ -252,15 +252,6 @@
 }
 
 
-void LStoreKeyedGeneric::PrintDataTo(StringStream* stream) {
-  object()->PrintTo(stream);
-  stream->Add("[");
-  key()->PrintTo(stream);
-  stream->Add("] <- ");
-  value()->PrintTo(stream);
-}
-
-
 void LStoreNamedField::PrintDataTo(StringStream* stream) {
   object()->PrintTo(stream);
   std::ostringstream os;
@@ -271,15 +262,6 @@
 }
 
 
-void LStoreNamedGeneric::PrintDataTo(StringStream* stream) {
-  object()->PrintTo(stream);
-  stream->Add(".");
-  stream->Add(String::cast(*name())->ToCString().get());
-  stream->Add(" <- ");
-  value()->PrintTo(stream);
-}
-
-
 void LStringCompareAndBranch::PrintDataTo(StringStream* stream) {
   stream->Add("if string_compare(");
   left()->PrintTo(stream);
@@ -726,7 +708,7 @@
 
 LInstruction* LChunkBuilder::DoPrologue(HPrologue* instr) {
   LInstruction* result = new (zone()) LPrologue();
-  if (info_->scope()->num_heap_slots() > 0) {
+  if (info_->scope()->NeedsContext()) {
     result = MarkAsCall(result, instr);
   }
   return result;
@@ -981,6 +963,9 @@
 LInstruction* LChunkBuilder::DoCallWithDescriptor(
     HCallWithDescriptor* instr) {
   CallInterfaceDescriptor descriptor = instr->descriptor();
+  DCHECK_EQ(descriptor.GetParameterCount() +
+                LCallWithDescriptor::kImplicitRegisterParameterCount,
+            instr->OperandCount());
 
   LOperand* target = UseRegisterOrConstantAtStart(instr->target());
   ZoneList<LOperand*> ops(instr->OperandCount(), zone());
@@ -989,15 +974,30 @@
   // Context
   LOperand* op = UseFixed(instr->OperandAt(1), cp);
   ops.Add(op, zone());
-  // Other register parameters
-  for (int i = LCallWithDescriptor::kImplicitRegisterParameterCount;
-       i < instr->OperandCount(); i++) {
-    op =
-        UseFixed(instr->OperandAt(i),
-                 descriptor.GetRegisterParameter(
-                     i - LCallWithDescriptor::kImplicitRegisterParameterCount));
+  // Load register parameters.
+  int i = 0;
+  for (; i < descriptor.GetRegisterParameterCount(); i++) {
+    op = UseFixed(instr->OperandAt(
+                      i + LCallWithDescriptor::kImplicitRegisterParameterCount),
+                  descriptor.GetRegisterParameter(i));
     ops.Add(op, zone());
   }
+  // Push stack parameters.
+  if (i < descriptor.GetParameterCount()) {
+    int argc = descriptor.GetParameterCount() - i;
+    AddInstruction(new (zone()) LPreparePushArguments(argc), instr);
+    LPushArguments* push_args = new (zone()) LPushArguments(zone());
+    for (; i < descriptor.GetParameterCount(); i++) {
+      if (push_args->ShouldSplitPush()) {
+        AddInstruction(push_args, instr);
+        push_args = new (zone()) LPushArguments(zone());
+      }
+      op = UseRegisterAtStart(instr->OperandAt(
+          i + LCallWithDescriptor::kImplicitRegisterParameterCount));
+      push_args->AddArgument(op);
+    }
+    AddInstruction(push_args, instr);
+  }
 
   LCallWithDescriptor* result = new(zone()) LCallWithDescriptor(descriptor,
                                                                 ops,
@@ -2209,26 +2209,6 @@
 }
 
 
-LInstruction* LChunkBuilder::DoStoreKeyedGeneric(HStoreKeyedGeneric* instr) {
-  LOperand* context = UseFixed(instr->context(), cp);
-  LOperand* object =
-      UseFixed(instr->object(), StoreDescriptor::ReceiverRegister());
-  LOperand* key = UseFixed(instr->key(), StoreDescriptor::NameRegister());
-  LOperand* value = UseFixed(instr->value(), StoreDescriptor::ValueRegister());
-
-  DCHECK(instr->object()->representation().IsTagged());
-  DCHECK(instr->key()->representation().IsTagged());
-  DCHECK(instr->value()->representation().IsTagged());
-
-  LOperand* slot = FixedTemp(StoreWithVectorDescriptor::SlotRegister());
-  LOperand* vector = FixedTemp(StoreWithVectorDescriptor::VectorRegister());
-
-  LStoreKeyedGeneric* result = new (zone())
-      LStoreKeyedGeneric(context, object, key, value, slot, vector);
-  return MarkAsCall(result, instr);
-}
-
-
 LInstruction* LChunkBuilder::DoStoreNamedField(HStoreNamedField* instr) {
   // TODO(jbramley): It might be beneficial to allow value to be a constant in
   // some cases. x64 makes use of this with FLAG_track_fields, for example.
@@ -2258,21 +2238,6 @@
 }
 
 
-LInstruction* LChunkBuilder::DoStoreNamedGeneric(HStoreNamedGeneric* instr) {
-  LOperand* context = UseFixed(instr->context(), cp);
-  LOperand* object =
-      UseFixed(instr->object(), StoreDescriptor::ReceiverRegister());
-  LOperand* value = UseFixed(instr->value(), StoreDescriptor::ValueRegister());
-
-  LOperand* slot = FixedTemp(StoreWithVectorDescriptor::SlotRegister());
-  LOperand* vector = FixedTemp(StoreWithVectorDescriptor::VectorRegister());
-
-  LStoreNamedGeneric* result =
-      new (zone()) LStoreNamedGeneric(context, object, value, slot, vector);
-  return MarkAsCall(result, instr);
-}
-
-
 LInstruction* LChunkBuilder::DoStringAdd(HStringAdd* instr) {
   LOperand* context = UseFixed(instr->context(), cp);
   LOperand* left = UseFixed(instr->left(), x1);
diff --git a/src/crankshaft/arm64/lithium-arm64.h b/src/crankshaft/arm64/lithium-arm64.h
index 782da09..9891f9e 100644
--- a/src/crankshaft/arm64/lithium-arm64.h
+++ b/src/crankshaft/arm64/lithium-arm64.h
@@ -143,9 +143,7 @@
   V(StoreKeyedExternal)                      \
   V(StoreKeyedFixed)                         \
   V(StoreKeyedFixedDouble)                   \
-  V(StoreKeyedGeneric)                       \
   V(StoreNamedField)                         \
-  V(StoreNamedGeneric)                       \
   V(StringAdd)                               \
   V(StringCharCodeAt)                        \
   V(StringCharFromCode)                      \
@@ -2336,34 +2334,6 @@
 };
 
 
-class LStoreKeyedGeneric final : public LTemplateInstruction<0, 4, 2> {
- public:
-  LStoreKeyedGeneric(LOperand* context, LOperand* object, LOperand* key,
-                     LOperand* value, LOperand* slot, LOperand* vector) {
-    inputs_[0] = context;
-    inputs_[1] = object;
-    inputs_[2] = key;
-    inputs_[3] = value;
-    temps_[0] = slot;
-    temps_[1] = vector;
-  }
-
-  LOperand* context() { return inputs_[0]; }
-  LOperand* object() { return inputs_[1]; }
-  LOperand* key() { return inputs_[2]; }
-  LOperand* value() { return inputs_[3]; }
-  LOperand* temp_slot() { return temps_[0]; }
-  LOperand* temp_vector() { return temps_[1]; }
-
-  DECLARE_CONCRETE_INSTRUCTION(StoreKeyedGeneric, "store-keyed-generic")
-  DECLARE_HYDROGEN_ACCESSOR(StoreKeyedGeneric)
-
-  void PrintDataTo(StringStream* stream) override;
-
-  LanguageMode language_mode() { return hydrogen()->language_mode(); }
-};
-
-
 class LStoreNamedField final : public LTemplateInstruction<0, 2, 2> {
  public:
   LStoreNamedField(LOperand* object, LOperand* value,
@@ -2390,33 +2360,6 @@
 };
 
 
-class LStoreNamedGeneric final : public LTemplateInstruction<0, 3, 2> {
- public:
-  LStoreNamedGeneric(LOperand* context, LOperand* object, LOperand* value,
-                     LOperand* slot, LOperand* vector) {
-    inputs_[0] = context;
-    inputs_[1] = object;
-    inputs_[2] = value;
-    temps_[0] = slot;
-    temps_[1] = vector;
-  }
-
-  LOperand* context() { return inputs_[0]; }
-  LOperand* object() { return inputs_[1]; }
-  LOperand* value() { return inputs_[2]; }
-  LOperand* temp_slot() { return temps_[0]; }
-  LOperand* temp_vector() { return temps_[1]; }
-
-  DECLARE_CONCRETE_INSTRUCTION(StoreNamedGeneric, "store-named-generic")
-  DECLARE_HYDROGEN_ACCESSOR(StoreNamedGeneric)
-
-  void PrintDataTo(StringStream* stream) override;
-
-  Handle<Object> name() const { return hydrogen()->name(); }
-  LanguageMode language_mode() { return hydrogen()->language_mode(); }
-};
-
-
 class LMaybeGrowElements final : public LTemplateInstruction<1, 5, 0> {
  public:
   LMaybeGrowElements(LOperand* context, LOperand* object, LOperand* elements,
diff --git a/src/crankshaft/arm64/lithium-codegen-arm64.cc b/src/crankshaft/arm64/lithium-codegen-arm64.cc
index ce5813b..a4aa275 100644
--- a/src/crankshaft/arm64/lithium-codegen-arm64.cc
+++ b/src/crankshaft/arm64/lithium-codegen-arm64.cc
@@ -583,14 +583,14 @@
   Comment(";;; Prologue begin");
 
   // Allocate a local context if needed.
-  if (info()->scope()->num_heap_slots() > 0) {
+  if (info()->scope()->NeedsContext()) {
     Comment(";;; Allocate local context");
     bool need_write_barrier = true;
     // Argument to NewContext is the function, which is in x1.
     int slots = info()->scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
     Safepoint::DeoptMode deopt_mode = Safepoint::kNoLazyDeopt;
     if (info()->scope()->is_script_scope()) {
-      __ Mov(x10, Operand(info()->scope()->GetScopeInfo(info()->isolate())));
+      __ Mov(x10, Operand(info()->scope()->scope_info()));
       __ Push(x1, x10);
       __ CallRuntime(Runtime::kNewScriptContext);
       deopt_mode = Safepoint::kLazyDeopt;
@@ -1403,7 +1403,7 @@
 
   if (instr->size()->IsConstantOperand()) {
     int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
-    CHECK(size <= Page::kMaxRegularHeapObjectSize);
+    CHECK(size <= kMaxRegularHeapObjectSize);
     __ Allocate(size, result, temp1, temp2, deferred->entry(), flags);
   } else {
     Register size = ToRegister32(instr->size());
@@ -1499,7 +1499,7 @@
   }
   if (instr->size()->IsConstantOperand()) {
     int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
-    CHECK(size <= Page::kMaxRegularHeapObjectSize);
+    CHECK(size <= kMaxRegularHeapObjectSize);
     __ FastAllocate(size, result, scratch1, scratch2, flags);
   } else {
     Register size = ToRegister(instr->size());
@@ -1973,7 +1973,16 @@
     generator.AfterCall();
   }
 
-  RecordPushedArgumentsDelta(instr->hydrogen()->argument_delta());
+  HCallWithDescriptor* hinstr = instr->hydrogen();
+  RecordPushedArgumentsDelta(hinstr->argument_delta());
+
+  // HCallWithDescriptor instruction is translated to zero or more
+  // LPushArguments (they handle parameters passed on the stack) followed by
+  // an LCallWithDescriptor. Each generated LPushArguments instruction records
+  // the number of arguments pushed, so we need to offset them here.
+  // The |argument_delta()| used above "knows" only about JS parameters while
+  // we are dealing here with particular calling convention details.
+  RecordPushedArgumentsDelta(-hinstr->descriptor().GetStackParameterCount());
 }
 
 
@@ -3021,20 +3030,6 @@
 }
 
 
-template <class T>
-void LCodeGen::EmitVectorStoreICRegisters(T* instr) {
-  Register vector_register = ToRegister(instr->temp_vector());
-  Register slot_register = ToRegister(instr->temp_slot());
-
-  AllowDeferredHandleDereference vector_structure_check;
-  Handle<TypeFeedbackVector> vector = instr->hydrogen()->feedback_vector();
-  __ Mov(vector_register, vector);
-  FeedbackVectorSlot slot = instr->hydrogen()->slot();
-  int index = vector->GetIndex(slot);
-  __ Mov(slot_register, Smi::FromInt(index));
-}
-
-
 void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) {
   DCHECK(ToRegister(instr->context()).is(cp));
   DCHECK(ToRegister(instr->result()).Is(x0));
@@ -4933,21 +4928,6 @@
 }
 
 
-void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) {
-  DCHECK(ToRegister(instr->context()).is(cp));
-  DCHECK(ToRegister(instr->object()).is(StoreDescriptor::ReceiverRegister()));
-  DCHECK(ToRegister(instr->key()).is(StoreDescriptor::NameRegister()));
-  DCHECK(ToRegister(instr->value()).is(StoreDescriptor::ValueRegister()));
-
-  EmitVectorStoreICRegisters<LStoreKeyedGeneric>(instr);
-
-  Handle<Code> ic = CodeFactory::KeyedStoreICInOptimizedCode(
-                        isolate(), instr->language_mode())
-                        .code();
-  CallCode(ic, RelocInfo::CODE_TARGET, instr);
-}
-
-
 void LCodeGen::DoMaybeGrowElements(LMaybeGrowElements* instr) {
   class DeferredMaybeGrowElements final : public LDeferredCode {
    public:
@@ -5131,21 +5111,6 @@
 }
 
 
-void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) {
-  DCHECK(ToRegister(instr->context()).is(cp));
-  DCHECK(ToRegister(instr->object()).is(StoreDescriptor::ReceiverRegister()));
-  DCHECK(ToRegister(instr->value()).is(StoreDescriptor::ValueRegister()));
-
-  EmitVectorStoreICRegisters<LStoreNamedGeneric>(instr);
-
-  __ Mov(StoreDescriptor::NameRegister(), Operand(instr->name()));
-  Handle<Code> ic =
-      CodeFactory::StoreICInOptimizedCode(isolate(), instr->language_mode())
-          .code();
-  CallCode(ic, RelocInfo::CODE_TARGET, instr);
-}
-
-
 void LCodeGen::DoStringAdd(LStringAdd* instr) {
   DCHECK(ToRegister(instr->context()).is(cp));
   DCHECK(ToRegister(instr->left()).Is(x1));
diff --git a/src/crankshaft/arm64/lithium-codegen-arm64.h b/src/crankshaft/arm64/lithium-codegen-arm64.h
index 2fc6f96..ca04fa2 100644
--- a/src/crankshaft/arm64/lithium-codegen-arm64.h
+++ b/src/crankshaft/arm64/lithium-codegen-arm64.h
@@ -186,8 +186,6 @@
 
   template <class T>
   void EmitVectorLoadICRegisters(T* instr);
-  template <class T>
-  void EmitVectorStoreICRegisters(T* instr);
 
   // Emits optimized code for %_IsString(x).  Preserves input register.
   // Returns the condition on which a final split to
diff --git a/src/crankshaft/compilation-phase.h b/src/crankshaft/compilation-phase.h
index 99e24c7..8d6468d 100644
--- a/src/crankshaft/compilation-phase.h
+++ b/src/crankshaft/compilation-phase.h
@@ -6,8 +6,9 @@
 #define V8_CRANKSHAFT_COMPILATION_PHASE_H_
 
 #include "src/allocation.h"
-#include "src/compiler.h"
-#include "src/zone.h"
+#include "src/base/platform/elapsed-timer.h"
+#include "src/compilation-info.h"
+#include "src/zone/zone.h"
 
 namespace v8 {
 namespace internal {
diff --git a/src/crankshaft/hydrogen-bce.cc b/src/crankshaft/hydrogen-bce.cc
index d00d8ce..7910c5b 100644
--- a/src/crankshaft/hydrogen-bce.cc
+++ b/src/crankshaft/hydrogen-bce.cc
@@ -307,24 +307,25 @@
   return k1->IndexBase() == k2->IndexBase() && k1->Length() == k2->Length();
 }
 
-
 BoundsCheckTable::BoundsCheckTable(Zone* zone)
-    : ZoneHashMap(BoundsCheckKeyMatch, ZoneHashMap::kDefaultHashMapCapacity,
-                  ZoneAllocationPolicy(zone)) { }
-
+    : CustomMatcherZoneHashMap(BoundsCheckKeyMatch,
+                               ZoneHashMap::kDefaultHashMapCapacity,
+                               ZoneAllocationPolicy(zone)) {}
 
 BoundsCheckBbData** BoundsCheckTable::LookupOrInsert(BoundsCheckKey* key,
                                                      Zone* zone) {
   return reinterpret_cast<BoundsCheckBbData**>(
-      &(ZoneHashMap::LookupOrInsert(key, key->Hash(),
-                                    ZoneAllocationPolicy(zone))->value));
+      &(CustomMatcherZoneHashMap::LookupOrInsert(key, key->Hash(),
+                                                 ZoneAllocationPolicy(zone))
+            ->value));
 }
 
 
 void BoundsCheckTable::Insert(BoundsCheckKey* key,
                               BoundsCheckBbData* data,
                               Zone* zone) {
-  ZoneHashMap::LookupOrInsert(key, key->Hash(), ZoneAllocationPolicy(zone))
+  CustomMatcherZoneHashMap::LookupOrInsert(key, key->Hash(),
+                                           ZoneAllocationPolicy(zone))
       ->value = data;
 }
 
diff --git a/src/crankshaft/hydrogen-bce.h b/src/crankshaft/hydrogen-bce.h
index e819ffc..237fb95 100644
--- a/src/crankshaft/hydrogen-bce.h
+++ b/src/crankshaft/hydrogen-bce.h
@@ -13,7 +13,7 @@
 
 class BoundsCheckBbData;
 class BoundsCheckKey;
-class BoundsCheckTable : private ZoneHashMap {
+class BoundsCheckTable : private CustomMatcherZoneHashMap {
  public:
   explicit BoundsCheckTable(Zone* zone);
 
diff --git a/src/crankshaft/hydrogen-flow-engine.h b/src/crankshaft/hydrogen-flow-engine.h
index 3a488dd..149c99b 100644
--- a/src/crankshaft/hydrogen-flow-engine.h
+++ b/src/crankshaft/hydrogen-flow-engine.h
@@ -5,9 +5,9 @@
 #ifndef V8_CRANKSHAFT_HYDROGEN_FLOW_ENGINE_H_
 #define V8_CRANKSHAFT_HYDROGEN_FLOW_ENGINE_H_
 
-#include "src/crankshaft/hydrogen.h"
 #include "src/crankshaft/hydrogen-instructions.h"
-#include "src/zone.h"
+#include "src/crankshaft/hydrogen.h"
+#include "src/zone/zone.h"
 
 namespace v8 {
 namespace internal {
diff --git a/src/crankshaft/hydrogen-gvn.h b/src/crankshaft/hydrogen-gvn.h
index 9a8d407..5f11737 100644
--- a/src/crankshaft/hydrogen-gvn.h
+++ b/src/crankshaft/hydrogen-gvn.h
@@ -7,9 +7,9 @@
 
 #include <iosfwd>
 
-#include "src/crankshaft/hydrogen.h"
 #include "src/crankshaft/hydrogen-instructions.h"
-#include "src/zone.h"
+#include "src/crankshaft/hydrogen.h"
+#include "src/zone/zone.h"
 
 namespace v8 {
 namespace internal {
diff --git a/src/crankshaft/hydrogen-instructions.cc b/src/crankshaft/hydrogen-instructions.cc
index 9fed961..3a0aaa7 100644
--- a/src/crankshaft/hydrogen-instructions.cc
+++ b/src/crankshaft/hydrogen-instructions.cc
@@ -831,7 +831,6 @@
     case HValue::kStoreCodeEntry:
     case HValue::kStoreKeyed:
     case HValue::kStoreNamedField:
-    case HValue::kStoreNamedGeneric:
     case HValue::kStringCharCodeAt:
     case HValue::kStringCharFromCode:
     case HValue::kThisFunction:
@@ -881,7 +880,6 @@
     case HValue::kSimulate:
     case HValue::kStackCheck:
     case HValue::kStoreContextSlot:
-    case HValue::kStoreKeyedGeneric:
     case HValue::kStringAdd:
     case HValue::kStringCompareAndBranch:
     case HValue::kSub:
@@ -3039,14 +3037,6 @@
 }
 
 
-std::ostream& HStoreNamedGeneric::PrintDataTo(
-    std::ostream& os) const {  // NOLINT
-  Handle<String> n = Handle<String>::cast(name());
-  return os << NameOf(object()) << "." << n->ToCString().get() << " = "
-            << NameOf(value());
-}
-
-
 std::ostream& HStoreNamedField::PrintDataTo(std::ostream& os) const {  // NOLINT
   os << NameOf(object()) << access_ << " = " << NameOf(value());
   if (NeedsWriteBarrier()) os << " (write-barrier)";
@@ -3070,13 +3060,6 @@
 }
 
 
-std::ostream& HStoreKeyedGeneric::PrintDataTo(
-    std::ostream& os) const {  // NOLINT
-  return os << NameOf(object()) << "[" << NameOf(key())
-            << "] = " << NameOf(value());
-}
-
-
 std::ostream& HTransitionElementsKind::PrintDataTo(
     std::ostream& os) const {  // NOLINT
   os << NameOf(object());
@@ -3236,8 +3219,8 @@
   int32_t new_dominator_size = dominator_size_constant + current_size_max_value;
 
   // Since we clear the first word after folded memory, we cannot use the
-  // whole Page::kMaxRegularHeapObjectSize memory.
-  if (new_dominator_size > Page::kMaxRegularHeapObjectSize - kPointerSize) {
+  // whole kMaxRegularHeapObjectSize memory.
+  if (new_dominator_size > kMaxRegularHeapObjectSize - kPointerSize) {
     if (FLAG_trace_allocation_folding) {
       PrintF("#%d (%s) cannot fold into #%d (%s) due to size: %d\n",
           id(), Mnemonic(), dominator_allocate->id(),
diff --git a/src/crankshaft/hydrogen-instructions.h b/src/crankshaft/hydrogen-instructions.h
index 41b1e1b..cfede98 100644
--- a/src/crankshaft/hydrogen-instructions.h
+++ b/src/crankshaft/hydrogen-instructions.h
@@ -9,6 +9,7 @@
 #include <iosfwd>
 
 #include "src/allocation.h"
+#include "src/ast/ast.h"
 #include "src/base/bits.h"
 #include "src/bit-vector.h"
 #include "src/code-stubs.h"
@@ -19,7 +20,7 @@
 #include "src/globals.h"
 #include "src/small-pointer-list.h"
 #include "src/utils.h"
-#include "src/zone.h"
+#include "src/zone/zone.h"
 
 namespace v8 {
 namespace internal {
@@ -36,6 +37,7 @@
 class HValue;
 class LInstruction;
 class LChunkBuilder;
+class SmallMapList;
 
 #define HYDROGEN_ABSTRACT_INSTRUCTION_LIST(V) \
   V(ArithmeticBinaryOperation)                \
@@ -131,9 +133,7 @@
   V(StoreCodeEntry)                           \
   V(StoreContextSlot)                         \
   V(StoreKeyed)                               \
-  V(StoreKeyedGeneric)                        \
   V(StoreNamedField)                          \
-  V(StoreNamedGeneric)                        \
   V(StringAdd)                                \
   V(StringCharCodeAt)                         \
   V(StringCharFromCode)                       \
@@ -2176,7 +2176,8 @@
     } else {
       int par_index = index - 2;
       DCHECK(par_index < GetParameterCount());
-      return RepresentationFromType(descriptor_.GetParameterType(par_index));
+      return RepresentationFromMachineType(
+          descriptor_.GetParameterType(par_index));
     }
   }
 
@@ -2215,7 +2216,7 @@
                       TailCallMode syntactic_tail_call_mode,
                       TailCallMode tail_call_mode, Zone* zone)
       : descriptor_(descriptor),
-        values_(GetParameterCount() + 1, zone),
+        values_(GetParameterCount() + 1, zone),  // +1 here is for target.
         argument_count_(argument_count),
         bit_field_(
             TailCallModeField::encode(tail_call_mode) |
@@ -2237,7 +2238,7 @@
   }
 
   int GetParameterCount() const {
-    return descriptor_.GetRegisterParameterCount() + 1;
+    return descriptor_.GetParameterCount() + 1;  // +1 here is for context.
   }
 
   void InternalSetOperandAt(int index, HValue* value) final {
@@ -6326,52 +6327,6 @@
   uint32_t bit_field_;
 };
 
-class HStoreNamedGeneric final : public HTemplateInstruction<3> {
- public:
-  DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P6(HStoreNamedGeneric, HValue*,
-                                              Handle<Name>, HValue*,
-                                              LanguageMode,
-                                              Handle<TypeFeedbackVector>,
-                                              FeedbackVectorSlot);
-  HValue* object() const { return OperandAt(0); }
-  HValue* value() const { return OperandAt(1); }
-  HValue* context() const { return OperandAt(2); }
-  Handle<Name> name() const { return name_; }
-  LanguageMode language_mode() const { return language_mode_; }
-
-  std::ostream& PrintDataTo(std::ostream& os) const override;  // NOLINT
-
-  Representation RequiredInputRepresentation(int index) override {
-    return Representation::Tagged();
-  }
-
-  FeedbackVectorSlot slot() const { return slot_; }
-  Handle<TypeFeedbackVector> feedback_vector() const {
-    return feedback_vector_;
-  }
-
-  DECLARE_CONCRETE_INSTRUCTION(StoreNamedGeneric)
-
- private:
-  HStoreNamedGeneric(HValue* context, HValue* object, Handle<Name> name,
-                     HValue* value, LanguageMode language_mode,
-                     Handle<TypeFeedbackVector> vector, FeedbackVectorSlot slot)
-      : name_(name),
-        feedback_vector_(vector),
-        slot_(slot),
-        language_mode_(language_mode) {
-    SetOperandAt(0, object);
-    SetOperandAt(1, value);
-    SetOperandAt(2, context);
-    SetAllSideEffects();
-  }
-
-  Handle<Name> name_;
-  Handle<TypeFeedbackVector> feedback_vector_;
-  FeedbackVectorSlot slot_;
-  LanguageMode language_mode_;
-};
-
 class HStoreKeyed final : public HTemplateInstruction<4>,
                           public ArrayInstructionInterface {
  public:
@@ -6554,50 +6509,6 @@
   HValue* dominator_;
 };
 
-class HStoreKeyedGeneric final : public HTemplateInstruction<4> {
- public:
-  DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P6(HStoreKeyedGeneric, HValue*,
-                                              HValue*, HValue*, LanguageMode,
-                                              Handle<TypeFeedbackVector>,
-                                              FeedbackVectorSlot);
-
-  HValue* object() const { return OperandAt(0); }
-  HValue* key() const { return OperandAt(1); }
-  HValue* value() const { return OperandAt(2); }
-  HValue* context() const { return OperandAt(3); }
-  LanguageMode language_mode() const { return language_mode_; }
-
-  Representation RequiredInputRepresentation(int index) override {
-    // tagged[tagged] = tagged
-    return Representation::Tagged();
-  }
-
-  FeedbackVectorSlot slot() const { return slot_; }
-  Handle<TypeFeedbackVector> feedback_vector() const {
-    return feedback_vector_;
-  }
-
-  std::ostream& PrintDataTo(std::ostream& os) const override;  // NOLINT
-
-  DECLARE_CONCRETE_INSTRUCTION(StoreKeyedGeneric)
-
- private:
-  HStoreKeyedGeneric(HValue* context, HValue* object, HValue* key,
-                     HValue* value, LanguageMode language_mode,
-                     Handle<TypeFeedbackVector> vector, FeedbackVectorSlot slot)
-      : feedback_vector_(vector), slot_(slot), language_mode_(language_mode) {
-    SetOperandAt(0, object);
-    SetOperandAt(1, key);
-    SetOperandAt(2, value);
-    SetOperandAt(3, context);
-    SetAllSideEffects();
-  }
-
-  Handle<TypeFeedbackVector> feedback_vector_;
-  FeedbackVectorSlot slot_;
-  LanguageMode language_mode_;
-};
-
 class HTransitionElementsKind final : public HTemplateInstruction<2> {
  public:
   inline static HTransitionElementsKind* New(Isolate* isolate, Zone* zone,
diff --git a/src/crankshaft/hydrogen-osr.h b/src/crankshaft/hydrogen-osr.h
index 0610b42..3bd9b6e 100644
--- a/src/crankshaft/hydrogen-osr.h
+++ b/src/crankshaft/hydrogen-osr.h
@@ -6,7 +6,7 @@
 #define V8_CRANKSHAFT_HYDROGEN_OSR_H_
 
 #include "src/crankshaft/hydrogen.h"
-#include "src/zone.h"
+#include "src/zone/zone.h"
 
 namespace v8 {
 namespace internal {
diff --git a/src/crankshaft/hydrogen-types.cc b/src/crankshaft/hydrogen-types.cc
index 20d50d8..684e6ad 100644
--- a/src/crankshaft/hydrogen-types.cc
+++ b/src/crankshaft/hydrogen-types.cc
@@ -12,17 +12,17 @@
 namespace internal {
 
 // static
-HType HType::FromType(Type* type) {
-  if (Type::Any()->Is(type)) return HType::Any();
+HType HType::FromType(AstType* type) {
+  if (AstType::Any()->Is(type)) return HType::Any();
   if (!type->IsInhabited()) return HType::None();
-  if (type->Is(Type::SignedSmall())) return HType::Smi();
-  if (type->Is(Type::Number())) return HType::TaggedNumber();
-  if (type->Is(Type::Null())) return HType::Null();
-  if (type->Is(Type::String())) return HType::String();
-  if (type->Is(Type::Boolean())) return HType::Boolean();
-  if (type->Is(Type::Undefined())) return HType::Undefined();
-  if (type->Is(Type::Object())) return HType::JSObject();
-  if (type->Is(Type::DetectableReceiver())) return HType::JSReceiver();
+  if (type->Is(AstType::SignedSmall())) return HType::Smi();
+  if (type->Is(AstType::Number())) return HType::TaggedNumber();
+  if (type->Is(AstType::Null())) return HType::Null();
+  if (type->Is(AstType::String())) return HType::String();
+  if (type->Is(AstType::Boolean())) return HType::Boolean();
+  if (type->Is(AstType::Undefined())) return HType::Undefined();
+  if (type->Is(AstType::Object())) return HType::JSObject();
+  if (type->Is(AstType::DetectableReceiver())) return HType::JSReceiver();
   return HType::Tagged();
 }
 
diff --git a/src/crankshaft/hydrogen-types.h b/src/crankshaft/hydrogen-types.h
index 0690ece..3e68872 100644
--- a/src/crankshaft/hydrogen-types.h
+++ b/src/crankshaft/hydrogen-types.h
@@ -8,8 +8,8 @@
 #include <climits>
 #include <iosfwd>
 
+#include "src/ast/ast-types.h"
 #include "src/base/macros.h"
-#include "src/types.h"
 
 namespace v8 {
 namespace internal {
@@ -64,7 +64,7 @@
   HTYPE_LIST(DECLARE_IS_TYPE)
   #undef DECLARE_IS_TYPE
 
-  static HType FromType(Type* type) WARN_UNUSED_RESULT;
+  static HType FromType(AstType* type) WARN_UNUSED_RESULT;
   static HType FromFieldType(Handle<FieldType> type,
                              Zone* temp_zone) WARN_UNUSED_RESULT;
   static HType FromValue(Handle<Object> value) WARN_UNUSED_RESULT;
diff --git a/src/crankshaft/hydrogen.cc b/src/crankshaft/hydrogen.cc
index a33d2a6..8d7b479 100644
--- a/src/crankshaft/hydrogen.cc
+++ b/src/crankshaft/hydrogen.cc
@@ -9,6 +9,7 @@
 
 #include "src/allocation-site-scopes.h"
 #include "src/ast/ast-numbering.h"
+#include "src/ast/compile-time-value.h"
 #include "src/ast/scopes.h"
 #include "src/code-factory.h"
 #include "src/crankshaft/hydrogen-bce.h"
@@ -42,7 +43,6 @@
 // GetRootConstructor
 #include "src/ic/ic-inl.h"
 #include "src/isolate-inl.h"
-#include "src/parsing/parser.h"
 #include "src/runtime/runtime.h"
 
 #if V8_TARGET_ARCH_IA32
@@ -75,7 +75,9 @@
 class HOptimizedGraphBuilderWithPositions : public HOptimizedGraphBuilder {
  public:
   explicit HOptimizedGraphBuilderWithPositions(CompilationInfo* info)
-      : HOptimizedGraphBuilder(info) {}
+      : HOptimizedGraphBuilder(info, true) {
+    SetSourcePosition(info->shared_info()->start_position());
+  }
 
 #define DEF_VISIT(type)                                      \
   void Visit##type(type* node) override {                    \
@@ -178,9 +180,10 @@
   }
 
   HOptimizedGraphBuilder* graph_builder =
-      (info()->is_tracking_positions() || FLAG_trace_ic)
+      (FLAG_hydrogen_track_positions || isolate()->is_profiling() ||
+       FLAG_trace_ic)
           ? new (info()->zone()) HOptimizedGraphBuilderWithPositions(info())
-          : new (info()->zone()) HOptimizedGraphBuilder(info());
+          : new (info()->zone()) HOptimizedGraphBuilder(info(), false);
 
   // Type-check the function.
   AstTyper(info()->isolate(), info()->zone(), info()->closure(),
@@ -1362,7 +1365,7 @@
   DCHECK(!FLAG_minimal);
   graph_ = new (zone()) HGraph(info_, descriptor_);
   if (FLAG_hydrogen_stats) isolate()->GetHStatistics()->Initialize(info_);
-  if (!info_->IsStub() && info_->is_tracking_positions()) {
+  if (!info_->IsStub() && is_tracking_positions()) {
     TraceInlinedFunction(info_->shared_info(), SourcePosition::Unknown());
   }
   CompilationPhase phase("H_Block building", info_);
@@ -1374,7 +1377,7 @@
 
 int HGraphBuilder::TraceInlinedFunction(Handle<SharedFunctionInfo> shared,
                                         SourcePosition position) {
-  DCHECK(info_->is_tracking_positions());
+  DCHECK(is_tracking_positions());
 
   int inline_id = static_cast<int>(graph()->inlined_function_infos().size());
   HInlinedFunctionInfo info(shared->start_position());
@@ -1645,48 +1648,6 @@
 }
 
 
-void HGraphBuilder::BuildTransitionElementsKind(HValue* object,
-                                                HValue* map,
-                                                ElementsKind from_kind,
-                                                ElementsKind to_kind,
-                                                bool is_jsarray) {
-  DCHECK(!IsFastHoleyElementsKind(from_kind) ||
-         IsFastHoleyElementsKind(to_kind));
-
-  if (AllocationSite::GetMode(from_kind, to_kind) == TRACK_ALLOCATION_SITE) {
-    Add<HTrapAllocationMemento>(object);
-  }
-
-  if (!IsSimpleMapChangeTransition(from_kind, to_kind)) {
-    HInstruction* elements = AddLoadElements(object);
-
-    HInstruction* empty_fixed_array = Add<HConstant>(
-        isolate()->factory()->empty_fixed_array());
-
-    IfBuilder if_builder(this);
-
-    if_builder.IfNot<HCompareObjectEqAndBranch>(elements, empty_fixed_array);
-
-    if_builder.Then();
-
-    HInstruction* elements_length = AddLoadFixedArrayLength(elements);
-
-    HInstruction* array_length =
-        is_jsarray
-            ? Add<HLoadNamedField>(object, nullptr,
-                                   HObjectAccess::ForArrayLength(from_kind))
-            : elements_length;
-
-    BuildGrowElementsCapacity(object, elements, from_kind, to_kind,
-                              array_length, elements_length);
-
-    if_builder.End();
-  }
-
-  Add<HStoreNamedField>(object, HObjectAccess::ForMap(), map);
-}
-
-
 void HGraphBuilder::BuildJSObjectCheck(HValue* receiver,
                                        int bit_field_mask) {
   // Check that the object isn't a smi.
@@ -2129,8 +2090,7 @@
   return result;
 }
 
-
-HValue* HGraphBuilder::BuildNumberToString(HValue* object, Type* type) {
+HValue* HGraphBuilder::BuildNumberToString(HValue* object, AstType* type) {
   NoObservableSideEffectsScope scope(this);
 
   // Convert constant numbers at compile time.
@@ -2180,7 +2140,7 @@
   }
   if_objectissmi.Else();
   {
-    if (type->Is(Type::SignedSmall())) {
+    if (type->Is(AstType::SignedSmall())) {
       if_objectissmi.Deopt(DeoptimizeReason::kExpectedSmi);
     } else {
       // Check if the object is a heap number.
@@ -2236,7 +2196,7 @@
       }
       if_objectisnumber.Else();
       {
-        if (type->Is(Type::Number())) {
+        if (type->Is(AstType::Number())) {
           if_objectisnumber.Deopt(DeoptimizeReason::kExpectedHeapNumber);
         }
       }
@@ -2411,7 +2371,7 @@
   HValue* length = AddUncasted<HAdd>(left_length, right_length);
   // Check that length <= kMaxLength <=> length < MaxLength + 1.
   HValue* max_length = Add<HConstant>(String::kMaxLength + 1);
-  if (top_info()->IsStub()) {
+  if (top_info()->IsStub() || !isolate()->IsStringLengthOverflowIntact()) {
     // This is a mitigation for crbug.com/627934; the real fix
     // will be to migrate the StringAddStub to TurboFan one day.
     IfBuilder if_invalid(this);
@@ -2423,6 +2383,7 @@
     }
     if_invalid.End();
   } else {
+    graph()->MarkDependsOnStringLengthOverflow();
     Add<HBoundsCheck>(length, max_length);
   }
   return length;
@@ -2652,7 +2613,7 @@
 
       IfBuilder if_size(this);
       if_size.If<HCompareNumericAndBranch>(
-          size, Add<HConstant>(Page::kMaxRegularHeapObjectSize), Token::LT);
+          size, Add<HConstant>(kMaxRegularHeapObjectSize), Token::LT);
       if_size.Then();
       {
         // Allocate the string object. HAllocate does not care whether we pass
@@ -3075,9 +3036,10 @@
                                                  ElementsKind new_kind,
                                                  HValue* length,
                                                  HValue* new_capacity) {
-  Add<HBoundsCheck>(new_capacity, Add<HConstant>(
-          (Page::kMaxRegularHeapObjectSize - FixedArray::kHeaderSize) >>
-          ElementsKindToShiftSize(new_kind)));
+  Add<HBoundsCheck>(
+      new_capacity,
+      Add<HConstant>((kMaxRegularHeapObjectSize - FixedArray::kHeaderSize) >>
+                     ElementsKindToShiftSize(new_kind)));
 
   HValue* new_elements =
       BuildAllocateAndInitializeArray(new_kind, new_capacity);
@@ -3268,93 +3230,6 @@
   AddIncrementCounter(counters->inlined_copied_elements());
 }
 
-
-HValue* HGraphBuilder::BuildCloneShallowArrayCow(HValue* boilerplate,
-                                                 HValue* allocation_site,
-                                                 AllocationSiteMode mode,
-                                                 ElementsKind kind) {
-  HAllocate* array = AllocateJSArrayObject(mode);
-
-  HValue* map = AddLoadMap(boilerplate);
-  HValue* elements = AddLoadElements(boilerplate);
-  HValue* length = AddLoadArrayLength(boilerplate, kind);
-
-  BuildJSArrayHeader(array,
-                     map,
-                     elements,
-                     mode,
-                     FAST_ELEMENTS,
-                     allocation_site,
-                     length);
-  return array;
-}
-
-
-HValue* HGraphBuilder::BuildCloneShallowArrayEmpty(HValue* boilerplate,
-                                                   HValue* allocation_site,
-                                                   AllocationSiteMode mode) {
-  HAllocate* array = AllocateJSArrayObject(mode);
-
-  HValue* map = AddLoadMap(boilerplate);
-
-  BuildJSArrayHeader(array,
-                     map,
-                     NULL,  // set elements to empty fixed array
-                     mode,
-                     FAST_ELEMENTS,
-                     allocation_site,
-                     graph()->GetConstant0());
-  return array;
-}
-
-
-HValue* HGraphBuilder::BuildCloneShallowArrayNonEmpty(HValue* boilerplate,
-                                                      HValue* allocation_site,
-                                                      AllocationSiteMode mode,
-                                                      ElementsKind kind) {
-  HValue* boilerplate_elements = AddLoadElements(boilerplate);
-  HValue* capacity = AddLoadFixedArrayLength(boilerplate_elements);
-
-  // Generate size calculation code here in order to make it dominate
-  // the JSArray allocation.
-  HValue* elements_size = BuildCalculateElementsSize(kind, capacity);
-
-  // Create empty JSArray object for now, store elimination should remove
-  // redundant initialization of elements and length fields and at the same
-  // time the object will be fully prepared for GC if it happens during
-  // elements allocation.
-  HValue* result = BuildCloneShallowArrayEmpty(
-      boilerplate, allocation_site, mode);
-
-  HAllocate* elements = BuildAllocateElements(kind, elements_size);
-
-  Add<HStoreNamedField>(result, HObjectAccess::ForElementsPointer(), elements);
-
-  // The allocation for the cloned array above causes register pressure on
-  // machines with low register counts. Force a reload of the boilerplate
-  // elements here to free up a register for the allocation to avoid unnecessary
-  // spillage.
-  boilerplate_elements = AddLoadElements(boilerplate);
-  boilerplate_elements->SetFlag(HValue::kCantBeReplaced);
-
-  // Copy the elements array header.
-  for (int i = 0; i < FixedArrayBase::kHeaderSize; i += kPointerSize) {
-    HObjectAccess access = HObjectAccess::ForFixedArrayHeader(i);
-    Add<HStoreNamedField>(
-        elements, access,
-        Add<HLoadNamedField>(boilerplate_elements, nullptr, access));
-  }
-
-  // And the result of the length
-  HValue* length = AddLoadArrayLength(boilerplate, kind);
-  Add<HStoreNamedField>(result, HObjectAccess::ForArrayLength(kind), length);
-
-  BuildCopyElements(boilerplate_elements, kind, elements,
-                    kind, length, NULL);
-  return result;
-}
-
-
 void HGraphBuilder::BuildCreateAllocationMemento(
     HValue* previous_object,
     HValue* previous_object_size,
@@ -3402,16 +3277,6 @@
 }
 
 
-HInstruction* HGraphBuilder::BuildGetScriptContext(int context_index) {
-  HValue* native_context = BuildGetNativeContext();
-  HValue* script_context_table = Add<HLoadNamedField>(
-      native_context, nullptr,
-      HObjectAccess::ForContextSlot(Context::SCRIPT_CONTEXT_TABLE_INDEX));
-  return Add<HLoadNamedField>(script_context_table, nullptr,
-                              HObjectAccess::ForScriptContext(context_index));
-}
-
-
 HValue* HGraphBuilder::BuildGetParentContext(HValue* depth, int depth_value) {
   HValue* script_context = context();
   if (depth != NULL) {
@@ -3504,8 +3369,9 @@
   return Add<HLoadNamedField>(native_context, nullptr, function_access);
 }
 
-HOptimizedGraphBuilder::HOptimizedGraphBuilder(CompilationInfo* info)
-    : HGraphBuilder(info, CallInterfaceDescriptor()),
+HOptimizedGraphBuilder::HOptimizedGraphBuilder(CompilationInfo* info,
+                                               bool track_positions)
+    : HGraphBuilder(info, CallInterfaceDescriptor(), track_positions),
       function_state_(NULL),
       initial_function_state_(this, info, NORMAL_RETURN, 0,
                               TailCallMode::kAllow),
@@ -3520,9 +3386,6 @@
   // to know it's the initial state.
   function_state_ = &initial_function_state_;
   InitializeAstVisitor(info->isolate());
-  if (top_info()->is_tracking_positions()) {
-    SetSourcePosition(info->shared_info()->start_position());
-  }
 }
 
 
@@ -3622,6 +3485,7 @@
       allow_code_motion_(false),
       use_optimistic_licm_(false),
       depends_on_empty_array_proto_elements_(false),
+      depends_on_string_length_overflow_(false),
       type_change_checksum_(0),
       maximum_environment_size_(0),
       no_side_effects_scope_count_(0),
@@ -3629,8 +3493,8 @@
       inlined_function_infos_(info->zone()) {
   if (info->IsStub()) {
     // For stubs, explicitly add the context to the environment.
-    start_environment_ = new (zone_)
-        HEnvironment(zone_, descriptor.GetRegisterParameterCount() + 1);
+    start_environment_ =
+        new (zone_) HEnvironment(zone_, descriptor.GetParameterCount() + 1);
   } else {
     start_environment_ =
         new(zone_) HEnvironment(NULL, info->scope(), info->closure(), zone_);
@@ -4088,7 +3952,7 @@
   // Push on the state stack.
   owner->set_function_state(this);
 
-  if (compilation_info_->is_tracking_positions()) {
+  if (owner->is_tracking_positions()) {
     outer_source_position_ = owner->source_position();
     owner->EnterInlinedSource(
       info->shared_info()->start_position(),
@@ -4102,7 +3966,7 @@
   delete test_context_;
   owner_->set_function_state(outer_);
 
-  if (compilation_info_->is_tracking_positions()) {
+  if (owner_->is_tracking_positions()) {
     owner_->set_source_position(outer_source_position_);
     owner_->EnterInlinedSource(
       outer_->compilation_info()->shared_info()->start_position(),
@@ -4651,9 +4515,7 @@
     environment()->Bind(scope->arguments(), arguments_object);
   }
 
-  int rest_index;
-  Variable* rest = scope->rest_parameter(&rest_index);
-  if (rest) {
+  if (scope->rest_parameter() != nullptr) {
     return Bailout(kRestParameter);
   }
 
@@ -4704,7 +4566,7 @@
         }
         AddInstruction(function);
         // Allocate a block context and store it to the stack frame.
-        HValue* scope_info = Add<HConstant>(scope->GetScopeInfo(isolate()));
+        HValue* scope_info = Add<HConstant>(scope->scope_info());
         Add<HPushArguments>(scope_info, function);
         HInstruction* inner_context = Add<HCallRuntime>(
             Runtime::FunctionForId(Runtime::kPushBlockContext), 2);
@@ -5001,7 +4863,7 @@
   CHECK_ALIVE(VisitForValue(stmt->tag()));
   Add<HSimulate>(stmt->EntryId());
   HValue* tag_value = Top();
-  Type* tag_type = bounds_.get(stmt->tag()).lower;
+  AstType* tag_type = bounds_.get(stmt->tag()).lower;
 
   // 1. Build all the tests, with dangling true branches
   BailoutId default_id = BailoutId::None();
@@ -5018,8 +4880,8 @@
     if (current_block() == NULL) return Bailout(kUnsupportedSwitchStatement);
     HValue* label_value = Pop();
 
-    Type* label_type = bounds_.get(clause->label()).lower;
-    Type* combined_type = clause->compare_type();
+    AstType* label_type = bounds_.get(clause->label()).lower;
+    AstType* combined_type = clause->compare_type();
     HControlInstruction* compare = BuildCompareInstruction(
         Token::EQ_STRICT, tag_value, label_value, tag_type, label_type,
         combined_type,
@@ -5634,7 +5496,6 @@
   DCHECK(current_block()->HasPredecessor());
   Variable* variable = expr->var();
   switch (variable->location()) {
-    case VariableLocation::GLOBAL:
     case VariableLocation::UNALLOCATED: {
       if (IsLexicalVariableMode(variable->mode())) {
         // TODO(rossberg): should this be an DCHECK?
@@ -6218,7 +6079,7 @@
     PropertyAccessInfo* info) {
   if (!CanInlinePropertyAccess(map_)) return false;
 
-  // Currently only handle Type::Number as a polymorphic case.
+  // Currently only handle AstType::Number as a polymorphic case.
   // TODO(verwaest): Support monomorphic handling of numbers with a HCheckNumber
   // instruction.
   if (IsNumberType()) return false;
@@ -6929,9 +6790,16 @@
         HObjectAccess::ForContextSlot(Context::EXTENSION_INDEX));
     Handle<TypeFeedbackVector> vector =
         handle(current_feedback_vector(), isolate());
-    HStoreNamedGeneric* instr =
-        Add<HStoreNamedGeneric>(global_object, var->name(), value,
-                                function_language_mode(), vector, slot);
+    HValue* name = Add<HConstant>(var->name());
+    HValue* vector_value = Add<HConstant>(vector);
+    HValue* slot_value = Add<HConstant>(vector->GetIndex(slot));
+    Callable callable = CodeFactory::StoreICInOptimizedCode(
+        isolate(), function_language_mode());
+    HValue* stub = Add<HConstant>(callable.code());
+    HValue* values[] = {context(), global_object, name,
+                        value,     slot_value,    vector_value};
+    HCallWithDescriptor* instr = Add<HCallWithDescriptor>(
+        stub, 0, callable.descriptor(), ArrayVector(values));
     USE(instr);
     DCHECK(instr->HasObservableSideEffects());
     Add<HSimulate>(ast_id, REMOVABLE_SIMULATE);
@@ -6958,7 +6826,6 @@
     CHECK_ALIVE(VisitForValue(operation));
 
     switch (var->location()) {
-      case VariableLocation::GLOBAL:
       case VariableLocation::UNALLOCATED:
         HandleGlobalVariableAssignment(var, Top(), expr->AssignmentSlot(),
                                        expr->AssignmentId());
@@ -6966,9 +6833,6 @@
 
       case VariableLocation::PARAMETER:
       case VariableLocation::LOCAL:
-        if (var->mode() == CONST_LEGACY)  {
-          return Bailout(kUnsupportedConstCompoundAssignment);
-        }
         if (var->mode() == CONST) {
           return Bailout(kNonInitializerAssignmentToConst);
         }
@@ -6998,9 +6862,7 @@
             mode = HStoreContextSlot::kCheckDeoptimize;
             break;
           case CONST:
-            return Bailout(kNonInitializerAssignmentToConst);
-          case CONST_LEGACY:
-            if (is_strict(function_language_mode())) {
+            if (var->throw_on_const_assignment(function_language_mode())) {
               return Bailout(kNonInitializerAssignmentToConst);
             } else {
               return ast_context()->ReturnValue(Pop());
@@ -7072,33 +6934,17 @@
 
     if (var->mode() == CONST) {
       if (expr->op() != Token::INIT) {
-        return Bailout(kNonInitializerAssignmentToConst);
-      }
-    } else if (var->mode() == CONST_LEGACY) {
-      if (expr->op() != Token::INIT) {
-        if (is_strict(function_language_mode())) {
+        if (var->throw_on_const_assignment(function_language_mode())) {
           return Bailout(kNonInitializerAssignmentToConst);
         } else {
           CHECK_ALIVE(VisitForValue(expr->value()));
           return ast_context()->ReturnValue(Pop());
         }
       }
-
-      // TODO(adamk): Is this required? Legacy const variables are always
-      // initialized before use.
-      if (var->IsStackAllocated()) {
-        // We insert a use of the old value to detect unsupported uses of const
-        // variables (e.g. initialization inside a loop).
-        HValue* old_value = environment()->Lookup(var);
-        Add<HUseConst>(old_value);
-      }
     }
 
-    if (var->is_arguments()) return Bailout(kAssignmentToArguments);
-
     // Handle the assignment.
     switch (var->location()) {
-      case VariableLocation::GLOBAL:
       case VariableLocation::UNALLOCATED:
         CHECK_ALIVE(VisitForValue(expr->value()));
         HandleGlobalVariableAssignment(var, Top(), expr->AssignmentSlot(),
@@ -7147,10 +6993,10 @@
               mode = HStoreContextSlot::kCheckDeoptimize;
               break;
             case CONST:
-              // This case is checked statically so no need to
-              // perform checks here
-              UNREACHABLE();
-            case CONST_LEGACY:
+              // If we reached this point, the only possibility
+              // is a sloppy assignment to a function name.
+              DCHECK(function_language_mode() == SLOPPY &&
+                     !var->throw_on_const_assignment(SLOPPY));
               return ast_context()->ReturnValue(Pop());
             default:
               mode = HStoreContextSlot::kNoCheck;
@@ -7200,7 +7046,7 @@
   CHECK_ALIVE(VisitForValue(expr->exception()));
 
   HValue* value = environment()->Pop();
-  if (!top_info()->is_tracking_positions()) SetSourcePosition(expr->position());
+  if (!is_tracking_positions()) SetSourcePosition(expr->position());
   Add<HPushArguments>(value);
   Add<HCallRuntime>(Runtime::FunctionForId(Runtime::kThrow), 1);
   Add<HSimulate>(expr->id());
@@ -7274,20 +7120,30 @@
     Handle<TypeFeedbackVector> vector =
         handle(current_feedback_vector(), isolate());
 
+    HValue* key = Add<HConstant>(name);
+    HValue* vector_value = Add<HConstant>(vector);
+    HValue* slot_value = Add<HConstant>(vector->GetIndex(slot));
+    HValue* values[] = {context(), object,     key,
+                        value,     slot_value, vector_value};
+
     if (current_feedback_vector()->GetKind(slot) ==
         FeedbackVectorSlotKind::KEYED_STORE_IC) {
       // It's possible that a keyed store of a constant string was converted
       // to a named store. Here, at the last minute, we need to make sure to
       // use a generic Keyed Store if we are using the type vector, because
       // it has to share information with full code.
-      HConstant* key = Add<HConstant>(name);
-      HStoreKeyedGeneric* result = New<HStoreKeyedGeneric>(
-          object, key, value, function_language_mode(), vector, slot);
+      Callable callable = CodeFactory::KeyedStoreICInOptimizedCode(
+          isolate(), function_language_mode());
+      HValue* stub = Add<HConstant>(callable.code());
+      HCallWithDescriptor* result = New<HCallWithDescriptor>(
+          stub, 0, callable.descriptor(), ArrayVector(values));
       return result;
     }
-
-    HStoreNamedGeneric* result = New<HStoreNamedGeneric>(
-        object, name, value, function_language_mode(), vector, slot);
+    Callable callable = CodeFactory::StoreICInOptimizedCode(
+        isolate(), function_language_mode());
+    HValue* stub = Add<HConstant>(callable.code());
+    HCallWithDescriptor* result = New<HCallWithDescriptor>(
+        stub, 0, callable.descriptor(), ArrayVector(values));
     return result;
   }
 }
@@ -7303,8 +7159,16 @@
         New<HLoadKeyedGeneric>(object, key, vector, slot);
     return result;
   } else {
-    HStoreKeyedGeneric* result = New<HStoreKeyedGeneric>(
-        object, key, value, function_language_mode(), vector, slot);
+    HValue* vector_value = Add<HConstant>(vector);
+    HValue* slot_value = Add<HConstant>(vector->GetIndex(slot));
+    HValue* values[] = {context(), object,     key,
+                        value,     slot_value, vector_value};
+
+    Callable callable = CodeFactory::KeyedStoreICInOptimizedCode(
+        isolate(), function_language_mode());
+    HValue* stub = Add<HConstant>(callable.code());
+    HCallWithDescriptor* result = New<HCallWithDescriptor>(
+        stub, 0, callable.descriptor(), ArrayVector(values));
     return result;
   }
 }
@@ -7843,7 +7707,7 @@
     }
 
     HValue* checked_object;
-    // Type::Number() is only supported by polymorphic load/call handling.
+    // AstType::Number() is only supported by polymorphic load/call handling.
     DCHECK(!info.IsNumberType());
     BuildCheckHeapObject(object);
     if (AreStringTypes(maps)) {
@@ -8409,14 +8273,12 @@
     return false;
   }
 
-  if (target_info.scope()->num_heap_slots() > 0) {
+  if (target_info.scope()->NeedsContext()) {
     TraceInline(target, caller, "target has context-allocated variables");
     return false;
   }
 
-  int rest_index;
-  Variable* rest = target_info.scope()->rest_parameter(&rest_index);
-  if (rest) {
+  if (target_info.scope()->rest_parameter() != nullptr) {
     TraceInline(target, caller, "target uses rest parameters");
     return false;
   }
@@ -8490,7 +8352,7 @@
       .Run();
 
   int inlining_id = 0;
-  if (top_info()->is_tracking_positions()) {
+  if (is_tracking_positions()) {
     inlining_id = TraceInlinedFunction(target_shared, source_position());
   }
 
@@ -8539,7 +8401,7 @@
       return_id, target, context, arguments_count, function,
       function_state()->inlining_kind(), function->scope()->arguments(),
       arguments_object, syntactic_tail_call_mode);
-  if (top_info()->is_tracking_positions()) {
+  if (is_tracking_positions()) {
     enter_inlined->set_inlining_id(inlining_id);
   }
   function_state()->set_entry(enter_inlined);
@@ -9375,7 +9237,7 @@
   HValue* api_function_address = Add<HConstant>(ExternalReference(ref));
 
   HValue* op_vals[] = {context(), Add<HConstant>(function), call_data, holder,
-                       api_function_address, nullptr};
+                       api_function_address};
 
   HInstruction* call = nullptr;
   CHECK(argc <= CallApiCallbackStub::kArgMax);
@@ -9386,16 +9248,14 @@
     HConstant* code_value = Add<HConstant>(code);
     call = New<HCallWithDescriptor>(
         code_value, argc + 1, stub.GetCallInterfaceDescriptor(),
-        Vector<HValue*>(op_vals, arraysize(op_vals) - 1),
-        syntactic_tail_call_mode);
+        Vector<HValue*>(op_vals, arraysize(op_vals)), syntactic_tail_call_mode);
   } else {
     CallApiCallbackStub stub(isolate(), argc, call_data_undefined, false);
     Handle<Code> code = stub.GetCode();
     HConstant* code_value = Add<HConstant>(code);
     call = New<HCallWithDescriptor>(
         code_value, argc + 1, stub.GetCallInterfaceDescriptor(),
-        Vector<HValue*>(op_vals, arraysize(op_vals) - 1),
-        syntactic_tail_call_mode);
+        Vector<HValue*>(op_vals, arraysize(op_vals)), syntactic_tail_call_mode);
     Drop(1);  // Drop function.
   }
 
@@ -9461,8 +9321,6 @@
     case kFunctionApply: {
       // For .apply, only the pattern f.apply(receiver, arguments)
       // is supported.
-      if (current_info()->scope()->arguments() == NULL) return false;
-
       if (!CanBeFunctionApplyArguments(expr)) return false;
 
       BuildFunctionApply(expr);
@@ -9482,6 +9340,10 @@
   HValue* function = Pop();  // f
   Drop(1);  // apply
 
+  // Make sure the arguments object is live.
+  VariableProxy* arg_two = args->at(1)->AsVariableProxy();
+  LookupAndMakeLive(arg_two->var());
+
   Handle<Map> function_map = expr->GetReceiverTypes()->first();
   HValue* checked_function = AddCheckMap(function, function_map);
 
@@ -9727,8 +9589,9 @@
   if (args->length() != 2) return false;
   VariableProxy* arg_two = args->at(1)->AsVariableProxy();
   if (arg_two == NULL || !arg_two->var()->IsStackAllocated()) return false;
-  HValue* arg_two_value = LookupAndMakeLive(arg_two->var());
+  HValue* arg_two_value = environment()->Lookup(arg_two->var());
   if (!arg_two_value->CheckFlag(HValue::kIsArguments)) return false;
+  DCHECK_NOT_NULL(current_info()->scope()->arguments());
   return true;
 }
 
@@ -9737,7 +9600,7 @@
   DCHECK(!HasStackOverflow());
   DCHECK(current_block() != NULL);
   DCHECK(current_block()->HasPredecessor());
-  if (!top_info()->is_tracking_positions()) SetSourcePosition(expr->position());
+  if (!is_tracking_positions()) SetSourcePosition(expr->position());
   Expression* callee = expr->expression();
   int argument_count = expr->arguments()->length() + 1;  // Plus receiver.
   HInstruction* call = NULL;
@@ -9975,7 +9838,7 @@
   HValue* elements_size = BuildCalculateElementsSize(kind, capacity);
 
   // Bail out for large objects.
-  HValue* max_size = Add<HConstant>(Page::kMaxRegularHeapObjectSize);
+  HValue* max_size = Add<HConstant>(kMaxRegularHeapObjectSize);
   Add<HBoundsCheck>(elements_size, max_size);
 
   // Allocate (dealing with failure appropriately).
@@ -10019,7 +9882,7 @@
   DCHECK(!HasStackOverflow());
   DCHECK(current_block() != NULL);
   DCHECK(current_block()->HasPredecessor());
-  if (!top_info()->is_tracking_positions()) SetSourcePosition(expr->position());
+  if (!is_tracking_positions()) SetSourcePosition(expr->position());
   int argument_count = expr->arguments()->length() + 1;  // Plus constructor.
   Factory* factory = isolate()->factory();
 
@@ -10419,6 +10282,8 @@
 
     HInstruction* length = AddUncasted<HDiv>(byte_length,
         Add<HConstant>(static_cast<int32_t>(element_size)));
+    // Callers (in typedarray.js) ensure that length <= %_MaxSmi().
+    length = AddUncasted<HForceRepresentation>(length, Representation::Smi());
 
     Add<HStoreNamedField>(obj,
         HObjectAccess::ForJSTypedArrayLength(),
@@ -10602,7 +10467,7 @@
     return ast_context()->ReturnInstruction(instr, expr->id());
   } else if (proxy != NULL) {
     Variable* var = proxy->var();
-    if (var->IsUnallocatedOrGlobalSlot()) {
+    if (var->IsUnallocated()) {
       Bailout(kDeleteWithGlobalVariable);
     } else if (var->IsStackAllocated() || var->IsContextSlot()) {
       // Result of deleting non-global variables is false.  'this' is not really
@@ -10680,13 +10545,12 @@
   if (join != NULL) return ast_context()->ReturnValue(Pop());
 }
 
-
-static Representation RepresentationFor(Type* type) {
+static Representation RepresentationFor(AstType* type) {
   DisallowHeapAllocation no_allocation;
-  if (type->Is(Type::None())) return Representation::None();
-  if (type->Is(Type::SignedSmall())) return Representation::Smi();
-  if (type->Is(Type::Signed32())) return Representation::Integer32();
-  if (type->Is(Type::Number())) return Representation::Double();
+  if (type->Is(AstType::None())) return Representation::None();
+  if (type->Is(AstType::SignedSmall())) return Representation::Smi();
+  if (type->Is(AstType::Signed32())) return Representation::Integer32();
+  if (type->Is(AstType::Number())) return Representation::Double();
   return Representation::Tagged();
 }
 
@@ -10745,7 +10609,7 @@
   DCHECK(!HasStackOverflow());
   DCHECK(current_block() != NULL);
   DCHECK(current_block()->HasPredecessor());
-  if (!top_info()->is_tracking_positions()) SetSourcePosition(expr->position());
+  if (!is_tracking_positions()) SetSourcePosition(expr->position());
   Expression* target = expr->expression();
   VariableProxy* proxy = target->AsVariableProxy();
   Property* prop = target->AsProperty();
@@ -10763,9 +10627,6 @@
 
   if (proxy != NULL) {
     Variable* var = proxy->var();
-    if (var->mode() == CONST_LEGACY)  {
-      return Bailout(kUnsupportedCountOperationWithConst);
-    }
     if (var->mode() == CONST) {
       return Bailout(kNonInitializerAssignmentToConst);
     }
@@ -10778,7 +10639,6 @@
     Push(after);
 
     switch (var->location()) {
-      case VariableLocation::GLOBAL:
       case VariableLocation::UNALLOCATED:
         HandleGlobalVariableAssignment(var, after, expr->CountSlot(),
                                        expr->AssignmentId());
@@ -10939,27 +10799,24 @@
   return true;
 }
 
-
-HValue* HGraphBuilder::EnforceNumberType(HValue* number,
-                                         Type* expected) {
-  if (expected->Is(Type::SignedSmall())) {
+HValue* HGraphBuilder::EnforceNumberType(HValue* number, AstType* expected) {
+  if (expected->Is(AstType::SignedSmall())) {
     return AddUncasted<HForceRepresentation>(number, Representation::Smi());
   }
-  if (expected->Is(Type::Signed32())) {
+  if (expected->Is(AstType::Signed32())) {
     return AddUncasted<HForceRepresentation>(number,
                                              Representation::Integer32());
   }
   return number;
 }
 
-
-HValue* HGraphBuilder::TruncateToNumber(HValue* value, Type** expected) {
+HValue* HGraphBuilder::TruncateToNumber(HValue* value, AstType** expected) {
   if (value->IsConstant()) {
     HConstant* constant = HConstant::cast(value);
     Maybe<HConstant*> number =
         constant->CopyToTruncatedNumber(isolate(), zone());
     if (number.IsJust()) {
-      *expected = Type::Number();
+      *expected = AstType::Number();
       return AddInstruction(number.FromJust());
     }
   }
@@ -10969,24 +10826,24 @@
   // pushes with a NoObservableSideEffectsScope.
   NoObservableSideEffectsScope no_effects(this);
 
-  Type* expected_type = *expected;
+  AstType* expected_type = *expected;
 
   // Separate the number type from the rest.
-  Type* expected_obj =
-      Type::Intersect(expected_type, Type::NonNumber(), zone());
-  Type* expected_number =
-      Type::Intersect(expected_type, Type::Number(), zone());
+  AstType* expected_obj =
+      AstType::Intersect(expected_type, AstType::NonNumber(), zone());
+  AstType* expected_number =
+      AstType::Intersect(expected_type, AstType::Number(), zone());
 
   // We expect to get a number.
-  // (We need to check first, since Type::None->Is(Type::Any()) == true.
-  if (expected_obj->Is(Type::None())) {
-    DCHECK(!expected_number->Is(Type::None()));
+  // (We need to check first, since AstType::None->Is(AstType::Any()) == true.
+  if (expected_obj->Is(AstType::None())) {
+    DCHECK(!expected_number->Is(AstType::None()));
     return value;
   }
 
-  if (expected_obj->Is(Type::Undefined())) {
+  if (expected_obj->Is(AstType::Undefined())) {
     // This is already done by HChange.
-    *expected = Type::Union(expected_number, Type::Number(), zone());
+    *expected = AstType::Union(expected_number, AstType::Number(), zone());
     return value;
   }
 
@@ -10999,9 +10856,9 @@
     HValue* left,
     HValue* right,
     PushBeforeSimulateBehavior push_sim_result) {
-  Type* left_type = bounds_.get(expr->left()).lower;
-  Type* right_type = bounds_.get(expr->right()).lower;
-  Type* result_type = bounds_.get(expr).lower;
+  AstType* left_type = bounds_.get(expr->left()).lower;
+  AstType* right_type = bounds_.get(expr->right()).lower;
+  AstType* result_type = bounds_.get(expr).lower;
   Maybe<int> fixed_right_arg = expr->fixed_right_arg();
   Handle<AllocationSite> allocation_site = expr->allocation_site();
 
@@ -11027,12 +10884,10 @@
   return result;
 }
 
-HValue* HGraphBuilder::BuildBinaryOperation(Token::Value op, HValue* left,
-                                            HValue* right, Type* left_type,
-                                            Type* right_type, Type* result_type,
-                                            Maybe<int> fixed_right_arg,
-                                            HAllocationMode allocation_mode,
-                                            BailoutId opt_id) {
+HValue* HGraphBuilder::BuildBinaryOperation(
+    Token::Value op, HValue* left, HValue* right, AstType* left_type,
+    AstType* right_type, AstType* result_type, Maybe<int> fixed_right_arg,
+    HAllocationMode allocation_mode, BailoutId opt_id) {
   bool maybe_string_add = false;
   if (op == Token::ADD) {
     // If we are adding constant string with something for which we don't have
@@ -11040,18 +10895,18 @@
     // generate deopt instructions.
     if (!left_type->IsInhabited() && right->IsConstant() &&
         HConstant::cast(right)->HasStringValue()) {
-      left_type = Type::String();
+      left_type = AstType::String();
     }
 
     if (!right_type->IsInhabited() && left->IsConstant() &&
         HConstant::cast(left)->HasStringValue()) {
-      right_type = Type::String();
+      right_type = AstType::String();
     }
 
-    maybe_string_add = (left_type->Maybe(Type::String()) ||
-                        left_type->Maybe(Type::Receiver()) ||
-                        right_type->Maybe(Type::String()) ||
-                        right_type->Maybe(Type::Receiver()));
+    maybe_string_add = (left_type->Maybe(AstType::String()) ||
+                        left_type->Maybe(AstType::Receiver()) ||
+                        right_type->Maybe(AstType::String()) ||
+                        right_type->Maybe(AstType::Receiver()));
   }
 
   Representation left_rep = RepresentationFor(left_type);
@@ -11061,7 +10916,7 @@
     Add<HDeoptimize>(
         DeoptimizeReason::kInsufficientTypeFeedbackForLHSOfBinaryOperation,
         Deoptimizer::SOFT);
-    left_type = Type::Any();
+    left_type = AstType::Any();
     left_rep = RepresentationFor(left_type);
     maybe_string_add = op == Token::ADD;
   }
@@ -11070,7 +10925,7 @@
     Add<HDeoptimize>(
         DeoptimizeReason::kInsufficientTypeFeedbackForRHSOfBinaryOperation,
         Deoptimizer::SOFT);
-    right_type = Type::Any();
+    right_type = AstType::Any();
     right_rep = RepresentationFor(right_type);
     maybe_string_add = op == Token::ADD;
   }
@@ -11082,34 +10937,34 @@
 
   // Special case for string addition here.
   if (op == Token::ADD &&
-      (left_type->Is(Type::String()) || right_type->Is(Type::String()))) {
+      (left_type->Is(AstType::String()) || right_type->Is(AstType::String()))) {
     // Validate type feedback for left argument.
-    if (left_type->Is(Type::String())) {
+    if (left_type->Is(AstType::String())) {
       left = BuildCheckString(left);
     }
 
     // Validate type feedback for right argument.
-    if (right_type->Is(Type::String())) {
+    if (right_type->Is(AstType::String())) {
       right = BuildCheckString(right);
     }
 
     // Convert left argument as necessary.
-    if (left_type->Is(Type::Number())) {
-      DCHECK(right_type->Is(Type::String()));
+    if (left_type->Is(AstType::Number())) {
+      DCHECK(right_type->Is(AstType::String()));
       left = BuildNumberToString(left, left_type);
-    } else if (!left_type->Is(Type::String())) {
-      DCHECK(right_type->Is(Type::String()));
+    } else if (!left_type->Is(AstType::String())) {
+      DCHECK(right_type->Is(AstType::String()));
       return AddUncasted<HStringAdd>(
           left, right, allocation_mode.GetPretenureMode(),
           STRING_ADD_CONVERT_LEFT, allocation_mode.feedback_site());
     }
 
     // Convert right argument as necessary.
-    if (right_type->Is(Type::Number())) {
-      DCHECK(left_type->Is(Type::String()));
+    if (right_type->Is(AstType::Number())) {
+      DCHECK(left_type->Is(AstType::String()));
       right = BuildNumberToString(right, right_type);
-    } else if (!right_type->Is(Type::String())) {
-      DCHECK(left_type->Is(Type::String()));
+    } else if (!right_type->Is(AstType::String())) {
+      DCHECK(left_type->Is(AstType::String()));
       return AddUncasted<HStringAdd>(
           left, right, allocation_mode.GetPretenureMode(),
           STRING_ADD_CONVERT_RIGHT, allocation_mode.feedback_site());
@@ -11267,8 +11122,8 @@
         break;
       case Token::BIT_OR: {
         HValue *operand, *shift_amount;
-        if (left_type->Is(Type::Signed32()) &&
-            right_type->Is(Type::Signed32()) &&
+        if (left_type->Is(AstType::Signed32()) &&
+            right_type->Is(AstType::Signed32()) &&
             MatchRotateRight(left, right, &operand, &shift_amount)) {
           instr = AddUncasted<HRor>(operand, shift_amount);
         } else {
@@ -11470,7 +11325,7 @@
       BuildBinaryOperation(expr, left, right,
           ast_context()->IsEffect() ? NO_PUSH_BEFORE_SIMULATE
                                     : PUSH_BEFORE_SIMULATE);
-  if (top_info()->is_tracking_positions() && result->IsBinaryOperation()) {
+  if (is_tracking_positions() && result->IsBinaryOperation()) {
     HBinaryOperation::cast(result)->SetOperandPositions(
         zone(),
         ScriptPositionToSourcePosition(expr->left()->position()),
@@ -11512,7 +11367,7 @@
   DCHECK(current_block() != NULL);
   DCHECK(current_block()->HasPredecessor());
 
-  if (!top_info()->is_tracking_positions()) SetSourcePosition(expr->position());
+  if (!is_tracking_positions()) SetSourcePosition(expr->position());
 
   // Check for a few fast cases. The AST visiting behavior must be in sync
   // with the full codegen: We don't push both left and right values onto
@@ -11540,9 +11395,9 @@
     return ast_context()->ReturnControl(instr, expr->id());
   }
 
-  Type* left_type = bounds_.get(expr->left()).lower;
-  Type* right_type = bounds_.get(expr->right()).lower;
-  Type* combined_type = expr->combined_type();
+  AstType* left_type = bounds_.get(expr->left()).lower;
+  AstType* right_type = bounds_.get(expr->right()).lower;
+  AstType* combined_type = expr->combined_type();
 
   CHECK_ALIVE(VisitForValue(expr->left()));
   CHECK_ALIVE(VisitForValue(expr->right()));
@@ -11563,24 +11418,37 @@
         HConstant::cast(right)->handle(isolate())->IsJSFunction()) {
       Handle<JSFunction> function =
           Handle<JSFunction>::cast(HConstant::cast(right)->handle(isolate()));
-      // Make sure the prototype of {function} is the %FunctionPrototype%, and
-      // it already has a meaningful initial map (i.e. we constructed at least
-      // one instance using the constructor {function}).
-      // We can only use the fast case if @@hasInstance was not used so far.
-      if (function->has_initial_map() &&
-          function->map()->prototype() ==
-              function->native_context()->closure() &&
-          !function->map()->has_non_instance_prototype() &&
-          isolate()->IsHasInstanceLookupChainIntact()) {
-        Handle<Map> initial_map(function->initial_map(), isolate());
-        top_info()->dependencies()->AssumeInitialMapCantChange(initial_map);
-        top_info()->dependencies()->AssumePropertyCell(
-            isolate()->factory()->has_instance_protector());
-        HInstruction* prototype =
-            Add<HConstant>(handle(initial_map->prototype(), isolate()));
-        HHasInPrototypeChainAndBranch* result =
-            New<HHasInPrototypeChainAndBranch>(left, prototype);
-        return ast_context()->ReturnControl(result, expr->id());
+      // Make sure that the {function} already has a meaningful initial map
+      // (i.e. we constructed at least one instance using the constructor
+      // {function}).
+      if (function->has_initial_map()) {
+        // Lookup @@hasInstance on the {function}.
+        Handle<Map> function_map(function->map(), isolate());
+        PropertyAccessInfo has_instance(
+            this, LOAD, function_map,
+            isolate()->factory()->has_instance_symbol());
+        // Check if we are using the Function.prototype[@@hasInstance].
+        if (has_instance.CanAccessMonomorphic() &&
+            has_instance.IsDataConstant() &&
+            has_instance.constant().is_identical_to(
+                isolate()->function_has_instance())) {
+          // Add appropriate receiver map check and prototype chain
+          // checks to guard the @@hasInstance lookup chain.
+          AddCheckMap(right, function_map);
+          if (has_instance.has_holder()) {
+            Handle<JSObject> prototype(
+                JSObject::cast(has_instance.map()->prototype()), isolate());
+            BuildCheckPrototypeMaps(prototype, has_instance.holder());
+          }
+          // Perform the prototype chain walk.
+          Handle<Map> initial_map(function->initial_map(), isolate());
+          top_info()->dependencies()->AssumeInitialMapCantChange(initial_map);
+          HInstruction* prototype =
+              Add<HConstant>(handle(initial_map->prototype(), isolate()));
+          HHasInPrototypeChainAndBranch* result =
+              New<HHasInPrototypeChainAndBranch>(left, prototype);
+          return ast_context()->ReturnControl(result, expr->id());
+        }
       }
     }
 
@@ -11614,10 +11482,9 @@
   return ast_context()->ReturnControl(compare, expr->id());
 }
 
-
 HControlInstruction* HOptimizedGraphBuilder::BuildCompareInstruction(
-    Token::Value op, HValue* left, HValue* right, Type* left_type,
-    Type* right_type, Type* combined_type, SourcePosition left_position,
+    Token::Value op, HValue* left, HValue* right, AstType* left_type,
+    AstType* right_type, AstType* combined_type, SourcePosition left_position,
     SourcePosition right_position, PushBeforeSimulateBehavior push_sim_result,
     BailoutId bailout_id) {
   // Cases handled below depend on collected type feedback. They should
@@ -11627,14 +11494,14 @@
         DeoptimizeReason::
             kInsufficientTypeFeedbackForCombinedTypeOfBinaryOperation,
         Deoptimizer::SOFT);
-    combined_type = left_type = right_type = Type::Any();
+    combined_type = left_type = right_type = AstType::Any();
   }
 
   Representation left_rep = RepresentationFor(left_type);
   Representation right_rep = RepresentationFor(right_type);
   Representation combined_rep = RepresentationFor(combined_type);
 
-  if (combined_type->Is(Type::Receiver())) {
+  if (combined_type->Is(AstType::Receiver())) {
     if (Token::IsEqualityOp(op)) {
       // HCompareObjectEqAndBranch can only deal with object, so
       // exclude numbers.
@@ -11656,7 +11523,7 @@
         AddCheckMap(operand_to_check, map);
         HCompareObjectEqAndBranch* result =
             New<HCompareObjectEqAndBranch>(left, right);
-        if (top_info()->is_tracking_positions()) {
+        if (is_tracking_positions()) {
           result->set_operand_position(zone(), 0, left_position);
           result->set_operand_position(zone(), 1, right_position);
         }
@@ -11718,7 +11585,7 @@
       Bailout(kUnsupportedNonPrimitiveCompare);
       return NULL;
     }
-  } else if (combined_type->Is(Type::InternalizedString()) &&
+  } else if (combined_type->Is(AstType::InternalizedString()) &&
              Token::IsEqualityOp(op)) {
     // If we have a constant argument, it should be consistent with the type
     // feedback (otherwise we fail assertions in HCompareObjectEqAndBranch).
@@ -11739,7 +11606,7 @@
     HCompareObjectEqAndBranch* result =
         New<HCompareObjectEqAndBranch>(left, right);
     return result;
-  } else if (combined_type->Is(Type::String())) {
+  } else if (combined_type->Is(AstType::String())) {
     BuildCheckHeapObject(left);
     Add<HCheckInstanceType>(left, HCheckInstanceType::IS_STRING);
     BuildCheckHeapObject(right);
@@ -11747,7 +11614,7 @@
     HStringCompareAndBranch* result =
         New<HStringCompareAndBranch>(left, right, op);
     return result;
-  } else if (combined_type->Is(Type::Boolean())) {
+  } else if (combined_type->Is(AstType::Boolean())) {
     AddCheckMap(left, isolate()->factory()->boolean_map());
     AddCheckMap(right, isolate()->factory()->boolean_map());
     if (Token::IsEqualityOp(op)) {
@@ -11799,7 +11666,7 @@
       HCompareNumericAndBranch* result =
           New<HCompareNumericAndBranch>(left, right, op);
       result->set_observed_input_representation(left_rep, right_rep);
-      if (top_info()->is_tracking_positions()) {
+      if (is_tracking_positions()) {
         result->SetOperandPositions(zone(), left_position, right_position);
       }
       return result;
@@ -11815,7 +11682,7 @@
   DCHECK(current_block() != NULL);
   DCHECK(current_block()->HasPredecessor());
   DCHECK(expr->op() == Token::EQ || expr->op() == Token::EQ_STRICT);
-  if (!top_info()->is_tracking_positions()) SetSourcePosition(expr->position());
+  if (!is_tracking_positions()) SetSourcePosition(expr->position());
   CHECK_ALIVE(VisitForValue(sub_expr));
   HValue* value = Pop();
   HControlInstruction* instr;
@@ -11886,7 +11753,7 @@
       Add<HAllocate>(object_size_constant, type, pretenure_flag, instance_type,
                      graph()->GetConstant0(), top_site);
 
-  // If allocation folding reaches Page::kMaxRegularHeapObjectSize the
+  // If allocation folding reaches kMaxRegularHeapObjectSize the
   // elements array may not get folded into the object. Hence, we set the
   // elements pointer to empty fixed array and let store elimination remove
   // this store in the folding case.
@@ -12183,7 +12050,6 @@
   VariableProxy* proxy = declaration->proxy();
   Variable* variable = proxy->var();
   switch (variable->location()) {
-    case VariableLocation::GLOBAL:
     case VariableLocation::UNALLOCATED: {
       DCHECK(!variable->binding_needs_init());
       FeedbackVectorSlot slot = proxy->VariableFeedbackSlot();
@@ -12223,7 +12089,6 @@
   VariableProxy* proxy = declaration->proxy();
   Variable* variable = proxy->var();
   switch (variable->location()) {
-    case VariableLocation::GLOBAL:
     case VariableLocation::UNALLOCATED: {
       FeedbackVectorSlot slot = proxy->VariableFeedbackSlot();
       DCHECK(!slot.IsInvalid());
@@ -12462,27 +12327,18 @@
 }
 
 
-// Fast support for string.charAt(n) and string[n].
-void HOptimizedGraphBuilder::GenerateStringCharFromCode(CallRuntime* call) {
-  DCHECK(call->arguments()->length() == 1);
-  CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
-  HValue* char_code = Pop();
-  HInstruction* result = NewUncasted<HStringCharFromCode>(char_code);
-  return ast_context()->ReturnInstruction(result, call->id());
-}
-
-
 // Fast support for SubString.
 void HOptimizedGraphBuilder::GenerateSubString(CallRuntime* call) {
   DCHECK_EQ(3, call->arguments()->length());
   CHECK_ALIVE(VisitExpressions(call->arguments()));
-  PushArgumentsFromEnvironment(call->arguments()->length());
   Callable callable = CodeFactory::SubString(isolate());
   HValue* stub = Add<HConstant>(callable.code());
-  HValue* values[] = {context()};
-  HInstruction* result =
-      New<HCallWithDescriptor>(stub, call->arguments()->length(),
-                               callable.descriptor(), ArrayVector(values));
+  HValue* to = Pop();
+  HValue* from = Pop();
+  HValue* string = Pop();
+  HValue* values[] = {context(), string, from, to};
+  HInstruction* result = New<HCallWithDescriptor>(
+      stub, 0, callable.descriptor(), ArrayVector(values));
   result->set_type(HType::String());
   return ast_context()->ReturnInstruction(result, call->id());
 }
@@ -12504,13 +12360,16 @@
 void HOptimizedGraphBuilder::GenerateRegExpExec(CallRuntime* call) {
   DCHECK_EQ(4, call->arguments()->length());
   CHECK_ALIVE(VisitExpressions(call->arguments()));
-  PushArgumentsFromEnvironment(call->arguments()->length());
   Callable callable = CodeFactory::RegExpExec(isolate());
+  HValue* last_match_info = Pop();
+  HValue* index = Pop();
+  HValue* subject = Pop();
+  HValue* regexp_object = Pop();
   HValue* stub = Add<HConstant>(callable.code());
-  HValue* values[] = {context()};
-  HInstruction* result =
-      New<HCallWithDescriptor>(stub, call->arguments()->length(),
-                               callable.descriptor(), ArrayVector(values));
+  HValue* values[] = {context(), regexp_object, subject, index,
+                      last_match_info};
+  HInstruction* result = New<HCallWithDescriptor>(
+      stub, 0, callable.descriptor(), ArrayVector(values));
   return ast_context()->ReturnInstruction(result, call->id());
 }
 
@@ -12554,7 +12413,7 @@
   DCHECK_EQ(1, call->arguments()->length());
   CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
   HValue* number = Pop();
-  HValue* result = BuildNumberToString(number, Type::Any());
+  HValue* result = BuildNumberToString(number, AstType::Any());
   return ast_context()->ReturnValue(result);
 }
 
@@ -13236,8 +13095,7 @@
         PrintIndent();
         std::ostringstream os;
         os << "0 " << uses << " " << NameOf(instruction) << " " << *instruction;
-        if (graph->info()->is_tracking_positions() &&
-            instruction->has_position() && instruction->position().raw() != 0) {
+        if (instruction->has_position() && instruction->position().raw() != 0) {
           const SourcePosition pos = instruction->position();
           os << " pos:";
           if (pos.inlining_id() != 0) os << pos.inlining_id() << "_";
diff --git a/src/crankshaft/hydrogen.h b/src/crankshaft/hydrogen.h
index 931dd01..d2f1637 100644
--- a/src/crankshaft/hydrogen.h
+++ b/src/crankshaft/hydrogen.h
@@ -8,13 +8,15 @@
 #include "src/accessors.h"
 #include "src/allocation.h"
 #include "src/ast/ast-type-bounds.h"
+#include "src/ast/scopes.h"
 #include "src/bailout-reason.h"
+#include "src/compilation-info.h"
 #include "src/compiler.h"
 #include "src/crankshaft/compilation-phase.h"
 #include "src/crankshaft/hydrogen-instructions.h"
 #include "src/globals.h"
 #include "src/parsing/parse-info.h"
-#include "src/zone.h"
+#include "src/zone/zone.h"
 
 namespace v8 {
 namespace internal {
@@ -30,12 +32,11 @@
 class LAllocator;
 class LChunk;
 class LiveRange;
-class Scope;
 
 class HCompilationJob final : public CompilationJob {
  public:
   explicit HCompilationJob(Handle<JSFunction> function)
-      : CompilationJob(&info_, "Crankshaft"),
+      : CompilationJob(function->GetIsolate(), &info_, "Crankshaft"),
         zone_(function->GetIsolate()->allocator()),
         parse_info_(&zone_, function),
         info_(&parse_info_, function),
@@ -439,6 +440,13 @@
     return depends_on_empty_array_proto_elements_;
   }
 
+  void MarkDependsOnStringLengthOverflow() {
+    if (depends_on_string_length_overflow_) return;
+    info()->dependencies()->AssumePropertyCell(
+        isolate()->factory()->string_length_protector());
+    depends_on_string_length_overflow_ = true;
+  }
+
   bool has_uint32_instructions() {
     DCHECK(uint32_instructions_ == NULL || !uint32_instructions_->is_empty());
     return uint32_instructions_ != NULL;
@@ -514,6 +522,7 @@
   bool allow_code_motion_;
   bool use_optimistic_licm_;
   bool depends_on_empty_array_proto_elements_;
+  bool depends_on_string_length_overflow_;
   int type_change_checksum_;
   int maximum_environment_size_;
   int no_side_effects_scope_count_;
@@ -1056,14 +1065,16 @@
 class HGraphBuilder {
  public:
   explicit HGraphBuilder(CompilationInfo* info,
-                         CallInterfaceDescriptor descriptor)
+                         CallInterfaceDescriptor descriptor,
+                         bool track_positions)
       : info_(info),
         descriptor_(descriptor),
         graph_(NULL),
         current_block_(NULL),
         scope_(info->scope()),
         position_(SourcePosition::Unknown()),
-        start_position_(0) {}
+        start_position_(0),
+        track_positions_(track_positions) {}
   virtual ~HGraphBuilder() {}
 
   Scope* scope() const { return scope_; }
@@ -1395,7 +1406,7 @@
                                    ElementsKind to_kind,
                                    bool is_jsarray);
 
-  HValue* BuildNumberToString(HValue* object, Type* type);
+  HValue* BuildNumberToString(HValue* object, AstType* type);
   HValue* BuildToNumber(HValue* input);
   HValue* BuildToObject(HValue* receiver);
 
@@ -1499,8 +1510,8 @@
                         HValue** shift_amount);
 
   HValue* BuildBinaryOperation(Token::Value op, HValue* left, HValue* right,
-                               Type* left_type, Type* right_type,
-                               Type* result_type, Maybe<int> fixed_right_arg,
+                               AstType* left_type, AstType* right_type,
+                               AstType* result_type, Maybe<int> fixed_right_arg,
                                HAllocationMode allocation_mode,
                                BailoutId opt_id = BailoutId::None());
 
@@ -1513,8 +1524,8 @@
 
   HValue* AddLoadJSBuiltin(int context_index);
 
-  HValue* EnforceNumberType(HValue* number, Type* expected);
-  HValue* TruncateToNumber(HValue* value, Type** expected);
+  HValue* EnforceNumberType(HValue* number, AstType* expected);
+  HValue* TruncateToNumber(HValue* value, AstType** expected);
 
   void FinishExitWithHardDeoptimization(DeoptimizeReason reason);
 
@@ -1833,20 +1844,6 @@
                          HValue* length,
                          HValue* capacity);
 
-  HValue* BuildCloneShallowArrayCow(HValue* boilerplate,
-                                    HValue* allocation_site,
-                                    AllocationSiteMode mode,
-                                    ElementsKind kind);
-
-  HValue* BuildCloneShallowArrayEmpty(HValue* boilerplate,
-                                      HValue* allocation_site,
-                                      AllocationSiteMode mode);
-
-  HValue* BuildCloneShallowArrayNonEmpty(HValue* boilerplate,
-                                         HValue* allocation_site,
-                                         AllocationSiteMode mode,
-                                         ElementsKind kind);
-
   HValue* BuildElementIndexHash(HValue* index);
 
   void BuildCreateAllocationMemento(HValue* previous_object,
@@ -1859,7 +1856,7 @@
 
   HInstruction* BuildGetNativeContext(HValue* closure);
   HInstruction* BuildGetNativeContext();
-  HInstruction* BuildGetScriptContext(int context_index);
+
   // Builds a loop version if |depth| is specified or unrolls the loop to
   // |depth_value| iterations otherwise.
   HValue* BuildGetParentContext(HValue* depth, int depth_value);
@@ -1879,7 +1876,7 @@
   }
 
   void EnterInlinedSource(int start_position, int id) {
-    if (top_info()->is_tracking_positions()) {
+    if (is_tracking_positions()) {
       start_position_ = start_position;
       position_.set_inlining_id(id);
     }
@@ -1900,6 +1897,8 @@
   SourcePosition source_position() { return position_; }
   void set_source_position(SourcePosition position) { position_ = position; }
 
+  bool is_tracking_positions() { return track_positions_; }
+
   int TraceInlinedFunction(Handle<SharedFunctionInfo> shared,
                            SourcePosition position);
 
@@ -1925,6 +1924,7 @@
   Scope* scope_;
   SourcePosition position_;
   int start_position_;
+  bool track_positions_;
 };
 
 template <>
@@ -2122,7 +2122,7 @@
     BreakAndContinueScope* next_;
   };
 
-  explicit HOptimizedGraphBuilder(CompilationInfo* info);
+  explicit HOptimizedGraphBuilder(CompilationInfo* info, bool track_positions);
 
   bool BuildGraph() override;
 
@@ -2214,7 +2214,6 @@
   F(IsJSProxy)                         \
   F(Call)                              \
   F(NewObject)                         \
-  F(StringCharFromCode)                \
   F(ToInteger)                         \
   F(ToObject)                          \
   F(ToString)                          \
@@ -2305,11 +2304,9 @@
                                                 int index,
                                                 HEnvironment* env) {
     if (!FLAG_analyze_environment_liveness) return false;
-    // |this| and |arguments| are always live; zapping parameters isn't
-    // safe because function.arguments can inspect them at any time.
-    return !var->is_this() &&
-           !var->is_arguments() &&
-           env->is_local_index(index);
+    // Zapping parameters isn't safe because function.arguments can inspect them
+    // at any time.
+    return env->is_local_index(index);
   }
   void BindIfLive(Variable* var, HValue* value) {
     HEnvironment* env = environment();
@@ -2706,8 +2703,8 @@
   };
 
   HControlInstruction* BuildCompareInstruction(
-      Token::Value op, HValue* left, HValue* right, Type* left_type,
-      Type* right_type, Type* combined_type, SourcePosition left_position,
+      Token::Value op, HValue* left, HValue* right, AstType* left_type,
+      AstType* right_type, AstType* combined_type, SourcePosition left_position,
       SourcePosition right_position, PushBeforeSimulateBehavior push_sim_result,
       BailoutId bailout_id);
 
diff --git a/src/crankshaft/ia32/lithium-codegen-ia32.cc b/src/crankshaft/ia32/lithium-codegen-ia32.cc
index 2512e2b..6c121dd 100644
--- a/src/crankshaft/ia32/lithium-codegen-ia32.cc
+++ b/src/crankshaft/ia32/lithium-codegen-ia32.cc
@@ -164,7 +164,7 @@
   Comment(";;; Prologue begin");
 
   // Possibly allocate a local context.
-  if (info_->scope()->num_heap_slots() > 0) {
+  if (info_->scope()->NeedsContext()) {
     Comment(";;; Allocate local context");
     bool need_write_barrier = true;
     // Argument to NewContext is the function, which is still in edi.
@@ -172,7 +172,7 @@
     Safepoint::DeoptMode deopt_mode = Safepoint::kNoLazyDeopt;
     if (info()->scope()->is_script_scope()) {
       __ push(edi);
-      __ Push(info()->scope()->GetScopeInfo(info()->isolate()));
+      __ Push(info()->scope()->scope_info());
       __ CallRuntime(Runtime::kNewScriptContext);
       deopt_mode = Safepoint::kLazyDeopt;
     } else {
@@ -2397,20 +2397,6 @@
 }
 
 
-template <class T>
-void LCodeGen::EmitVectorStoreICRegisters(T* instr) {
-  Register vector_register = ToRegister(instr->temp_vector());
-  Register slot_register = ToRegister(instr->temp_slot());
-
-  AllowDeferredHandleDereference vector_structure_check;
-  Handle<TypeFeedbackVector> vector = instr->hydrogen()->feedback_vector();
-  __ mov(vector_register, vector);
-  FeedbackVectorSlot slot = instr->hydrogen()->slot();
-  int index = vector->GetIndex(slot);
-  __ mov(slot_register, Immediate(Smi::FromInt(index)));
-}
-
-
 void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) {
   DCHECK(ToRegister(instr->context()).is(esi));
   DCHECK(ToRegister(instr->result()).is(eax));
@@ -3703,21 +3689,6 @@
 }
 
 
-void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) {
-  DCHECK(ToRegister(instr->context()).is(esi));
-  DCHECK(ToRegister(instr->object()).is(StoreDescriptor::ReceiverRegister()));
-  DCHECK(ToRegister(instr->value()).is(StoreDescriptor::ValueRegister()));
-
-  EmitVectorStoreICRegisters<LStoreNamedGeneric>(instr);
-
-  __ mov(StoreDescriptor::NameRegister(), instr->name());
-  Handle<Code> ic =
-      CodeFactory::StoreICInOptimizedCode(isolate(), instr->language_mode())
-          .code();
-  CallCode(ic, RelocInfo::CODE_TARGET, instr);
-}
-
-
 void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) {
   Condition cc = instr->hydrogen()->allow_equality() ? above : above_equal;
   if (instr->index()->IsConstantOperand()) {
@@ -3877,21 +3848,6 @@
 }
 
 
-void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) {
-  DCHECK(ToRegister(instr->context()).is(esi));
-  DCHECK(ToRegister(instr->object()).is(StoreDescriptor::ReceiverRegister()));
-  DCHECK(ToRegister(instr->key()).is(StoreDescriptor::NameRegister()));
-  DCHECK(ToRegister(instr->value()).is(StoreDescriptor::ValueRegister()));
-
-  EmitVectorStoreICRegisters<LStoreKeyedGeneric>(instr);
-
-  Handle<Code> ic = CodeFactory::KeyedStoreICInOptimizedCode(
-                        isolate(), instr->language_mode())
-                        .code();
-  CallCode(ic, RelocInfo::CODE_TARGET, instr);
-}
-
-
 void LCodeGen::DoTrapAllocationMemento(LTrapAllocationMemento* instr) {
   Register object = ToRegister(instr->object());
   Register temp = ToRegister(instr->temp());
@@ -4831,7 +4787,7 @@
 
   if (instr->size()->IsConstantOperand()) {
     int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
-    CHECK(size <= Page::kMaxRegularHeapObjectSize);
+    CHECK(size <= kMaxRegularHeapObjectSize);
     __ Allocate(size, result, temp, no_reg, deferred->entry(), flags);
   } else {
     Register size = ToRegister(instr->size());
@@ -4874,7 +4830,7 @@
   }
   if (instr->size()->IsConstantOperand()) {
     int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
-    CHECK(size <= Page::kMaxRegularHeapObjectSize);
+    CHECK(size <= kMaxRegularHeapObjectSize);
     __ FastAllocate(size, result, temp, flags);
   } else {
     Register size = ToRegister(instr->size());
diff --git a/src/crankshaft/ia32/lithium-codegen-ia32.h b/src/crankshaft/ia32/lithium-codegen-ia32.h
index 38a493d..8e16d9c 100644
--- a/src/crankshaft/ia32/lithium-codegen-ia32.h
+++ b/src/crankshaft/ia32/lithium-codegen-ia32.h
@@ -294,8 +294,6 @@
 
   template <class T>
   void EmitVectorLoadICRegisters(T* instr);
-  template <class T>
-  void EmitVectorStoreICRegisters(T* instr);
 
   void EmitReturn(LReturn* instr);
 
diff --git a/src/crankshaft/ia32/lithium-ia32.cc b/src/crankshaft/ia32/lithium-ia32.cc
index 6794224..e6077cc 100644
--- a/src/crankshaft/ia32/lithium-ia32.cc
+++ b/src/crankshaft/ia32/lithium-ia32.cc
@@ -351,15 +351,6 @@
 }
 
 
-void LStoreNamedGeneric::PrintDataTo(StringStream* stream) {
-  object()->PrintTo(stream);
-  stream->Add(".");
-  stream->Add(String::cast(*name())->ToCString().get());
-  stream->Add(" <- ");
-  value()->PrintTo(stream);
-}
-
-
 void LLoadKeyed::PrintDataTo(StringStream* stream) {
   elements()->PrintTo(stream);
   stream->Add("[");
@@ -392,15 +383,6 @@
 }
 
 
-void LStoreKeyedGeneric::PrintDataTo(StringStream* stream) {
-  object()->PrintTo(stream);
-  stream->Add("[");
-  key()->PrintTo(stream);
-  stream->Add("] <- ");
-  value()->PrintTo(stream);
-}
-
-
 void LTransitionElementsKind::PrintDataTo(StringStream* stream) {
   object()->PrintTo(stream);
   stream->Add(" %p -> %p", *original_map(), *transitioned_map());
@@ -910,7 +892,7 @@
 
 LInstruction* LChunkBuilder::DoPrologue(HPrologue* instr) {
   LInstruction* result = new (zone()) LPrologue();
-  if (info_->scope()->num_heap_slots() > 0) {
+  if (info_->scope()->NeedsContext()) {
     result = MarkAsCall(result, instr);
   }
   return result;
@@ -1054,6 +1036,10 @@
 LInstruction* LChunkBuilder::DoCallWithDescriptor(
     HCallWithDescriptor* instr) {
   CallInterfaceDescriptor descriptor = instr->descriptor();
+  DCHECK_EQ(descriptor.GetParameterCount() +
+                LCallWithDescriptor::kImplicitRegisterParameterCount,
+            instr->OperandCount());
+
   LOperand* target = UseRegisterOrConstantAtStart(instr->target());
   ZoneList<LOperand*> ops(instr->OperandCount(), zone());
   // Target
@@ -1061,15 +1047,20 @@
   // Context
   LOperand* op = UseFixed(instr->OperandAt(1), esi);
   ops.Add(op, zone());
-  // Other register parameters
-  for (int i = LCallWithDescriptor::kImplicitRegisterParameterCount;
-       i < instr->OperandCount(); i++) {
-    op =
-        UseFixed(instr->OperandAt(i),
-                 descriptor.GetRegisterParameter(
-                     i - LCallWithDescriptor::kImplicitRegisterParameterCount));
+  // Load register parameters.
+  int i = 0;
+  for (; i < descriptor.GetRegisterParameterCount(); i++) {
+    op = UseFixed(instr->OperandAt(
+                      i + LCallWithDescriptor::kImplicitRegisterParameterCount),
+                  descriptor.GetRegisterParameter(i));
     ops.Add(op, zone());
   }
+  // Push stack parameters.
+  for (; i < descriptor.GetParameterCount(); i++) {
+    op = UseAny(instr->OperandAt(
+        i + LCallWithDescriptor::kImplicitRegisterParameterCount));
+    AddInstruction(new (zone()) LPushArgument(op), instr);
+  }
 
   LCallWithDescriptor* result = new(zone()) LCallWithDescriptor(
       descriptor, ops, zone());
@@ -2211,26 +2202,6 @@
 }
 
 
-LInstruction* LChunkBuilder::DoStoreKeyedGeneric(HStoreKeyedGeneric* instr) {
-  LOperand* context = UseFixed(instr->context(), esi);
-  LOperand* object =
-      UseFixed(instr->object(), StoreDescriptor::ReceiverRegister());
-  LOperand* key = UseFixed(instr->key(), StoreDescriptor::NameRegister());
-  LOperand* value = UseFixed(instr->value(), StoreDescriptor::ValueRegister());
-
-  DCHECK(instr->object()->representation().IsTagged());
-  DCHECK(instr->key()->representation().IsTagged());
-  DCHECK(instr->value()->representation().IsTagged());
-
-  LOperand* slot = FixedTemp(StoreWithVectorDescriptor::SlotRegister());
-  LOperand* vector = FixedTemp(StoreWithVectorDescriptor::VectorRegister());
-
-  LStoreKeyedGeneric* result = new (zone())
-      LStoreKeyedGeneric(context, object, key, value, slot, vector);
-  return MarkAsCall(result, instr);
-}
-
-
 LInstruction* LChunkBuilder::DoTransitionElementsKind(
     HTransitionElementsKind* instr) {
   if (IsSimpleMapChangeTransition(instr->from_kind(), instr->to_kind())) {
@@ -2332,20 +2303,6 @@
 }
 
 
-LInstruction* LChunkBuilder::DoStoreNamedGeneric(HStoreNamedGeneric* instr) {
-  LOperand* context = UseFixed(instr->context(), esi);
-  LOperand* object =
-      UseFixed(instr->object(), StoreDescriptor::ReceiverRegister());
-  LOperand* value = UseFixed(instr->value(), StoreDescriptor::ValueRegister());
-  LOperand* slot = FixedTemp(StoreWithVectorDescriptor::SlotRegister());
-  LOperand* vector = FixedTemp(StoreWithVectorDescriptor::VectorRegister());
-
-  LStoreNamedGeneric* result =
-      new (zone()) LStoreNamedGeneric(context, object, value, slot, vector);
-  return MarkAsCall(result, instr);
-}
-
-
 LInstruction* LChunkBuilder::DoStringAdd(HStringAdd* instr) {
   LOperand* context = UseFixed(instr->context(), esi);
   LOperand* left = UseFixed(instr->left(), edx);
diff --git a/src/crankshaft/ia32/lithium-ia32.h b/src/crankshaft/ia32/lithium-ia32.h
index e525341..816d8fd 100644
--- a/src/crankshaft/ia32/lithium-ia32.h
+++ b/src/crankshaft/ia32/lithium-ia32.h
@@ -136,9 +136,7 @@
   V(StoreCodeEntry)                          \
   V(StoreContextSlot)                        \
   V(StoreKeyed)                              \
-  V(StoreKeyedGeneric)                       \
   V(StoreNamedField)                         \
-  V(StoreNamedGeneric)                       \
   V(StringAdd)                               \
   V(StringCharCodeAt)                        \
   V(StringCharFromCode)                      \
@@ -2022,32 +2020,6 @@
 };
 
 
-class LStoreNamedGeneric final : public LTemplateInstruction<0, 3, 2> {
- public:
-  LStoreNamedGeneric(LOperand* context, LOperand* object, LOperand* value,
-                     LOperand* slot, LOperand* vector) {
-    inputs_[0] = context;
-    inputs_[1] = object;
-    inputs_[2] = value;
-    temps_[0] = slot;
-    temps_[1] = vector;
-  }
-
-  LOperand* context() { return inputs_[0]; }
-  LOperand* object() { return inputs_[1]; }
-  LOperand* value() { return inputs_[2]; }
-  LOperand* temp_slot() { return temps_[0]; }
-  LOperand* temp_vector() { return temps_[1]; }
-
-  DECLARE_CONCRETE_INSTRUCTION(StoreNamedGeneric, "store-named-generic")
-  DECLARE_HYDROGEN_ACCESSOR(StoreNamedGeneric)
-
-  void PrintDataTo(StringStream* stream) override;
-  Handle<Object> name() const { return hydrogen()->name(); }
-  LanguageMode language_mode() { return hydrogen()->language_mode(); }
-};
-
-
 class LStoreKeyed final : public LTemplateInstruction<0, 4, 0> {
  public:
   LStoreKeyed(LOperand* obj, LOperand* key, LOperand* val,
@@ -2078,34 +2050,6 @@
 };
 
 
-class LStoreKeyedGeneric final : public LTemplateInstruction<0, 4, 2> {
- public:
-  LStoreKeyedGeneric(LOperand* context, LOperand* object, LOperand* key,
-                     LOperand* value, LOperand* slot, LOperand* vector) {
-    inputs_[0] = context;
-    inputs_[1] = object;
-    inputs_[2] = key;
-    inputs_[3] = value;
-    temps_[0] = slot;
-    temps_[1] = vector;
-  }
-
-  LOperand* context() { return inputs_[0]; }
-  LOperand* object() { return inputs_[1]; }
-  LOperand* key() { return inputs_[2]; }
-  LOperand* value() { return inputs_[3]; }
-  LOperand* temp_slot() { return temps_[0]; }
-  LOperand* temp_vector() { return temps_[1]; }
-
-  DECLARE_CONCRETE_INSTRUCTION(StoreKeyedGeneric, "store-keyed-generic")
-  DECLARE_HYDROGEN_ACCESSOR(StoreKeyedGeneric)
-
-  void PrintDataTo(StringStream* stream) override;
-
-  LanguageMode language_mode() { return hydrogen()->language_mode(); }
-};
-
-
 class LTransitionElementsKind final : public LTemplateInstruction<0, 2, 2> {
  public:
   LTransitionElementsKind(LOperand* object,
diff --git a/src/crankshaft/lithium-allocator.h b/src/crankshaft/lithium-allocator.h
index ce0e565..d28ad7f 100644
--- a/src/crankshaft/lithium-allocator.h
+++ b/src/crankshaft/lithium-allocator.h
@@ -9,7 +9,7 @@
 #include "src/base/compiler-specific.h"
 #include "src/crankshaft/compilation-phase.h"
 #include "src/crankshaft/lithium.h"
-#include "src/zone.h"
+#include "src/zone/zone.h"
 
 namespace v8 {
 namespace internal {
diff --git a/src/crankshaft/lithium-codegen.cc b/src/crankshaft/lithium-codegen.cc
index 5041de6..decc2a5 100644
--- a/src/crankshaft/lithium-codegen.cc
+++ b/src/crankshaft/lithium-codegen.cc
@@ -66,6 +66,8 @@
       source_position_table_builder_(info->zone(),
                                      info->SourcePositionRecordingMode()) {}
 
+Isolate* LCodeGenBase::isolate() const { return info_->isolate(); }
+
 bool LCodeGenBase::GenerateBody() {
   DCHECK(is_generating());
   bool emit_instructions = true;
diff --git a/src/crankshaft/lithium-codegen.h b/src/crankshaft/lithium-codegen.h
index fbf9692..c6bf447 100644
--- a/src/crankshaft/lithium-codegen.h
+++ b/src/crankshaft/lithium-codegen.h
@@ -6,13 +6,13 @@
 #define V8_CRANKSHAFT_LITHIUM_CODEGEN_H_
 
 #include "src/bailout-reason.h"
-#include "src/compiler.h"
 #include "src/deoptimizer.h"
 #include "src/source-position-table.h"
 
 namespace v8 {
 namespace internal {
 
+class CompilationInfo;
 class HGraph;
 class LChunk;
 class LEnvironment;
@@ -29,7 +29,7 @@
   // Simple accessors.
   MacroAssembler* masm() const { return masm_; }
   CompilationInfo* info() const { return info_; }
-  Isolate* isolate() const { return info_->isolate(); }
+  Isolate* isolate() const;
   Factory* factory() const { return isolate()->factory(); }
   Heap* heap() const { return isolate()->heap(); }
   Zone* zone() const { return zone_; }
diff --git a/src/crankshaft/lithium.h b/src/crankshaft/lithium.h
index a2c0283..d04bd56 100644
--- a/src/crankshaft/lithium.h
+++ b/src/crankshaft/lithium.h
@@ -12,7 +12,7 @@
 #include "src/crankshaft/compilation-phase.h"
 #include "src/crankshaft/hydrogen.h"
 #include "src/safepoint-table.h"
-#include "src/zone-allocator.h"
+#include "src/zone/zone-allocator.h"
 
 namespace v8 {
 namespace internal {
diff --git a/src/crankshaft/mips/lithium-codegen-mips.cc b/src/crankshaft/mips/lithium-codegen-mips.cc
index 6be0d13..b24b1c5 100644
--- a/src/crankshaft/mips/lithium-codegen-mips.cc
+++ b/src/crankshaft/mips/lithium-codegen-mips.cc
@@ -171,7 +171,7 @@
   Comment(";;; Prologue begin");
 
   // Possibly allocate a local context.
-  if (info()->scope()->num_heap_slots() > 0) {
+  if (info()->scope()->NeedsContext()) {
     Comment(";;; Allocate local context");
     bool need_write_barrier = true;
     // Argument to NewContext is the function, which is in a1.
@@ -179,7 +179,7 @@
     Safepoint::DeoptMode deopt_mode = Safepoint::kNoLazyDeopt;
     if (info()->scope()->is_script_scope()) {
       __ push(a1);
-      __ Push(info()->scope()->GetScopeInfo(info()->isolate()));
+      __ Push(info()->scope()->scope_info());
       __ CallRuntime(Runtime::kNewScriptContext);
       deopt_mode = Safepoint::kLazyDeopt;
     } else {
@@ -2499,20 +2499,6 @@
 }
 
 
-template <class T>
-void LCodeGen::EmitVectorStoreICRegisters(T* instr) {
-  Register vector_register = ToRegister(instr->temp_vector());
-  Register slot_register = ToRegister(instr->temp_slot());
-
-  AllowDeferredHandleDereference vector_structure_check;
-  Handle<TypeFeedbackVector> vector = instr->hydrogen()->feedback_vector();
-  __ li(vector_register, vector);
-  FeedbackVectorSlot slot = instr->hydrogen()->slot();
-  int index = vector->GetIndex(slot);
-  __ li(slot_register, Operand(Smi::FromInt(index)));
-}
-
-
 void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) {
   DCHECK(ToRegister(instr->context()).is(cp));
   DCHECK(ToRegister(instr->result()).is(v0));
@@ -3448,7 +3434,9 @@
   // Math.sqrt(-Infinity) == NaN
   Label done;
   __ Move(temp, static_cast<double>(-V8_INFINITY));
+  // Set up Infinity.
   __ Neg_d(result, temp);
+  // result is overwritten if the branch is not taken.
   __ BranchF(&done, NULL, eq, temp, input);
 
   // Add +0 to convert -0 to +0.
@@ -3800,21 +3788,6 @@
 }
 
 
-void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) {
-  DCHECK(ToRegister(instr->context()).is(cp));
-  DCHECK(ToRegister(instr->object()).is(StoreDescriptor::ReceiverRegister()));
-  DCHECK(ToRegister(instr->value()).is(StoreDescriptor::ValueRegister()));
-
-  EmitVectorStoreICRegisters<LStoreNamedGeneric>(instr);
-
-  __ li(StoreDescriptor::NameRegister(), Operand(instr->name()));
-  Handle<Code> ic =
-      CodeFactory::StoreICInOptimizedCode(isolate(), instr->language_mode())
-          .code();
-  CallCode(ic, RelocInfo::CODE_TARGET, instr);
-}
-
-
 void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) {
   Condition cc = instr->hydrogen()->allow_equality() ? hi : hs;
   Operand operand(0);
@@ -4025,21 +3998,6 @@
 }
 
 
-void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) {
-  DCHECK(ToRegister(instr->context()).is(cp));
-  DCHECK(ToRegister(instr->object()).is(StoreDescriptor::ReceiverRegister()));
-  DCHECK(ToRegister(instr->key()).is(StoreDescriptor::NameRegister()));
-  DCHECK(ToRegister(instr->value()).is(StoreDescriptor::ValueRegister()));
-
-  EmitVectorStoreICRegisters<LStoreKeyedGeneric>(instr);
-
-  Handle<Code> ic = CodeFactory::KeyedStoreICInOptimizedCode(
-                        isolate(), instr->language_mode())
-                        .code();
-  CallCode(ic, RelocInfo::CODE_TARGET, instr);
-}
-
-
 void LCodeGen::DoMaybeGrowElements(LMaybeGrowElements* instr) {
   class DeferredMaybeGrowElements final : public LDeferredCode {
    public:
@@ -5042,7 +5000,7 @@
 
   if (instr->size()->IsConstantOperand()) {
     int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
-    CHECK(size <= Page::kMaxRegularHeapObjectSize);
+    CHECK(size <= kMaxRegularHeapObjectSize);
     __ Allocate(size, result, scratch, scratch2, deferred->entry(), flags);
   } else {
     Register size = ToRegister(instr->size());
@@ -5145,7 +5103,7 @@
   }
   if (instr->size()->IsConstantOperand()) {
     int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
-    CHECK(size <= Page::kMaxRegularHeapObjectSize);
+    CHECK(size <= kMaxRegularHeapObjectSize);
     __ FastAllocate(size, result, scratch1, scratch2, flags);
   } else {
     Register size = ToRegister(instr->size());
diff --git a/src/crankshaft/mips/lithium-codegen-mips.h b/src/crankshaft/mips/lithium-codegen-mips.h
index d51f62c..bb09abc 100644
--- a/src/crankshaft/mips/lithium-codegen-mips.h
+++ b/src/crankshaft/mips/lithium-codegen-mips.h
@@ -340,8 +340,6 @@
 
   template <class T>
   void EmitVectorLoadICRegisters(T* instr);
-  template <class T>
-  void EmitVectorStoreICRegisters(T* instr);
 
   ZoneList<Deoptimizer::JumpTableEntry> jump_table_;
   Scope* const scope_;
diff --git a/src/crankshaft/mips/lithium-mips.cc b/src/crankshaft/mips/lithium-mips.cc
index a7880ee..5533b8f 100644
--- a/src/crankshaft/mips/lithium-mips.cc
+++ b/src/crankshaft/mips/lithium-mips.cc
@@ -311,15 +311,6 @@
 }
 
 
-void LStoreNamedGeneric::PrintDataTo(StringStream* stream) {
-  object()->PrintTo(stream);
-  stream->Add(".");
-  stream->Add(String::cast(*name())->ToCString().get());
-  stream->Add(" <- ");
-  value()->PrintTo(stream);
-}
-
-
 void LLoadKeyed::PrintDataTo(StringStream* stream) {
   elements()->PrintTo(stream);
   stream->Add("[");
@@ -352,15 +343,6 @@
 }
 
 
-void LStoreKeyedGeneric::PrintDataTo(StringStream* stream) {
-  object()->PrintTo(stream);
-  stream->Add("[");
-  key()->PrintTo(stream);
-  stream->Add("] <- ");
-  value()->PrintTo(stream);
-}
-
-
 void LTransitionElementsKind::PrintDataTo(StringStream* stream) {
   object()->PrintTo(stream);
   stream->Add(" %p -> %p", *original_map(), *transitioned_map());
@@ -887,7 +869,7 @@
 
 LInstruction* LChunkBuilder::DoPrologue(HPrologue* instr) {
   LInstruction* result = new (zone()) LPrologue();
-  if (info_->scope()->num_heap_slots() > 0) {
+  if (info_->scope()->NeedsContext()) {
     result = MarkAsCall(result, instr);
   }
   return result;
@@ -1024,6 +1006,9 @@
 LInstruction* LChunkBuilder::DoCallWithDescriptor(
     HCallWithDescriptor* instr) {
   CallInterfaceDescriptor descriptor = instr->descriptor();
+  DCHECK_EQ(descriptor.GetParameterCount() +
+                LCallWithDescriptor::kImplicitRegisterParameterCount,
+            instr->OperandCount());
 
   LOperand* target = UseRegisterOrConstantAtStart(instr->target());
   ZoneList<LOperand*> ops(instr->OperandCount(), zone());
@@ -1032,15 +1017,20 @@
   // Context
   LOperand* op = UseFixed(instr->OperandAt(1), cp);
   ops.Add(op, zone());
-  // Other register parameters
-  for (int i = LCallWithDescriptor::kImplicitRegisterParameterCount;
-       i < instr->OperandCount(); i++) {
-    op =
-        UseFixed(instr->OperandAt(i),
-                 descriptor.GetRegisterParameter(
-                     i - LCallWithDescriptor::kImplicitRegisterParameterCount));
+  // Load register parameters.
+  int i = 0;
+  for (; i < descriptor.GetRegisterParameterCount(); i++) {
+    op = UseFixed(instr->OperandAt(
+                      i + LCallWithDescriptor::kImplicitRegisterParameterCount),
+                  descriptor.GetRegisterParameter(i));
     ops.Add(op, zone());
   }
+  // Push stack parameters.
+  for (; i < descriptor.GetParameterCount(); i++) {
+    op = UseAny(instr->OperandAt(
+        i + LCallWithDescriptor::kImplicitRegisterParameterCount));
+    AddInstruction(new (zone()) LPushArgument(op), instr);
+  }
 
   LCallWithDescriptor* result = new(zone()) LCallWithDescriptor(
       descriptor, ops, zone());
@@ -2127,26 +2117,6 @@
 }
 
 
-LInstruction* LChunkBuilder::DoStoreKeyedGeneric(HStoreKeyedGeneric* instr) {
-  LOperand* context = UseFixed(instr->context(), cp);
-  LOperand* obj =
-      UseFixed(instr->object(), StoreDescriptor::ReceiverRegister());
-  LOperand* key = UseFixed(instr->key(), StoreDescriptor::NameRegister());
-  LOperand* val = UseFixed(instr->value(), StoreDescriptor::ValueRegister());
-
-  DCHECK(instr->object()->representation().IsTagged());
-  DCHECK(instr->key()->representation().IsTagged());
-  DCHECK(instr->value()->representation().IsTagged());
-
-  LOperand* slot = FixedTemp(StoreWithVectorDescriptor::SlotRegister());
-  LOperand* vector = FixedTemp(StoreWithVectorDescriptor::VectorRegister());
-
-  LStoreKeyedGeneric* result =
-      new (zone()) LStoreKeyedGeneric(context, obj, key, val, slot, vector);
-  return MarkAsCall(result, instr);
-}
-
-
 LInstruction* LChunkBuilder::DoTransitionElementsKind(
     HTransitionElementsKind* instr) {
   if (IsSimpleMapChangeTransition(instr->from_kind(), instr->to_kind())) {
@@ -2223,20 +2193,6 @@
 }
 
 
-LInstruction* LChunkBuilder::DoStoreNamedGeneric(HStoreNamedGeneric* instr) {
-  LOperand* context = UseFixed(instr->context(), cp);
-  LOperand* obj =
-      UseFixed(instr->object(), StoreDescriptor::ReceiverRegister());
-  LOperand* val = UseFixed(instr->value(), StoreDescriptor::ValueRegister());
-  LOperand* slot = FixedTemp(StoreWithVectorDescriptor::SlotRegister());
-  LOperand* vector = FixedTemp(StoreWithVectorDescriptor::VectorRegister());
-
-  LStoreNamedGeneric* result =
-      new (zone()) LStoreNamedGeneric(context, obj, val, slot, vector);
-  return MarkAsCall(result, instr);
-}
-
-
 LInstruction* LChunkBuilder::DoStringAdd(HStringAdd* instr) {
   LOperand* context = UseFixed(instr->context(), cp);
   LOperand* left = UseFixed(instr->left(), a1);
diff --git a/src/crankshaft/mips/lithium-mips.h b/src/crankshaft/mips/lithium-mips.h
index 9711c9a..f49fb93 100644
--- a/src/crankshaft/mips/lithium-mips.h
+++ b/src/crankshaft/mips/lithium-mips.h
@@ -131,9 +131,7 @@
   V(StoreCodeEntry)                          \
   V(StoreContextSlot)                        \
   V(StoreKeyed)                              \
-  V(StoreKeyedGeneric)                       \
   V(StoreNamedField)                         \
-  V(StoreNamedGeneric)                       \
   V(StringAdd)                               \
   V(StringCharCodeAt)                        \
   V(StringCharFromCode)                      \
@@ -1969,33 +1967,6 @@
 };
 
 
-class LStoreNamedGeneric final : public LTemplateInstruction<0, 3, 2> {
- public:
-  LStoreNamedGeneric(LOperand* context, LOperand* object, LOperand* value,
-                     LOperand* slot, LOperand* vector) {
-    inputs_[0] = context;
-    inputs_[1] = object;
-    inputs_[2] = value;
-    temps_[0] = slot;
-    temps_[1] = vector;
-  }
-
-  LOperand* context() { return inputs_[0]; }
-  LOperand* object() { return inputs_[1]; }
-  LOperand* value() { return inputs_[2]; }
-  LOperand* temp_slot() { return temps_[0]; }
-  LOperand* temp_vector() { return temps_[1]; }
-
-  DECLARE_CONCRETE_INSTRUCTION(StoreNamedGeneric, "store-named-generic")
-  DECLARE_HYDROGEN_ACCESSOR(StoreNamedGeneric)
-
-  void PrintDataTo(StringStream* stream) override;
-
-  Handle<Object> name() const { return hydrogen()->name(); }
-  LanguageMode language_mode() { return hydrogen()->language_mode(); }
-};
-
-
 class LStoreKeyed final : public LTemplateInstruction<0, 4, 0> {
  public:
   LStoreKeyed(LOperand* object, LOperand* key, LOperand* value,
@@ -2026,34 +1997,6 @@
 };
 
 
-class LStoreKeyedGeneric final : public LTemplateInstruction<0, 4, 2> {
- public:
-  LStoreKeyedGeneric(LOperand* context, LOperand* object, LOperand* key,
-                     LOperand* value, LOperand* slot, LOperand* vector) {
-    inputs_[0] = context;
-    inputs_[1] = object;
-    inputs_[2] = key;
-    inputs_[3] = value;
-    temps_[0] = slot;
-    temps_[1] = vector;
-  }
-
-  LOperand* context() { return inputs_[0]; }
-  LOperand* object() { return inputs_[1]; }
-  LOperand* key() { return inputs_[2]; }
-  LOperand* value() { return inputs_[3]; }
-  LOperand* temp_slot() { return temps_[0]; }
-  LOperand* temp_vector() { return temps_[1]; }
-
-  DECLARE_CONCRETE_INSTRUCTION(StoreKeyedGeneric, "store-keyed-generic")
-  DECLARE_HYDROGEN_ACCESSOR(StoreKeyedGeneric)
-
-  void PrintDataTo(StringStream* stream) override;
-
-  LanguageMode language_mode() { return hydrogen()->language_mode(); }
-};
-
-
 class LTransitionElementsKind final : public LTemplateInstruction<0, 2, 1> {
  public:
   LTransitionElementsKind(LOperand* object,
diff --git a/src/crankshaft/mips64/lithium-codegen-mips64.cc b/src/crankshaft/mips64/lithium-codegen-mips64.cc
index 924f552..5f93e55 100644
--- a/src/crankshaft/mips64/lithium-codegen-mips64.cc
+++ b/src/crankshaft/mips64/lithium-codegen-mips64.cc
@@ -147,7 +147,7 @@
   Comment(";;; Prologue begin");
 
   // Possibly allocate a local context.
-  if (info()->scope()->num_heap_slots() > 0) {
+  if (info()->scope()->NeedsContext()) {
     Comment(";;; Allocate local context");
     bool need_write_barrier = true;
     // Argument to NewContext is the function, which is in a1.
@@ -155,7 +155,7 @@
     Safepoint::DeoptMode deopt_mode = Safepoint::kNoLazyDeopt;
     if (info()->scope()->is_script_scope()) {
       __ push(a1);
-      __ Push(info()->scope()->GetScopeInfo(info()->isolate()));
+      __ Push(info()->scope()->scope_info());
       __ CallRuntime(Runtime::kNewScriptContext);
       deopt_mode = Safepoint::kLazyDeopt;
     } else {
@@ -2623,20 +2623,6 @@
 }
 
 
-template <class T>
-void LCodeGen::EmitVectorStoreICRegisters(T* instr) {
-  Register vector_register = ToRegister(instr->temp_vector());
-  Register slot_register = ToRegister(instr->temp_slot());
-
-  AllowDeferredHandleDereference vector_structure_check;
-  Handle<TypeFeedbackVector> vector = instr->hydrogen()->feedback_vector();
-  __ li(vector_register, vector);
-  FeedbackVectorSlot slot = instr->hydrogen()->slot();
-  int index = vector->GetIndex(slot);
-  __ li(slot_register, Operand(Smi::FromInt(index)));
-}
-
-
 void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) {
   DCHECK(ToRegister(instr->context()).is(cp));
   DCHECK(ToRegister(instr->result()).is(v0));
@@ -3655,7 +3641,9 @@
   // Math.sqrt(-Infinity) == NaN
   Label done;
   __ Move(temp, static_cast<double>(-V8_INFINITY));
+  // Set up Infinity.
   __ Neg_d(result, temp);
+  // result is overwritten if the branch is not taken.
   __ BranchF(&done, NULL, eq, temp, input);
 
   // Add +0 to convert -0 to +0.
@@ -4013,21 +4001,6 @@
 }
 
 
-void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) {
-  DCHECK(ToRegister(instr->context()).is(cp));
-  DCHECK(ToRegister(instr->object()).is(StoreDescriptor::ReceiverRegister()));
-  DCHECK(ToRegister(instr->value()).is(StoreDescriptor::ValueRegister()));
-
-  EmitVectorStoreICRegisters<LStoreNamedGeneric>(instr);
-
-  __ li(StoreDescriptor::NameRegister(), Operand(instr->name()));
-  Handle<Code> ic =
-      CodeFactory::StoreICInOptimizedCode(isolate(), instr->language_mode())
-          .code();
-  CallCode(ic, RelocInfo::CODE_TARGET, instr);
-}
-
-
 void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) {
   Condition cc = instr->hydrogen()->allow_equality() ? hi : hs;
   Operand operand((int64_t)0);
@@ -4260,21 +4233,6 @@
 }
 
 
-void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) {
-  DCHECK(ToRegister(instr->context()).is(cp));
-  DCHECK(ToRegister(instr->object()).is(StoreDescriptor::ReceiverRegister()));
-  DCHECK(ToRegister(instr->key()).is(StoreDescriptor::NameRegister()));
-  DCHECK(ToRegister(instr->value()).is(StoreDescriptor::ValueRegister()));
-
-  EmitVectorStoreICRegisters<LStoreKeyedGeneric>(instr);
-
-  Handle<Code> ic = CodeFactory::KeyedStoreICInOptimizedCode(
-                        isolate(), instr->language_mode())
-                        .code();
-  CallCode(ic, RelocInfo::CODE_TARGET, instr);
-}
-
-
 void LCodeGen::DoMaybeGrowElements(LMaybeGrowElements* instr) {
   class DeferredMaybeGrowElements final : public LDeferredCode {
    public:
@@ -5248,7 +5206,7 @@
 
   if (instr->size()->IsConstantOperand()) {
     int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
-    CHECK(size <= Page::kMaxRegularHeapObjectSize);
+    CHECK(size <= kMaxRegularHeapObjectSize);
     __ Allocate(size, result, scratch, scratch2, deferred->entry(), flags);
   } else {
     Register size = ToRegister(instr->size());
@@ -5353,7 +5311,7 @@
   }
   if (instr->size()->IsConstantOperand()) {
     int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
-    CHECK(size <= Page::kMaxRegularHeapObjectSize);
+    CHECK(size <= kMaxRegularHeapObjectSize);
     __ FastAllocate(size, result, scratch1, scratch2, flags);
   } else {
     Register size = ToRegister(instr->size());
diff --git a/src/crankshaft/mips64/lithium-codegen-mips64.h b/src/crankshaft/mips64/lithium-codegen-mips64.h
index 41d8b2c..aaa2e6b 100644
--- a/src/crankshaft/mips64/lithium-codegen-mips64.h
+++ b/src/crankshaft/mips64/lithium-codegen-mips64.h
@@ -343,8 +343,6 @@
 
   template <class T>
   void EmitVectorLoadICRegisters(T* instr);
-  template <class T>
-  void EmitVectorStoreICRegisters(T* instr);
 
   ZoneList<Deoptimizer::JumpTableEntry*> jump_table_;
   Scope* const scope_;
diff --git a/src/crankshaft/mips64/lithium-mips64.cc b/src/crankshaft/mips64/lithium-mips64.cc
index 922f12a..0855754 100644
--- a/src/crankshaft/mips64/lithium-mips64.cc
+++ b/src/crankshaft/mips64/lithium-mips64.cc
@@ -311,15 +311,6 @@
 }
 
 
-void LStoreNamedGeneric::PrintDataTo(StringStream* stream) {
-  object()->PrintTo(stream);
-  stream->Add(".");
-  stream->Add(String::cast(*name())->ToCString().get());
-  stream->Add(" <- ");
-  value()->PrintTo(stream);
-}
-
-
 void LLoadKeyed::PrintDataTo(StringStream* stream) {
   elements()->PrintTo(stream);
   stream->Add("[");
@@ -352,15 +343,6 @@
 }
 
 
-void LStoreKeyedGeneric::PrintDataTo(StringStream* stream) {
-  object()->PrintTo(stream);
-  stream->Add("[");
-  key()->PrintTo(stream);
-  stream->Add("] <- ");
-  value()->PrintTo(stream);
-}
-
-
 void LTransitionElementsKind::PrintDataTo(StringStream* stream) {
   object()->PrintTo(stream);
   stream->Add(" %p -> %p", *original_map(), *transitioned_map());
@@ -887,7 +869,7 @@
 
 LInstruction* LChunkBuilder::DoPrologue(HPrologue* instr) {
   LInstruction* result = new (zone()) LPrologue();
-  if (info_->scope()->num_heap_slots() > 0) {
+  if (info_->scope()->NeedsContext()) {
     result = MarkAsCall(result, instr);
   }
   return result;
@@ -1024,6 +1006,9 @@
 LInstruction* LChunkBuilder::DoCallWithDescriptor(
     HCallWithDescriptor* instr) {
   CallInterfaceDescriptor descriptor = instr->descriptor();
+  DCHECK_EQ(descriptor.GetParameterCount() +
+                LCallWithDescriptor::kImplicitRegisterParameterCount,
+            instr->OperandCount());
 
   LOperand* target = UseRegisterOrConstantAtStart(instr->target());
   ZoneList<LOperand*> ops(instr->OperandCount(), zone());
@@ -1032,15 +1017,20 @@
   // Context
   LOperand* op = UseFixed(instr->OperandAt(1), cp);
   ops.Add(op, zone());
-  // Other register parameters
-  for (int i = LCallWithDescriptor::kImplicitRegisterParameterCount;
-       i < instr->OperandCount(); i++) {
-    op =
-        UseFixed(instr->OperandAt(i),
-                 descriptor.GetRegisterParameter(
-                     i - LCallWithDescriptor::kImplicitRegisterParameterCount));
+  // Load register parameters.
+  int i = 0;
+  for (; i < descriptor.GetRegisterParameterCount(); i++) {
+    op = UseFixed(instr->OperandAt(
+                      i + LCallWithDescriptor::kImplicitRegisterParameterCount),
+                  descriptor.GetRegisterParameter(i));
     ops.Add(op, zone());
   }
+  // Push stack parameters.
+  for (; i < descriptor.GetParameterCount(); i++) {
+    op = UseAny(instr->OperandAt(
+        i + LCallWithDescriptor::kImplicitRegisterParameterCount));
+    AddInstruction(new (zone()) LPushArgument(op), instr);
+  }
 
   LCallWithDescriptor* result = new(zone()) LCallWithDescriptor(
       descriptor, ops, zone());
@@ -2132,26 +2122,6 @@
 }
 
 
-LInstruction* LChunkBuilder::DoStoreKeyedGeneric(HStoreKeyedGeneric* instr) {
-  LOperand* context = UseFixed(instr->context(), cp);
-  LOperand* obj =
-      UseFixed(instr->object(), StoreDescriptor::ReceiverRegister());
-  LOperand* key = UseFixed(instr->key(), StoreDescriptor::NameRegister());
-  LOperand* val = UseFixed(instr->value(), StoreDescriptor::ValueRegister());
-
-  DCHECK(instr->object()->representation().IsTagged());
-  DCHECK(instr->key()->representation().IsTagged());
-  DCHECK(instr->value()->representation().IsTagged());
-
-  LOperand* slot = FixedTemp(StoreWithVectorDescriptor::SlotRegister());
-  LOperand* vector = FixedTemp(StoreWithVectorDescriptor::VectorRegister());
-
-  LStoreKeyedGeneric* result =
-      new (zone()) LStoreKeyedGeneric(context, obj, key, val, slot, vector);
-  return MarkAsCall(result, instr);
-}
-
-
 LInstruction* LChunkBuilder::DoTransitionElementsKind(
     HTransitionElementsKind* instr) {
   if (IsSimpleMapChangeTransition(instr->from_kind(), instr->to_kind())) {
@@ -2228,20 +2198,6 @@
 }
 
 
-LInstruction* LChunkBuilder::DoStoreNamedGeneric(HStoreNamedGeneric* instr) {
-  LOperand* context = UseFixed(instr->context(), cp);
-  LOperand* obj =
-      UseFixed(instr->object(), StoreDescriptor::ReceiverRegister());
-  LOperand* val = UseFixed(instr->value(), StoreDescriptor::ValueRegister());
-  LOperand* slot = FixedTemp(StoreWithVectorDescriptor::SlotRegister());
-  LOperand* vector = FixedTemp(StoreWithVectorDescriptor::VectorRegister());
-
-  LStoreNamedGeneric* result =
-      new (zone()) LStoreNamedGeneric(context, obj, val, slot, vector);
-  return MarkAsCall(result, instr);
-}
-
-
 LInstruction* LChunkBuilder::DoStringAdd(HStringAdd* instr) {
   LOperand* context = UseFixed(instr->context(), cp);
   LOperand* left = UseFixed(instr->left(), a1);
diff --git a/src/crankshaft/mips64/lithium-mips64.h b/src/crankshaft/mips64/lithium-mips64.h
index f8b5c48..7bc89af 100644
--- a/src/crankshaft/mips64/lithium-mips64.h
+++ b/src/crankshaft/mips64/lithium-mips64.h
@@ -133,9 +133,7 @@
   V(StoreCodeEntry)                          \
   V(StoreContextSlot)                        \
   V(StoreKeyed)                              \
-  V(StoreKeyedGeneric)                       \
   V(StoreNamedField)                         \
-  V(StoreNamedGeneric)                       \
   V(StringAdd)                               \
   V(StringCharCodeAt)                        \
   V(StringCharFromCode)                      \
@@ -2015,33 +2013,6 @@
 };
 
 
-class LStoreNamedGeneric final : public LTemplateInstruction<0, 3, 2> {
- public:
-  LStoreNamedGeneric(LOperand* context, LOperand* object, LOperand* value,
-                     LOperand* slot, LOperand* vector) {
-    inputs_[0] = context;
-    inputs_[1] = object;
-    inputs_[2] = value;
-    temps_[0] = slot;
-    temps_[1] = vector;
-  }
-
-  LOperand* context() { return inputs_[0]; }
-  LOperand* object() { return inputs_[1]; }
-  LOperand* value() { return inputs_[2]; }
-  LOperand* temp_slot() { return temps_[0]; }
-  LOperand* temp_vector() { return temps_[1]; }
-
-  DECLARE_CONCRETE_INSTRUCTION(StoreNamedGeneric, "store-named-generic")
-  DECLARE_HYDROGEN_ACCESSOR(StoreNamedGeneric)
-
-  void PrintDataTo(StringStream* stream) override;
-
-  Handle<Object> name() const { return hydrogen()->name(); }
-  LanguageMode language_mode() { return hydrogen()->language_mode(); }
-};
-
-
 class LStoreKeyed final : public LTemplateInstruction<0, 4, 0> {
  public:
   LStoreKeyed(LOperand* object, LOperand* key, LOperand* value,
@@ -2072,34 +2043,6 @@
 };
 
 
-class LStoreKeyedGeneric final : public LTemplateInstruction<0, 4, 2> {
- public:
-  LStoreKeyedGeneric(LOperand* context, LOperand* object, LOperand* key,
-                     LOperand* value, LOperand* slot, LOperand* vector) {
-    inputs_[0] = context;
-    inputs_[1] = object;
-    inputs_[2] = key;
-    inputs_[3] = value;
-    temps_[0] = slot;
-    temps_[1] = vector;
-  }
-
-  LOperand* context() { return inputs_[0]; }
-  LOperand* object() { return inputs_[1]; }
-  LOperand* key() { return inputs_[2]; }
-  LOperand* value() { return inputs_[3]; }
-  LOperand* temp_slot() { return temps_[0]; }
-  LOperand* temp_vector() { return temps_[1]; }
-
-  DECLARE_CONCRETE_INSTRUCTION(StoreKeyedGeneric, "store-keyed-generic")
-  DECLARE_HYDROGEN_ACCESSOR(StoreKeyedGeneric)
-
-  void PrintDataTo(StringStream* stream) override;
-
-  LanguageMode language_mode() { return hydrogen()->language_mode(); }
-};
-
-
 class LTransitionElementsKind final : public LTemplateInstruction<0, 2, 1> {
  public:
   LTransitionElementsKind(LOperand* object,
diff --git a/src/crankshaft/ppc/lithium-codegen-ppc.cc b/src/crankshaft/ppc/lithium-codegen-ppc.cc
index e1203b8..95018e8 100644
--- a/src/crankshaft/ppc/lithium-codegen-ppc.cc
+++ b/src/crankshaft/ppc/lithium-codegen-ppc.cc
@@ -158,7 +158,7 @@
   Comment(";;; Prologue begin");
 
   // Possibly allocate a local context.
-  if (info()->scope()->num_heap_slots() > 0) {
+  if (info()->scope()->NeedsContext()) {
     Comment(";;; Allocate local context");
     bool need_write_barrier = true;
     // Argument to NewContext is the function, which is in r4.
@@ -166,7 +166,7 @@
     Safepoint::DeoptMode deopt_mode = Safepoint::kNoLazyDeopt;
     if (info()->scope()->is_script_scope()) {
       __ push(r4);
-      __ Push(info()->scope()->GetScopeInfo(info()->isolate()));
+      __ Push(info()->scope()->scope_info());
       __ CallRuntime(Runtime::kNewScriptContext);
       deopt_mode = Safepoint::kLazyDeopt;
     } else {
@@ -2677,20 +2677,6 @@
 }
 
 
-template <class T>
-void LCodeGen::EmitVectorStoreICRegisters(T* instr) {
-  Register vector_register = ToRegister(instr->temp_vector());
-  Register slot_register = ToRegister(instr->temp_slot());
-
-  AllowDeferredHandleDereference vector_structure_check;
-  Handle<TypeFeedbackVector> vector = instr->hydrogen()->feedback_vector();
-  __ Move(vector_register, vector);
-  FeedbackVectorSlot slot = instr->hydrogen()->slot();
-  int index = vector->GetIndex(slot);
-  __ LoadSmiLiteral(slot_register, Smi::FromInt(index));
-}
-
-
 void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) {
   DCHECK(ToRegister(instr->context()).is(cp));
   DCHECK(ToRegister(instr->result()).is(r3));
@@ -4085,21 +4071,6 @@
 }
 
 
-void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) {
-  DCHECK(ToRegister(instr->context()).is(cp));
-  DCHECK(ToRegister(instr->object()).is(StoreDescriptor::ReceiverRegister()));
-  DCHECK(ToRegister(instr->value()).is(StoreDescriptor::ValueRegister()));
-
-  EmitVectorStoreICRegisters<LStoreNamedGeneric>(instr);
-
-  __ mov(StoreDescriptor::NameRegister(), Operand(instr->name()));
-  Handle<Code> ic =
-      CodeFactory::StoreICInOptimizedCode(isolate(), instr->language_mode())
-          .code();
-  CallCode(ic, RelocInfo::CODE_TARGET, instr);
-}
-
-
 void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) {
   Representation representation = instr->hydrogen()->length()->representation();
   DCHECK(representation.Equals(instr->hydrogen()->index()->representation()));
@@ -4344,21 +4315,6 @@
 }
 
 
-void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) {
-  DCHECK(ToRegister(instr->context()).is(cp));
-  DCHECK(ToRegister(instr->object()).is(StoreDescriptor::ReceiverRegister()));
-  DCHECK(ToRegister(instr->key()).is(StoreDescriptor::NameRegister()));
-  DCHECK(ToRegister(instr->value()).is(StoreDescriptor::ValueRegister()));
-
-  EmitVectorStoreICRegisters<LStoreKeyedGeneric>(instr);
-
-  Handle<Code> ic = CodeFactory::KeyedStoreICInOptimizedCode(
-                        isolate(), instr->language_mode())
-                        .code();
-  CallCode(ic, RelocInfo::CODE_TARGET, instr);
-}
-
-
 void LCodeGen::DoMaybeGrowElements(LMaybeGrowElements* instr) {
   class DeferredMaybeGrowElements final : public LDeferredCode {
    public:
@@ -5324,7 +5280,7 @@
 
   if (instr->size()->IsConstantOperand()) {
     int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
-    CHECK(size <= Page::kMaxRegularHeapObjectSize);
+    CHECK(size <= kMaxRegularHeapObjectSize);
     __ Allocate(size, result, scratch, scratch2, deferred->entry(), flags);
   } else {
     Register size = ToRegister(instr->size());
@@ -5430,7 +5386,7 @@
   }
   if (instr->size()->IsConstantOperand()) {
     int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
-    CHECK(size <= Page::kMaxRegularHeapObjectSize);
+    CHECK(size <= kMaxRegularHeapObjectSize);
     __ FastAllocate(size, result, scratch1, scratch2, flags);
   } else {
     Register size = ToRegister(instr->size());
diff --git a/src/crankshaft/ppc/lithium-codegen-ppc.h b/src/crankshaft/ppc/lithium-codegen-ppc.h
index fe212d4..a4a90a7 100644
--- a/src/crankshaft/ppc/lithium-codegen-ppc.h
+++ b/src/crankshaft/ppc/lithium-codegen-ppc.h
@@ -277,8 +277,6 @@
 
   template <class T>
   void EmitVectorLoadICRegisters(T* instr);
-  template <class T>
-  void EmitVectorStoreICRegisters(T* instr);
 
   ZoneList<Deoptimizer::JumpTableEntry> jump_table_;
   Scope* const scope_;
diff --git a/src/crankshaft/ppc/lithium-ppc.cc b/src/crankshaft/ppc/lithium-ppc.cc
index 958620c..738cf23 100644
--- a/src/crankshaft/ppc/lithium-ppc.cc
+++ b/src/crankshaft/ppc/lithium-ppc.cc
@@ -317,15 +317,6 @@
 }
 
 
-void LStoreNamedGeneric::PrintDataTo(StringStream* stream) {
-  object()->PrintTo(stream);
-  stream->Add(".");
-  stream->Add(String::cast(*name())->ToCString().get());
-  stream->Add(" <- ");
-  value()->PrintTo(stream);
-}
-
-
 void LLoadKeyed::PrintDataTo(StringStream* stream) {
   elements()->PrintTo(stream);
   stream->Add("[");
@@ -358,15 +349,6 @@
 }
 
 
-void LStoreKeyedGeneric::PrintDataTo(StringStream* stream) {
-  object()->PrintTo(stream);
-  stream->Add("[");
-  key()->PrintTo(stream);
-  stream->Add("] <- ");
-  value()->PrintTo(stream);
-}
-
-
 void LTransitionElementsKind::PrintDataTo(StringStream* stream) {
   object()->PrintTo(stream);
   stream->Add(" %p -> %p", *original_map(), *transitioned_map());
@@ -892,7 +874,7 @@
 
 LInstruction* LChunkBuilder::DoPrologue(HPrologue* instr) {
   LInstruction* result = new (zone()) LPrologue();
-  if (info_->scope()->num_heap_slots() > 0) {
+  if (info_->scope()->NeedsContext()) {
     result = MarkAsCall(result, instr);
   }
   return result;
@@ -1030,6 +1012,9 @@
 
 LInstruction* LChunkBuilder::DoCallWithDescriptor(HCallWithDescriptor* instr) {
   CallInterfaceDescriptor descriptor = instr->descriptor();
+  DCHECK_EQ(descriptor.GetParameterCount() +
+                LCallWithDescriptor::kImplicitRegisterParameterCount,
+            instr->OperandCount());
 
   LOperand* target = UseRegisterOrConstantAtStart(instr->target());
   ZoneList<LOperand*> ops(instr->OperandCount(), zone());
@@ -1038,15 +1023,20 @@
   // Context
   LOperand* op = UseFixed(instr->OperandAt(1), cp);
   ops.Add(op, zone());
-  // Other register parameters
-  for (int i = LCallWithDescriptor::kImplicitRegisterParameterCount;
-       i < instr->OperandCount(); i++) {
-    op =
-        UseFixed(instr->OperandAt(i),
-                 descriptor.GetRegisterParameter(
-                     i - LCallWithDescriptor::kImplicitRegisterParameterCount));
+  // Load register parameters.
+  int i = 0;
+  for (; i < descriptor.GetRegisterParameterCount(); i++) {
+    op = UseFixed(instr->OperandAt(
+                      i + LCallWithDescriptor::kImplicitRegisterParameterCount),
+                  descriptor.GetRegisterParameter(i));
     ops.Add(op, zone());
   }
+  // Push stack parameters.
+  for (; i < descriptor.GetParameterCount(); i++) {
+    op = UseAny(instr->OperandAt(
+        i + LCallWithDescriptor::kImplicitRegisterParameterCount));
+    AddInstruction(new (zone()) LPushArgument(op), instr);
+  }
 
   LCallWithDescriptor* result =
       new (zone()) LCallWithDescriptor(descriptor, ops, zone());
@@ -2150,26 +2140,6 @@
 }
 
 
-LInstruction* LChunkBuilder::DoStoreKeyedGeneric(HStoreKeyedGeneric* instr) {
-  LOperand* context = UseFixed(instr->context(), cp);
-  LOperand* obj =
-      UseFixed(instr->object(), StoreDescriptor::ReceiverRegister());
-  LOperand* key = UseFixed(instr->key(), StoreDescriptor::NameRegister());
-  LOperand* val = UseFixed(instr->value(), StoreDescriptor::ValueRegister());
-
-  DCHECK(instr->object()->representation().IsTagged());
-  DCHECK(instr->key()->representation().IsTagged());
-  DCHECK(instr->value()->representation().IsTagged());
-
-  LOperand* slot = FixedTemp(StoreWithVectorDescriptor::SlotRegister());
-  LOperand* vector = FixedTemp(StoreWithVectorDescriptor::VectorRegister());
-
-  LStoreKeyedGeneric* result =
-      new (zone()) LStoreKeyedGeneric(context, obj, key, val, slot, vector);
-  return MarkAsCall(result, instr);
-}
-
-
 LInstruction* LChunkBuilder::DoTransitionElementsKind(
     HTransitionElementsKind* instr) {
   if (IsSimpleMapChangeTransition(instr->from_kind(), instr->to_kind())) {
@@ -2245,19 +2215,6 @@
 }
 
 
-LInstruction* LChunkBuilder::DoStoreNamedGeneric(HStoreNamedGeneric* instr) {
-  LOperand* context = UseFixed(instr->context(), cp);
-  LOperand* obj =
-      UseFixed(instr->object(), StoreDescriptor::ReceiverRegister());
-  LOperand* val = UseFixed(instr->value(), StoreDescriptor::ValueRegister());
-  LOperand* slot = FixedTemp(StoreWithVectorDescriptor::SlotRegister());
-  LOperand* vector = FixedTemp(StoreWithVectorDescriptor::VectorRegister());
-  LStoreNamedGeneric* result =
-      new (zone()) LStoreNamedGeneric(context, obj, val, slot, vector);
-  return MarkAsCall(result, instr);
-}
-
-
 LInstruction* LChunkBuilder::DoStringAdd(HStringAdd* instr) {
   LOperand* context = UseFixed(instr->context(), cp);
   LOperand* left = UseFixed(instr->left(), r4);
diff --git a/src/crankshaft/ppc/lithium-ppc.h b/src/crankshaft/ppc/lithium-ppc.h
index f26bfc5..626f00a 100644
--- a/src/crankshaft/ppc/lithium-ppc.h
+++ b/src/crankshaft/ppc/lithium-ppc.h
@@ -134,9 +134,7 @@
   V(StoreCodeEntry)                          \
   V(StoreContextSlot)                        \
   V(StoreKeyed)                              \
-  V(StoreKeyedGeneric)                       \
   V(StoreNamedField)                         \
-  V(StoreNamedGeneric)                       \
   V(StringAdd)                               \
   V(StringCharCodeAt)                        \
   V(StringCharFromCode)                      \
@@ -1954,33 +1952,6 @@
 };
 
 
-class LStoreNamedGeneric final : public LTemplateInstruction<0, 3, 2> {
- public:
-  LStoreNamedGeneric(LOperand* context, LOperand* object, LOperand* value,
-                     LOperand* slot, LOperand* vector) {
-    inputs_[0] = context;
-    inputs_[1] = object;
-    inputs_[2] = value;
-    temps_[0] = slot;
-    temps_[1] = vector;
-  }
-
-  LOperand* context() { return inputs_[0]; }
-  LOperand* object() { return inputs_[1]; }
-  LOperand* value() { return inputs_[2]; }
-  LOperand* temp_slot() { return temps_[0]; }
-  LOperand* temp_vector() { return temps_[1]; }
-
-  DECLARE_CONCRETE_INSTRUCTION(StoreNamedGeneric, "store-named-generic")
-  DECLARE_HYDROGEN_ACCESSOR(StoreNamedGeneric)
-
-  void PrintDataTo(StringStream* stream) override;
-
-  Handle<Object> name() const { return hydrogen()->name(); }
-  LanguageMode language_mode() { return hydrogen()->language_mode(); }
-};
-
-
 class LStoreKeyed final : public LTemplateInstruction<0, 4, 0> {
  public:
   LStoreKeyed(LOperand* object, LOperand* key, LOperand* value,
@@ -2015,34 +1986,6 @@
 };
 
 
-class LStoreKeyedGeneric final : public LTemplateInstruction<0, 4, 2> {
- public:
-  LStoreKeyedGeneric(LOperand* context, LOperand* object, LOperand* key,
-                     LOperand* value, LOperand* slot, LOperand* vector) {
-    inputs_[0] = context;
-    inputs_[1] = object;
-    inputs_[2] = key;
-    inputs_[3] = value;
-    temps_[0] = slot;
-    temps_[1] = vector;
-  }
-
-  LOperand* context() { return inputs_[0]; }
-  LOperand* object() { return inputs_[1]; }
-  LOperand* key() { return inputs_[2]; }
-  LOperand* value() { return inputs_[3]; }
-  LOperand* temp_slot() { return temps_[0]; }
-  LOperand* temp_vector() { return temps_[1]; }
-
-  DECLARE_CONCRETE_INSTRUCTION(StoreKeyedGeneric, "store-keyed-generic")
-  DECLARE_HYDROGEN_ACCESSOR(StoreKeyedGeneric)
-
-  void PrintDataTo(StringStream* stream) override;
-
-  LanguageMode language_mode() { return hydrogen()->language_mode(); }
-};
-
-
 class LTransitionElementsKind final : public LTemplateInstruction<0, 2, 1> {
  public:
   LTransitionElementsKind(LOperand* object, LOperand* context,
diff --git a/src/crankshaft/s390/lithium-codegen-s390.cc b/src/crankshaft/s390/lithium-codegen-s390.cc
index ec2a85a..4511bb9 100644
--- a/src/crankshaft/s390/lithium-codegen-s390.cc
+++ b/src/crankshaft/s390/lithium-codegen-s390.cc
@@ -66,8 +66,8 @@
   BitVector* doubles = chunk()->allocated_double_registers();
   BitVector::Iterator save_iterator(doubles);
   while (!save_iterator.Done()) {
-    __ std(DoubleRegister::from_code(save_iterator.Current()),
-           MemOperand(sp, count * kDoubleSize));
+    __ StoreDouble(DoubleRegister::from_code(save_iterator.Current()),
+                   MemOperand(sp, count * kDoubleSize));
     save_iterator.Advance();
     count++;
   }
@@ -81,8 +81,8 @@
   BitVector::Iterator save_iterator(doubles);
   int count = 0;
   while (!save_iterator.Done()) {
-    __ ld(DoubleRegister::from_code(save_iterator.Current()),
-          MemOperand(sp, count * kDoubleSize));
+    __ LoadDouble(DoubleRegister::from_code(save_iterator.Current()),
+                  MemOperand(sp, count * kDoubleSize));
     save_iterator.Advance();
     count++;
   }
@@ -148,7 +148,7 @@
   Comment(";;; Prologue begin");
 
   // Possibly allocate a local context.
-  if (info()->scope()->num_heap_slots() > 0) {
+  if (info()->scope()->NeedsContext()) {
     Comment(";;; Allocate local context");
     bool need_write_barrier = true;
     // Argument to NewContext is the function, which is in r3.
@@ -156,7 +156,7 @@
     Safepoint::DeoptMode deopt_mode = Safepoint::kNoLazyDeopt;
     if (info()->scope()->is_script_scope()) {
       __ push(r3);
-      __ Push(info()->scope()->GetScopeInfo(info()->isolate()));
+      __ Push(info()->scope()->scope_info());
       __ CallRuntime(Runtime::kNewScriptContext);
       deopt_mode = Safepoint::kLazyDeopt;
     } else {
@@ -2089,7 +2089,8 @@
       EmitBranch(instr, al);
     } else if (type.IsHeapNumber()) {
       DCHECK(!info()->IsStub());
-      __ ld(dbl_scratch, FieldMemOperand(reg, HeapNumber::kValueOffset));
+      __ LoadDouble(dbl_scratch,
+                    FieldMemOperand(reg, HeapNumber::kValueOffset));
       // Test the double value. Zero and NaN are false.
       __ lzdr(kDoubleRegZero);
       __ cdbr(dbl_scratch, kDoubleRegZero);
@@ -2652,19 +2653,6 @@
   __ LoadSmiLiteral(slot_register, Smi::FromInt(index));
 }
 
-template <class T>
-void LCodeGen::EmitVectorStoreICRegisters(T* instr) {
-  Register vector_register = ToRegister(instr->temp_vector());
-  Register slot_register = ToRegister(instr->temp_slot());
-
-  AllowDeferredHandleDereference vector_structure_check;
-  Handle<TypeFeedbackVector> vector = instr->hydrogen()->feedback_vector();
-  __ Move(vector_register, vector);
-  FeedbackVectorSlot slot = instr->hydrogen()->slot();
-  int index = vector->GetIndex(slot);
-  __ LoadSmiLiteral(slot_register, Smi::FromInt(index));
-}
-
 void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) {
   DCHECK(ToRegister(instr->context()).is(cp));
   DCHECK(ToRegister(instr->result()).is(r2));
@@ -2739,7 +2727,7 @@
   if (instr->hydrogen()->representation().IsDouble()) {
     DCHECK(access.IsInobject());
     DoubleRegister result = ToDoubleRegister(instr->result());
-    __ ld(result, FieldMemOperand(object, offset));
+    __ LoadDouble(result, FieldMemOperand(object, offset));
     return;
   }
 
@@ -2889,9 +2877,10 @@
       }
     } else {  // i.e. elements_kind == EXTERNAL_DOUBLE_ELEMENTS
       if (!use_scratch) {
-        __ ld(result, MemOperand(external_pointer, base_offset));
+        __ LoadDouble(result, MemOperand(external_pointer, base_offset));
       } else {
-        __ ld(result, MemOperand(scratch0(), external_pointer, base_offset));
+        __ LoadDouble(result,
+                      MemOperand(scratch0(), external_pointer, base_offset));
       }
     }
   } else {
@@ -2986,9 +2975,9 @@
   }
 
   if (!use_scratch) {
-    __ ld(result, MemOperand(elements, base_offset));
+    __ LoadDouble(result, MemOperand(elements, base_offset));
   } else {
-    __ ld(result, MemOperand(scratch, elements, base_offset));
+    __ LoadDouble(result, MemOperand(scratch, elements, base_offset));
   }
 
   if (instr->hydrogen()->RequiresHoleCheck()) {
@@ -3919,7 +3908,7 @@
     DCHECK(!hinstr->NeedsWriteBarrier());
     DoubleRegister value = ToDoubleRegister(instr->value());
     DCHECK(offset >= 0);
-    __ std(value, FieldMemOperand(object, offset));
+    __ StoreDouble(value, FieldMemOperand(object, offset));
     return;
   }
 
@@ -3944,7 +3933,7 @@
   if (FLAG_unbox_double_fields && representation.IsDouble()) {
     DCHECK(access.IsInobject());
     DoubleRegister value = ToDoubleRegister(instr->value());
-    __ std(value, FieldMemOperand(object, offset));
+    __ StoreDouble(value, FieldMemOperand(object, offset));
     if (hinstr->NeedsWriteBarrier()) {
       record_value = ToRegister(instr->value());
     }
@@ -3984,20 +3973,6 @@
   }
 }
 
-void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) {
-  DCHECK(ToRegister(instr->context()).is(cp));
-  DCHECK(ToRegister(instr->object()).is(StoreDescriptor::ReceiverRegister()));
-  DCHECK(ToRegister(instr->value()).is(StoreDescriptor::ValueRegister()));
-
-  EmitVectorStoreICRegisters<LStoreNamedGeneric>(instr);
-
-  __ mov(StoreDescriptor::NameRegister(), Operand(instr->name()));
-  Handle<Code> ic =
-      CodeFactory::StoreICInOptimizedCode(isolate(), instr->language_mode())
-          .code();
-  CallCode(ic, RelocInfo::CODE_TARGET, instr);
-}
-
 void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) {
   Representation representation = instr->hydrogen()->length()->representation();
   DCHECK(representation.Equals(instr->hydrogen()->index()->representation()));
@@ -4187,14 +4162,15 @@
     __ CanonicalizeNaN(double_scratch, value);
     DCHECK(address_offset >= 0);
     if (use_scratch)
-      __ std(double_scratch, MemOperand(scratch, elements, address_offset));
+      __ StoreDouble(double_scratch,
+                     MemOperand(scratch, elements, address_offset));
     else
-      __ std(double_scratch, MemOperand(elements, address_offset));
+      __ StoreDouble(double_scratch, MemOperand(elements, address_offset));
   } else {
     if (use_scratch)
-      __ std(value, MemOperand(scratch, elements, address_offset));
+      __ StoreDouble(value, MemOperand(scratch, elements, address_offset));
     else
-      __ std(value, MemOperand(elements, address_offset));
+      __ StoreDouble(value, MemOperand(elements, address_offset));
   }
 }
 
@@ -4286,20 +4262,6 @@
   }
 }
 
-void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) {
-  DCHECK(ToRegister(instr->context()).is(cp));
-  DCHECK(ToRegister(instr->object()).is(StoreDescriptor::ReceiverRegister()));
-  DCHECK(ToRegister(instr->key()).is(StoreDescriptor::NameRegister()));
-  DCHECK(ToRegister(instr->value()).is(StoreDescriptor::ValueRegister()));
-
-  EmitVectorStoreICRegisters<LStoreKeyedGeneric>(instr);
-
-  Handle<Code> ic = CodeFactory::KeyedStoreICInOptimizedCode(
-                        isolate(), instr->language_mode())
-                        .code();
-  CallCode(ic, RelocInfo::CODE_TARGET, instr);
-}
-
 void LCodeGen::DoMaybeGrowElements(LMaybeGrowElements* instr) {
   class DeferredMaybeGrowElements final : public LDeferredCode {
    public:
@@ -4789,7 +4751,8 @@
       DeoptimizeIf(ne, instr, DeoptimizeReason::kNotAHeapNumber);
     }
     // load heap number
-    __ ld(result_reg, FieldMemOperand(input_reg, HeapNumber::kValueOffset));
+    __ LoadDouble(result_reg,
+                  FieldMemOperand(input_reg, HeapNumber::kValueOffset));
     if (deoptimize_on_minus_zero) {
       __ TestDoubleIsMinusZero(result_reg, scratch, ip);
       DeoptimizeIf(eq, instr, DeoptimizeReason::kMinusZero);
@@ -4801,7 +4764,8 @@
       __ CompareRoot(input_reg, Heap::kUndefinedValueRootIndex);
       DeoptimizeIf(ne, instr, DeoptimizeReason::kNotAHeapNumberUndefined);
       __ LoadRoot(scratch, Heap::kNanValueRootIndex);
-      __ ld(result_reg, FieldMemOperand(scratch, HeapNumber::kValueOffset));
+      __ LoadDouble(result_reg,
+                    FieldMemOperand(scratch, HeapNumber::kValueOffset));
       __ b(&done, Label::kNear);
     }
   } else {
@@ -4862,8 +4826,8 @@
     // Deoptimize if we don't have a heap number.
     DeoptimizeIf(ne, instr, DeoptimizeReason::kNotAHeapNumber);
 
-    __ ld(double_scratch2,
-          FieldMemOperand(input_reg, HeapNumber::kValueOffset));
+    __ LoadDouble(double_scratch2,
+                  FieldMemOperand(input_reg, HeapNumber::kValueOffset));
     if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
       // preserve heap number pointer in scratch2 for minus zero check below
       __ LoadRR(scratch2, input_reg);
@@ -5177,7 +5141,7 @@
 
   // Heap number
   __ bind(&heap_number);
-  __ ld(temp_reg, FieldMemOperand(input_reg, HeapNumber::kValueOffset));
+  __ LoadDouble(temp_reg, FieldMemOperand(input_reg, HeapNumber::kValueOffset));
   __ ClampDoubleToUint8(result_reg, temp_reg, double_scratch0());
   __ b(&done, Label::kNear);
 
@@ -5224,7 +5188,7 @@
 
   if (instr->size()->IsConstantOperand()) {
     int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
-    CHECK(size <= Page::kMaxRegularHeapObjectSize);
+    CHECK(size <= kMaxRegularHeapObjectSize);
     __ Allocate(size, result, scratch, scratch2, deferred->entry(), flags);
   } else {
     Register size = ToRegister(instr->size());
@@ -5337,7 +5301,7 @@
   }
   if (instr->size()->IsConstantOperand()) {
     int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
-    CHECK(size <= Page::kMaxRegularHeapObjectSize);
+    CHECK(size <= kMaxRegularHeapObjectSize);
     __ FastAllocate(size, result, scratch1, scratch2, flags);
   } else {
     Register size = ToRegister(instr->size());
diff --git a/src/crankshaft/s390/lithium-codegen-s390.h b/src/crankshaft/s390/lithium-codegen-s390.h
index e5df255..30e9d2b 100644
--- a/src/crankshaft/s390/lithium-codegen-s390.h
+++ b/src/crankshaft/s390/lithium-codegen-s390.h
@@ -276,8 +276,6 @@
 
   template <class T>
   void EmitVectorLoadICRegisters(T* instr);
-  template <class T>
-  void EmitVectorStoreICRegisters(T* instr);
 
   ZoneList<Deoptimizer::JumpTableEntry> jump_table_;
   Scope* const scope_;
diff --git a/src/crankshaft/s390/lithium-s390.cc b/src/crankshaft/s390/lithium-s390.cc
index 3048e4c..bf9dfd5 100644
--- a/src/crankshaft/s390/lithium-s390.cc
+++ b/src/crankshaft/s390/lithium-s390.cc
@@ -287,14 +287,6 @@
   value()->PrintTo(stream);
 }
 
-void LStoreNamedGeneric::PrintDataTo(StringStream* stream) {
-  object()->PrintTo(stream);
-  stream->Add(".");
-  stream->Add(String::cast(*name())->ToCString().get());
-  stream->Add(" <- ");
-  value()->PrintTo(stream);
-}
-
 void LLoadKeyed::PrintDataTo(StringStream* stream) {
   elements()->PrintTo(stream);
   stream->Add("[");
@@ -325,14 +317,6 @@
   }
 }
 
-void LStoreKeyedGeneric::PrintDataTo(StringStream* stream) {
-  object()->PrintTo(stream);
-  stream->Add("[");
-  key()->PrintTo(stream);
-  stream->Add("] <- ");
-  value()->PrintTo(stream);
-}
-
 void LTransitionElementsKind::PrintDataTo(StringStream* stream) {
   object()->PrintTo(stream);
   stream->Add(" %p -> %p", *original_map(), *transitioned_map());
@@ -815,7 +799,7 @@
 
 LInstruction* LChunkBuilder::DoPrologue(HPrologue* instr) {
   LInstruction* result = new (zone()) LPrologue();
-  if (info_->scope()->num_heap_slots() > 0) {
+  if (info_->scope()->NeedsContext()) {
     result = MarkAsCall(result, instr);
   }
   return result;
@@ -937,6 +921,9 @@
 
 LInstruction* LChunkBuilder::DoCallWithDescriptor(HCallWithDescriptor* instr) {
   CallInterfaceDescriptor descriptor = instr->descriptor();
+  DCHECK_EQ(descriptor.GetParameterCount() +
+                LCallWithDescriptor::kImplicitRegisterParameterCount,
+            instr->OperandCount());
 
   LOperand* target = UseRegisterOrConstantAtStart(instr->target());
   ZoneList<LOperand*> ops(instr->OperandCount(), zone());
@@ -945,15 +932,20 @@
   // Context
   LOperand* op = UseFixed(instr->OperandAt(1), cp);
   ops.Add(op, zone());
-  // Other register parameters
-  for (int i = LCallWithDescriptor::kImplicitRegisterParameterCount;
-       i < instr->OperandCount(); i++) {
-    op =
-        UseFixed(instr->OperandAt(i),
-                 descriptor.GetRegisterParameter(
-                     i - LCallWithDescriptor::kImplicitRegisterParameterCount));
+  // Load register parameters.
+  int i = 0;
+  for (; i < descriptor.GetRegisterParameterCount(); i++) {
+    op = UseFixed(instr->OperandAt(
+                      i + LCallWithDescriptor::kImplicitRegisterParameterCount),
+                  descriptor.GetRegisterParameter(i));
     ops.Add(op, zone());
   }
+  // Push stack parameters.
+  for (; i < descriptor.GetParameterCount(); i++) {
+    op = UseAny(instr->OperandAt(
+        i + LCallWithDescriptor::kImplicitRegisterParameterCount));
+    AddInstruction(new (zone()) LPushArgument(op), instr);
+  }
 
   LCallWithDescriptor* result =
       new (zone()) LCallWithDescriptor(descriptor, ops, zone());
@@ -1968,25 +1960,6 @@
   return new (zone()) LStoreKeyed(backing_store, key, val, backing_store_owner);
 }
 
-LInstruction* LChunkBuilder::DoStoreKeyedGeneric(HStoreKeyedGeneric* instr) {
-  LOperand* context = UseFixed(instr->context(), cp);
-  LOperand* obj =
-      UseFixed(instr->object(), StoreDescriptor::ReceiverRegister());
-  LOperand* key = UseFixed(instr->key(), StoreDescriptor::NameRegister());
-  LOperand* val = UseFixed(instr->value(), StoreDescriptor::ValueRegister());
-
-  DCHECK(instr->object()->representation().IsTagged());
-  DCHECK(instr->key()->representation().IsTagged());
-  DCHECK(instr->value()->representation().IsTagged());
-
-  LOperand* slot = FixedTemp(StoreWithVectorDescriptor::SlotRegister());
-  LOperand* vector = FixedTemp(StoreWithVectorDescriptor::VectorRegister());
-
-  LStoreKeyedGeneric* result =
-      new (zone()) LStoreKeyedGeneric(context, obj, key, val, slot, vector);
-  return MarkAsCall(result, instr);
-}
-
 LInstruction* LChunkBuilder::DoTransitionElementsKind(
     HTransitionElementsKind* instr) {
   if (IsSimpleMapChangeTransition(instr->from_kind(), instr->to_kind())) {
@@ -2058,18 +2031,6 @@
   return new (zone()) LStoreNamedField(obj, val, temp);
 }
 
-LInstruction* LChunkBuilder::DoStoreNamedGeneric(HStoreNamedGeneric* instr) {
-  LOperand* context = UseFixed(instr->context(), cp);
-  LOperand* obj =
-      UseFixed(instr->object(), StoreDescriptor::ReceiverRegister());
-  LOperand* val = UseFixed(instr->value(), StoreDescriptor::ValueRegister());
-  LOperand* slot = FixedTemp(StoreWithVectorDescriptor::SlotRegister());
-  LOperand* vector = FixedTemp(StoreWithVectorDescriptor::VectorRegister());
-  LStoreNamedGeneric* result =
-      new (zone()) LStoreNamedGeneric(context, obj, val, slot, vector);
-  return MarkAsCall(result, instr);
-}
-
 LInstruction* LChunkBuilder::DoStringAdd(HStringAdd* instr) {
   LOperand* context = UseFixed(instr->context(), cp);
   LOperand* left = UseFixed(instr->left(), r3);
diff --git a/src/crankshaft/s390/lithium-s390.h b/src/crankshaft/s390/lithium-s390.h
index 1f1e520..70670ac 100644
--- a/src/crankshaft/s390/lithium-s390.h
+++ b/src/crankshaft/s390/lithium-s390.h
@@ -132,9 +132,7 @@
   V(StoreCodeEntry)                          \
   V(StoreContextSlot)                        \
   V(StoreKeyed)                              \
-  V(StoreKeyedGeneric)                       \
   V(StoreNamedField)                         \
-  V(StoreNamedGeneric)                       \
   V(StringAdd)                               \
   V(StringCharCodeAt)                        \
   V(StringCharFromCode)                      \
@@ -1822,32 +1820,6 @@
   }
 };
 
-class LStoreNamedGeneric final : public LTemplateInstruction<0, 3, 2> {
- public:
-  LStoreNamedGeneric(LOperand* context, LOperand* object, LOperand* value,
-                     LOperand* slot, LOperand* vector) {
-    inputs_[0] = context;
-    inputs_[1] = object;
-    inputs_[2] = value;
-    temps_[0] = slot;
-    temps_[1] = vector;
-  }
-
-  LOperand* context() { return inputs_[0]; }
-  LOperand* object() { return inputs_[1]; }
-  LOperand* value() { return inputs_[2]; }
-  LOperand* temp_slot() { return temps_[0]; }
-  LOperand* temp_vector() { return temps_[1]; }
-
-  DECLARE_CONCRETE_INSTRUCTION(StoreNamedGeneric, "store-named-generic")
-  DECLARE_HYDROGEN_ACCESSOR(StoreNamedGeneric)
-
-  void PrintDataTo(StringStream* stream) override;
-
-  Handle<Object> name() const { return hydrogen()->name(); }
-  LanguageMode language_mode() { return hydrogen()->language_mode(); }
-};
-
 class LStoreKeyed final : public LTemplateInstruction<0, 4, 0> {
  public:
   LStoreKeyed(LOperand* object, LOperand* key, LOperand* value,
@@ -1881,33 +1853,6 @@
   uint32_t base_offset() const { return hydrogen()->base_offset(); }
 };
 
-class LStoreKeyedGeneric final : public LTemplateInstruction<0, 4, 2> {
- public:
-  LStoreKeyedGeneric(LOperand* context, LOperand* object, LOperand* key,
-                     LOperand* value, LOperand* slot, LOperand* vector) {
-    inputs_[0] = context;
-    inputs_[1] = object;
-    inputs_[2] = key;
-    inputs_[3] = value;
-    temps_[0] = slot;
-    temps_[1] = vector;
-  }
-
-  LOperand* context() { return inputs_[0]; }
-  LOperand* object() { return inputs_[1]; }
-  LOperand* key() { return inputs_[2]; }
-  LOperand* value() { return inputs_[3]; }
-  LOperand* temp_slot() { return temps_[0]; }
-  LOperand* temp_vector() { return temps_[1]; }
-
-  DECLARE_CONCRETE_INSTRUCTION(StoreKeyedGeneric, "store-keyed-generic")
-  DECLARE_HYDROGEN_ACCESSOR(StoreKeyedGeneric)
-
-  void PrintDataTo(StringStream* stream) override;
-
-  LanguageMode language_mode() { return hydrogen()->language_mode(); }
-};
-
 class LTransitionElementsKind final : public LTemplateInstruction<0, 2, 1> {
  public:
   LTransitionElementsKind(LOperand* object, LOperand* context,
diff --git a/src/crankshaft/typing.cc b/src/crankshaft/typing.cc
index 5961838..d2b56e2 100644
--- a/src/crankshaft/typing.cc
+++ b/src/crankshaft/typing.cc
@@ -4,11 +4,12 @@
 
 #include "src/crankshaft/typing.h"
 
+#include "src/ast/compile-time-value.h"
 #include "src/ast/scopes.h"
-#include "src/frames.h"
+#include "src/ast/variables.h"
 #include "src/frames-inl.h"
+#include "src/frames.h"
 #include "src/ostreams.h"
-#include "src/parsing/parser.h"  // for CompileTimeValue; TODO(rossberg): move
 #include "src/splay-tree-inl.h"
 
 namespace v8 {
@@ -33,20 +34,20 @@
 
 
 #ifdef OBJECT_PRINT
-  static void PrintObserved(Variable* var, Object* value, Type* type) {
-    OFStream os(stdout);
-    os << "  observed " << (var->IsParameter() ? "param" : "local") << "  ";
-    var->name()->Print(os);
-    os << " : " << Brief(value) << " -> ";
-    type->PrintTo(os);
-    os << std::endl;
+static void PrintObserved(Variable* var, Object* value, AstType* type) {
+  OFStream os(stdout);
+  os << "  observed " << (var->IsParameter() ? "param" : "local") << "  ";
+  var->name()->Print(os);
+  os << " : " << Brief(value) << " -> ";
+  type->PrintTo(os);
+  os << std::endl;
   }
 #endif  // OBJECT_PRINT
 
 
 Effect AstTyper::ObservedOnStack(Object* value) {
-  Type* lower = Type::NowOf(value, zone());
-  return Effect(Bounds(lower, Type::Any()));
+  AstType* lower = AstType::NowOf(value, zone());
+  return Effect(AstBounds(lower, AstType::Any()));
 }
 
 
@@ -84,15 +85,16 @@
                     store_.LookupBounds(parameter_index(i)).lower);
     }
 
-    ZoneList<Variable*> local_vars(locals, zone());
-    ZoneList<Variable*> context_vars(scope_->ContextLocalCount(), zone());
-    ZoneList<Variable*> global_vars(scope_->ContextGlobalCount(), zone());
-    scope_->CollectStackAndContextLocals(&local_vars, &context_vars,
-                                         &global_vars);
-    for (int i = 0; i < locals; i++) {
-      PrintObserved(local_vars.at(i),
-                    frame->GetExpression(i),
-                    store_.LookupBounds(stack_local_index(i)).lower);
+    ZoneList<Variable*>* local_vars = scope_->locals();
+    int local_index = 0;
+    for (int i = 0; i < local_vars->length(); i++) {
+      Variable* var = local_vars->at(i);
+      if (var->IsStackLocal()) {
+        PrintObserved(
+            var, frame->GetExpression(local_index),
+            store_.LookupBounds(stack_local_index(local_index)).lower);
+        local_index++;
+      }
     }
   }
 #endif  // OBJECT_PRINT
@@ -205,11 +207,12 @@
     if (!clause->is_default()) {
       Expression* label = clause->label();
       // Collect type feedback.
-      Type* tag_type;
-      Type* label_type;
-      Type* combined_type;
+      AstType* tag_type;
+      AstType* label_type;
+      AstType* combined_type;
       oracle()->CompareType(clause->CompareId(),
-                            &tag_type, &label_type, &combined_type);
+                            clause->CompareOperationFeedbackSlot(), &tag_type,
+                            &label_type, &combined_type);
       NarrowLowerType(stmt->tag(), tag_type);
       NarrowLowerType(label, label_type);
       clause->set_compare_type(combined_type);
@@ -366,8 +369,8 @@
   store_.Seq(then_effects);
 
   NarrowType(expr,
-             Bounds::Either(bounds_->get(expr->then_expression()),
-                            bounds_->get(expr->else_expression()), zone()));
+             AstBounds::Either(bounds_->get(expr->then_expression()),
+                               bounds_->get(expr->else_expression()), zone()));
 }
 
 
@@ -380,14 +383,14 @@
 
 
 void AstTyper::VisitLiteral(Literal* expr) {
-  Type* type = Type::Constant(expr->value(), zone());
-  NarrowType(expr, Bounds(type));
+  AstType* type = AstType::Constant(expr->value(), zone());
+  NarrowType(expr, AstBounds(type));
 }
 
 
 void AstTyper::VisitRegExpLiteral(RegExpLiteral* expr) {
   // TODO(rossberg): Reintroduce RegExp type.
-  NarrowType(expr, Bounds(Type::Object()));
+  NarrowType(expr, AstBounds(AstType::Object()));
 }
 
 
@@ -415,7 +418,7 @@
     RECURSE(Visit(prop->value()));
   }
 
-  NarrowType(expr, Bounds(Type::Object()));
+  NarrowType(expr, AstBounds(AstType::Object()));
 }
 
 
@@ -426,7 +429,7 @@
     RECURSE(Visit(value));
   }
 
-  NarrowType(expr, Bounds(Type::Object()));
+  NarrowType(expr, AstBounds(AstType::Object()));
 }
 
 
@@ -479,7 +482,7 @@
   RECURSE(Visit(expr->exception()));
   // TODO(rossberg): is it worth having a non-termination effect?
 
-  NarrowType(expr, Bounds(Type::None()));
+  NarrowType(expr, AstBounds(AstType::None()));
 }
 
 
@@ -562,7 +565,7 @@
     RECURSE(Visit(arg));
   }
 
-  NarrowType(expr, Bounds(Type::None(), Type::Receiver()));
+  NarrowType(expr, AstBounds(AstType::None(), AstType::Receiver()));
 }
 
 
@@ -589,13 +592,13 @@
   switch (expr->op()) {
     case Token::NOT:
     case Token::DELETE:
-      NarrowType(expr, Bounds(Type::Boolean()));
+      NarrowType(expr, AstBounds(AstType::Boolean()));
       break;
     case Token::VOID:
-      NarrowType(expr, Bounds(Type::Undefined()));
+      NarrowType(expr, AstBounds(AstType::Undefined()));
       break;
     case Token::TYPEOF:
-      NarrowType(expr, Bounds(Type::InternalizedString()));
+      NarrowType(expr, AstBounds(AstType::InternalizedString()));
       break;
     default:
       UNREACHABLE();
@@ -612,12 +615,13 @@
   oracle()->CountReceiverTypes(slot, expr->GetReceiverTypes());
   expr->set_store_mode(store_mode);
   expr->set_key_type(key_type);
-  expr->set_type(oracle()->CountType(expr->CountBinOpFeedbackId()));
+  expr->set_type(oracle()->CountType(expr->CountBinOpFeedbackId(),
+                                     expr->CountBinaryOpFeedbackSlot()));
   // TODO(rossberg): merge the count type with the generic expression type.
 
   RECURSE(Visit(expr->expression()));
 
-  NarrowType(expr, Bounds(Type::SignedSmall(), Type::Number()));
+  NarrowType(expr, AstBounds(AstType::SignedSmall(), AstType::Number()));
 
   VariableProxy* proxy = expr->expression()->AsVariableProxy();
   if (proxy != NULL && proxy->var()->IsStackAllocated()) {
@@ -625,17 +629,18 @@
   }
 }
 
-
 void AstTyper::VisitBinaryOperation(BinaryOperation* expr) {
   // Collect type feedback.
-  Type* type;
-  Type* left_type;
-  Type* right_type;
+  AstType* type;
+  AstType* left_type;
+  AstType* right_type;
   Maybe<int> fixed_right_arg = Nothing<int>();
   Handle<AllocationSite> allocation_site;
   oracle()->BinaryType(expr->BinaryOperationFeedbackId(),
-      &left_type, &right_type, &type, &fixed_right_arg,
-      &allocation_site, expr->op());
+                       expr->BinaryOperationFeedbackSlot(), &left_type,
+                       &right_type, &type, &fixed_right_arg, &allocation_site,
+                       expr->op());
+
   NarrowLowerType(expr, type);
   NarrowLowerType(expr->left(), left_type);
   NarrowLowerType(expr->right(), right_type);
@@ -662,19 +667,21 @@
       left_effects.Alt(right_effects);
       store_.Seq(left_effects);
 
-      NarrowType(expr, Bounds::Either(bounds_->get(expr->left()),
-                                      bounds_->get(expr->right()), zone()));
+      NarrowType(expr, AstBounds::Either(bounds_->get(expr->left()),
+                                         bounds_->get(expr->right()), zone()));
       break;
     }
     case Token::BIT_OR:
     case Token::BIT_AND: {
       RECURSE(Visit(expr->left()));
       RECURSE(Visit(expr->right()));
-      Type* upper = Type::Union(bounds_->get(expr->left()).upper,
-                                bounds_->get(expr->right()).upper, zone());
-      if (!upper->Is(Type::Signed32())) upper = Type::Signed32();
-      Type* lower = Type::Intersect(Type::SignedSmall(), upper, zone());
-      NarrowType(expr, Bounds(lower, upper));
+      AstType* upper =
+          AstType::Union(bounds_->get(expr->left()).upper,
+                         bounds_->get(expr->right()).upper, zone());
+      if (!upper->Is(AstType::Signed32())) upper = AstType::Signed32();
+      AstType* lower =
+          AstType::Intersect(AstType::SignedSmall(), upper, zone());
+      NarrowType(expr, AstBounds(lower, upper));
       break;
     }
     case Token::BIT_XOR:
@@ -682,7 +689,7 @@
     case Token::SAR:
       RECURSE(Visit(expr->left()));
       RECURSE(Visit(expr->right()));
-      NarrowType(expr, Bounds(Type::SignedSmall(), Type::Signed32()));
+      NarrowType(expr, AstBounds(AstType::SignedSmall(), AstType::Signed32()));
       break;
     case Token::SHR:
       RECURSE(Visit(expr->left()));
@@ -690,28 +697,29 @@
       // TODO(rossberg): The upper bound would be Unsigned32, but since there
       // is no 'positive Smi' type for the lower bound, we use the smallest
       // union of Smi and Unsigned32 as upper bound instead.
-      NarrowType(expr, Bounds(Type::SignedSmall(), Type::Number()));
+      NarrowType(expr, AstBounds(AstType::SignedSmall(), AstType::Number()));
       break;
     case Token::ADD: {
       RECURSE(Visit(expr->left()));
       RECURSE(Visit(expr->right()));
-      Bounds l = bounds_->get(expr->left());
-      Bounds r = bounds_->get(expr->right());
-      Type* lower =
+      AstBounds l = bounds_->get(expr->left());
+      AstBounds r = bounds_->get(expr->right());
+      AstType* lower =
           !l.lower->IsInhabited() || !r.lower->IsInhabited()
-              ? Type::None()
-              : l.lower->Is(Type::String()) || r.lower->Is(Type::String())
-                    ? Type::String()
-                    : l.lower->Is(Type::Number()) && r.lower->Is(Type::Number())
-                          ? Type::SignedSmall()
-                          : Type::None();
-      Type* upper =
-          l.upper->Is(Type::String()) || r.upper->Is(Type::String())
-              ? Type::String()
-              : l.upper->Is(Type::Number()) && r.upper->Is(Type::Number())
-                    ? Type::Number()
-                    : Type::NumberOrString();
-      NarrowType(expr, Bounds(lower, upper));
+              ? AstType::None()
+              : l.lower->Is(AstType::String()) || r.lower->Is(AstType::String())
+                    ? AstType::String()
+                    : l.lower->Is(AstType::Number()) &&
+                              r.lower->Is(AstType::Number())
+                          ? AstType::SignedSmall()
+                          : AstType::None();
+      AstType* upper =
+          l.upper->Is(AstType::String()) || r.upper->Is(AstType::String())
+              ? AstType::String()
+              : l.upper->Is(AstType::Number()) && r.upper->Is(AstType::Number())
+                    ? AstType::Number()
+                    : AstType::NumberOrString();
+      NarrowType(expr, AstBounds(lower, upper));
       break;
     }
     case Token::SUB:
@@ -720,7 +728,7 @@
     case Token::MOD:
       RECURSE(Visit(expr->left()));
       RECURSE(Visit(expr->right()));
-      NarrowType(expr, Bounds(Type::SignedSmall(), Type::Number()));
+      NarrowType(expr, AstBounds(AstType::SignedSmall(), AstType::Number()));
       break;
     default:
       UNREACHABLE();
@@ -730,11 +738,12 @@
 
 void AstTyper::VisitCompareOperation(CompareOperation* expr) {
   // Collect type feedback.
-  Type* left_type;
-  Type* right_type;
-  Type* combined_type;
+  AstType* left_type;
+  AstType* right_type;
+  AstType* combined_type;
   oracle()->CompareType(expr->CompareOperationFeedbackId(),
-      &left_type, &right_type, &combined_type);
+                        expr->CompareOperationFeedbackSlot(), &left_type,
+                        &right_type, &combined_type);
   NarrowLowerType(expr->left(), left_type);
   NarrowLowerType(expr->right(), right_type);
   expr->set_combined_type(combined_type);
@@ -742,7 +751,7 @@
   RECURSE(Visit(expr->left()));
   RECURSE(Visit(expr->right()));
 
-  NarrowType(expr, Bounds(Type::Boolean()));
+  NarrowType(expr, AstBounds(AstType::Boolean()));
 }
 
 
@@ -767,6 +776,14 @@
   Visit(expr->expression());
 }
 
+int AstTyper::variable_index(Variable* var) {
+  // Stack locals have the range [0 .. l]
+  // Parameters have the range [-1 .. p]
+  // We map this to [-p-2 .. -1, 0 .. l]
+  return var->IsStackLocal()
+             ? stack_local_index(var->index())
+             : var->IsParameter() ? parameter_index(var->index()) : kNoVar;
+}
 
 void AstTyper::VisitDeclarations(ZoneList<Declaration*>* decls) {
   for (int i = 0; i < decls->length(); ++i) {
diff --git a/src/crankshaft/typing.h b/src/crankshaft/typing.h
index 94340c5..eb88634 100644
--- a/src/crankshaft/typing.h
+++ b/src/crankshaft/typing.h
@@ -7,16 +7,18 @@
 
 #include "src/allocation.h"
 #include "src/ast/ast-type-bounds.h"
-#include "src/ast/scopes.h"
+#include "src/ast/ast-types.h"
+#include "src/ast/ast.h"
 #include "src/ast/variables.h"
 #include "src/effects.h"
 #include "src/type-info.h"
-#include "src/types.h"
-#include "src/zone.h"
+#include "src/zone/zone.h"
 
 namespace v8 {
 namespace internal {
 
+class DeclarationScope;
+class Isolate;
 class FunctionLiteral;
 
 class AstTyper final : public AstVisitor<AstTyper> {
@@ -49,11 +51,11 @@
   Zone* zone() const { return zone_; }
   TypeFeedbackOracle* oracle() { return &oracle_; }
 
-  void NarrowType(Expression* e, Bounds b) {
-    bounds_->set(e, Bounds::Both(bounds_->get(e), b, zone()));
+  void NarrowType(Expression* e, AstBounds b) {
+    bounds_->set(e, AstBounds::Both(bounds_->get(e), b, zone()));
   }
-  void NarrowLowerType(Expression* e, Type* t) {
-    bounds_->set(e, Bounds::NarrowLower(bounds_->get(e), t, zone()));
+  void NarrowLowerType(Expression* e, AstType* t) {
+    bounds_->set(e, AstBounds::NarrowLower(bounds_->get(e), t, zone()));
   }
 
   Effects EnterEffects() {
@@ -65,13 +67,7 @@
   int parameter_index(int index) { return -index - 2; }
   int stack_local_index(int index) { return index; }
 
-  int variable_index(Variable* var) {
-    // Stack locals have the range [0 .. l]
-    // Parameters have the range [-1 .. p]
-    // We map this to [-p-2 .. -1, 0 .. l]
-    return var->IsStackLocal() ? stack_local_index(var->index()) :
-           var->IsParameter() ? parameter_index(var->index()) : kNoVar;
-  }
+  int variable_index(Variable* var);
 
   void VisitDeclarations(ZoneList<Declaration*>* declarations);
   void VisitStatements(ZoneList<Statement*>* statements);
diff --git a/src/crankshaft/unique.h b/src/crankshaft/unique.h
index 54abfa7..4c6a097 100644
--- a/src/crankshaft/unique.h
+++ b/src/crankshaft/unique.h
@@ -11,7 +11,7 @@
 #include "src/base/functional.h"
 #include "src/handles.h"
 #include "src/utils.h"
-#include "src/zone.h"
+#include "src/zone/zone.h"
 
 namespace v8 {
 namespace internal {
diff --git a/src/crankshaft/x64/lithium-codegen-x64.cc b/src/crankshaft/x64/lithium-codegen-x64.cc
index 66046a4..50e2aa0 100644
--- a/src/crankshaft/x64/lithium-codegen-x64.cc
+++ b/src/crankshaft/x64/lithium-codegen-x64.cc
@@ -167,7 +167,7 @@
   Comment(";;; Prologue begin");
 
   // Possibly allocate a local context.
-  if (info_->scope()->num_heap_slots() > 0) {
+  if (info_->scope()->NeedsContext()) {
     Comment(";;; Allocate local context");
     bool need_write_barrier = true;
     // Argument to NewContext is the function, which is still in rdi.
@@ -175,7 +175,7 @@
     Safepoint::DeoptMode deopt_mode = Safepoint::kNoLazyDeopt;
     if (info()->scope()->is_script_scope()) {
       __ Push(rdi);
-      __ Push(info()->scope()->GetScopeInfo(info()->isolate()));
+      __ Push(info()->scope()->scope_info());
       __ CallRuntime(Runtime::kNewScriptContext);
       deopt_mode = Safepoint::kLazyDeopt;
     } else {
@@ -2539,20 +2539,6 @@
 }
 
 
-template <class T>
-void LCodeGen::EmitVectorStoreICRegisters(T* instr) {
-  Register vector_register = ToRegister(instr->temp_vector());
-  Register slot_register = ToRegister(instr->temp_slot());
-
-  AllowDeferredHandleDereference vector_structure_check;
-  Handle<TypeFeedbackVector> vector = instr->hydrogen()->feedback_vector();
-  __ Move(vector_register, vector);
-  FeedbackVectorSlot slot = instr->hydrogen()->slot();
-  int index = vector->GetIndex(slot);
-  __ Move(slot_register, Smi::FromInt(index));
-}
-
-
 void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) {
   DCHECK(ToRegister(instr->context()).is(rsi));
   DCHECK(ToRegister(instr->result()).is(rax));
@@ -3902,21 +3888,6 @@
 }
 
 
-void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) {
-  DCHECK(ToRegister(instr->context()).is(rsi));
-  DCHECK(ToRegister(instr->object()).is(StoreDescriptor::ReceiverRegister()));
-  DCHECK(ToRegister(instr->value()).is(StoreDescriptor::ValueRegister()));
-
-  EmitVectorStoreICRegisters<LStoreNamedGeneric>(instr);
-
-  __ Move(StoreDescriptor::NameRegister(), instr->hydrogen()->name());
-  Handle<Code> ic =
-      CodeFactory::StoreICInOptimizedCode(isolate(), instr->language_mode())
-          .code();
-  CallCode(ic, RelocInfo::CODE_TARGET, instr);
-}
-
-
 void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) {
   Representation representation = instr->hydrogen()->length()->representation();
   DCHECK(representation.Equals(instr->hydrogen()->index()->representation()));
@@ -4158,21 +4129,6 @@
 }
 
 
-void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) {
-  DCHECK(ToRegister(instr->context()).is(rsi));
-  DCHECK(ToRegister(instr->object()).is(StoreDescriptor::ReceiverRegister()));
-  DCHECK(ToRegister(instr->key()).is(StoreDescriptor::NameRegister()));
-  DCHECK(ToRegister(instr->value()).is(StoreDescriptor::ValueRegister()));
-
-  EmitVectorStoreICRegisters<LStoreKeyedGeneric>(instr);
-
-  Handle<Code> ic = CodeFactory::KeyedStoreICInOptimizedCode(
-                        isolate(), instr->language_mode())
-                        .code();
-  CallCode(ic, RelocInfo::CODE_TARGET, instr);
-}
-
-
 void LCodeGen::DoMaybeGrowElements(LMaybeGrowElements* instr) {
   class DeferredMaybeGrowElements final : public LDeferredCode {
    public:
@@ -5110,7 +5066,7 @@
 
   if (instr->size()->IsConstantOperand()) {
     int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
-    CHECK(size <= Page::kMaxRegularHeapObjectSize);
+    CHECK(size <= kMaxRegularHeapObjectSize);
     __ Allocate(size, result, temp, no_reg, deferred->entry(), flags);
   } else {
     Register size = ToRegister(instr->size());
@@ -5153,7 +5109,7 @@
   }
   if (instr->size()->IsConstantOperand()) {
     int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
-    CHECK(size <= Page::kMaxRegularHeapObjectSize);
+    CHECK(size <= kMaxRegularHeapObjectSize);
     __ FastAllocate(size, result, temp, flags);
   } else {
     Register size = ToRegister(instr->size());
diff --git a/src/crankshaft/x64/lithium-codegen-x64.h b/src/crankshaft/x64/lithium-codegen-x64.h
index 22c39ad..22a32a1 100644
--- a/src/crankshaft/x64/lithium-codegen-x64.h
+++ b/src/crankshaft/x64/lithium-codegen-x64.h
@@ -297,8 +297,6 @@
 
   template <class T>
   void EmitVectorLoadICRegisters(T* instr);
-  template <class T>
-  void EmitVectorStoreICRegisters(T* instr);
 
 #ifdef _MSC_VER
   // On windows, you may not access the stack more than one page below
diff --git a/src/crankshaft/x64/lithium-x64.cc b/src/crankshaft/x64/lithium-x64.cc
index 4245169..18fb5d4 100644
--- a/src/crankshaft/x64/lithium-x64.cc
+++ b/src/crankshaft/x64/lithium-x64.cc
@@ -348,15 +348,6 @@
 }
 
 
-void LStoreNamedGeneric::PrintDataTo(StringStream* stream) {
-  object()->PrintTo(stream);
-  stream->Add(".");
-  stream->Add(String::cast(*name())->ToCString().get());
-  stream->Add(" <- ");
-  value()->PrintTo(stream);
-}
-
-
 void LLoadKeyed::PrintDataTo(StringStream* stream) {
   elements()->PrintTo(stream);
   stream->Add("[");
@@ -389,15 +380,6 @@
 }
 
 
-void LStoreKeyedGeneric::PrintDataTo(StringStream* stream) {
-  object()->PrintTo(stream);
-  stream->Add("[");
-  key()->PrintTo(stream);
-  stream->Add("] <- ");
-  value()->PrintTo(stream);
-}
-
-
 void LTransitionElementsKind::PrintDataTo(StringStream* stream) {
   object()->PrintTo(stream);
   stream->Add(" %p -> %p", *original_map(), *transitioned_map());
@@ -907,7 +889,7 @@
 
 LInstruction* LChunkBuilder::DoPrologue(HPrologue* instr) {
   LInstruction* result = new (zone()) LPrologue();
-  if (info_->scope()->num_heap_slots() > 0) {
+  if (info_->scope()->NeedsContext()) {
     result = MarkAsCall(result, instr);
   }
   return result;
@@ -1042,6 +1024,9 @@
 LInstruction* LChunkBuilder::DoCallWithDescriptor(
     HCallWithDescriptor* instr) {
   CallInterfaceDescriptor descriptor = instr->descriptor();
+  DCHECK_EQ(descriptor.GetParameterCount() +
+                LCallWithDescriptor::kImplicitRegisterParameterCount,
+            instr->OperandCount());
 
   LOperand* target = UseRegisterOrConstantAtStart(instr->target());
   ZoneList<LOperand*> ops(instr->OperandCount(), zone());
@@ -1050,15 +1035,20 @@
   // Context
   LOperand* op = UseFixed(instr->OperandAt(1), rsi);
   ops.Add(op, zone());
-  // Other register parameters
-  for (int i = LCallWithDescriptor::kImplicitRegisterParameterCount;
-       i < instr->OperandCount(); i++) {
-    op =
-        UseFixed(instr->OperandAt(i),
-                 descriptor.GetRegisterParameter(
-                     i - LCallWithDescriptor::kImplicitRegisterParameterCount));
+  // Load register parameters.
+  int i = 0;
+  for (; i < descriptor.GetRegisterParameterCount(); i++) {
+    op = UseFixed(instr->OperandAt(
+                      i + LCallWithDescriptor::kImplicitRegisterParameterCount),
+                  descriptor.GetRegisterParameter(i));
     ops.Add(op, zone());
   }
+  // Push stack parameters.
+  for (; i < descriptor.GetParameterCount(); i++) {
+    op = UseAny(instr->OperandAt(
+        i + LCallWithDescriptor::kImplicitRegisterParameterCount));
+    AddInstruction(new (zone()) LPushArgument(op), instr);
+  }
 
   LCallWithDescriptor* result = new(zone()) LCallWithDescriptor(
       descriptor, ops, zone());
@@ -2223,26 +2213,6 @@
 }
 
 
-LInstruction* LChunkBuilder::DoStoreKeyedGeneric(HStoreKeyedGeneric* instr) {
-  LOperand* context = UseFixed(instr->context(), rsi);
-  LOperand* object =
-      UseFixed(instr->object(), StoreDescriptor::ReceiverRegister());
-  LOperand* key = UseFixed(instr->key(), StoreDescriptor::NameRegister());
-  LOperand* value = UseFixed(instr->value(), StoreDescriptor::ValueRegister());
-
-  DCHECK(instr->object()->representation().IsTagged());
-  DCHECK(instr->key()->representation().IsTagged());
-  DCHECK(instr->value()->representation().IsTagged());
-
-  LOperand* slot = FixedTemp(StoreWithVectorDescriptor::SlotRegister());
-  LOperand* vector = FixedTemp(StoreWithVectorDescriptor::VectorRegister());
-
-  LStoreKeyedGeneric* result = new (zone())
-      LStoreKeyedGeneric(context, object, key, value, slot, vector);
-  return MarkAsCall(result, instr);
-}
-
-
 LInstruction* LChunkBuilder::DoTransitionElementsKind(
     HTransitionElementsKind* instr) {
   if (IsSimpleMapChangeTransition(instr->from_kind(), instr->to_kind())) {
@@ -2337,20 +2307,6 @@
 }
 
 
-LInstruction* LChunkBuilder::DoStoreNamedGeneric(HStoreNamedGeneric* instr) {
-  LOperand* context = UseFixed(instr->context(), rsi);
-  LOperand* object =
-      UseFixed(instr->object(), StoreDescriptor::ReceiverRegister());
-  LOperand* value = UseFixed(instr->value(), StoreDescriptor::ValueRegister());
-  LOperand* slot = FixedTemp(StoreWithVectorDescriptor::SlotRegister());
-  LOperand* vector = FixedTemp(StoreWithVectorDescriptor::VectorRegister());
-
-  LStoreNamedGeneric* result =
-      new (zone()) LStoreNamedGeneric(context, object, value, slot, vector);
-  return MarkAsCall(result, instr);
-}
-
-
 LInstruction* LChunkBuilder::DoStringAdd(HStringAdd* instr) {
   LOperand* context = UseFixed(instr->context(), rsi);
   LOperand* left = UseFixed(instr->left(), rdx);
diff --git a/src/crankshaft/x64/lithium-x64.h b/src/crankshaft/x64/lithium-x64.h
index 5c0ce04..e7eaa01 100644
--- a/src/crankshaft/x64/lithium-x64.h
+++ b/src/crankshaft/x64/lithium-x64.h
@@ -132,9 +132,7 @@
   V(StoreCodeEntry)                          \
   V(StoreContextSlot)                        \
   V(StoreKeyed)                              \
-  V(StoreKeyedGeneric)                       \
   V(StoreNamedField)                         \
-  V(StoreNamedGeneric)                       \
   V(StringAdd)                               \
   V(StringCharCodeAt)                        \
   V(StringCharFromCode)                      \
@@ -2013,33 +2011,6 @@
 };
 
 
-class LStoreNamedGeneric final : public LTemplateInstruction<0, 3, 2> {
- public:
-  LStoreNamedGeneric(LOperand* context, LOperand* object, LOperand* value,
-                     LOperand* slot, LOperand* vector) {
-    inputs_[0] = context;
-    inputs_[1] = object;
-    inputs_[2] = value;
-    temps_[0] = slot;
-    temps_[1] = vector;
-  }
-
-  LOperand* context() { return inputs_[0]; }
-  LOperand* object() { return inputs_[1]; }
-  LOperand* value() { return inputs_[2]; }
-  LOperand* temp_slot() { return temps_[0]; }
-  LOperand* temp_vector() { return temps_[1]; }
-
-  DECLARE_CONCRETE_INSTRUCTION(StoreNamedGeneric, "store-named-generic")
-  DECLARE_HYDROGEN_ACCESSOR(StoreNamedGeneric)
-
-  void PrintDataTo(StringStream* stream) override;
-
-  Handle<Object> name() const { return hydrogen()->name(); }
-  LanguageMode language_mode() { return hydrogen()->language_mode(); }
-};
-
-
 class LStoreKeyed final : public LTemplateInstruction<0, 4, 0> {
  public:
   LStoreKeyed(LOperand* object, LOperand* key, LOperand* value,
@@ -2068,34 +2039,6 @@
 };
 
 
-class LStoreKeyedGeneric final : public LTemplateInstruction<0, 4, 2> {
- public:
-  LStoreKeyedGeneric(LOperand* context, LOperand* object, LOperand* key,
-                     LOperand* value, LOperand* slot, LOperand* vector) {
-    inputs_[0] = context;
-    inputs_[1] = object;
-    inputs_[2] = key;
-    inputs_[3] = value;
-    temps_[0] = slot;
-    temps_[1] = vector;
-  }
-
-  LOperand* context() { return inputs_[0]; }
-  LOperand* object() { return inputs_[1]; }
-  LOperand* key() { return inputs_[2]; }
-  LOperand* value() { return inputs_[3]; }
-  LOperand* temp_slot() { return temps_[0]; }
-  LOperand* temp_vector() { return temps_[1]; }
-
-  DECLARE_CONCRETE_INSTRUCTION(StoreKeyedGeneric, "store-keyed-generic")
-  DECLARE_HYDROGEN_ACCESSOR(StoreKeyedGeneric)
-
-  void PrintDataTo(StringStream* stream) override;
-
-  LanguageMode language_mode() { return hydrogen()->language_mode(); }
-};
-
-
 class LTransitionElementsKind final : public LTemplateInstruction<0, 2, 2> {
  public:
   LTransitionElementsKind(LOperand* object,
diff --git a/src/crankshaft/x87/lithium-codegen-x87.cc b/src/crankshaft/x87/lithium-codegen-x87.cc
index 1a42d5b..2d597d4 100644
--- a/src/crankshaft/x87/lithium-codegen-x87.cc
+++ b/src/crankshaft/x87/lithium-codegen-x87.cc
@@ -134,7 +134,7 @@
   Comment(";;; Prologue begin");
 
   // Possibly allocate a local context.
-  if (info_->scope()->num_heap_slots() > 0) {
+  if (info_->scope()->NeedsContext()) {
     Comment(";;; Allocate local context");
     bool need_write_barrier = true;
     // Argument to NewContext is the function, which is still in edi.
@@ -142,7 +142,7 @@
     Safepoint::DeoptMode deopt_mode = Safepoint::kNoLazyDeopt;
     if (info()->scope()->is_script_scope()) {
       __ push(edi);
-      __ Push(info()->scope()->GetScopeInfo(info()->isolate()));
+      __ Push(info()->scope()->scope_info());
       __ CallRuntime(Runtime::kNewScriptContext);
       deopt_mode = Safepoint::kLazyDeopt;
     } else {
@@ -2681,20 +2681,6 @@
 }
 
 
-template <class T>
-void LCodeGen::EmitVectorStoreICRegisters(T* instr) {
-  Register vector_register = ToRegister(instr->temp_vector());
-  Register slot_register = ToRegister(instr->temp_slot());
-
-  AllowDeferredHandleDereference vector_structure_check;
-  Handle<TypeFeedbackVector> vector = instr->hydrogen()->feedback_vector();
-  __ mov(vector_register, vector);
-  FeedbackVectorSlot slot = instr->hydrogen()->slot();
-  int index = vector->GetIndex(slot);
-  __ mov(slot_register, Immediate(Smi::FromInt(index)));
-}
-
-
 void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) {
   DCHECK(ToRegister(instr->context()).is(esi));
   DCHECK(ToRegister(instr->result()).is(eax));
@@ -3703,7 +3689,9 @@
   __ PrepareCallCFunction(2, eax);
   __ fstp_d(MemOperand(esp, 0));
   X87PrepareToWrite(result);
+  __ X87SetFPUCW(0x027F);
   __ CallCFunction(ExternalReference::ieee754_cos_function(isolate()), 2);
+  __ X87SetFPUCW(0x037F);
   // Return value is in st(0) on ia32.
   X87CommitWrite(result);
 }
@@ -3717,7 +3705,9 @@
   __ PrepareCallCFunction(2, eax);
   __ fstp_d(MemOperand(esp, 0));
   X87PrepareToWrite(result);
+  __ X87SetFPUCW(0x027F);
   __ CallCFunction(ExternalReference::ieee754_sin_function(isolate()), 2);
+  __ X87SetFPUCW(0x037F);
   // Return value is in st(0) on ia32.
   X87CommitWrite(result);
 }
@@ -3976,21 +3966,6 @@
 }
 
 
-void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) {
-  DCHECK(ToRegister(instr->context()).is(esi));
-  DCHECK(ToRegister(instr->object()).is(StoreDescriptor::ReceiverRegister()));
-  DCHECK(ToRegister(instr->value()).is(StoreDescriptor::ValueRegister()));
-
-  EmitVectorStoreICRegisters<LStoreNamedGeneric>(instr);
-
-  __ mov(StoreDescriptor::NameRegister(), instr->name());
-  Handle<Code> ic =
-      CodeFactory::StoreICInOptimizedCode(isolate(), instr->language_mode())
-          .code();
-  CallCode(ic, RelocInfo::CODE_TARGET, instr);
-}
-
-
 void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) {
   Condition cc = instr->hydrogen()->allow_equality() ? above : above_equal;
   if (instr->index()->IsConstantOperand()) {
@@ -4199,21 +4174,6 @@
 }
 
 
-void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) {
-  DCHECK(ToRegister(instr->context()).is(esi));
-  DCHECK(ToRegister(instr->object()).is(StoreDescriptor::ReceiverRegister()));
-  DCHECK(ToRegister(instr->key()).is(StoreDescriptor::NameRegister()));
-  DCHECK(ToRegister(instr->value()).is(StoreDescriptor::ValueRegister()));
-
-  EmitVectorStoreICRegisters<LStoreKeyedGeneric>(instr);
-
-  Handle<Code> ic = CodeFactory::KeyedStoreICInOptimizedCode(
-                        isolate(), instr->language_mode())
-                        .code();
-  CallCode(ic, RelocInfo::CODE_TARGET, instr);
-}
-
-
 void LCodeGen::DoTrapAllocationMemento(LTrapAllocationMemento* instr) {
   Register object = ToRegister(instr->object());
   Register temp = ToRegister(instr->temp());
@@ -5315,7 +5275,7 @@
 
   if (instr->size()->IsConstantOperand()) {
     int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
-    CHECK(size <= Page::kMaxRegularHeapObjectSize);
+    CHECK(size <= kMaxRegularHeapObjectSize);
     __ Allocate(size, result, temp, no_reg, deferred->entry(), flags);
   } else {
     Register size = ToRegister(instr->size());
@@ -5358,7 +5318,7 @@
   }
   if (instr->size()->IsConstantOperand()) {
     int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
-    CHECK(size <= Page::kMaxRegularHeapObjectSize);
+    CHECK(size <= kMaxRegularHeapObjectSize);
     __ FastAllocate(size, result, temp, flags);
   } else {
     Register size = ToRegister(instr->size());
diff --git a/src/crankshaft/x87/lithium-codegen-x87.h b/src/crankshaft/x87/lithium-codegen-x87.h
index cdf02f3..850f330 100644
--- a/src/crankshaft/x87/lithium-codegen-x87.h
+++ b/src/crankshaft/x87/lithium-codegen-x87.h
@@ -323,8 +323,6 @@
 
   template <class T>
   void EmitVectorLoadICRegisters(T* instr);
-  template <class T>
-  void EmitVectorStoreICRegisters(T* instr);
 
   void EmitReturn(LReturn* instr);
 
diff --git a/src/crankshaft/x87/lithium-x87.cc b/src/crankshaft/x87/lithium-x87.cc
index f614b93..a319c0c 100644
--- a/src/crankshaft/x87/lithium-x87.cc
+++ b/src/crankshaft/x87/lithium-x87.cc
@@ -362,15 +362,6 @@
 }
 
 
-void LStoreNamedGeneric::PrintDataTo(StringStream* stream) {
-  object()->PrintTo(stream);
-  stream->Add(".");
-  stream->Add(String::cast(*name())->ToCString().get());
-  stream->Add(" <- ");
-  value()->PrintTo(stream);
-}
-
-
 void LLoadKeyed::PrintDataTo(StringStream* stream) {
   elements()->PrintTo(stream);
   stream->Add("[");
@@ -403,15 +394,6 @@
 }
 
 
-void LStoreKeyedGeneric::PrintDataTo(StringStream* stream) {
-  object()->PrintTo(stream);
-  stream->Add("[");
-  key()->PrintTo(stream);
-  stream->Add("] <- ");
-  value()->PrintTo(stream);
-}
-
-
 void LTransitionElementsKind::PrintDataTo(StringStream* stream) {
   object()->PrintTo(stream);
   stream->Add(" %p -> %p", *original_map(), *transitioned_map());
@@ -925,7 +907,7 @@
 
 LInstruction* LChunkBuilder::DoPrologue(HPrologue* instr) {
   LInstruction* result = new (zone()) LPrologue();
-  if (info_->scope()->num_heap_slots() > 0) {
+  if (info_->scope()->NeedsContext()) {
     result = MarkAsCall(result, instr);
   }
   return result;
@@ -1071,6 +1053,10 @@
 LInstruction* LChunkBuilder::DoCallWithDescriptor(
     HCallWithDescriptor* instr) {
   CallInterfaceDescriptor descriptor = instr->descriptor();
+  DCHECK_EQ(descriptor.GetParameterCount() +
+                LCallWithDescriptor::kImplicitRegisterParameterCount,
+            instr->OperandCount());
+
   LOperand* target = UseRegisterOrConstantAtStart(instr->target());
   ZoneList<LOperand*> ops(instr->OperandCount(), zone());
   // Target
@@ -1078,15 +1064,20 @@
   // Context
   LOperand* op = UseFixed(instr->OperandAt(1), esi);
   ops.Add(op, zone());
-  // Other register parameters
-  for (int i = LCallWithDescriptor::kImplicitRegisterParameterCount;
-       i < instr->OperandCount(); i++) {
-    op =
-        UseFixed(instr->OperandAt(i),
-                 descriptor.GetRegisterParameter(
-                     i - LCallWithDescriptor::kImplicitRegisterParameterCount));
+  // Load register parameters.
+  int i = 0;
+  for (; i < descriptor.GetRegisterParameterCount(); i++) {
+    op = UseFixed(instr->OperandAt(
+                      i + LCallWithDescriptor::kImplicitRegisterParameterCount),
+                  descriptor.GetRegisterParameter(i));
     ops.Add(op, zone());
   }
+  // Push stack parameters.
+  for (; i < descriptor.GetParameterCount(); i++) {
+    op = UseAny(instr->OperandAt(
+        i + LCallWithDescriptor::kImplicitRegisterParameterCount));
+    AddInstruction(new (zone()) LPushArgument(op), instr);
+  }
 
   LCallWithDescriptor* result = new(zone()) LCallWithDescriptor(
       descriptor, ops, zone());
@@ -2213,26 +2204,6 @@
 }
 
 
-LInstruction* LChunkBuilder::DoStoreKeyedGeneric(HStoreKeyedGeneric* instr) {
-  LOperand* context = UseFixed(instr->context(), esi);
-  LOperand* object =
-      UseFixed(instr->object(), StoreDescriptor::ReceiverRegister());
-  LOperand* key = UseFixed(instr->key(), StoreDescriptor::NameRegister());
-  LOperand* value = UseFixed(instr->value(), StoreDescriptor::ValueRegister());
-
-  DCHECK(instr->object()->representation().IsTagged());
-  DCHECK(instr->key()->representation().IsTagged());
-  DCHECK(instr->value()->representation().IsTagged());
-
-  LOperand* slot = FixedTemp(StoreWithVectorDescriptor::SlotRegister());
-  LOperand* vector = FixedTemp(StoreWithVectorDescriptor::VectorRegister());
-
-  LStoreKeyedGeneric* result = new (zone())
-      LStoreKeyedGeneric(context, object, key, value, slot, vector);
-  return MarkAsCall(result, instr);
-}
-
-
 LInstruction* LChunkBuilder::DoTransitionElementsKind(
     HTransitionElementsKind* instr) {
   if (IsSimpleMapChangeTransition(instr->from_kind(), instr->to_kind())) {
@@ -2334,20 +2305,6 @@
 }
 
 
-LInstruction* LChunkBuilder::DoStoreNamedGeneric(HStoreNamedGeneric* instr) {
-  LOperand* context = UseFixed(instr->context(), esi);
-  LOperand* object =
-      UseFixed(instr->object(), StoreDescriptor::ReceiverRegister());
-  LOperand* value = UseFixed(instr->value(), StoreDescriptor::ValueRegister());
-  LOperand* slot = FixedTemp(StoreWithVectorDescriptor::SlotRegister());
-  LOperand* vector = FixedTemp(StoreWithVectorDescriptor::VectorRegister());
-
-  LStoreNamedGeneric* result =
-      new (zone()) LStoreNamedGeneric(context, object, value, slot, vector);
-  return MarkAsCall(result, instr);
-}
-
-
 LInstruction* LChunkBuilder::DoStringAdd(HStringAdd* instr) {
   LOperand* context = UseFixed(instr->context(), esi);
   LOperand* left = UseFixed(instr->left(), edx);
diff --git a/src/crankshaft/x87/lithium-x87.h b/src/crankshaft/x87/lithium-x87.h
index 3ef8f75..e2b8043 100644
--- a/src/crankshaft/x87/lithium-x87.h
+++ b/src/crankshaft/x87/lithium-x87.h
@@ -135,9 +135,7 @@
   V(StoreCodeEntry)                          \
   V(StoreContextSlot)                        \
   V(StoreKeyed)                              \
-  V(StoreKeyedGeneric)                       \
   V(StoreNamedField)                         \
-  V(StoreNamedGeneric)                       \
   V(StringAdd)                               \
   V(StringCharCodeAt)                        \
   V(StringCharFromCode)                      \
@@ -2008,32 +2006,6 @@
 };
 
 
-class LStoreNamedGeneric final : public LTemplateInstruction<0, 3, 2> {
- public:
-  LStoreNamedGeneric(LOperand* context, LOperand* object, LOperand* value,
-                     LOperand* slot, LOperand* vector) {
-    inputs_[0] = context;
-    inputs_[1] = object;
-    inputs_[2] = value;
-    temps_[0] = slot;
-    temps_[1] = vector;
-  }
-
-  LOperand* context() { return inputs_[0]; }
-  LOperand* object() { return inputs_[1]; }
-  LOperand* value() { return inputs_[2]; }
-  LOperand* temp_slot() { return temps_[0]; }
-  LOperand* temp_vector() { return temps_[1]; }
-
-  DECLARE_CONCRETE_INSTRUCTION(StoreNamedGeneric, "store-named-generic")
-  DECLARE_HYDROGEN_ACCESSOR(StoreNamedGeneric)
-
-  void PrintDataTo(StringStream* stream) override;
-  Handle<Object> name() const { return hydrogen()->name(); }
-  LanguageMode language_mode() { return hydrogen()->language_mode(); }
-};
-
-
 class LStoreKeyed final : public LTemplateInstruction<0, 4, 0> {
  public:
   LStoreKeyed(LOperand* obj, LOperand* key, LOperand* val,
@@ -2064,34 +2036,6 @@
 };
 
 
-class LStoreKeyedGeneric final : public LTemplateInstruction<0, 4, 2> {
- public:
-  LStoreKeyedGeneric(LOperand* context, LOperand* object, LOperand* key,
-                     LOperand* value, LOperand* slot, LOperand* vector) {
-    inputs_[0] = context;
-    inputs_[1] = object;
-    inputs_[2] = key;
-    inputs_[3] = value;
-    temps_[0] = slot;
-    temps_[1] = vector;
-  }
-
-  LOperand* context() { return inputs_[0]; }
-  LOperand* object() { return inputs_[1]; }
-  LOperand* key() { return inputs_[2]; }
-  LOperand* value() { return inputs_[3]; }
-  LOperand* temp_slot() { return temps_[0]; }
-  LOperand* temp_vector() { return temps_[1]; }
-
-  DECLARE_CONCRETE_INSTRUCTION(StoreKeyedGeneric, "store-keyed-generic")
-  DECLARE_HYDROGEN_ACCESSOR(StoreKeyedGeneric)
-
-  void PrintDataTo(StringStream* stream) override;
-
-  LanguageMode language_mode() { return hydrogen()->language_mode(); }
-};
-
-
 class LTransitionElementsKind final : public LTemplateInstruction<0, 2, 2> {
  public:
   LTransitionElementsKind(LOperand* object,
diff --git a/src/d8.cc b/src/d8.cc
index a8af9de..01801f8 100644
--- a/src/d8.cc
+++ b/src/d8.cc
@@ -2,30 +2,16 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
-
-// Defined when linking against shared lib on Windows.
-#if defined(USING_V8_SHARED) && !defined(V8_SHARED)
-#define V8_SHARED
-#endif
-
 #include <errno.h>
 #include <stdlib.h>
 #include <string.h>
 #include <sys/stat.h>
 
-#ifdef V8_SHARED
-#include <assert.h>
-#endif  // V8_SHARED
-
-#ifndef V8_SHARED
 #include <algorithm>
 #include <fstream>
+#include <map>
+#include <utility>
 #include <vector>
-#endif  // !V8_SHARED
-
-#ifdef V8_SHARED
-#include "include/v8-testing.h"
-#endif  // V8_SHARED
 
 #ifdef ENABLE_VTUNE_JIT_INTERFACE
 #include "src/third_party/vtune/v8-vtune.h"
@@ -36,7 +22,6 @@
 
 #include "include/libplatform/libplatform.h"
 #include "include/libplatform/v8-tracing.h"
-#ifndef V8_SHARED
 #include "src/api.h"
 #include "src/base/cpu.h"
 #include "src/base/debug/stack_trace.h"
@@ -48,7 +33,6 @@
 #include "src/snapshot/natives.h"
 #include "src/utils.h"
 #include "src/v8.h"
-#endif  // !V8_SHARED
 
 #if !defined(_WIN32) && !defined(_WIN64)
 #include <unistd.h>  // NOLINT
@@ -72,9 +56,7 @@
 namespace {
 
 const int MB = 1024 * 1024;
-#ifndef V8_SHARED
 const int kMaxWorkers = 50;
-#endif
 
 
 class ShellArrayBufferAllocator : public v8::ArrayBuffer::Allocator {
@@ -102,7 +84,6 @@
 };
 
 
-#ifndef V8_SHARED
 // Predictable v8::Platform implementation. All background and foreground
 // tasks are run immediately, delayed tasks are not executed at all.
 class PredictablePlatform : public Platform {
@@ -136,6 +117,7 @@
     return synthetic_time_in_sec_ += 0.00001;
   }
 
+  using Platform::AddTraceEvent;
   uint64_t AddTraceEvent(char phase, const uint8_t* categoryEnabledFlag,
                          const char* name, const char* scope, uint64_t id,
                          uint64_t bind_id, int numArgs, const char** argNames,
@@ -163,7 +145,6 @@
 
   DISALLOW_COPY_AND_ASSIGN(PredictablePlatform);
 };
-#endif  // !V8_SHARED
 
 
 v8::Platform* g_platform = NULL;
@@ -176,7 +157,6 @@
 }
 
 
-#ifndef V8_SHARED
 bool FindInObjectList(Local<Object> object, const Shell::ObjectList& list) {
   for (int i = 0; i < list.length(); ++i) {
     if (list[i]->StrictEquals(object)) {
@@ -202,7 +182,6 @@
 
   return worker;
 }
-#endif  // !V8_SHARED
 
 
 }  // namespace
@@ -370,7 +349,6 @@
 };
 
 
-#ifndef V8_SHARED
 CounterMap* Shell::counter_map_;
 base::OS::MemoryMappedFile* Shell::counters_file_ = NULL;
 CounterCollection Shell::local_counters_;
@@ -383,20 +361,17 @@
 bool Shell::allow_new_workers_ = true;
 i::List<Worker*> Shell::workers_;
 i::List<SharedArrayBuffer::Contents> Shell::externalized_shared_contents_;
-#endif  // !V8_SHARED
 
 Global<Context> Shell::evaluation_context_;
 ArrayBuffer::Allocator* Shell::array_buffer_allocator;
 ShellOptions Shell::options;
 base::OnceType Shell::quit_once_ = V8_ONCE_INIT;
 
-#ifndef V8_SHARED
 bool CounterMap::Match(void* key1, void* key2) {
   const char* name1 = reinterpret_cast<const char*>(key1);
   const char* name2 = reinterpret_cast<const char*>(key2);
   return strcmp(name1, name2) == 0;
 }
-#endif  // !V8_SHARED
 
 
 // Converts a V8 value to a C string.
@@ -460,18 +435,12 @@
 // Compile a string within the current v8 context.
 MaybeLocal<Script> Shell::CompileString(
     Isolate* isolate, Local<String> source, Local<Value> name,
-    ScriptCompiler::CompileOptions compile_options, SourceType source_type) {
+    ScriptCompiler::CompileOptions compile_options) {
   Local<Context> context(isolate->GetCurrentContext());
   ScriptOrigin origin(name);
-  // TODO(adamk): Make use of compile options for Modules.
-  if (compile_options == ScriptCompiler::kNoCompileOptions ||
-      source_type == MODULE) {
+  if (compile_options == ScriptCompiler::kNoCompileOptions) {
     ScriptCompiler::Source script_source(source, origin);
-    return source_type == SCRIPT
-               ? ScriptCompiler::Compile(context, &script_source,
-                                         compile_options)
-               : ScriptCompiler::CompileModule(context, &script_source,
-                                               compile_options);
+    return ScriptCompiler::Compile(context, &script_source, compile_options);
   }
 
   ScriptCompiler::CachedData* data =
@@ -485,7 +454,6 @@
     DCHECK(false);  // A new compile option?
   }
   if (data == NULL) compile_options = ScriptCompiler::kNoCompileOptions;
-  DCHECK_EQ(SCRIPT, source_type);
   MaybeLocal<Script> result =
       ScriptCompiler::Compile(context, &cached_source, compile_options);
   CHECK(data == NULL || !data->rejected);
@@ -496,7 +464,7 @@
 // Executes a string within the current v8 context.
 bool Shell::ExecuteString(Isolate* isolate, Local<String> source,
                           Local<Value> name, bool print_result,
-                          bool report_exceptions, SourceType source_type) {
+                          bool report_exceptions) {
   HandleScope handle_scope(isolate);
   TryCatch try_catch(isolate);
   try_catch.SetVerbose(true);
@@ -508,8 +476,8 @@
         Local<Context>::New(isolate, data->realms_[data->realm_current_]);
     Context::Scope context_scope(realm);
     Local<Script> script;
-    if (!Shell::CompileString(isolate, source, name, options.compile_options,
-                              source_type).ToLocal(&script)) {
+    if (!Shell::CompileString(isolate, source, name, options.compile_options)
+             .ToLocal(&script)) {
       // Print errors that happened during compilation.
       if (report_exceptions) ReportException(isolate, &try_catch);
       return false;
@@ -527,9 +495,7 @@
   }
   DCHECK(!try_catch.HasCaught());
   if (print_result) {
-#if !defined(V8_SHARED)
     if (options.test_shell) {
-#endif
       if (!result->IsUndefined()) {
         // If all went well and the result wasn't undefined then print
         // the returned value.
@@ -537,17 +503,160 @@
         fwrite(*str, sizeof(**str), str.length(), stdout);
         printf("\n");
       }
-#if !defined(V8_SHARED)
     } else {
       v8::String::Utf8Value str(Stringify(isolate, result));
       fwrite(*str, sizeof(**str), str.length(), stdout);
       printf("\n");
     }
-#endif
   }
   return true;
 }
 
+namespace {
+
+std::string ToSTLString(Local<String> v8_str) {
+  String::Utf8Value utf8(v8_str);
+  // Should not be able to fail since the input is a String.
+  CHECK(*utf8);
+  return *utf8;
+}
+
+bool IsAbsolutePath(const std::string& path) {
+#if defined(_WIN32) || defined(_WIN64)
+  // TODO(adamk): This is an incorrect approximation, but should
+  // work for all our test-running cases.
+  return path.find(':') != std::string::npos;
+#else
+  return path[0] == '/';
+#endif
+}
+
+std::string GetWorkingDirectory() {
+#if defined(_WIN32) || defined(_WIN64)
+  char system_buffer[MAX_PATH];
+  // TODO(adamk): Support Unicode paths.
+  DWORD len = GetCurrentDirectoryA(MAX_PATH, system_buffer);
+  CHECK(len > 0);
+  return system_buffer;
+#else
+  char curdir[PATH_MAX];
+  CHECK_NOT_NULL(getcwd(curdir, PATH_MAX));
+  return curdir;
+#endif
+}
+
+// Returns the directory part of path, without the trailing '/'.
+std::string DirName(const std::string& path) {
+  DCHECK(IsAbsolutePath(path));
+  size_t last_slash = path.find_last_of('/');
+  DCHECK(last_slash != std::string::npos);
+  return path.substr(0, last_slash);
+}
+
+std::string EnsureAbsolutePath(const std::string& path,
+                               const std::string& dir_name) {
+  return IsAbsolutePath(path) ? path : dir_name + '/' + path;
+}
+
+MaybeLocal<Module> ResolveModuleCallback(Local<Context> context,
+                                         Local<String> specifier,
+                                         Local<Module> referrer,
+                                         Local<Value> data) {
+  Isolate* isolate = context->GetIsolate();
+  auto module_map = static_cast<std::map<std::string, Global<Module>>*>(
+      External::Cast(*data)->Value());
+  Local<String> dir_name = Local<String>::Cast(referrer->GetEmbedderData());
+  std::string absolute_path =
+      EnsureAbsolutePath(ToSTLString(specifier), ToSTLString(dir_name));
+  auto it = module_map->find(absolute_path);
+  if (it != module_map->end()) {
+    return it->second.Get(isolate);
+  }
+  return MaybeLocal<Module>();
+}
+
+}  // anonymous namespace
+
+MaybeLocal<Module> Shell::FetchModuleTree(
+    Isolate* isolate, const std::string& file_name,
+    std::map<std::string, Global<Module>>* module_map) {
+  DCHECK(IsAbsolutePath(file_name));
+  TryCatch try_catch(isolate);
+  try_catch.SetVerbose(true);
+  Local<String> source_text = ReadFile(isolate, file_name.c_str());
+  if (source_text.IsEmpty()) {
+    printf("Error reading '%s'\n", file_name.c_str());
+    Shell::Exit(1);
+  }
+  ScriptOrigin origin(
+      String::NewFromUtf8(isolate, file_name.c_str(), NewStringType::kNormal)
+          .ToLocalChecked());
+  ScriptCompiler::Source source(source_text, origin);
+  Local<Module> module;
+  if (!ScriptCompiler::CompileModule(isolate, &source).ToLocal(&module)) {
+    ReportException(isolate, &try_catch);
+    return MaybeLocal<Module>();
+  }
+  module_map->insert(
+      std::make_pair(file_name, Global<Module>(isolate, module)));
+
+  std::string dir_name = DirName(file_name);
+  module->SetEmbedderData(
+      String::NewFromUtf8(isolate, dir_name.c_str(), NewStringType::kNormal)
+          .ToLocalChecked());
+
+  for (int i = 0, length = module->GetModuleRequestsLength(); i < length; ++i) {
+    Local<String> name = module->GetModuleRequest(i);
+    std::string absolute_path = EnsureAbsolutePath(ToSTLString(name), dir_name);
+    if (!module_map->count(absolute_path)) {
+      if (FetchModuleTree(isolate, absolute_path, module_map).IsEmpty()) {
+        return MaybeLocal<Module>();
+      }
+    }
+  }
+
+  return module;
+}
+
+bool Shell::ExecuteModule(Isolate* isolate, const char* file_name) {
+  HandleScope handle_scope(isolate);
+
+  std::string absolute_path =
+      EnsureAbsolutePath(file_name, GetWorkingDirectory());
+  std::replace(absolute_path.begin(), absolute_path.end(), '\\', '/');
+
+  Local<Module> root_module;
+  std::map<std::string, Global<Module>> module_map;
+  if (!FetchModuleTree(isolate, absolute_path, &module_map)
+           .ToLocal(&root_module)) {
+    return false;
+  }
+
+  TryCatch try_catch(isolate);
+  try_catch.SetVerbose(true);
+
+  MaybeLocal<Value> maybe_result;
+  {
+    PerIsolateData* data = PerIsolateData::Get(isolate);
+    Local<Context> realm = data->realms_[data->realm_current_].Get(isolate);
+    Context::Scope context_scope(realm);
+
+    if (root_module->Instantiate(realm, ResolveModuleCallback,
+                                 External::New(isolate, &module_map))) {
+      maybe_result = root_module->Evaluate(realm);
+      EmptyMessageQueues(isolate);
+    }
+  }
+  Local<Value> result;
+  if (!maybe_result.ToLocal(&result)) {
+    DCHECK(try_catch.HasCaught());
+    // Print errors that happened during execution.
+    ReportException(isolate, &try_catch);
+    return false;
+  }
+  DCHECK(!try_catch.HasCaught());
+  return true;
+}
 
 PerIsolateData::RealmScope::RealmScope(PerIsolateData* data) : data_(data) {
   data_->realm_count_ = 1;
@@ -595,7 +704,6 @@
 }
 
 
-#ifndef V8_SHARED
 // performance.now() returns a time stamp as double, measured in milliseconds.
 // When FLAG_verify_predictable mode is enabled it returns result of
 // v8::Platform::MonotonicallyIncreasingTime().
@@ -608,7 +716,6 @@
     args.GetReturnValue().Set(delta.InMillisecondsF());
   }
 }
-#endif  // !V8_SHARED
 
 
 // Realm.current() returns the index of the currently active realm.
@@ -879,7 +986,6 @@
 }
 
 
-#ifndef V8_SHARED
 void Shell::WorkerNew(const v8::FunctionCallbackInfo<v8::Value>& args) {
   Isolate* isolate = args.GetIsolate();
   HandleScope handle_scope(isolate);
@@ -1001,16 +1107,13 @@
 
   worker->Terminate();
 }
-#endif  // !V8_SHARED
 
 
 void Shell::QuitOnce(v8::FunctionCallbackInfo<v8::Value>* args) {
   int exit_code = (*args)[0]
                       ->Int32Value(args->GetIsolate()->GetCurrentContext())
                       .FromMaybe(0);
-#ifndef V8_SHARED
   CleanupWorkers();
-#endif  // !V8_SHARED
   OnExit(args->GetIsolate());
   Exit(exit_code);
 }
@@ -1031,14 +1134,12 @@
 
 void Shell::ReportException(Isolate* isolate, v8::TryCatch* try_catch) {
   HandleScope handle_scope(isolate);
-#ifndef V8_SHARED
   Local<Context> context;
   bool enter_context = !isolate->InContext();
   if (enter_context) {
     context = Local<Context>::New(isolate, evaluation_context_);
     context->Enter();
   }
-#endif  // !V8_SHARED
   v8::String::Utf8Value exception(try_catch->Exception());
   const char* exception_string = ToCString(exception);
   Local<Message> message = try_catch->Message();
@@ -1082,13 +1183,10 @@
     }
   }
   printf("\n");
-#ifndef V8_SHARED
   if (enter_context) context->Exit();
-#endif  // !V8_SHARED
 }
 
 
-#ifndef V8_SHARED
 int32_t* Counter::Bind(const char* name, bool is_histogram) {
   int i;
   for (i = 0; i < kMaxNameSize - 1 && name[i]; i++)
@@ -1217,7 +1315,6 @@
   if (result.IsEmpty()) return String::Empty(isolate);
   return result.ToLocalChecked().As<String>();
 }
-#endif  // !V8_SHARED
 
 
 Local<ObjectTemplate> Shell::CreateGlobalTemplate(Isolate* isolate) {
@@ -1308,7 +1405,6 @@
           .ToLocalChecked(),
       realm_template);
 
-#ifndef V8_SHARED
   Local<ObjectTemplate> performance_template = ObjectTemplate::New(isolate);
   performance_template->Set(
       String::NewFromUtf8(isolate, "now", NewStringType::kNormal)
@@ -1347,7 +1443,6 @@
       String::NewFromUtf8(isolate, "Worker", NewStringType::kNormal)
           .ToLocalChecked(),
       worker_fun_template);
-#endif  // !V8_SHARED
 
   Local<ObjectTemplate> os_templ = ObjectTemplate::New(isolate);
   AddOSMethods(isolate, os_templ);
@@ -1365,21 +1460,17 @@
 }
 
 void Shell::Initialize(Isolate* isolate) {
-#ifndef V8_SHARED
   // Set up counters
   if (i::StrLength(i::FLAG_map_counters) != 0)
     MapCounters(isolate, i::FLAG_map_counters);
-#endif  // !V8_SHARED
   // Disable default message reporting.
   isolate->AddMessageListener(EmptyMessageCallback);
 }
 
 
 Local<Context> Shell::CreateEvaluationContext(Isolate* isolate) {
-#ifndef V8_SHARED
   // This needs to be a critical section since this is not thread-safe
   base::LockGuard<base::Mutex> lock_guard(context_mutex_.Pointer());
-#endif  // !V8_SHARED
   // Initialize the global objects
   Local<ObjectTemplate> global_template = CreateGlobalTemplate(isolate);
   EscapableHandleScope handle_scope(isolate);
@@ -1387,7 +1478,6 @@
   DCHECK(!context.IsEmpty());
   Context::Scope scope(context);
 
-#ifndef V8_SHARED
   i::Factory* factory = reinterpret_cast<i::Isolate*>(isolate)->factory();
   i::JSArguments js_args = i::FLAG_js_arguments;
   i::Handle<i::FixedArray> arguments_array =
@@ -1405,7 +1495,6 @@
                 .ToLocalChecked(),
             Utils::ToLocal(arguments_jsarray))
       .FromJust();
-#endif  // !V8_SHARED
   return handle_scope.Escape(context);
 }
 
@@ -1419,7 +1508,6 @@
 }
 
 
-#ifndef V8_SHARED
 struct CounterAndKey {
   Counter* counter;
   const char* key;
@@ -1444,11 +1532,8 @@
       JSON::Stringify(context, dispatch_counters).ToLocalChecked());
 }
 
-#endif  // !V8_SHARED
-
 
 void Shell::OnExit(v8::Isolate* isolate) {
-#ifndef V8_SHARED
   if (i::FLAG_dump_counters) {
     int number_of_counters = 0;
     for (CounterMap::Iterator i(counter_map_); i.More(); i.Next()) {
@@ -1484,7 +1569,6 @@
 
   delete counters_file_;
   delete counter_map_;
-#endif  // !V8_SHARED
 }
 
 
@@ -1618,10 +1702,8 @@
 
 
 SourceGroup::~SourceGroup() {
-#ifndef V8_SHARED
   delete thread_;
   thread_ = NULL;
-#endif  // !V8_SHARED
 }
 
 
@@ -1629,7 +1711,6 @@
   bool exception_was_thrown = false;
   for (int i = begin_offset_; i < end_offset_; ++i) {
     const char* arg = argv_[i];
-    Shell::SourceType source_type = Shell::SCRIPT;
     if (strcmp(arg, "-e") == 0 && i + 1 < end_offset_) {
       // Execute argument given to -e option directly.
       HandleScope handle_scope(isolate);
@@ -1648,8 +1729,13 @@
       continue;
     } else if (strcmp(arg, "--module") == 0 && i + 1 < end_offset_) {
       // Treat the next file as a module.
-      source_type = Shell::MODULE;
       arg = argv_[++i];
+      Shell::options.script_executed = true;
+      if (!Shell::ExecuteModule(isolate, arg)) {
+        exception_was_thrown = true;
+        break;
+      }
+      continue;
     } else if (arg[0] == '-') {
       // Ignore other options. They have been parsed already.
       continue;
@@ -1666,8 +1752,7 @@
       Shell::Exit(1);
     }
     Shell::options.script_executed = true;
-    if (!Shell::ExecuteString(isolate, source, file_name, false, true,
-                              source_type)) {
+    if (!Shell::ExecuteString(isolate, source, file_name, false, true)) {
       exception_was_thrown = true;
       break;
     }
@@ -1690,7 +1775,6 @@
 }
 
 
-#ifndef V8_SHARED
 base::Thread::Options SourceGroup::GetThreadOptions() {
   // On some systems (OSX 10.6) the stack size default is 0.5Mb or less
   // which is not enough to parse the big literal expressions used in tests.
@@ -2014,7 +2098,6 @@
     delete data;
   }
 }
-#endif  // !V8_SHARED
 
 
 void SetFlagsFromString(const char* flags) {
@@ -2070,30 +2153,16 @@
       // JavaScript engines.
       continue;
     } else if (strcmp(argv[i], "--isolate") == 0) {
-#ifdef V8_SHARED
-      printf("D8 with shared library does not support multi-threading\n");
-      return false;
-#endif  // V8_SHARED
       options.num_isolates++;
     } else if (strcmp(argv[i], "--dump-heap-constants") == 0) {
-#ifdef V8_SHARED
-      printf("D8 with shared library does not support constant dumping\n");
-      return false;
-#else
       options.dump_heap_constants = true;
       argv[i] = NULL;
-#endif  // V8_SHARED
     } else if (strcmp(argv[i], "--throws") == 0) {
       options.expected_to_throw = true;
       argv[i] = NULL;
     } else if (strncmp(argv[i], "--icu-data-file=", 16) == 0) {
       options.icu_data_file = argv[i] + 16;
       argv[i] = NULL;
-#ifdef V8_SHARED
-    } else if (strcmp(argv[i], "--dump-counters") == 0) {
-      printf("D8 with shared library does not include counters\n");
-      return false;
-#endif  // V8_SHARED
 #ifdef V8_USE_EXTERNAL_STARTUP_DATA
     } else if (strncmp(argv[i], "--natives_blob=", 15) == 0) {
       options.natives_blob = argv[i] + 15;
@@ -2159,11 +2228,9 @@
 
 
 int Shell::RunMain(Isolate* isolate, int argc, char* argv[], bool last_run) {
-#ifndef V8_SHARED
   for (int i = 1; i < options.num_isolates; ++i) {
     options.isolate_sources[i].StartExecuteInThread();
   }
-#endif  // !V8_SHARED
   {
     HandleScope scope(isolate);
     Local<Context> context = CreateEvaluationContext(isolate);
@@ -2178,7 +2245,6 @@
     }
   }
   CollectGarbage(isolate);
-#ifndef V8_SHARED
   for (int i = 1; i < options.num_isolates; ++i) {
     if (last_run) {
       options.isolate_sources[i].JoinThread();
@@ -2187,7 +2253,6 @@
     }
   }
   CleanupWorkers();
-#endif  // !V8_SHARED
   return 0;
 }
 
@@ -2209,17 +2274,12 @@
 
 
 void Shell::EmptyMessageQueues(Isolate* isolate) {
-#ifndef V8_SHARED
   if (!i::FLAG_verify_predictable) {
-#endif
     while (v8::platform::PumpMessageLoop(g_platform, isolate)) continue;
-#ifndef V8_SHARED
   }
-#endif
 }
 
 
-#ifndef V8_SHARED
 bool Shell::SerializeValue(Isolate* isolate, Local<Value> value,
                            const ObjectList& to_transfer,
                            ObjectList* seen_objects,
@@ -2534,14 +2594,11 @@
   printf("}\n");
 #undef ROOT_LIST_CASE
 }
-#endif  // !V8_SHARED
 
 
 int Shell::Main(int argc, char* argv[]) {
   std::ofstream trace_file;
-#ifndef V8_SHARED
   v8::base::debug::EnableInProcessStackDumping();
-#endif
 #if (defined(_WIN32) || defined(_WIN64))
   UINT new_flags =
       SEM_FAILCRITICALERRORS | SEM_NOGPFAULTERRORBOX | SEM_NOOPENFILEERRORBOX;
@@ -2559,13 +2616,9 @@
 #endif  // defined(_WIN32) || defined(_WIN64)
   if (!SetOptions(argc, argv)) return 1;
   v8::V8::InitializeICUDefaultLocation(argv[0], options.icu_data_file);
-#ifndef V8_SHARED
   g_platform = i::FLAG_verify_predictable
                    ? new PredictablePlatform()
                    : v8::platform::CreateDefaultPlatform();
-#else
-  g_platform = v8::platform::CreateDefaultPlatform();
-#endif  // !V8_SHARED
 
   v8::V8::InitializePlatform(g_platform);
   v8::V8::Initialize();
@@ -2591,7 +2644,6 @@
 #ifdef ENABLE_VTUNE_JIT_INTERFACE
   create_params.code_event_handler = vTune::GetVtuneCodeEventHandler();
 #endif
-#ifndef V8_SHARED
   create_params.constraints.ConfigureDefaults(
       base::SysInfo::AmountOfPhysicalMemory(),
       base::SysInfo::AmountOfVirtualMemory());
@@ -2602,7 +2654,6 @@
     create_params.create_histogram_callback = CreateHistogram;
     create_params.add_histogram_sample_callback = AddHistogramSample;
   }
-#endif
   Isolate* isolate = Isolate::New(create_params);
   {
     Isolate::Scope scope(isolate);
@@ -2632,21 +2683,15 @@
       }
       tracing_controller->Initialize(trace_buffer);
       tracing_controller->StartTracing(trace_config);
-#ifndef V8_SHARED
       if (!i::FLAG_verify_predictable) {
         platform::SetTracingController(g_platform, tracing_controller);
       }
-#else
-      platform::SetTracingController(g_platform, tracing_controller);
-#endif
     }
 
-#ifndef V8_SHARED
     if (options.dump_heap_constants) {
       DumpHeapConstants(reinterpret_cast<i::Isolate*>(isolate));
       return 0;
     }
-#endif
 
     if (options.stress_opt || options.stress_deopt) {
       Testing::SetStressRunType(options.stress_opt
@@ -2662,7 +2707,6 @@
       }
       printf("======== Full Deoptimization =======\n");
       Testing::DeoptimizeAll(isolate);
-#if !defined(V8_SHARED)
     } else if (i::FLAG_stress_runs > 0) {
       options.stress_runs = i::FLAG_stress_runs;
       for (int i = 0; i < options.stress_runs && result == 0; i++) {
@@ -2671,7 +2715,6 @@
         bool last_run = i == options.stress_runs - 1;
         result = RunMain(isolate, argc, argv, last_run);
       }
-#endif
     } else {
       bool last_run = true;
       result = RunMain(isolate, argc, argv, last_run);
@@ -2683,29 +2726,23 @@
       RunShell(isolate);
     }
 
-#ifndef V8_SHARED
     if (i::FLAG_ignition && i::FLAG_trace_ignition_dispatches &&
         i::FLAG_trace_ignition_dispatches_output_file != nullptr) {
       WriteIgnitionDispatchCountersFile(isolate);
     }
-#endif
 
     // Shut down contexts and collect garbage.
     evaluation_context_.Reset();
-#ifndef V8_SHARED
     stringify_function_.Reset();
-#endif  // !V8_SHARED
     CollectGarbage(isolate);
   }
   OnExit(isolate);
-#ifndef V8_SHARED
   // Dump basic block profiling data.
   if (i::BasicBlockProfiler* profiler =
           reinterpret_cast<i::Isolate*>(isolate)->basic_block_profiler()) {
     i::OFStream os(stdout);
     os << *profiler;
   }
-#endif  // !V8_SHARED
   isolate->Dispose();
   V8::Dispose();
   V8::ShutdownPlatform();
diff --git a/src/d8.gyp b/src/d8.gyp
index cc65a5b..e0270f5 100644
--- a/src/d8.gyp
+++ b/src/d8.gyp
@@ -49,10 +49,18 @@
       'sources': [
         'd8.h',
         'd8.cc',
+        '<(SHARED_INTERMEDIATE_DIR)/d8-js.cc',
       ],
       'conditions': [
         [ 'want_separate_host_toolset==1', {
           'toolsets': [ 'target', ],
+          'dependencies': [
+            'd8_js2c#host',
+          ],
+        }, {
+          'dependencies': [
+            'd8_js2c',
+          ],
         }],
         ['(OS=="linux" or OS=="mac" or OS=="freebsd" or OS=="netbsd" \
            or OS=="openbsd" or OS=="solaris" or OS=="android" \
@@ -63,19 +71,7 @@
           'sources': [ 'd8-windows.cc', ]
         }],
         [ 'component!="shared_library"', {
-          'sources': [
-            '<(SHARED_INTERMEDIATE_DIR)/d8-js.cc',
-          ],
           'conditions': [
-            [ 'want_separate_host_toolset==1', {
-              'dependencies': [
-                'd8_js2c#host',
-              ],
-            }, {
-              'dependencies': [
-                'd8_js2c',
-              ],
-            }],
             [ 'v8_postmortem_support=="true"', {
               'xcode_settings': {
                 'OTHER_LDFLAGS': [
diff --git a/src/d8.h b/src/d8.h
index 0e365a5..32a7d25 100644
--- a/src/d8.h
+++ b/src/d8.h
@@ -5,15 +5,13 @@
 #ifndef V8_D8_H_
 #define V8_D8_H_
 
-#ifndef V8_SHARED
+#include <map>
+#include <string>
+
 #include "src/allocation.h"
 #include "src/base/hashmap.h"
 #include "src/base/platform/time.h"
 #include "src/list.h"
-#else
-#include "include/v8.h"
-#include "src/base/compiler-specific.h"
-#endif  // !V8_SHARED
 
 #include "src/base/once.h"
 
@@ -21,7 +19,6 @@
 namespace v8 {
 
 
-#ifndef V8_SHARED
 // A single counter in a counter collection.
 class Counter {
  public:
@@ -81,26 +78,23 @@
     const char* CurrentKey() { return static_cast<const char*>(entry_->key); }
     Counter* CurrentValue() { return static_cast<Counter*>(entry_->value); }
    private:
-    base::HashMap* map_;
-    base::HashMap::Entry* entry_;
+    base::CustomMatcherHashMap* map_;
+    base::CustomMatcherHashMap::Entry* entry_;
   };
 
  private:
   static int Hash(const char* name);
   static bool Match(void* key1, void* key2);
-  base::HashMap hash_map_;
+  base::CustomMatcherHashMap hash_map_;
 };
-#endif  // !V8_SHARED
 
 
 class SourceGroup {
  public:
   SourceGroup() :
-#ifndef V8_SHARED
       next_semaphore_(0),
       done_semaphore_(0),
       thread_(NULL),
-#endif  // !V8_SHARED
       argv_(NULL),
       begin_offset_(0),
       end_offset_(0) {}
@@ -116,7 +110,6 @@
 
   void Execute(Isolate* isolate);
 
-#ifndef V8_SHARED
   void StartExecuteInThread();
   void WaitForThread();
   void JoinThread();
@@ -141,7 +134,6 @@
   base::Semaphore next_semaphore_;
   base::Semaphore done_semaphore_;
   base::Thread* thread_;
-#endif  // !V8_SHARED
 
   void ExitShell(int exit_code);
   Local<String> ReadFile(Isolate* isolate, const char* name);
@@ -151,7 +143,6 @@
   int end_offset_;
 };
 
-#ifndef V8_SHARED
 enum SerializationTag {
   kSerializationTagUndefined,
   kSerializationTagNull,
@@ -267,7 +258,6 @@
   char* script_;
   base::Atomic32 running_;
 };
-#endif  // !V8_SHARED
 
 
 class ShellOptions {
@@ -324,23 +314,15 @@
   const char* trace_config;
 };
 
-#ifdef V8_SHARED
-class Shell {
-#else
 class Shell : public i::AllStatic {
-#endif  // V8_SHARED
-
  public:
-  enum SourceType { SCRIPT, MODULE };
-
   static MaybeLocal<Script> CompileString(
       Isolate* isolate, Local<String> source, Local<Value> name,
-      v8::ScriptCompiler::CompileOptions compile_options,
-      SourceType source_type);
+      v8::ScriptCompiler::CompileOptions compile_options);
   static bool ExecuteString(Isolate* isolate, Local<String> source,
                             Local<Value> name, bool print_result,
-                            bool report_exceptions,
-                            SourceType source_type = SCRIPT);
+                            bool report_exceptions);
+  static bool ExecuteModule(Isolate* isolate, const char* file_name);
   static const char* ToCString(const v8::String::Utf8Value& value);
   static void ReportException(Isolate* isolate, TryCatch* try_catch);
   static Local<String> ReadFile(Isolate* isolate, const char* name);
@@ -352,7 +334,6 @@
   static void CollectGarbage(Isolate* isolate);
   static void EmptyMessageQueues(Isolate* isolate);
 
-#ifndef V8_SHARED
   // TODO(binji): stupid implementation for now. Is there an easy way to hash an
   // object for use in base::HashMap? By pointer?
   typedef i::List<Local<Object>> ObjectList;
@@ -373,7 +354,6 @@
   static void MapCounters(v8::Isolate* isolate, const char* name);
 
   static void PerformanceNow(const v8::FunctionCallbackInfo<v8::Value>& args);
-#endif  // !V8_SHARED
 
   static void RealmCurrent(const v8::FunctionCallbackInfo<v8::Value>& args);
   static void RealmOwner(const v8::FunctionCallbackInfo<v8::Value>& args);
@@ -451,7 +431,6 @@
  private:
   static Global<Context> evaluation_context_;
   static base::OnceType quit_once_;
-#ifndef V8_SHARED
   static Global<Function> stringify_function_;
   static CounterMap* counter_map_;
   // We statically allocate a set of local counters to be used if we
@@ -470,13 +449,15 @@
   static void WriteIgnitionDispatchCountersFile(v8::Isolate* isolate);
   static Counter* GetCounter(const char* name, bool is_histogram);
   static Local<String> Stringify(Isolate* isolate, Local<Value> value);
-#endif  // !V8_SHARED
   static void Initialize(Isolate* isolate);
   static void RunShell(Isolate* isolate);
   static bool SetOptions(int argc, char* argv[]);
   static Local<ObjectTemplate> CreateGlobalTemplate(Isolate* isolate);
   static MaybeLocal<Context> CreateRealm(
       const v8::FunctionCallbackInfo<v8::Value>& args);
+  static MaybeLocal<Module> FetchModuleTree(
+      Isolate* isolate, const std::string& file_name,
+      std::map<std::string, Global<Module>>* module_map);
 };
 
 
diff --git a/src/dateparser.h b/src/dateparser.h
index d7676cb..709c1cb 100644
--- a/src/dateparser.h
+++ b/src/dateparser.h
@@ -7,7 +7,7 @@
 
 #include "src/allocation.h"
 #include "src/char-predicates.h"
-#include "src/parsing/scanner.h"
+#include "src/unicode-cache.h"
 
 namespace v8 {
 namespace internal {
diff --git a/src/debug/arm/debug-arm.cc b/src/debug/arm/debug-arm.cc
index 29e4827..d96ec31 100644
--- a/src/debug/arm/debug-arm.cc
+++ b/src/debug/arm/debug-arm.cc
@@ -4,9 +4,11 @@
 
 #if V8_TARGET_ARCH_ARM
 
-#include "src/codegen.h"
 #include "src/debug/debug.h"
 
+#include "src/codegen.h"
+#include "src/debug/liveedit.h"
+
 namespace v8 {
 namespace internal {
 
diff --git a/src/debug/arm64/debug-arm64.cc b/src/debug/arm64/debug-arm64.cc
index bf7964a..e344924 100644
--- a/src/debug/arm64/debug-arm64.cc
+++ b/src/debug/arm64/debug-arm64.cc
@@ -4,9 +4,11 @@
 
 #if V8_TARGET_ARCH_ARM64
 
+#include "src/debug/debug.h"
+
 #include "src/arm64/frames-arm64.h"
 #include "src/codegen.h"
-#include "src/debug/debug.h"
+#include "src/debug/liveedit.h"
 
 namespace v8 {
 namespace internal {
diff --git a/src/debug/debug-evaluate.cc b/src/debug/debug-evaluate.cc
index fb2df31..8970520 100644
--- a/src/debug/debug-evaluate.cc
+++ b/src/debug/debug-evaluate.cc
@@ -94,7 +94,13 @@
   if (context_extension->IsJSObject()) {
     Handle<JSObject> extension = Handle<JSObject>::cast(context_extension);
     Handle<JSFunction> closure(context->closure(), isolate);
-    context = isolate->factory()->NewWithContext(closure, context, extension);
+    context = isolate->factory()->NewWithContext(
+        closure, context,
+        ScopeInfo::CreateForWithScope(
+            isolate, context->IsNativeContext()
+                         ? Handle<ScopeInfo>::null()
+                         : Handle<ScopeInfo>(context->scope_info())),
+        extension);
   }
 
   Handle<JSFunction> eval_fun;
@@ -203,8 +209,13 @@
   }
 
   for (int i = context_chain_.length() - 1; i >= 0; i--) {
+    Handle<ScopeInfo> scope_info(ScopeInfo::CreateForWithScope(
+        isolate, evaluation_context_->IsNativeContext()
+                     ? Handle<ScopeInfo>::null()
+                     : Handle<ScopeInfo>(evaluation_context_->scope_info())));
+    scope_info->SetIsDebugEvaluateScope();
     evaluation_context_ = factory->NewDebugEvaluateContext(
-        evaluation_context_, context_chain_[i].materialized_object,
+        evaluation_context_, scope_info, context_chain_[i].materialized_object,
         context_chain_[i].wrapped_context, context_chain_[i].whitelist);
   }
 }
diff --git a/src/debug/debug-scopes.cc b/src/debug/debug-scopes.cc
index 55108bb..c7eb0f7 100644
--- a/src/debug/debug-scopes.cc
+++ b/src/debug/debug-scopes.cc
@@ -7,7 +7,6 @@
 #include <memory>
 
 #include "src/ast/scopes.h"
-#include "src/compiler.h"
 #include "src/debug/debug.h"
 #include "src/frames-inl.h"
 #include "src/globals.h"
@@ -100,7 +99,9 @@
     } else {
       DCHECK(scope_info->scope_type() == EVAL_SCOPE);
       info->set_eval();
-      info->set_context(Handle<Context>(function->context()));
+      if (!function->context()->IsNativeContext()) {
+        info->set_outer_scope_info(handle(function->context()->scope_info()));
+      }
       // Language mode may be inherited from the eval caller.
       // Retrieve it from shared function info.
       info->set_language_mode(shared_info->language_mode());
@@ -115,8 +116,7 @@
       CollectNonLocals(info.get(), scope);
     }
     if (!ignore_nested_scopes) {
-      AstNodeFactory ast_node_factory(info.get()->ast_value_factory());
-      scope->AllocateVariables(info.get(), &ast_node_factory);
+      DeclarationScope::Analyze(info.get(), AnalyzeMode::kDebugger);
       RetrieveScopeChain(scope);
     }
   } else if (!ignore_nested_scopes) {
@@ -364,7 +364,7 @@
     case ScopeIterator::ScopeTypeEval:
       return SetInnerScopeVariableValue(variable_name, new_value);
     case ScopeIterator::ScopeTypeModule:
-      // TODO(2399): should we implement it?
+      // TODO(neis): Implement.
       break;
   }
   return false;
@@ -619,6 +619,8 @@
   // Fill all context locals.
   CopyContextLocalsToScopeObject(scope_info, context, module_scope);
 
+  // TODO(neis): Also collect stack locals as well as imports and exports.
+
   return module_scope;
 }
 
@@ -819,11 +821,10 @@
   if (scope->is_hidden()) {
     // We need to add this chain element in case the scope has a context
     // associated. We need to keep the scope chain and context chain in sync.
-    nested_scope_chain_.Add(ExtendedScopeInfo(scope->GetScopeInfo(isolate)));
+    nested_scope_chain_.Add(ExtendedScopeInfo(scope->scope_info()));
   } else {
-    nested_scope_chain_.Add(ExtendedScopeInfo(scope->GetScopeInfo(isolate),
-                                              scope->start_position(),
-                                              scope->end_position()));
+    nested_scope_chain_.Add(ExtendedScopeInfo(
+        scope->scope_info(), scope->start_position(), scope->end_position()));
   }
   for (Scope* inner_scope = scope->inner_scope(); inner_scope != nullptr;
        inner_scope = inner_scope->sibling()) {
diff --git a/src/debug/debug-scopes.h b/src/debug/debug-scopes.h
index 0491d73..026a1da 100644
--- a/src/debug/debug-scopes.h
+++ b/src/debug/debug-scopes.h
@@ -11,6 +11,8 @@
 namespace v8 {
 namespace internal {
 
+class ParseInfo;
+
 // Iterate over the actual scopes visible from a stack frame or from a closure.
 // The iteration proceeds from the innermost visible nested scope outwards.
 // All scopes are backed by an actual context except the local scope,
diff --git a/src/debug/debug.cc b/src/debug/debug.cc
index e046957..5323c13 100644
--- a/src/debug/debug.cc
+++ b/src/debug/debug.cc
@@ -14,6 +14,7 @@
 #include "src/compilation-cache.h"
 #include "src/compiler-dispatcher/optimizing-compile-dispatcher.h"
 #include "src/compiler.h"
+#include "src/debug/liveedit.h"
 #include "src/deoptimizer.h"
 #include "src/execution.h"
 #include "src/frames-inl.h"
@@ -1281,7 +1282,7 @@
 
   // Make sure we abort incremental marking.
   isolate_->heap()->CollectAllGarbage(Heap::kMakeHeapIterableMask,
-                                      "prepare for break points");
+                                      GarbageCollectionReason::kDebugger);
 
   DCHECK(shared->is_compiled());
   bool baseline_exists = shared->HasBaselineCode();
@@ -1293,7 +1294,8 @@
     HeapIterator iterator(isolate_->heap());
     HeapObject* obj;
     // Continuation from old-style generators need to be recomputed.
-    bool find_resumables = baseline_exists && shared->is_resumable();
+    bool find_resumables =
+        baseline_exists && IsResumableFunction(shared->kind());
 
     while ((obj = iterator.next())) {
       if (obj->IsJSFunction()) {
@@ -1352,7 +1354,7 @@
 
 void Debug::RecordAsyncFunction(Handle<JSGeneratorObject> generator_object) {
   if (last_step_action() <= StepOut) return;
-  if (!generator_object->function()->shared()->is_async()) return;
+  if (!IsAsyncFunction(generator_object->function()->shared()->kind())) return;
   DCHECK(!has_suspended_generator());
   thread_local_.suspended_generator_ = *generator_object;
   ClearStepping();
@@ -1576,10 +1578,9 @@
   return location.IsReturn() || location.IsTailCall();
 }
 
-
 void Debug::FramesHaveBeenDropped(StackFrame::Id new_break_frame_id,
-                                  LiveEdit::FrameDropMode mode) {
-  if (mode != LiveEdit::CURRENTLY_SET_MODE) {
+                                  LiveEditFrameDropMode mode) {
+  if (mode != LIVE_EDIT_CURRENTLY_SET_MODE) {
     thread_local_.frame_drop_mode_ = mode;
   }
   thread_local_.break_frame_id_ = new_break_frame_id;
@@ -1599,7 +1600,8 @@
 
 
 Handle<FixedArray> Debug::GetLoadedScripts() {
-  isolate_->heap()->CollectAllGarbage();
+  isolate_->heap()->CollectAllGarbage(Heap::kFinalizeIncrementalMarkingMask,
+                                      GarbageCollectionReason::kDebugger);
   Factory* factory = isolate_->factory();
   if (!factory->script_list()->IsWeakFixedArray()) {
     return factory->empty_fixed_array();
@@ -1681,43 +1683,38 @@
   }
 }
 
-
-void Debug::OnPromiseReject(Handle<JSObject> promise, Handle<Object> value) {
+void Debug::OnPromiseReject(Handle<Object> promise, Handle<Object> value) {
   if (in_debug_scope() || ignore_events()) return;
   HandleScope scope(isolate_);
   // Check whether the promise has been marked as having triggered a message.
   Handle<Symbol> key = isolate_->factory()->promise_debug_marker_symbol();
-  if (JSReceiver::GetDataProperty(promise, key)->IsUndefined(isolate_)) {
+  if (!promise->IsJSObject() ||
+      JSReceiver::GetDataProperty(Handle<JSObject>::cast(promise), key)
+          ->IsUndefined(isolate_)) {
     OnException(value, promise);
   }
 }
 
 
-MaybeHandle<Object> Debug::PromiseHasUserDefinedRejectHandler(
-    Handle<JSObject> promise) {
-  Handle<JSFunction> fun = isolate_->promise_has_user_defined_reject_handler();
-  return Execution::Call(isolate_, fun, promise, 0, NULL);
-}
-
-
 void Debug::OnException(Handle<Object> exception, Handle<Object> promise) {
+  // We cannot generate debug events when JS execution is disallowed.
+  // TODO(5530): Reenable debug events within DisallowJSScopes once relevant
+  // code (MakeExceptionEvent and ProcessDebugEvent) have been moved to C++.
+  if (!AllowJavascriptExecution::IsAllowed(isolate_)) return;
+
   Isolate::CatchType catch_type = isolate_->PredictExceptionCatcher();
 
   // Don't notify listener of exceptions that are internal to a desugaring.
   if (catch_type == Isolate::CAUGHT_BY_DESUGARING) return;
 
-  bool uncaught = (catch_type == Isolate::NOT_CAUGHT);
+  bool uncaught = catch_type == Isolate::NOT_CAUGHT;
   if (promise->IsJSObject()) {
     Handle<JSObject> jspromise = Handle<JSObject>::cast(promise);
     // Mark the promise as already having triggered a message.
     Handle<Symbol> key = isolate_->factory()->promise_debug_marker_symbol();
     JSObject::SetProperty(jspromise, key, key, STRICT).Assert();
     // Check whether the promise reject is considered an uncaught exception.
-    Handle<Object> has_reject_handler;
-    ASSIGN_RETURN_ON_EXCEPTION_VALUE(
-        isolate_, has_reject_handler,
-        PromiseHasUserDefinedRejectHandler(jspromise), /* void */);
-    uncaught = has_reject_handler->IsFalse(isolate_);
+    uncaught = !isolate_->PromiseHasUserDefinedRejectHandler(jspromise);
   }
   // Bail out if exception breaks are not active
   if (uncaught) {
diff --git a/src/debug/debug.h b/src/debug/debug.h
index 36f973c..c4e8c17 100644
--- a/src/debug/debug.h
+++ b/src/debug/debug.h
@@ -11,11 +11,11 @@
 #include "src/base/atomicops.h"
 #include "src/base/hashmap.h"
 #include "src/base/platform/platform.h"
-#include "src/debug/liveedit.h"
 #include "src/execution.h"
 #include "src/factory.h"
 #include "src/flags.h"
 #include "src/frames.h"
+#include "src/globals.h"
 #include "src/runtime/runtime.h"
 #include "src/source-position-table.h"
 #include "src/string-stream.h"
@@ -413,7 +413,7 @@
   void OnDebugBreak(Handle<Object> break_points_hit, bool auto_continue);
 
   void OnThrow(Handle<Object> exception);
-  void OnPromiseReject(Handle<JSObject> promise, Handle<Object> value);
+  void OnPromiseReject(Handle<Object> promise, Handle<Object> value);
   void OnCompileError(Handle<Script> script);
   void OnBeforeCompile(Handle<Script> script);
   void OnAfterCompile(Handle<Script> script);
@@ -489,7 +489,7 @@
 
   // Support for LiveEdit
   void FramesHaveBeenDropped(StackFrame::Id new_break_frame_id,
-                             LiveEdit::FrameDropMode mode);
+                             LiveEditFrameDropMode mode);
 
   // Threading support.
   char* ArchiveDebug(char* to);
@@ -594,9 +594,6 @@
   // Mirror cache handling.
   void ClearMirrorCache();
 
-  MaybeHandle<Object> PromiseHasUserDefinedRejectHandler(
-      Handle<JSObject> promise);
-
   void CallEventCallback(v8::DebugEvent event,
                          Handle<Object> exec_state,
                          Handle<Object> event_data,
@@ -704,7 +701,7 @@
 
     // Stores the way how LiveEdit has patched the stack. It is used when
     // debugger returns control back to user script.
-    LiveEdit::FrameDropMode frame_drop_mode_;
+    LiveEditFrameDropMode frame_drop_mode_;
 
     // Value of accumulator in interpreter frames. In non-interpreter frames
     // this value will be the hole.
diff --git a/src/debug/ia32/debug-ia32.cc b/src/debug/ia32/debug-ia32.cc
index 8e4dee7..47ec69e 100644
--- a/src/debug/ia32/debug-ia32.cc
+++ b/src/debug/ia32/debug-ia32.cc
@@ -4,8 +4,10 @@
 
 #if V8_TARGET_ARCH_IA32
 
-#include "src/codegen.h"
 #include "src/debug/debug.h"
+
+#include "src/codegen.h"
+#include "src/debug/liveedit.h"
 #include "src/ia32/frames-ia32.h"
 
 namespace v8 {
diff --git a/src/debug/liveedit.cc b/src/debug/liveedit.cc
index b51bb1a..b451842 100644
--- a/src/debug/liveedit.cc
+++ b/src/debug/liveedit.cc
@@ -14,7 +14,6 @@
 #include "src/global-handles.h"
 #include "src/isolate-inl.h"
 #include "src/messages.h"
-#include "src/parsing/parser.h"
 #include "src/source-position-table.h"
 #include "src/v8.h"
 #include "src/v8memory.h"
@@ -655,7 +654,7 @@
 
 
 void LiveEdit::InitializeThreadLocal(Debug* debug) {
-  debug->thread_local_.frame_drop_mode_ = LiveEdit::FRAMES_UNTOUCHED;
+  debug->thread_local_.frame_drop_mode_ = LIVE_EDIT_FRAMES_UNTOUCHED;
 }
 
 
@@ -663,20 +662,20 @@
   Code* code = NULL;
   Isolate* isolate = debug->isolate_;
   switch (debug->thread_local_.frame_drop_mode_) {
-    case FRAMES_UNTOUCHED:
+    case LIVE_EDIT_FRAMES_UNTOUCHED:
       return false;
-    case FRAME_DROPPED_IN_DEBUG_SLOT_CALL:
+    case LIVE_EDIT_FRAME_DROPPED_IN_DEBUG_SLOT_CALL:
       // Debug break slot stub does not return normally, instead it manually
       // cleans the stack and jumps. We should patch the jump address.
       code = isolate->builtins()->builtin(Builtins::kFrameDropper_LiveEdit);
       break;
-    case FRAME_DROPPED_IN_DIRECT_CALL:
+    case LIVE_EDIT_FRAME_DROPPED_IN_DIRECT_CALL:
       // Nothing to do, after_break_target is not used here.
       return true;
-    case FRAME_DROPPED_IN_RETURN_CALL:
+    case LIVE_EDIT_FRAME_DROPPED_IN_RETURN_CALL:
       code = isolate->builtins()->builtin(Builtins::kFrameDropper_LiveEdit);
       break;
-    case CURRENTLY_SET_MODE:
+    case LIVE_EDIT_CURRENTLY_SET_MODE:
       UNREACHABLE();
       break;
   }
@@ -1017,6 +1016,7 @@
           handle(shared_info->GetDebugInfo()));
     }
     shared_info->set_scope_info(new_shared_info->scope_info());
+    shared_info->set_outer_scope_info(new_shared_info->outer_scope_info());
     shared_info->DisableOptimization(kLiveEdit);
     // Update the type feedback vector, if needed.
     Handle<TypeFeedbackMetadata> new_feedback_metadata(
@@ -1303,7 +1303,7 @@
 // Returns error message or NULL.
 static const char* DropFrames(Vector<StackFrame*> frames, int top_frame_index,
                               int bottom_js_frame_index,
-                              LiveEdit::FrameDropMode* mode) {
+                              LiveEditFrameDropMode* mode) {
   if (!LiveEdit::kFrameDropperSupported) {
     return "Stack manipulations are not supported in this architecture.";
   }
@@ -1321,22 +1321,22 @@
   if (pre_top_frame_code ==
       isolate->builtins()->builtin(Builtins::kSlot_DebugBreak)) {
     // OK, we can drop debug break slot.
-    *mode = LiveEdit::FRAME_DROPPED_IN_DEBUG_SLOT_CALL;
+    *mode = LIVE_EDIT_FRAME_DROPPED_IN_DEBUG_SLOT_CALL;
   } else if (pre_top_frame_code ==
              isolate->builtins()->builtin(Builtins::kFrameDropper_LiveEdit)) {
     // OK, we can drop our own code.
     pre_top_frame = frames[top_frame_index - 2];
     top_frame = frames[top_frame_index - 1];
-    *mode = LiveEdit::CURRENTLY_SET_MODE;
+    *mode = LIVE_EDIT_CURRENTLY_SET_MODE;
     frame_has_padding = false;
   } else if (pre_top_frame_code ==
              isolate->builtins()->builtin(Builtins::kReturn_DebugBreak)) {
-    *mode = LiveEdit::FRAME_DROPPED_IN_RETURN_CALL;
+    *mode = LIVE_EDIT_FRAME_DROPPED_IN_RETURN_CALL;
   } else if (pre_top_frame_code->kind() == Code::STUB &&
              CodeStub::GetMajorKey(pre_top_frame_code) == CodeStub::CEntry) {
     // Entry from our unit tests on 'debugger' statement.
     // It's fine, we support this case.
-    *mode = LiveEdit::FRAME_DROPPED_IN_DIRECT_CALL;
+    *mode = LIVE_EDIT_FRAME_DROPPED_IN_DIRECT_CALL;
     // We don't have a padding from 'debugger' statement call.
     // Here the stub is CEntry, it's not debug-only and can't be padded.
     // If anyone would complain, a proxy padded stub could be added.
@@ -1348,13 +1348,13 @@
            isolate->builtins()->builtin(Builtins::kFrameDropper_LiveEdit));
     pre_top_frame = frames[top_frame_index - 3];
     top_frame = frames[top_frame_index - 2];
-    *mode = LiveEdit::CURRENTLY_SET_MODE;
+    *mode = LIVE_EDIT_CURRENTLY_SET_MODE;
     frame_has_padding = false;
   } else if (pre_top_frame_code->kind() == Code::BYTECODE_HANDLER) {
     // Interpreted bytecode takes up two stack frames, one for the bytecode
     // handler and one for the interpreter entry trampoline. Therefore we shift
     // up by one frame.
-    *mode = LiveEdit::FRAME_DROPPED_IN_DIRECT_CALL;
+    *mode = LIVE_EDIT_FRAME_DROPPED_IN_DIRECT_CALL;
     pre_top_frame = frames[top_frame_index - 2];
     top_frame = frames[top_frame_index - 1];
   } else {
@@ -1557,7 +1557,7 @@
     if (frame->is_java_script()) {
       SharedFunctionInfo* shared =
           JavaScriptFrame::cast(frame)->function()->shared();
-      if (shared->is_resumable()) {
+      if (IsResumableFunction(shared->kind())) {
         non_droppable_frame_found = true;
         non_droppable_reason = LiveEdit::FUNCTION_BLOCKED_UNDER_GENERATOR;
         break;
@@ -1605,7 +1605,7 @@
     return target.GetNotFoundMessage();
   }
 
-  LiveEdit::FrameDropMode drop_mode = LiveEdit::FRAMES_UNTOUCHED;
+  LiveEditFrameDropMode drop_mode = LIVE_EDIT_FRAMES_UNTOUCHED;
   const char* error_message =
       DropFrames(frames, top_frame_index, bottom_js_frame_index, &drop_mode);
 
@@ -1900,25 +1900,19 @@
   Scope* current_scope = scope;
   while (current_scope != NULL) {
     HandleScope handle_scope(isolate_);
-    ZoneList<Variable*> stack_list(current_scope->StackLocalCount(), zone_);
-    ZoneList<Variable*> context_list(current_scope->ContextLocalCount(), zone_);
-    ZoneList<Variable*> globals_list(current_scope->ContextGlobalCount(),
-                                     zone_);
-    current_scope->CollectStackAndContextLocals(&stack_list, &context_list,
-                                                &globals_list);
-    context_list.Sort(&Variable::CompareIndex);
-
-    for (int i = 0; i < context_list.length(); i++) {
-      SetElementSloppy(scope_info_list, scope_info_length,
-                       context_list[i]->name());
-      scope_info_length++;
-      SetElementSloppy(
-          scope_info_list, scope_info_length,
-          Handle<Smi>(Smi::FromInt(context_list[i]->index()), isolate_));
-      scope_info_length++;
+    ZoneList<Variable*>* locals = current_scope->locals();
+    for (int i = 0; i < locals->length(); i++) {
+      Variable* var = locals->at(i);
+      if (!var->IsContextSlot()) continue;
+      int context_index = var->index() - Context::MIN_CONTEXT_SLOTS;
+      int location = scope_info_length + context_index * 2;
+      SetElementSloppy(scope_info_list, location, var->name());
+      SetElementSloppy(scope_info_list, location + 1,
+                       handle(Smi::FromInt(var->index()), isolate_));
     }
+    scope_info_length += current_scope->ContextLocalCount() * 2;
     SetElementSloppy(scope_info_list, scope_info_length,
-                     Handle<Object>(isolate_->heap()->null_value(), isolate_));
+                     isolate_->factory()->null_value());
     scope_info_length++;
 
     current_scope = current_scope->outer_scope();
diff --git a/src/debug/liveedit.h b/src/debug/liveedit.h
index 784f828..2034dcb 100644
--- a/src/debug/liveedit.h
+++ b/src/debug/liveedit.h
@@ -72,20 +72,6 @@
 
 class LiveEdit : AllStatic {
  public:
-  // Describes how exactly a frame has been dropped from stack.
-  enum FrameDropMode {
-    // No frame has been dropped.
-    FRAMES_UNTOUCHED,
-    // The top JS frame had been calling debug break slot stub. Patch the
-    // address this stub jumps to in the end.
-    FRAME_DROPPED_IN_DEBUG_SLOT_CALL,
-    // The top JS frame had been calling some C++ function. The return address
-    // gets patched automatically.
-    FRAME_DROPPED_IN_DIRECT_CALL,
-    FRAME_DROPPED_IN_RETURN_CALL,
-    CURRENTLY_SET_MODE
-  };
-
   static void InitializeThreadLocal(Debug* debug);
 
   static bool SetAfterBreakTarget(Debug* debug);
diff --git a/src/debug/mips/debug-mips.cc b/src/debug/mips/debug-mips.cc
index 49320d8..4d8b54f 100644
--- a/src/debug/mips/debug-mips.cc
+++ b/src/debug/mips/debug-mips.cc
@@ -4,9 +4,11 @@
 
 #if V8_TARGET_ARCH_MIPS
 
-#include "src/codegen.h"
 #include "src/debug/debug.h"
 
+#include "src/codegen.h"
+#include "src/debug/liveedit.h"
+
 namespace v8 {
 namespace internal {
 
diff --git a/src/debug/mips64/debug-mips64.cc b/src/debug/mips64/debug-mips64.cc
index 2e967d7..2a6ce7b 100644
--- a/src/debug/mips64/debug-mips64.cc
+++ b/src/debug/mips64/debug-mips64.cc
@@ -4,9 +4,11 @@
 
 #if V8_TARGET_ARCH_MIPS64
 
-#include "src/codegen.h"
 #include "src/debug/debug.h"
 
+#include "src/codegen.h"
+#include "src/debug/liveedit.h"
+
 namespace v8 {
 namespace internal {
 
diff --git a/src/debug/ppc/debug-ppc.cc b/src/debug/ppc/debug-ppc.cc
index 7facf95..e57aa3c 100644
--- a/src/debug/ppc/debug-ppc.cc
+++ b/src/debug/ppc/debug-ppc.cc
@@ -4,9 +4,11 @@
 
 #if V8_TARGET_ARCH_PPC
 
-#include "src/codegen.h"
 #include "src/debug/debug.h"
 
+#include "src/codegen.h"
+#include "src/debug/liveedit.h"
+
 namespace v8 {
 namespace internal {
 
diff --git a/src/debug/s390/debug-s390.cc b/src/debug/s390/debug-s390.cc
index 9c33b95..b745d5b 100644
--- a/src/debug/s390/debug-s390.cc
+++ b/src/debug/s390/debug-s390.cc
@@ -6,9 +6,11 @@
 
 #if V8_TARGET_ARCH_S390
 
-#include "src/codegen.h"
 #include "src/debug/debug.h"
 
+#include "src/codegen.h"
+#include "src/debug/liveedit.h"
+
 namespace v8 {
 namespace internal {
 
diff --git a/src/debug/x64/debug-x64.cc b/src/debug/x64/debug-x64.cc
index 910d1ca..4f80e18 100644
--- a/src/debug/x64/debug-x64.cc
+++ b/src/debug/x64/debug-x64.cc
@@ -4,10 +4,11 @@
 
 #if V8_TARGET_ARCH_X64
 
-#include "src/assembler.h"
-#include "src/codegen.h"
 #include "src/debug/debug.h"
 
+#include "src/assembler.h"
+#include "src/codegen.h"
+#include "src/debug/liveedit.h"
 
 namespace v8 {
 namespace internal {
diff --git a/src/debug/x87/debug-x87.cc b/src/debug/x87/debug-x87.cc
index 1cbdf45..c29eac1 100644
--- a/src/debug/x87/debug-x87.cc
+++ b/src/debug/x87/debug-x87.cc
@@ -4,8 +4,10 @@
 
 #if V8_TARGET_ARCH_X87
 
-#include "src/codegen.h"
 #include "src/debug/debug.h"
+
+#include "src/codegen.h"
+#include "src/debug/liveedit.h"
 #include "src/x87/frames-x87.h"
 
 namespace v8 {
diff --git a/src/deoptimize-reason.cc b/src/deoptimize-reason.cc
index 87c8905..b0ee780 100644
--- a/src/deoptimize-reason.cc
+++ b/src/deoptimize-reason.cc
@@ -23,7 +23,7 @@
   return static_cast<uint8_t>(reason);
 }
 
-char const* const DeoptimizeReasonToString(DeoptimizeReason reason) {
+char const* DeoptimizeReasonToString(DeoptimizeReason reason) {
   static char const* kDeoptimizeReasonStrings[] = {
 #define DEOPTIMIZE_REASON(Name, message) message,
       DEOPTIMIZE_REASON_LIST(DEOPTIMIZE_REASON)
diff --git a/src/deoptimize-reason.h b/src/deoptimize-reason.h
index 60e0a59..d28ec47 100644
--- a/src/deoptimize-reason.h
+++ b/src/deoptimize-reason.h
@@ -23,6 +23,7 @@
   V(ForcedDeoptToRuntime, "Forced deopt to runtime")                          \
   V(Hole, "hole")                                                             \
   V(InstanceMigrationFailed, "instance migration failed")                     \
+  V(InsufficientTypeFeedbackForCall, "Insufficient type feedback for call")   \
   V(InsufficientTypeFeedbackForCallWithArguments,                             \
     "Insufficient type feedback for call with arguments")                     \
   V(FastPathFailed, "Falling off the fast path")                              \
@@ -68,7 +69,6 @@
     "Unexpected cell contents in global store")                               \
   V(UnexpectedObject, "unexpected object")                                    \
   V(UnexpectedRHSOfBinaryOperation, "Unexpected RHS of binary operation")     \
-  V(UninitializedBoilerplateLiterals, "Uninitialized boilerplate literals")   \
   V(UnknownMapInPolymorphicAccess, "Unknown map in polymorphic access")       \
   V(UnknownMapInPolymorphicCall, "Unknown map in polymorphic call")           \
   V(UnknownMapInPolymorphicElementAccess,                                     \
@@ -90,7 +90,7 @@
 
 size_t hash_value(DeoptimizeReason reason);
 
-char const* const DeoptimizeReasonToString(DeoptimizeReason reason);
+char const* DeoptimizeReasonToString(DeoptimizeReason reason);
 
 }  // namespace internal
 }  // namespace v8
diff --git a/src/deoptimizer.cc b/src/deoptimizer.cc
index d4756ff..971de9e 100644
--- a/src/deoptimizer.cc
+++ b/src/deoptimizer.cc
@@ -73,13 +73,8 @@
                               Address from,
                               int fp_to_sp_delta,
                               Isolate* isolate) {
-  Deoptimizer* deoptimizer = new Deoptimizer(isolate,
-                                             function,
-                                             type,
-                                             bailout_id,
-                                             from,
-                                             fp_to_sp_delta,
-                                             NULL);
+  Deoptimizer* deoptimizer = new Deoptimizer(isolate, function, type,
+                                             bailout_id, from, fp_to_sp_delta);
   CHECK(isolate->deoptimizer_data()->current_ == NULL);
   isolate->deoptimizer_data()->current_ = deoptimizer;
   return deoptimizer;
@@ -108,23 +103,6 @@
   return result;
 }
 
-
-int Deoptimizer::ConvertJSFrameIndexToFrameIndex(int jsframe_index) {
-  if (jsframe_index == 0) return 0;
-
-  int frame_index = 0;
-  while (jsframe_index >= 0) {
-    FrameDescription* frame = output_[frame_index];
-    if (frame->GetFrameType() == StackFrame::JAVA_SCRIPT) {
-      jsframe_index--;
-    }
-    frame_index++;
-  }
-
-  return frame_index - 1;
-}
-
-
 DeoptimizedFrameInfo* Deoptimizer::DebuggerInspectableFrame(
     JavaScriptFrame* frame,
     int jsframe_index,
@@ -366,8 +344,7 @@
   RuntimeCallTimerScope runtimeTimer(isolate,
                                      &RuntimeCallStats::DeoptimizeCode);
   TimerEventScope<TimerEventDeoptimizeCode> timer(isolate);
-  TRACE_EVENT_RUNTIME_CALL_STATS_TRACING_SCOPED(
-      isolate, &tracing::TraceEventStatsTable::DeoptimizeCode);
+  TRACE_EVENT0("v8", "V8.DeoptimizeCode");
   if (FLAG_trace_deopt) {
     CodeTracer::Scope scope(isolate->GetCodeTracer());
     PrintF(scope.file(), "[deoptimize all code in all contexts]\n");
@@ -388,8 +365,7 @@
   RuntimeCallTimerScope runtimeTimer(isolate,
                                      &RuntimeCallStats::DeoptimizeCode);
   TimerEventScope<TimerEventDeoptimizeCode> timer(isolate);
-  TRACE_EVENT_RUNTIME_CALL_STATS_TRACING_SCOPED(
-      isolate, &tracing::TraceEventStatsTable::DeoptimizeCode);
+  TRACE_EVENT0("v8", "V8.DeoptimizeCode");
   if (FLAG_trace_deopt) {
     CodeTracer::Scope scope(isolate->GetCodeTracer());
     PrintF(scope.file(), "[deoptimize marked code in all contexts]\n");
@@ -422,8 +398,7 @@
   RuntimeCallTimerScope runtimeTimer(isolate,
                                      &RuntimeCallStats::DeoptimizeCode);
   TimerEventScope<TimerEventDeoptimizeCode> timer(isolate);
-  TRACE_EVENT_RUNTIME_CALL_STATS_TRACING_SCOPED(
-      isolate, &tracing::TraceEventStatsTable::DeoptimizeCode);
+  TRACE_EVENT0("v8", "V8.DeoptimizeCode");
   Code* code = function->code();
   if (code->kind() == Code::OPTIMIZED_FUNCTION) {
     // Mark the code for deoptimization and unlink any functions that also
@@ -439,19 +414,9 @@
   deoptimizer->DoComputeOutputFrames();
 }
 
-
-bool Deoptimizer::TraceEnabledFor(BailoutType deopt_type,
-                                  StackFrame::Type frame_type) {
-  switch (deopt_type) {
-    case EAGER:
-    case SOFT:
-    case LAZY:
-      return (frame_type == StackFrame::STUB)
-          ? FLAG_trace_stub_failures
-          : FLAG_trace_deopt;
-  }
-  FATAL("Unsupported deopt type");
-  return false;
+bool Deoptimizer::TraceEnabledFor(StackFrame::Type frame_type) {
+  return (frame_type == StackFrame::STUB) ? FLAG_trace_stub_failures
+                                          : FLAG_trace_deopt;
 }
 
 
@@ -467,7 +432,7 @@
 
 Deoptimizer::Deoptimizer(Isolate* isolate, JSFunction* function,
                          BailoutType type, unsigned bailout_id, Address from,
-                         int fp_to_sp_delta, Code* optimized_code)
+                         int fp_to_sp_delta)
     : isolate_(isolate),
       function_(function),
       bailout_id_(bailout_id),
@@ -510,7 +475,7 @@
       function->shared()->set_opt_count(opt_count);
     }
   }
-  compiled_code_ = FindOptimizedCode(function, optimized_code);
+  compiled_code_ = FindOptimizedCode(function);
 #if DEBUG
   DCHECK(compiled_code_ != NULL);
   if (type == EAGER || type == SOFT || type == LAZY) {
@@ -521,8 +486,9 @@
   StackFrame::Type frame_type = function == NULL
       ? StackFrame::STUB
       : StackFrame::JAVA_SCRIPT;
-  trace_scope_ = TraceEnabledFor(type, frame_type) ?
-      new CodeTracer::Scope(isolate->GetCodeTracer()) : NULL;
+  trace_scope_ = TraceEnabledFor(frame_type)
+                     ? new CodeTracer::Scope(isolate->GetCodeTracer())
+                     : NULL;
 #ifdef DEBUG
   CHECK(AllowHeapAllocation::IsAllowed());
   disallow_heap_allocation_ = new DisallowHeapAllocation();
@@ -539,21 +505,11 @@
   input_->SetFrameType(frame_type);
 }
 
-
-Code* Deoptimizer::FindOptimizedCode(JSFunction* function,
-                                     Code* optimized_code) {
-  switch (bailout_type_) {
-    case Deoptimizer::SOFT:
-    case Deoptimizer::EAGER:
-    case Deoptimizer::LAZY: {
-      Code* compiled_code = FindDeoptimizingCode(from_);
-      return (compiled_code == NULL)
-          ? static_cast<Code*>(isolate_->FindCodeObject(from_))
-          : compiled_code;
-    }
-  }
-  FATAL("Could not find code for optimized function");
-  return NULL;
+Code* Deoptimizer::FindOptimizedCode(JSFunction* function) {
+  Code* compiled_code = FindDeoptimizingCode(from_);
+  return (compiled_code == NULL)
+             ? static_cast<Code*>(isolate_->FindCodeObject(from_))
+             : compiled_code;
 }
 
 
@@ -912,6 +868,10 @@
                                  output_offset);
   }
 
+  if (trace_scope_ != nullptr) {
+    PrintF(trace_scope_->file(), "    -------------------------\n");
+  }
+
   // There are no translation commands for the caller's pc and fp, the
   // context, and the function.  Synthesize their values and set them up
   // explicitly.
@@ -969,11 +929,11 @@
   // so long as we don't inline functions that need local contexts.
   output_offset -= kPointerSize;
 
-  TranslatedFrame::iterator context_pos = value_iterator;
-  int context_input_index = input_index;
   // When deoptimizing into a catch block, we need to take the context
   // from just above the top of the operand stack (we push the context
   // at the entry of the try block).
+  TranslatedFrame::iterator context_pos = value_iterator;
+  int context_input_index = input_index;
   if (goto_catch_handler) {
     for (unsigned i = 0; i < height + 1; ++i) {
       context_pos++;
@@ -991,10 +951,6 @@
   }
   value = reinterpret_cast<intptr_t>(context);
   output_frame->SetContext(value);
-  if (is_topmost) {
-    Register context_reg = JavaScriptFrame::context_register();
-    output_frame->SetRegister(context_reg.code(), value);
-  }
   WriteValueToOutput(context, context_input_index, frame_index, output_offset,
                      "context    ");
   if (context == isolate_->heap()->arguments_marker()) {
@@ -1011,6 +967,10 @@
   value = reinterpret_cast<intptr_t>(function);
   WriteValueToOutput(function, 0, frame_index, output_offset, "function    ");
 
+  if (trace_scope_ != nullptr) {
+    PrintF(trace_scope_->file(), "    -------------------------\n");
+  }
+
   // Translate the rest of the frame.
   for (unsigned i = 0; i < height; ++i) {
     output_offset -= kPointerSize;
@@ -1060,6 +1020,15 @@
           : FullCodeGenerator::BailoutStateField::decode(pc_and_state);
   output_frame->SetState(Smi::FromInt(static_cast<int>(state)));
 
+  // Clear the context register. The context might be a de-materialized object
+  // and will be materialized by {Runtime_NotifyDeoptimized}. For additional
+  // safety we use Smi(0) instead of the potential {arguments_marker} here.
+  if (is_topmost) {
+    intptr_t context_value = reinterpret_cast<intptr_t>(Smi::FromInt(0));
+    Register context_reg = JavaScriptFrame::context_register();
+    output_frame->SetRegister(context_reg.code(), context_value);
+  }
+
   // Set the continuation for the topmost frame.
   if (is_topmost) {
     Builtins* builtins = isolate_->builtins();
@@ -1082,11 +1051,20 @@
   SharedFunctionInfo* shared = translated_frame->raw_shared_info();
 
   TranslatedFrame::iterator value_iterator = translated_frame->begin();
+  bool is_bottommost = (0 == frame_index);
+  bool is_topmost = (output_count_ - 1 == frame_index);
   int input_index = 0;
 
   int bytecode_offset = translated_frame->node_id().ToInt();
   unsigned height = translated_frame->height();
   unsigned height_in_bytes = height * kPointerSize;
+
+  // All translations for interpreted frames contain the accumulator and hence
+  // are assumed to be in bailout state {BailoutState::TOS_REGISTER}. However
+  // such a state is only supported for the topmost frame. We need to skip
+  // pushing the accumulator for any non-topmost frame.
+  if (!is_topmost) height_in_bytes -= kPointerSize;
+
   JSFunction* function = JSFunction::cast(value_iterator->GetRawValue());
   value_iterator++;
   input_index++;
@@ -1113,8 +1091,6 @@
       FrameDescription(output_frame_size, parameter_count);
   output_frame->SetFrameType(StackFrame::INTERPRETED);
 
-  bool is_bottommost = (0 == frame_index);
-  bool is_topmost = (output_count_ - 1 == frame_index);
   CHECK(frame_index >= 0 && frame_index < output_count_);
   CHECK_NULL(output_[frame_index]);
   output_[frame_index] = output_frame;
@@ -1137,6 +1113,10 @@
                                  output_offset);
   }
 
+  if (trace_scope_ != nullptr) {
+    PrintF(trace_scope_->file(), "    -------------------------\n");
+  }
+
   // There are no translation commands for the caller's pc and fp, the
   // context, the function, new.target and the bytecode offset.  Synthesize
   // their values and set them up
@@ -1193,7 +1173,6 @@
   // For the bottommost output frame the context can be gotten from the input
   // frame. For all subsequent output frames it can be gotten from the function
   // so long as we don't inline functions that need local contexts.
-  Register context_reg = InterpretedFrame::context_register();
   output_offset -= kPointerSize;
 
   // When deoptimizing into a catch block, we need to take the context
@@ -1210,13 +1189,16 @@
   }
   // Read the context from the translations.
   Object* context = context_pos->GetRawValue();
-  // The context should not be a placeholder for a materialized object.
-  CHECK(context != isolate_->heap()->arguments_marker());
   value = reinterpret_cast<intptr_t>(context);
   output_frame->SetContext(value);
-  if (is_topmost) output_frame->SetRegister(context_reg.code(), value);
   WriteValueToOutput(context, context_input_index, frame_index, output_offset,
                      "context    ");
+  if (context == isolate_->heap()->arguments_marker()) {
+    Address output_address =
+        reinterpret_cast<Address>(output_[frame_index]->GetTop()) +
+        output_offset;
+    values_to_materialize_.push_back({output_address, context_pos});
+  }
   value_iterator++;
   input_index++;
 
@@ -1248,6 +1230,10 @@
   WriteValueToOutput(smi_bytecode_offset, 0, frame_index, output_offset,
                      "bytecode offset ");
 
+  if (trace_scope_ != nullptr) {
+    PrintF(trace_scope_->file(), "    -------------------------\n");
+  }
+
   // Translate the rest of the interpreter registers in the frame.
   for (unsigned i = 0; i < height - 1; ++i) {
     output_offset -= kPointerSize;
@@ -1255,20 +1241,30 @@
                                  output_offset);
   }
 
-  // Put the accumulator on the stack. It will be popped by the
-  // InterpreterNotifyDeopt builtin (possibly after materialization).
-  output_offset -= kPointerSize;
-  if (goto_catch_handler) {
-    // If we are lazy deopting to a catch handler, we set the accumulator to
-    // the exception (which lives in the result register).
-    intptr_t accumulator_value =
-        input_->GetRegister(FullCodeGenerator::result_register().code());
-    WriteValueToOutput(reinterpret_cast<Object*>(accumulator_value), 0,
-                       frame_index, output_offset, "accumulator ");
-    value_iterator++;
+  // Translate the accumulator register (depending on frame position).
+  if (is_topmost) {
+    // For the topmost frame, put the accumulator on the stack. The bailout
+    // for interpreted frames is always set to {BailoutState::TOS_REGISTER} and
+    // the {NotifyDeoptimized} builtin pops it off the topmost frame (possibly
+    // after materialization).
+    output_offset -= kPointerSize;
+    if (goto_catch_handler) {
+      // If we are lazy deopting to a catch handler, we set the accumulator to
+      // the exception (which lives in the result register).
+      intptr_t accumulator_value =
+          input_->GetRegister(FullCodeGenerator::result_register().code());
+      WriteValueToOutput(reinterpret_cast<Object*>(accumulator_value), 0,
+                         frame_index, output_offset, "accumulator ");
+      value_iterator++;
+    } else {
+      WriteTranslatedValueToOutput(&value_iterator, &input_index, frame_index,
+                                   output_offset, "accumulator ");
+    }
   } else {
-    WriteTranslatedValueToOutput(&value_iterator, &input_index, frame_index,
-                                 output_offset);
+    // For non-topmost frames, skip the accumulator translation. For those
+    // frames, the return value from the callee will become the accumulator.
+    value_iterator++;
+    input_index++;
   }
   CHECK_EQ(0u, output_offset);
 
@@ -1292,6 +1288,15 @@
     }
   }
 
+  // Clear the context register. The context might be a de-materialized object
+  // and will be materialized by {Runtime_NotifyDeoptimized}. For additional
+  // safety we use Smi(0) instead of the potential {arguments_marker} here.
+  if (is_topmost) {
+    intptr_t context_value = reinterpret_cast<intptr_t>(Smi::FromInt(0));
+    Register context_reg = JavaScriptFrame::context_register();
+    output_frame->SetRegister(context_reg.code(), context_value);
+  }
+
   // Set the continuation for the topmost frame.
   if (is_topmost) {
     Code* continuation = builtins->builtin(Builtins::kNotifyDeoptimized);
@@ -1595,10 +1600,6 @@
   output_offset -= kPointerSize;
   value = output_[frame_index - 1]->GetContext();
   output_frame->SetFrameSlot(output_offset, value);
-  if (is_topmost) {
-    Register context_reg = JavaScriptFrame::context_register();
-    output_frame->SetRegister(context_reg.code(), value);
-  }
   DebugPrintOutputSlot(value, frame_index, output_offset, "context\n");
 
   // The allocation site.
@@ -1654,6 +1655,15 @@
     }
   }
 
+  // Clear the context register. The context might be a de-materialized object
+  // and will be materialized by {Runtime_NotifyDeoptimized}. For additional
+  // safety we use Smi(0) instead of the potential {arguments_marker} here.
+  if (is_topmost) {
+    intptr_t context_value = reinterpret_cast<intptr_t>(Smi::FromInt(0));
+    Register context_reg = JavaScriptFrame::context_register();
+    output_frame->SetRegister(context_reg.code(), context_value);
+  }
+
   // Set the continuation for the topmost frame.
   if (is_topmost) {
     Builtins* builtins = isolate_->builtins();
@@ -1780,10 +1790,6 @@
   output_offset -= kPointerSize;
   value = output_[frame_index - 1]->GetContext();
   output_frame->SetFrameSlot(output_offset, value);
-  if (is_topmost) {
-    Register context_reg = JavaScriptFrame::context_register();
-    output_frame->SetRegister(context_reg.code(), value);
-  }
   DebugPrintOutputSlot(value, frame_index, output_offset, "context\n");
 
   // Skip receiver.
@@ -1833,6 +1839,15 @@
     }
   }
 
+  // Clear the context register. The context might be a de-materialized object
+  // and will be materialized by {Runtime_NotifyDeoptimized}. For additional
+  // safety we use Smi(0) instead of the potential {arguments_marker} here.
+  if (is_topmost) {
+    intptr_t context_value = reinterpret_cast<intptr_t>(Smi::FromInt(0));
+    Register context_reg = JavaScriptFrame::context_register();
+    output_frame->SetRegister(context_reg.code(), context_value);
+  }
+
   // Set the continuation for the topmost frame.
   if (is_topmost) {
     Builtins* builtins = isolate_->builtins();
@@ -2214,15 +2229,6 @@
   return height * kPointerSize;
 }
 
-
-Object* Deoptimizer::ComputeLiteral(int index) const {
-  DeoptimizationInputData* data =
-      DeoptimizationInputData::cast(compiled_code_->deoptimization_data());
-  FixedArray* literals = data->LiteralArray();
-  return literals->get(index);
-}
-
-
 void Deoptimizer::EnsureCodeForDeoptimizationEntry(Isolate* isolate,
                                                    BailoutType type,
                                                    int max_entry_id) {
@@ -2281,33 +2287,6 @@
   }
 }
 
-
-int FrameDescription::ComputeFixedSize() {
-  if (type_ == StackFrame::INTERPRETED) {
-    return InterpreterFrameConstants::kFixedFrameSize +
-           parameter_count() * kPointerSize;
-  } else {
-    return StandardFrameConstants::kFixedFrameSize +
-           parameter_count() * kPointerSize;
-  }
-}
-
-
-unsigned FrameDescription::GetOffsetFromSlotIndex(int slot_index) {
-  if (slot_index >= 0) {
-    // Local or spill slots. Skip the fixed part of the frame
-    // including all arguments.
-    unsigned base = GetFrameSize() - ComputeFixedSize();
-    return base - ((slot_index + 1) * kPointerSize);
-  } else {
-    // Incoming parameter.
-    int arg_size = parameter_count() * kPointerSize;
-    unsigned base = GetFrameSize() - arg_size;
-    return base - ((slot_index + 1) * kPointerSize);
-  }
-}
-
-
 void TranslationBuffer::Add(int32_t value, Zone* zone) {
   // This wouldn't handle kMinInt correctly if it ever encountered it.
   DCHECK(value != kMinInt);
@@ -3746,8 +3725,8 @@
           return object;
         }
         case JS_ARRAY_TYPE: {
-          Handle<JSArray> object =
-              isolate_->factory()->NewJSArray(0, map->elements_kind());
+          Handle<JSArray> object = Handle<JSArray>::cast(
+              isolate_->factory()->NewJSObjectFromMap(map, NOT_TENURED));
           slot->value_ = object;
           Handle<Object> properties = MaterializeAt(frame_index, value_index);
           Handle<Object> elements = MaterializeAt(frame_index, value_index);
@@ -3758,14 +3737,15 @@
           return object;
         }
         case JS_FUNCTION_TYPE: {
+          Handle<SharedFunctionInfo> temporary_shared =
+              isolate_->factory()->NewSharedFunctionInfo(
+                  isolate_->factory()->empty_string(), MaybeHandle<Code>(),
+                  false);
           Handle<JSFunction> object =
               isolate_->factory()->NewFunctionFromSharedFunctionInfo(
-                  handle(isolate_->object_function()->shared()),
-                  handle(isolate_->context()));
+                  map, temporary_shared, isolate_->factory()->undefined_value(),
+                  NOT_TENURED);
           slot->value_ = object;
-          // We temporarily allocated a JSFunction for the {Object} function
-          // within the current context, to break cycles in the object graph.
-          // The correct function and context will be set below once available.
           Handle<Object> properties = MaterializeAt(frame_index, value_index);
           Handle<Object> elements = MaterializeAt(frame_index, value_index);
           Handle<Object> prototype = MaterializeAt(frame_index, value_index);
@@ -3786,6 +3766,36 @@
           CHECK(next_link->IsUndefined(isolate_));
           return object;
         }
+        case CONS_STRING_TYPE: {
+          Handle<ConsString> object = Handle<ConsString>::cast(
+              isolate_->factory()
+                  ->NewConsString(isolate_->factory()->undefined_string(),
+                                  isolate_->factory()->undefined_string())
+                  .ToHandleChecked());
+          slot->value_ = object;
+          Handle<Object> hash = MaterializeAt(frame_index, value_index);
+          Handle<Object> length = MaterializeAt(frame_index, value_index);
+          Handle<Object> first = MaterializeAt(frame_index, value_index);
+          Handle<Object> second = MaterializeAt(frame_index, value_index);
+          object->set_map(*map);
+          object->set_length(Smi::cast(*length)->value());
+          object->set_first(String::cast(*first));
+          object->set_second(String::cast(*second));
+          CHECK(hash->IsNumber());  // The {Name::kEmptyHashField} value.
+          return object;
+        }
+        case CONTEXT_EXTENSION_TYPE: {
+          Handle<ContextExtension> object =
+              isolate_->factory()->NewContextExtension(
+                  isolate_->factory()->NewScopeInfo(1),
+                  isolate_->factory()->undefined_value());
+          slot->value_ = object;
+          Handle<Object> scope_info = MaterializeAt(frame_index, value_index);
+          Handle<Object> extension = MaterializeAt(frame_index, value_index);
+          object->set_scope_info(ScopeInfo::cast(*scope_info));
+          object->set_extension(*extension);
+          return object;
+        }
         case FIXED_ARRAY_TYPE: {
           Handle<Object> lengthObject = MaterializeAt(frame_index, value_index);
           int32_t length = 0;
diff --git a/src/deoptimizer.h b/src/deoptimizer.h
index 7822d1c..4fb7851 100644
--- a/src/deoptimizer.h
+++ b/src/deoptimizer.h
@@ -383,8 +383,7 @@
     bool needs_frame;
   };
 
-  static bool TraceEnabledFor(BailoutType deopt_type,
-                              StackFrame::Type frame_type);
+  static bool TraceEnabledFor(StackFrame::Type frame_type);
   static const char* MessageFor(BailoutType type);
 
   int output_count() const { return output_count_; }
@@ -500,8 +499,6 @@
     int count_;
   };
 
-  int ConvertJSFrameIndexToFrameIndex(int jsframe_index);
-
   static size_t GetMaxDeoptTableSize();
 
   static void EnsureCodeForDeoptimizationEntry(Isolate* isolate,
@@ -514,14 +511,9 @@
   static const int kMinNumberOfEntries = 64;
   static const int kMaxNumberOfEntries = 16384;
 
-  Deoptimizer(Isolate* isolate,
-              JSFunction* function,
-              BailoutType type,
-              unsigned bailout_id,
-              Address from,
-              int fp_to_sp_delta,
-              Code* optimized_code);
-  Code* FindOptimizedCode(JSFunction* function, Code* optimized_code);
+  Deoptimizer(Isolate* isolate, JSFunction* function, BailoutType type,
+              unsigned bailout_id, Address from, int fp_to_sp_delta);
+  Code* FindOptimizedCode(JSFunction* function);
   void PrintFunctionName();
   void DeleteFrameDescriptions();
 
@@ -560,8 +552,6 @@
   static unsigned ComputeIncomingArgumentSize(SharedFunctionInfo* shared);
   static unsigned ComputeOutgoingArgumentSize(Code* code, unsigned bailout_id);
 
-  Object* ComputeLiteral(int index) const;
-
   static void GenerateDeoptimizationEntries(
       MacroAssembler* masm, int count, BailoutType type);
 
@@ -711,8 +701,6 @@
     return static_cast<uint32_t>(frame_size_);
   }
 
-  unsigned GetOffsetFromSlotIndex(int slot_index);
-
   intptr_t GetFrameSlot(unsigned offset) {
     return *GetFrameSlotPointer(offset);
   }
@@ -833,8 +821,6 @@
     return reinterpret_cast<intptr_t*>(
         reinterpret_cast<Address>(this) + frame_content_offset() + offset);
   }
-
-  int ComputeFixedSize();
 };
 
 
diff --git a/src/effects.h b/src/effects.h
index 0204718..f8b1bd9 100644
--- a/src/effects.h
+++ b/src/effects.h
@@ -5,7 +5,7 @@
 #ifndef V8_EFFECTS_H_
 #define V8_EFFECTS_H_
 
-#include "src/types.h"
+#include "src/ast/ast-types.h"
 
 namespace v8 {
 namespace internal {
@@ -28,31 +28,31 @@
   enum Modality { POSSIBLE, DEFINITE };
 
   Modality modality;
-  Bounds bounds;
+  AstBounds bounds;
 
   Effect() : modality(DEFINITE) {}
-  explicit Effect(Bounds b, Modality m = DEFINITE) : modality(m), bounds(b) {}
+  explicit Effect(AstBounds b, Modality m = DEFINITE)
+      : modality(m), bounds(b) {}
 
   // The unknown effect.
   static Effect Unknown(Zone* zone) {
-    return Effect(Bounds::Unbounded(), POSSIBLE);
+    return Effect(AstBounds::Unbounded(), POSSIBLE);
   }
 
   static Effect Forget(Zone* zone) {
-    return Effect(Bounds::Unbounded(), DEFINITE);
+    return Effect(AstBounds::Unbounded(), DEFINITE);
   }
 
   // Sequential composition, as in 'e1; e2'.
   static Effect Seq(Effect e1, Effect e2, Zone* zone) {
     if (e2.modality == DEFINITE) return e2;
-    return Effect(Bounds::Either(e1.bounds, e2.bounds, zone), e1.modality);
+    return Effect(AstBounds::Either(e1.bounds, e2.bounds, zone), e1.modality);
   }
 
   // Alternative composition, as in 'cond ? e1 : e2'.
   static Effect Alt(Effect e1, Effect e2, Zone* zone) {
-    return Effect(
-        Bounds::Either(e1.bounds, e2.bounds, zone),
-        e1.modality == POSSIBLE ? POSSIBLE : e2.modality);
+    return Effect(AstBounds::Either(e1.bounds, e2.bounds, zone),
+                  e1.modality == POSSIBLE ? POSSIBLE : e2.modality);
   }
 };
 
@@ -84,10 +84,10 @@
         ? locator.value() : Effect::Unknown(Base::zone());
   }
 
-  Bounds LookupBounds(Var var) {
+  AstBounds LookupBounds(Var var) {
     Effect effect = Lookup(var);
-    return effect.modality == Effect::DEFINITE
-        ? effect.bounds : Bounds::Unbounded();
+    return effect.modality == Effect::DEFINITE ? effect.bounds
+                                               : AstBounds::Unbounded();
   }
 
   // Sequential composition.
diff --git a/src/elements.cc b/src/elements.cc
index 56d8001..fb73d6c 100644
--- a/src/elements.cc
+++ b/src/elements.cc
@@ -911,6 +911,30 @@
     Subclass::GrowCapacityAndConvertImpl(object, capacity);
   }
 
+  bool GrowCapacity(Handle<JSObject> object, uint32_t index) final {
+    // This function is intended to be called from optimized code. We don't
+    // want to trigger lazy deopts there, so refuse to handle cases that would.
+    if (object->map()->is_prototype_map() ||
+        object->WouldConvertToSlowElements(index)) {
+      return false;
+    }
+    Handle<FixedArrayBase> old_elements(object->elements());
+    uint32_t new_capacity = JSObject::NewElementsCapacity(index + 1);
+    DCHECK(static_cast<uint32_t>(old_elements->length()) < new_capacity);
+    Handle<FixedArrayBase> elements =
+        ConvertElementsWithCapacity(object, old_elements, kind(), new_capacity);
+
+    DCHECK_EQ(object->GetElementsKind(), kind());
+    // Transition through the allocation site as well if present.
+    if (JSObject::UpdateAllocationSite<AllocationSiteUpdateMode::kCheckOnly>(
+            object, kind())) {
+      return false;
+    }
+
+    object->set_elements(*elements);
+    return true;
+  }
+
   void Delete(Handle<JSObject> obj, uint32_t entry) final {
     Subclass::DeleteImpl(obj, entry);
   }
@@ -1165,13 +1189,13 @@
   static uint32_t GetEntryForIndexImpl(JSObject* holder,
                                        FixedArrayBase* backing_store,
                                        uint32_t index, PropertyFilter filter) {
+    uint32_t length = Subclass::GetMaxIndex(holder, backing_store);
     if (IsHoleyElementsKind(kind())) {
-      return index < Subclass::GetCapacityImpl(holder, backing_store) &&
+      return index < length &&
                      !BackingStore::cast(backing_store)->is_the_hole(index)
                  ? index
                  : kMaxUInt32;
     } else {
-      uint32_t length = Subclass::GetMaxIndex(holder, backing_store);
       return index < length ? index : kMaxUInt32;
     }
   }
@@ -2922,8 +2946,7 @@
     FixedArray* parameter_map = FixedArray::cast(parameters);
     uint32_t length = parameter_map->length() - 2;
     if (entry < length) {
-      return !GetParameterMapArg(parameter_map, entry)
-                  ->IsTheHole(parameter_map->GetIsolate());
+      return HasParameterMapArg(parameter_map, entry);
     }
 
     FixedArrayBase* arguments = FixedArrayBase::cast(parameter_map->get(1));
@@ -2951,8 +2974,7 @@
                                        FixedArrayBase* parameters,
                                        uint32_t index, PropertyFilter filter) {
     FixedArray* parameter_map = FixedArray::cast(parameters);
-    Object* probe = GetParameterMapArg(parameter_map, index);
-    if (!probe->IsTheHole(holder->GetIsolate())) return index;
+    if (HasParameterMapArg(parameter_map, index)) return index;
 
     FixedArray* arguments = FixedArray::cast(parameter_map->get(1));
     uint32_t entry = ArgumentsAccessor::GetEntryForIndexImpl(holder, arguments,
@@ -2971,11 +2993,11 @@
     return ArgumentsAccessor::GetDetailsImpl(arguments, entry - length);
   }
 
-  static Object* GetParameterMapArg(FixedArray* parameter_map, uint32_t index) {
+  static bool HasParameterMapArg(FixedArray* parameter_map, uint32_t index) {
     uint32_t length = parameter_map->length() - 2;
-    return index < length
-               ? parameter_map->get(index + 2)
-               : Object::cast(parameter_map->GetHeap()->the_hole_value());
+    if (index >= length) return false;
+    return !parameter_map->get(index + 2)->IsTheHole(
+        parameter_map->GetIsolate());
   }
 
   static void DeleteImpl(Handle<JSObject> obj, uint32_t entry) {
@@ -3012,7 +3034,7 @@
       Handle<FixedArrayBase> backing_store, GetKeysConversion convert,
       PropertyFilter filter, Handle<FixedArray> list, uint32_t* nof_indices,
       uint32_t insertion_index = 0) {
-    FixedArray* parameter_map = FixedArray::cast(*backing_store);
+    Handle<FixedArray> parameter_map(FixedArray::cast(*backing_store), isolate);
     uint32_t length = parameter_map->length() - 2;
 
     for (uint32_t i = 0; i < length; ++i) {
@@ -3038,18 +3060,19 @@
                                        uint32_t start_from, uint32_t length) {
     DCHECK(JSObject::PrototypeHasNoElements(isolate, *object));
     Handle<Map> original_map = handle(object->map(), isolate);
-    FixedArray* parameter_map = FixedArray::cast(object->elements());
+    Handle<FixedArray> parameter_map(FixedArray::cast(object->elements()),
+                                     isolate);
     bool search_for_hole = value->IsUndefined(isolate);
 
     for (uint32_t k = start_from; k < length; ++k) {
       uint32_t entry =
-          GetEntryForIndexImpl(*object, parameter_map, k, ALL_PROPERTIES);
+          GetEntryForIndexImpl(*object, *parameter_map, k, ALL_PROPERTIES);
       if (entry == kMaxUInt32) {
         if (search_for_hole) return Just(true);
         continue;
       }
 
-      Handle<Object> element_k = GetImpl(parameter_map, entry);
+      Handle<Object> element_k = GetImpl(*parameter_map, entry);
 
       if (element_k->IsAccessorPair()) {
         LookupIterator it(isolate, object, k, LookupIterator::OWN);
@@ -3078,16 +3101,17 @@
                                          uint32_t start_from, uint32_t length) {
     DCHECK(JSObject::PrototypeHasNoElements(isolate, *object));
     Handle<Map> original_map = handle(object->map(), isolate);
-    FixedArray* parameter_map = FixedArray::cast(object->elements());
+    Handle<FixedArray> parameter_map(FixedArray::cast(object->elements()),
+                                     isolate);
 
     for (uint32_t k = start_from; k < length; ++k) {
       uint32_t entry =
-          GetEntryForIndexImpl(*object, parameter_map, k, ALL_PROPERTIES);
+          GetEntryForIndexImpl(*object, *parameter_map, k, ALL_PROPERTIES);
       if (entry == kMaxUInt32) {
         continue;
       }
 
-      Handle<Object> element_k = GetImpl(parameter_map, entry);
+      Handle<Object> element_k = GetImpl(*parameter_map, entry);
 
       if (element_k->IsAccessorPair()) {
         LookupIterator it(isolate, object, k, LookupIterator::OWN);
diff --git a/src/elements.h b/src/elements.h
index 1ffd4d9..76e1aa6 100644
--- a/src/elements.h
+++ b/src/elements.h
@@ -114,6 +114,9 @@
                                       Handle<Map> map) = 0;
   virtual void GrowCapacityAndConvert(Handle<JSObject> object,
                                       uint32_t capacity) = 0;
+  // Unlike GrowCapacityAndConvert, this method does not attempt to convert
+  // the backing store; it simply returns false in that case.
+  virtual bool GrowCapacity(Handle<JSObject> object, uint32_t index) = 0;
 
   static void InitializeOncePerProcess();
   static void TearDown();
diff --git a/src/execution.cc b/src/execution.cc
index c42d164..59421c7 100644
--- a/src/execution.cc
+++ b/src/execution.cc
@@ -136,8 +136,6 @@
       PrintDeserializedCodeInfo(Handle<JSFunction>::cast(target));
     }
     RuntimeCallTimerScope timer(isolate, &RuntimeCallStats::JS_Execution);
-    TRACE_EVENT_RUNTIME_CALL_STATS_TRACING_SCOPED(
-        isolate, &tracing::TraceEventStatsTable::JS_Execution);
     value = CALL_GENERATED_CODE(isolate, stub_entry, orig_func, func, recv,
                                 argc, argv);
   }
@@ -436,31 +434,6 @@
 // --- C a l l s   t o   n a t i v e s ---
 
 
-Handle<String> Execution::GetStackTraceLine(Handle<Object> recv,
-                                            Handle<JSFunction> fun,
-                                            Handle<Object> pos,
-                                            Handle<Object> is_global) {
-  Isolate* isolate = fun->GetIsolate();
-  Handle<Object> strict_mode = isolate->factory()->ToBoolean(false);
-
-  MaybeHandle<Object> maybe_callsite =
-      CallSiteUtils::Construct(isolate, recv, fun, pos, strict_mode);
-  if (maybe_callsite.is_null()) {
-    isolate->clear_pending_exception();
-    return isolate->factory()->empty_string();
-  }
-
-  MaybeHandle<String> maybe_to_string =
-      CallSiteUtils::ToString(isolate, maybe_callsite.ToHandleChecked());
-  if (maybe_to_string.is_null()) {
-    isolate->clear_pending_exception();
-    return isolate->factory()->empty_string();
-  }
-
-  return maybe_to_string.ToHandleChecked();
-}
-
-
 void StackGuard::HandleGCInterrupt() {
   if (CheckAndClearInterrupt(GC_REQUEST)) {
     isolate_->heap()->HandleGCRequest();
diff --git a/src/execution.h b/src/execution.h
index 52c7628..6f4bb33 100644
--- a/src/execution.h
+++ b/src/execution.h
@@ -21,11 +21,9 @@
   // When the function called is not in strict mode, receiver is
   // converted to an object.
   //
-  MUST_USE_RESULT static MaybeHandle<Object> Call(Isolate* isolate,
-                                                  Handle<Object> callable,
-                                                  Handle<Object> receiver,
-                                                  int argc,
-                                                  Handle<Object> argv[]);
+  V8_EXPORT_PRIVATE MUST_USE_RESULT static MaybeHandle<Object> Call(
+      Isolate* isolate, Handle<Object> callable, Handle<Object> receiver,
+      int argc, Handle<Object> argv[]);
 
   // Construct object from function, the caller supplies an array of
   // arguments.
@@ -48,11 +46,6 @@
                                      Handle<Object> receiver, int argc,
                                      Handle<Object> argv[],
                                      MaybeHandle<Object>* exception_out = NULL);
-
-  static Handle<String> GetStackTraceLine(Handle<Object> recv,
-                                          Handle<JSFunction> fun,
-                                          Handle<Object> pos,
-                                          Handle<Object> is_global);
 };
 
 
diff --git a/src/extensions/statistics-extension.cc b/src/extensions/statistics-extension.cc
index 5aafb7a..da53336 100644
--- a/src/extensions/statistics-extension.cc
+++ b/src/extensions/statistics-extension.cc
@@ -67,7 +67,8 @@
         args[0]
             ->BooleanValue(args.GetIsolate()->GetCurrentContext())
             .FromMaybe(false)) {
-      heap->CollectAllGarbage(Heap::kNoGCFlags, "counters extension");
+      heap->CollectAllGarbage(Heap::kNoGCFlags,
+                              GarbageCollectionReason::kCountersExtension);
     }
   }
 
@@ -116,19 +117,24 @@
   };
 
   const StatisticNumber numbers[] = {
-      {heap->memory_allocator()->Size(), "total_committed_bytes"},
+      {static_cast<intptr_t>(heap->memory_allocator()->Size()),
+       "total_committed_bytes"},
       {heap->new_space()->Size(), "new_space_live_bytes"},
       {heap->new_space()->Available(), "new_space_available_bytes"},
-      {heap->new_space()->CommittedMemory(), "new_space_commited_bytes"},
+      {static_cast<intptr_t>(heap->new_space()->CommittedMemory()),
+       "new_space_commited_bytes"},
       {heap->old_space()->Size(), "old_space_live_bytes"},
       {heap->old_space()->Available(), "old_space_available_bytes"},
-      {heap->old_space()->CommittedMemory(), "old_space_commited_bytes"},
+      {static_cast<intptr_t>(heap->old_space()->CommittedMemory()),
+       "old_space_commited_bytes"},
       {heap->code_space()->Size(), "code_space_live_bytes"},
       {heap->code_space()->Available(), "code_space_available_bytes"},
-      {heap->code_space()->CommittedMemory(), "code_space_commited_bytes"},
+      {static_cast<intptr_t>(heap->code_space()->CommittedMemory()),
+       "code_space_commited_bytes"},
       {heap->lo_space()->Size(), "lo_space_live_bytes"},
       {heap->lo_space()->Available(), "lo_space_available_bytes"},
-      {heap->lo_space()->CommittedMemory(), "lo_space_commited_bytes"},
+      {static_cast<intptr_t>(heap->lo_space()->CommittedMemory()),
+       "lo_space_commited_bytes"},
   };
 
   for (size_t i = 0; i < arraysize(numbers); i++) {
diff --git a/src/external-reference-table.cc b/src/external-reference-table.cc
index 5833eef..f908be1 100644
--- a/src/external-reference-table.cc
+++ b/src/external-reference-table.cc
@@ -215,10 +215,6 @@
       "double_constants.minus_one_half");
   Add(ExternalReference::stress_deopt_count(isolate).address(),
       "Isolate::stress_deopt_count_address()");
-  Add(ExternalReference::virtual_handler_register(isolate).address(),
-      "Isolate::virtual_handler_register()");
-  Add(ExternalReference::virtual_slot_register(isolate).address(),
-      "Isolate::virtual_slot_register()");
   Add(ExternalReference::runtime_function_table_address(isolate).address(),
       "Runtime::runtime_function_table_address()");
   Add(ExternalReference::is_tail_call_elimination_enabled_address(isolate)
diff --git a/src/factory.cc b/src/factory.cc
index bedcb9b..163e864 100644
--- a/src/factory.cc
+++ b/src/factory.cc
@@ -37,13 +37,15 @@
     RETURN_OBJECT_UNLESS_RETRY(ISOLATE, TYPE)                                 \
     /* Two GCs before panicking.  In newspace will almost always succeed. */  \
     for (int __i__ = 0; __i__ < 2; __i__++) {                                 \
-      (ISOLATE)->heap()->CollectGarbage(__allocation__.RetrySpace(),          \
-                                        "allocation failure");                \
+      (ISOLATE)->heap()->CollectGarbage(                                      \
+          __allocation__.RetrySpace(),                                        \
+          GarbageCollectionReason::kAllocationFailure);                       \
       __allocation__ = FUNCTION_CALL;                                         \
       RETURN_OBJECT_UNLESS_RETRY(ISOLATE, TYPE)                               \
     }                                                                         \
     (ISOLATE)->counters()->gc_last_resort_from_handles()->Increment();        \
-    (ISOLATE)->heap()->CollectAllAvailableGarbage("last resort gc");          \
+    (ISOLATE)->heap()->CollectAllAvailableGarbage(                            \
+        GarbageCollectionReason::kLastResort);                                \
     {                                                                         \
       AlwaysAllocateScope __scope__(ISOLATE);                                 \
       __allocation__ = FUNCTION_CALL;                                         \
@@ -54,7 +56,6 @@
     return Handle<TYPE>();                                                    \
   } while (false)
 
-
 template<typename T>
 Handle<T> Factory::New(Handle<Map> map, AllocationSpace space) {
   CALL_HEAP_FUNCTION(
@@ -91,7 +92,6 @@
   return result;
 }
 
-
 Handle<PrototypeInfo> Factory::NewPrototypeInfo() {
   Handle<PrototypeInfo> result =
       Handle<PrototypeInfo>::cast(NewStruct(PROTOTYPE_INFO_TYPE));
@@ -102,14 +102,10 @@
   return result;
 }
 
-
-Handle<SloppyBlockWithEvalContextExtension>
-Factory::NewSloppyBlockWithEvalContextExtension(
-    Handle<ScopeInfo> scope_info, Handle<JSObject> extension) {
-  DCHECK(scope_info->is_declaration_scope());
-  Handle<SloppyBlockWithEvalContextExtension> result =
-      Handle<SloppyBlockWithEvalContextExtension>::cast(
-          NewStruct(SLOPPY_BLOCK_WITH_EVAL_CONTEXT_EXTENSION_TYPE));
+Handle<ContextExtension> Factory::NewContextExtension(
+    Handle<ScopeInfo> scope_info, Handle<Object> extension) {
+  Handle<ContextExtension> result =
+      Handle<ContextExtension>::cast(NewStruct(CONTEXT_EXTENSION_TYPE));
   result->set_scope_info(*scope_info);
   result->set_extension(*extension);
   return result;
@@ -178,6 +174,14 @@
   return array;
 }
 
+Handle<FrameArray> Factory::NewFrameArray(int number_of_frames,
+                                          PretenureFlag pretenure) {
+  DCHECK_LE(0, number_of_frames);
+  Handle<FixedArray> result =
+      NewFixedArrayWithHoles(FrameArray::LengthFor(number_of_frames));
+  result->set(FrameArray::kFrameCountIndex, Smi::FromInt(0));
+  return Handle<FrameArray>::cast(result);
+}
 
 Handle<OrderedHashSet> Factory::NewOrderedHashSet() {
   return OrderedHashSet::Allocate(isolate(), OrderedHashSet::kMinCapacity);
@@ -595,6 +599,19 @@
   return result;
 }
 
+Handle<String> Factory::NewSurrogatePairString(uint16_t lead, uint16_t trail) {
+  DCHECK_GE(lead, 0xD800);
+  DCHECK_LE(lead, 0xDBFF);
+  DCHECK_GE(trail, 0xDC00);
+  DCHECK_LE(trail, 0xDFFF);
+
+  Handle<SeqTwoByteString> str =
+      isolate()->factory()->NewRawTwoByteString(2).ToHandleChecked();
+  uc16* dest = str->GetChars();
+  dest[0] = lead;
+  dest[1] = trail;
+  return str;
+}
 
 Handle<String> Factory::NewProperSubString(Handle<String> str,
                                            int begin,
@@ -729,6 +746,17 @@
   return external_string;
 }
 
+Handle<JSStringIterator> Factory::NewJSStringIterator(Handle<String> string) {
+  Handle<Map> map(isolate()->native_context()->string_iterator_map(),
+                  isolate());
+  Handle<String> flat_string = String::Flatten(string);
+  Handle<JSStringIterator> iterator =
+      Handle<JSStringIterator>::cast(NewJSObjectFromMap(map));
+  iterator->set_string(*flat_string);
+  iterator->set_index(0);
+
+  return iterator;
+}
 
 Handle<Symbol> Factory::NewSymbol() {
   CALL_HEAP_FUNCTION(
@@ -784,15 +812,19 @@
   return context_table;
 }
 
-
-Handle<Context> Factory::NewModuleContext(Handle<ScopeInfo> scope_info) {
+Handle<Context> Factory::NewModuleContext(Handle<Module> module,
+                                          Handle<JSFunction> function,
+                                          Handle<ScopeInfo> scope_info) {
   DCHECK_EQ(scope_info->scope_type(), MODULE_SCOPE);
   Handle<FixedArray> array =
       NewFixedArray(scope_info->ContextLength(), TENURED);
   array->set_map_no_write_barrier(*module_context_map());
-  // Instance link will be set later.
   Handle<Context> context = Handle<Context>::cast(array);
-  context->set_extension(*the_hole_value());
+  context->set_closure(*function);
+  context->set_previous(function->context());
+  context->set_extension(*module);
+  context->set_native_context(function->native_context());
+  DCHECK(context->IsModuleContext());
   return context;
 }
 
@@ -811,35 +843,41 @@
   return context;
 }
 
-
 Handle<Context> Factory::NewCatchContext(Handle<JSFunction> function,
                                          Handle<Context> previous,
+                                         Handle<ScopeInfo> scope_info,
                                          Handle<String> name,
                                          Handle<Object> thrown_object) {
   STATIC_ASSERT(Context::MIN_CONTEXT_SLOTS == Context::THROWN_OBJECT_INDEX);
+  Handle<ContextExtension> extension = NewContextExtension(scope_info, name);
   Handle<FixedArray> array = NewFixedArray(Context::MIN_CONTEXT_SLOTS + 1);
   array->set_map_no_write_barrier(*catch_context_map());
   Handle<Context> context = Handle<Context>::cast(array);
   context->set_closure(*function);
   context->set_previous(*previous);
-  context->set_extension(*name);
+  context->set_extension(*extension);
   context->set_native_context(previous->native_context());
   context->set(Context::THROWN_OBJECT_INDEX, *thrown_object);
   return context;
 }
 
 Handle<Context> Factory::NewDebugEvaluateContext(Handle<Context> previous,
+                                                 Handle<ScopeInfo> scope_info,
                                                  Handle<JSReceiver> extension,
                                                  Handle<Context> wrapped,
                                                  Handle<StringSet> whitelist) {
   STATIC_ASSERT(Context::WHITE_LIST_INDEX == Context::MIN_CONTEXT_SLOTS + 1);
+  DCHECK(scope_info->IsDebugEvaluateScope());
+  Handle<ContextExtension> context_extension = NewContextExtension(
+      scope_info, extension.is_null() ? Handle<Object>::cast(undefined_value())
+                                      : Handle<Object>::cast(extension));
   Handle<FixedArray> array = NewFixedArray(Context::MIN_CONTEXT_SLOTS + 2);
   array->set_map_no_write_barrier(*debug_evaluate_context_map());
   Handle<Context> c = Handle<Context>::cast(array);
   c->set_closure(wrapped.is_null() ? previous->closure() : wrapped->closure());
   c->set_previous(*previous);
   c->set_native_context(previous->native_context());
-  if (!extension.is_null()) c->set(Context::EXTENSION_INDEX, *extension);
+  c->set_extension(*context_extension);
   if (!wrapped.is_null()) c->set(Context::WRAPPED_CONTEXT_INDEX, *wrapped);
   if (!whitelist.is_null()) c->set(Context::WHITE_LIST_INDEX, *whitelist);
   return c;
@@ -847,13 +885,16 @@
 
 Handle<Context> Factory::NewWithContext(Handle<JSFunction> function,
                                         Handle<Context> previous,
+                                        Handle<ScopeInfo> scope_info,
                                         Handle<JSReceiver> extension) {
+  Handle<ContextExtension> context_extension =
+      NewContextExtension(scope_info, extension);
   Handle<FixedArray> array = NewFixedArray(Context::MIN_CONTEXT_SLOTS);
   array->set_map_no_write_barrier(*with_context_map());
   Handle<Context> context = Handle<Context>::cast(array);
   context->set_closure(*function);
   context->set_previous(*previous);
-  context->set_extension(*extension);
+  context->set_extension(*context_extension);
   context->set_native_context(previous->native_context());
   return context;
 }
@@ -881,6 +922,20 @@
       Struct);
 }
 
+Handle<PromiseContainer> Factory::NewPromiseContainer(
+    Handle<JSReceiver> thenable, Handle<JSReceiver> then,
+    Handle<JSFunction> resolve, Handle<JSFunction> reject,
+    Handle<Object> before_debug_event, Handle<Object> after_debug_event) {
+  Handle<PromiseContainer> result =
+      Handle<PromiseContainer>::cast(NewStruct(PROMISE_CONTAINER_TYPE));
+  result->set_thenable(*thenable);
+  result->set_then(*then);
+  result->set_resolve(*resolve);
+  result->set_reject(*reject);
+  result->set_before_debug_event(*before_debug_event);
+  result->set_after_debug_event(*after_debug_event);
+  return result;
+}
 
 Handle<AliasedArgumentsEntry> Factory::NewAliasedArgumentsEntry(
     int aliased_context_slot) {
@@ -1196,6 +1251,13 @@
   return maybe_error.ToHandleChecked();
 }
 
+Handle<Object> Factory::NewInvalidStringLengthError() {
+  // Invalidate the "string length" protector.
+  if (isolate()->IsStringLengthOverflowIntact()) {
+    isolate()->InvalidateStringLengthOverflowProtector();
+  }
+  return NewRangeError(MessageTemplate::kInvalidStringLength);
+}
 
 #define DEFINE_ERROR(NAME, name)                                              \
   Handle<Object> Factory::New##NAME(MessageTemplate::Template template_index, \
@@ -1296,7 +1358,7 @@
   // TODO(littledan): Why do we have this is_generator test when
   // NewFunctionPrototype already handles finding an appropriately
   // shared prototype?
-  if (!function->shared()->is_resumable()) {
+  if (!IsResumableFunction(function->shared()->kind())) {
     if (prototype->IsTheHole(isolate())) {
       prototype = NewFunctionPrototype(function);
     }
@@ -1322,12 +1384,11 @@
   // can be from a different context.
   Handle<Context> native_context(function->context()->native_context());
   Handle<Map> new_map;
-  if (function->shared()->is_resumable()) {
+  if (IsResumableFunction(function->shared()->kind())) {
     // Generator and async function prototypes can share maps since they
     // don't have "constructor" properties.
     new_map = handle(native_context->generator_object_prototype_map());
   } else {
-    CHECK(!function->shared()->is_async());
     // Each function prototype gets a fresh map to avoid unwanted sharing of
     // maps between prototypes of different constructors.
     Handle<JSFunction> object_function(native_context->object_function());
@@ -1338,7 +1399,7 @@
   DCHECK(!new_map->is_prototype_map());
   Handle<JSObject> prototype = NewJSObjectFromMap(new_map);
 
-  if (!function->shared()->is_resumable()) {
+  if (!IsResumableFunction(function->shared()->kind())) {
     JSObject::AddProperty(prototype, constructor_string(), function, DONT_ENUM);
   }
 
@@ -1385,6 +1446,17 @@
   return scope_info;
 }
 
+Handle<ModuleInfoEntry> Factory::NewModuleInfoEntry() {
+  Handle<FixedArray> array = NewFixedArray(ModuleInfoEntry::kLength, TENURED);
+  array->set_map_no_write_barrier(*module_info_entry_map());
+  return Handle<ModuleInfoEntry>::cast(array);
+}
+
+Handle<ModuleInfo> Factory::NewModuleInfo() {
+  Handle<FixedArray> array = NewFixedArray(ModuleInfo::kLength, TENURED);
+  array->set_map_no_write_barrier(*module_info_map());
+  return Handle<ModuleInfo>::cast(array);
+}
 
 Handle<JSObject> Factory::NewExternal(void* value) {
   Handle<Foreign> foreign = NewForeign(static_cast<Address>(value));
@@ -1666,7 +1738,7 @@
 
 Handle<JSGeneratorObject> Factory::NewJSGeneratorObject(
     Handle<JSFunction> function) {
-  DCHECK(function->shared()->is_resumable());
+  DCHECK(IsResumableFunction(function->shared()->kind()));
   JSFunction::EnsureHasInitialMap(function);
   Handle<Map> map(function->initial_map());
   DCHECK_EQ(JS_GENERATOR_OBJECT_TYPE, map->instance_type());
@@ -1676,6 +1748,29 @@
       JSGeneratorObject);
 }
 
+Handle<Module> Factory::NewModule(Handle<SharedFunctionInfo> code) {
+  Handle<ModuleInfo> module_info(code->scope_info()->ModuleDescriptorInfo(),
+                                 isolate());
+  Handle<ObjectHashTable> exports =
+      ObjectHashTable::New(isolate(), module_info->regular_exports()->length());
+  int requested_modules_length = module_info->module_requests()->length();
+  Handle<FixedArray> requested_modules =
+      requested_modules_length > 0 ? NewFixedArray(requested_modules_length)
+                                   : empty_fixed_array();
+
+  // To make it easy to hash Modules, we set a new symbol as the name of
+  // SharedFunctionInfo representing this Module.
+  Handle<Symbol> name_symbol = NewSymbol();
+  code->set_name(*name_symbol);
+
+  Handle<Module> module = Handle<Module>::cast(NewStruct(MODULE_TYPE));
+  module->set_code(*code);
+  module->set_exports(*exports);
+  module->set_requested_modules(*requested_modules);
+  module->set_flags(0);
+  module->set_embedder_data(isolate()->heap()->undefined_value());
+  return module;
+}
 
 Handle<JSArrayBuffer> Factory::NewJSArrayBuffer(SharedFlag shared,
                                                 PretenureFlag pretenure) {
@@ -1698,6 +1793,15 @@
       JSDataView);
 }
 
+Handle<JSIteratorResult> Factory::NewJSIteratorResult(Handle<Object> value,
+                                                      bool done) {
+  Handle<Map> map(isolate()->native_context()->iterator_result_map());
+  Handle<JSIteratorResult> js_iter_result =
+      Handle<JSIteratorResult>::cast(NewJSObjectFromMap(map));
+  js_iter_result->set_value(*value);
+  js_iter_result->set_done(*ToBoolean(done));
+  return js_iter_result;
+}
 
 Handle<JSMap> Factory::NewJSMap() {
   Handle<Map> map(isolate()->native_context()->js_map_map());
@@ -2066,6 +2170,7 @@
   Handle<SharedFunctionInfo> shared = NewSharedFunctionInfo(
       name, code, IsConstructable(kind, scope_info->language_mode()));
   shared->set_scope_info(*scope_info);
+  shared->set_outer_scope_info(*the_hole_value());
   shared->set_kind(kind);
   shared->set_num_literals(number_of_literals);
   if (IsGeneratorFunction(kind)) {
@@ -2112,6 +2217,7 @@
   share->set_code(*code);
   share->set_optimized_code_map(*cleared_optimized_code_map());
   share->set_scope_info(ScopeInfo::Empty(isolate()));
+  share->set_outer_scope_info(*the_hole_value());
   Handle<Code> construct_stub =
       is_constructor ? isolate()->builtins()->JSConstructStubGeneric()
                      : isolate()->builtins()->ConstructedNonConstructable();
diff --git a/src/factory.h b/src/factory.h
index 4908d5f..82c2317 100644
--- a/src/factory.h
+++ b/src/factory.h
@@ -28,9 +28,8 @@
                              byte kind);
 
   // Allocates a fixed array initialized with undefined values.
-  Handle<FixedArray> NewFixedArray(
-      int size,
-      PretenureFlag pretenure = NOT_TENURED);
+  V8_EXPORT_PRIVATE Handle<FixedArray> NewFixedArray(
+      int size, PretenureFlag pretenure = NOT_TENURED);
 
   // Allocate a new fixed array with non-existing entries (the hole).
   Handle<FixedArray> NewFixedArrayWithHoles(
@@ -52,19 +51,27 @@
       int size,
       PretenureFlag pretenure = NOT_TENURED);
 
+  Handle<FrameArray> NewFrameArray(int number_of_frames,
+                                   PretenureFlag pretenure = NOT_TENURED);
+
   Handle<OrderedHashSet> NewOrderedHashSet();
   Handle<OrderedHashMap> NewOrderedHashMap();
 
   // Create a new boxed value.
   Handle<Box> NewBox(Handle<Object> value);
 
+  // Create a new PromiseContainer struct.
+  Handle<PromiseContainer> NewPromiseContainer(
+      Handle<JSReceiver> thenable, Handle<JSReceiver> then,
+      Handle<JSFunction> resolve, Handle<JSFunction> reject,
+      Handle<Object> before_debug_event, Handle<Object> after_debug_event);
+
   // Create a new PrototypeInfo struct.
   Handle<PrototypeInfo> NewPrototypeInfo();
 
-  // Create a new SloppyBlockWithEvalContextExtension struct.
-  Handle<SloppyBlockWithEvalContextExtension>
-  NewSloppyBlockWithEvalContextExtension(Handle<ScopeInfo> scope_info,
-                                         Handle<JSObject> extension);
+  // Create a new ContextExtension struct.
+  Handle<ContextExtension> NewContextExtension(Handle<ScopeInfo> scope_info,
+                                               Handle<Object> extension);
 
   // Create a pre-tenured empty AccessorPair.
   Handle<AccessorPair> NewAccessorPair();
@@ -74,7 +81,8 @@
 
   // Finds the internalized copy for string in the string table.
   // If not found, a new string is added to the table and returned.
-  Handle<String> InternalizeUtf8String(Vector<const char> str);
+  V8_EXPORT_PRIVATE Handle<String> InternalizeUtf8String(
+      Vector<const char> str);
   Handle<String> InternalizeUtf8String(const char* str) {
     return InternalizeUtf8String(CStrVector(str));
   }
@@ -119,9 +127,8 @@
   //     will be converted to Latin1, otherwise it will be left as two-byte.
   //
   // One-byte strings are pretenured when used as keys in the SourceCodeCache.
-  MUST_USE_RESULT MaybeHandle<String> NewStringFromOneByte(
-      Vector<const uint8_t> str,
-      PretenureFlag pretenure = NOT_TENURED);
+  V8_EXPORT_PRIVATE MUST_USE_RESULT MaybeHandle<String> NewStringFromOneByte(
+      Vector<const uint8_t> str, PretenureFlag pretenure = NOT_TENURED);
 
   template <size_t N>
   inline Handle<String> NewStringFromStaticChars(
@@ -163,17 +170,17 @@
 
   // UTF8 strings are pretenured when used for regexp literal patterns and
   // flags in the parser.
-  MUST_USE_RESULT MaybeHandle<String> NewStringFromUtf8(
-      Vector<const char> str,
-      PretenureFlag pretenure = NOT_TENURED);
+  MUST_USE_RESULT V8_EXPORT_PRIVATE MaybeHandle<String> NewStringFromUtf8(
+      Vector<const char> str, PretenureFlag pretenure = NOT_TENURED);
 
-  MUST_USE_RESULT MaybeHandle<String> NewStringFromTwoByte(
-      Vector<const uc16> str,
-      PretenureFlag pretenure = NOT_TENURED);
+  V8_EXPORT_PRIVATE MUST_USE_RESULT MaybeHandle<String> NewStringFromTwoByte(
+      Vector<const uc16> str, PretenureFlag pretenure = NOT_TENURED);
 
   MUST_USE_RESULT MaybeHandle<String> NewStringFromTwoByte(
       const ZoneVector<uc16>* str, PretenureFlag pretenure = NOT_TENURED);
 
+  Handle<JSStringIterator> NewJSStringIterator(Handle<String> string);
+
   // Allocates an internalized string in old space based on the character
   // stream.
   Handle<String> NewInternalizedStringFromUtf8(Vector<const char> str,
@@ -215,6 +222,10 @@
   MUST_USE_RESULT MaybeHandle<String> NewConsString(Handle<String> left,
                                                     Handle<String> right);
 
+  // Create or lookup a single character string made up of a utf16 surrogate
+  // pair.
+  Handle<String> NewSurrogatePairString(uint16_t lead, uint16_t trail);
+
   // Create a new string object which holds a proper substring of a string.
   Handle<String> NewProperSubString(Handle<String> str,
                                     int begin,
@@ -255,7 +266,9 @@
   Handle<ScriptContextTable> NewScriptContextTable();
 
   // Create a module context.
-  Handle<Context> NewModuleContext(Handle<ScopeInfo> scope_info);
+  Handle<Context> NewModuleContext(Handle<Module> module,
+                                   Handle<JSFunction> function,
+                                   Handle<ScopeInfo> scope_info);
 
   // Create a function context.
   Handle<Context> NewFunctionContext(int length, Handle<JSFunction> function);
@@ -263,15 +276,18 @@
   // Create a catch context.
   Handle<Context> NewCatchContext(Handle<JSFunction> function,
                                   Handle<Context> previous,
+                                  Handle<ScopeInfo> scope_info,
                                   Handle<String> name,
                                   Handle<Object> thrown_object);
 
   // Create a 'with' context.
   Handle<Context> NewWithContext(Handle<JSFunction> function,
                                  Handle<Context> previous,
+                                 Handle<ScopeInfo> scope_info,
                                  Handle<JSReceiver> extension);
 
   Handle<Context> NewDebugEvaluateContext(Handle<Context> previous,
+                                          Handle<ScopeInfo> scope_info,
                                           Handle<JSReceiver> extension,
                                           Handle<Context> wrapped,
                                           Handle<StringSet> whitelist);
@@ -290,7 +306,7 @@
 
   Handle<AccessorInfo> NewAccessorInfo();
 
-  Handle<Script> NewScript(Handle<String> source);
+  V8_EXPORT_PRIVATE Handle<Script> NewScript(Handle<String> source);
 
   // Foreign objects are pretenured when allocated by the bootstrapper.
   Handle<Foreign> NewForeign(Address addr,
@@ -434,7 +450,7 @@
 
   // Create a JSArray with a specified length and elements initialized
   // according to the specified mode.
-  Handle<JSArray> NewJSArray(
+  V8_EXPORT_PRIVATE Handle<JSArray> NewJSArray(
       ElementsKind elements_kind, int length, int capacity,
       ArrayStorageAllocationMode mode = DONT_INITIALIZE_ARRAY_ELEMENTS,
       PretenureFlag pretenure = NOT_TENURED);
@@ -450,11 +466,11 @@
   }
 
   // Create a JSArray with the given elements.
-  Handle<JSArray> NewJSArrayWithElements(Handle<FixedArrayBase> elements,
-                                         ElementsKind elements_kind, int length,
-                                         PretenureFlag pretenure = NOT_TENURED);
+  V8_EXPORT_PRIVATE Handle<JSArray> NewJSArrayWithElements(
+      Handle<FixedArrayBase> elements, ElementsKind elements_kind, int length,
+      PretenureFlag pretenure = NOT_TENURED);
 
-  Handle<JSArray> NewJSArrayWithElements(
+  V8_EXPORT_PRIVATE Handle<JSArray> NewJSArrayWithElements(
       Handle<FixedArrayBase> elements,
       ElementsKind elements_kind = TERMINAL_FAST_ELEMENTS_KIND,
       PretenureFlag pretenure = NOT_TENURED) {
@@ -470,6 +486,8 @@
 
   Handle<JSGeneratorObject> NewJSGeneratorObject(Handle<JSFunction> function);
 
+  Handle<Module> NewModule(Handle<SharedFunctionInfo> code);
+
   Handle<JSArrayBuffer> NewJSArrayBuffer(
       SharedFlag shared = SharedFlag::kNotShared,
       PretenureFlag pretenure = NOT_TENURED);
@@ -495,6 +513,8 @@
   Handle<JSDataView> NewJSDataView(Handle<JSArrayBuffer> buffer,
                                    size_t byte_offset, size_t byte_length);
 
+  Handle<JSIteratorResult> NewJSIteratorResult(Handle<Object> value, bool done);
+
   Handle<JSMap> NewJSMap();
   Handle<JSSet> NewJSSet();
 
@@ -554,6 +574,9 @@
   // Create a serialized scope info.
   Handle<ScopeInfo> NewScopeInfo(int length);
 
+  Handle<ModuleInfoEntry> NewModuleInfoEntry();
+  Handle<ModuleInfo> NewModuleInfo();
+
   // Create an External object for V8's external API.
   Handle<JSObject> NewExternal(void* value);
 
@@ -576,9 +599,7 @@
   Handle<Object> NewError(Handle<JSFunction> constructor,
                           Handle<String> message);
 
-  Handle<Object> NewInvalidStringLengthError() {
-    return NewRangeError(MessageTemplate::kInvalidStringLength);
-  }
+  Handle<Object> NewInvalidStringLengthError();
 
   Handle<Object> NewURIError() {
     return NewError(isolate()->uri_error_function(),
diff --git a/src/fast-accessor-assembler.cc b/src/fast-accessor-assembler.cc
index ebaab9a..a9cde70 100644
--- a/src/fast-accessor-assembler.cc
+++ b/src/fast-accessor-assembler.cc
@@ -179,27 +179,35 @@
                              ExternalReference::DIRECT_API_CALL, isolate());
 
   // Create & call API callback via stub.
-  CallApiCallbackStub stub(isolate(), 1, true, true);
-  DCHECK_EQ(5, stub.GetCallInterfaceDescriptor().GetParameterCount());
-  DCHECK_EQ(1, stub.GetCallInterfaceDescriptor().GetStackParameterCount());
+  const int kJSParameterCount = 1;
+  CallApiCallbackStub stub(isolate(), kJSParameterCount, true, true);
+  CallInterfaceDescriptor descriptor = stub.GetCallInterfaceDescriptor();
+  DCHECK_EQ(4, descriptor.GetParameterCount());
+  DCHECK_EQ(0, descriptor.GetStackParameterCount());
   // TODO(vogelheim): There is currently no clean way to retrieve the context
   //     parameter for a stub and the implementation details are hidden in
   //     compiler/*. The context_paramter is computed as:
   //       Linkage::GetJSCallContextParamIndex(descriptor->JSParameterCount())
-  const int context_parameter = 3;
-  Node* call = assembler_->CallStub(
-      stub.GetCallInterfaceDescriptor(),
-      assembler_->HeapConstant(stub.GetCode()),
-      assembler_->Parameter(context_parameter),
+  const int kContextParameter = 3;
+  Node* context = assembler_->Parameter(kContextParameter);
+  Node* target = assembler_->HeapConstant(stub.GetCode());
 
-      // Stub/register parameters:
-      assembler_->UndefinedConstant(), /* callee (there's no JSFunction) */
-      assembler_->UndefinedConstant(), /* call_data (undefined) */
-      assembler_->Parameter(0), /* receiver (same as holder in this case) */
-      assembler_->ExternalConstant(callback), /* API callback function */
+  int param_count = descriptor.GetParameterCount();
+  Node** args = zone()->NewArray<Node*>(param_count + 1 + kJSParameterCount);
+  // Stub/register parameters:
+  args[0] = assembler_->UndefinedConstant();  // callee (there's no JSFunction)
+  args[1] = assembler_->UndefinedConstant();  // call_data (undefined)
+  args[2] = assembler_->Parameter(0);  // receiver (same as holder in this case)
+  args[3] = assembler_->ExternalConstant(callback);  // API callback function
 
-      // JS arguments, on stack:
-      FromId(arg));
+  // JS arguments, on stack:
+  args[4] = FromId(arg);
+
+  // Context.
+  args[5] = context;
+
+  Node* call =
+      assembler_->CallStubN(descriptor, kJSParameterCount, target, args);
 
   return FromRaw(call);
 }
diff --git a/src/field-type.cc b/src/field-type.cc
index 2e4cbfb..b3b24e2 100644
--- a/src/field-type.cc
+++ b/src/field-type.cc
@@ -4,9 +4,9 @@
 
 #include "src/field-type.h"
 
+#include "src/ast/ast-types.h"
 #include "src/handles-inl.h"
 #include "src/ostreams.h"
-#include "src/types.h"
 
 namespace v8 {
 namespace internal {
@@ -71,11 +71,11 @@
 
 bool FieldType::NowIs(Handle<FieldType> other) { return NowIs(*other); }
 
-Type* FieldType::Convert(Zone* zone) {
-  if (IsAny()) return Type::NonInternal();
-  if (IsNone()) return Type::None();
+AstType* FieldType::Convert(Zone* zone) {
+  if (IsAny()) return AstType::NonInternal();
+  if (IsNone()) return AstType::None();
   DCHECK(IsClass());
-  return Type::Class(AsClass(), zone);
+  return AstType::Class(AsClass(), zone);
 }
 
 void FieldType::PrintTo(std::ostream& os) {
diff --git a/src/field-type.h b/src/field-type.h
index eb7ffca..11e1069 100644
--- a/src/field-type.h
+++ b/src/field-type.h
@@ -5,6 +5,7 @@
 #ifndef V8_FIELD_TYPE_H_
 #define V8_FIELD_TYPE_H_
 
+#include "src/ast/ast-types.h"
 #include "src/handles.h"
 #include "src/objects.h"
 #include "src/ostreams.h"
@@ -38,7 +39,7 @@
   bool NowStable();
   bool NowIs(FieldType* other);
   bool NowIs(Handle<FieldType> other);
-  Type* Convert(Zone* zone);
+  AstType* Convert(Zone* zone);
 
   void PrintTo(std::ostream& os);
 };
diff --git a/src/flag-definitions.h b/src/flag-definitions.h
index e5ddbad..779a589 100644
--- a/src/flag-definitions.h
+++ b/src/flag-definitions.h
@@ -23,14 +23,21 @@
 // this will just be an extern declaration, but for a readonly flag we let the
 // compiler make better optimizations by giving it the value.
 #if defined(FLAG_MODE_DECLARE)
-#define FLAG_FULL(ftype, ctype, nam, def, cmt) extern ctype FLAG_##nam;
+#define FLAG_FULL(ftype, ctype, nam, def, cmt) \
+  V8_EXPORT_PRIVATE extern ctype FLAG_##nam;
 #define FLAG_READONLY(ftype, ctype, nam, def, cmt) \
   static ctype const FLAG_##nam = def;
 
 // We want to supply the actual storage and value for the flag variable in the
 // .cc file.  We only do this for writable flags.
 #elif defined(FLAG_MODE_DEFINE)
-#define FLAG_FULL(ftype, ctype, nam, def, cmt) ctype FLAG_##nam = def;
+#ifdef USING_V8_SHARED
+#define FLAG_FULL(ftype, ctype, nam, def, cmt) \
+  V8_EXPORT_PRIVATE extern ctype FLAG_##nam;
+#else
+#define FLAG_FULL(ftype, ctype, nam, def, cmt) \
+  V8_EXPORT_PRIVATE ctype FLAG_##nam = def;
+#endif
 
 // We need to define all of our default values so that the Flag structure can
 // access them by pointer.  These are just used internally inside of one .cc,
@@ -119,31 +126,27 @@
 #else
 #define DEBUG_BOOL false
 #endif
-#if (defined CAN_USE_VFP3_INSTRUCTIONS) || !(defined ARM_TEST_NO_FEATURE_PROBE)
-#define ENABLE_VFP3_DEFAULT true
+
+// Supported ARM configurations are:
+//  "armv6":       ARMv6 + VFPv2
+//  "armv7":       ARMv7 + VFPv3-D32 + NEON
+//  "armv7+sudiv": ARMv7 + VFPv4-D32 + NEON + SUDIV
+//  "armv8":       ARMv8 (including all of the above)
+#if !defined(ARM_TEST_NO_FEATURE_PROBE) ||                            \
+    (defined(CAN_USE_ARMV8_INSTRUCTIONS) &&                           \
+     defined(CAN_USE_ARMV7_INSTRUCTIONS) && defined(CAN_USE_SUDIV) && \
+     defined(CAN_USE_NEON) && defined(CAN_USE_VFP3_INSTRUCTIONS))
+#define ARM_ARCH_DEFAULT "armv8"
+#elif defined(CAN_USE_ARMV7_INSTRUCTIONS) && defined(CAN_USE_SUDIV) && \
+    defined(CAN_USE_NEON) && defined(CAN_USE_VFP3_INSTRUCTIONS)
+#define ARM_ARCH_DEFAULT "armv7+sudiv"
+#elif defined(CAN_USE_ARMV7_INSTRUCTIONS) && defined(CAN_USE_NEON) && \
+    defined(CAN_USE_VFP3_INSTRUCTIONS)
+#define ARM_ARCH_DEFAULT "armv7"
 #else
-#define ENABLE_VFP3_DEFAULT false
+#define ARM_ARCH_DEFAULT "armv6"
 #endif
-#if (defined CAN_USE_ARMV7_INSTRUCTIONS) || !(defined ARM_TEST_NO_FEATURE_PROBE)
-#define ENABLE_ARMV7_DEFAULT true
-#else
-#define ENABLE_ARMV7_DEFAULT false
-#endif
-#if (defined CAN_USE_ARMV8_INSTRUCTIONS) || !(defined ARM_TEST_NO_FEATURE_PROBE)
-#define ENABLE_ARMV8_DEFAULT true
-#else
-#define ENABLE_ARMV8_DEFAULT false
-#endif
-#if (defined CAN_USE_VFP32DREGS) || !(defined ARM_TEST_NO_FEATURE_PROBE)
-#define ENABLE_32DREGS_DEFAULT true
-#else
-#define ENABLE_32DREGS_DEFAULT false
-#endif
-#if (defined CAN_USE_NEON) || !(defined ARM_TEST_NO_FEATURE_PROBE)
-# define ENABLE_NEON_DEFAULT true
-#else
-# define ENABLE_NEON_DEFAULT false
-#endif
+
 #ifdef V8_OS_WIN
 # define ENABLE_LOG_COLOUR false
 #else
@@ -184,9 +187,6 @@
 DEFINE_BOOL(harmony_shipping, true, "enable all shipped harmony features")
 DEFINE_IMPLICATION(es_staging, harmony)
 
-DEFINE_BOOL(intl_extra, false, "additional V8 Intl functions")
-// Removing extra Intl functions is shipped
-DEFINE_NEG_VALUE_IMPLICATION(harmony_shipping, intl_extra, true)
 
 // Activate on ClusterFuzz.
 DEFINE_IMPLICATION(es_staging, harmony_regexp_lookbehind)
@@ -198,7 +198,6 @@
   V(harmony_function_sent, "harmony function.sent")                     \
   V(harmony_sharedarraybuffer, "harmony sharedarraybuffer")             \
   V(harmony_simd, "harmony simd")                                       \
-  V(harmony_explicit_tailcalls, "harmony explicit tail calls")          \
   V(harmony_do_expressions, "harmony do-expressions")                   \
   V(harmony_restrictive_generators,                                     \
     "harmony restrictions on generator declarations")                   \
@@ -206,18 +205,19 @@
   V(harmony_regexp_property, "harmony unicode regexp property classes") \
   V(harmony_for_in, "harmony for-in syntax")                            \
   V(harmony_trailing_commas,                                            \
-    "harmony trailing commas in function parameter lists")
+    "harmony trailing commas in function parameter lists")              \
+  V(harmony_class_fields, "harmony public fields in class literals")
 
 // Features that are complete (but still behind --harmony/es-staging flag).
 #define HARMONY_STAGED_BASE(V)                                               \
   V(harmony_regexp_lookbehind, "harmony regexp lookbehind")                  \
   V(harmony_tailcalls, "harmony tail calls")                                 \
-  V(harmony_async_await, "harmony async-await")                              \
   V(harmony_string_padding, "harmony String-padding methods")
 
 #ifdef V8_I18N_SUPPORT
-#define HARMONY_STAGED(V) \
-  HARMONY_STAGED_BASE(V)  \
+#define HARMONY_STAGED(V)                                          \
+  HARMONY_STAGED_BASE(V)                                           \
+  V(datetime_format_to_parts, "Intl.DateTimeFormat.formatToParts") \
   V(icu_case_mapping, "case mapping with ICU rather than Unibrow")
 #else
 #define HARMONY_STAGED(V) HARMONY_STAGED_BASE(V)
@@ -225,6 +225,7 @@
 
 // Features that are shipping (turned on by default, but internal flag remains).
 #define HARMONY_SHIPPING(V)                                                  \
+  V(harmony_async_await, "harmony async-await")                              \
   V(harmony_restrictive_declarations,                                        \
     "harmony limitations on sloppy mode function declarations")              \
   V(harmony_object_values_entries, "harmony Object.values / Object.entries") \
@@ -335,7 +336,7 @@
 DEFINE_INT(max_inlining_levels, 5, "maximum number of inlining levels")
 DEFINE_INT(max_inlined_source_size, 600,
            "maximum source size in bytes considered for a single inlining")
-DEFINE_INT(max_inlined_nodes, 196,
+DEFINE_INT(max_inlined_nodes, 200,
            "maximum number of AST nodes considered for a single inlining")
 DEFINE_INT(max_inlined_nodes_cumulative, 400,
            "maximum cumulative number of AST nodes considered for inlining")
@@ -405,6 +406,8 @@
 DEFINE_BOOL(inline_construct, true, "inline constructor calls")
 DEFINE_BOOL(inline_arguments, true, "inline functions with arguments object")
 DEFINE_BOOL(inline_accessors, true, "inline JavaScript accessors")
+DEFINE_BOOL(inline_into_try, false, "inline into try blocks")
+DEFINE_IMPLICATION(turbo, inline_into_try)
 DEFINE_INT(escape_analysis_iterations, 2,
            "maximum number of escape analysis fix-point iterations")
 
@@ -450,6 +453,8 @@
 DEFINE_BOOL(turbo_asm_deoptimization, false,
             "enable deoptimization in TurboFan for asm.js code")
 DEFINE_BOOL(turbo_verify, DEBUG_BOOL, "verify TurboFan graphs at each phase")
+DEFINE_BOOL(turbo_verify_machine_graph, false,
+            "verify TurboFan machine graph before instruction selection")
 DEFINE_BOOL(turbo_stats, false, "print TurboFan statistics")
 DEFINE_BOOL(turbo_stats_nvp, false,
             "print TurboFan statistics in machine-readable format")
@@ -487,9 +492,8 @@
             "enable instruction scheduling in TurboFan")
 DEFINE_BOOL(turbo_stress_instruction_scheduling, false,
             "randomly schedule instructions to stress dependency tracking")
-DEFINE_BOOL(turbo_store_elimination, false,
+DEFINE_BOOL(turbo_store_elimination, true,
             "enable store-store elimination in TurboFan")
-DEFINE_IMPLICATION(turbo, turbo_store_elimination)
 
 // Flags to help platform porters
 DEFINE_BOOL(minimal, false,
@@ -529,6 +533,12 @@
             "enable prototype simd opcodes for wasm")
 DEFINE_BOOL(wasm_eh_prototype, false,
             "enable prototype exception handling opcodes for wasm")
+DEFINE_BOOL(wasm_mv_prototype, false,
+            "enable prototype multi-value support for wasm")
+
+DEFINE_BOOL(wasm_trap_handler, false,
+            "use signal handlers to catch out of bounds memory access in wasm"
+            " (currently Linux x86_64 only)")
 
 // Profiler flags.
 DEFINE_INT(frame_count, 1, "number of stack frames inspected by the profiler")
@@ -548,6 +558,7 @@
 DEFINE_BOOL(debug_code, false, "generate extra code (assertions) for debugging")
 DEFINE_BOOL(code_comments, false, "emit comments in code disassembly")
 DEFINE_BOOL(enable_sse3, true, "enable use of SSE3 instructions if available")
+DEFINE_BOOL(enable_ssse3, true, "enable use of SSSE3 instructions if available")
 DEFINE_BOOL(enable_sse4_1, true,
             "enable use of SSE4.1 instructions if available")
 DEFINE_BOOL(enable_sahf, true,
@@ -559,35 +570,29 @@
 DEFINE_BOOL(enable_lzcnt, true, "enable use of LZCNT instruction if available")
 DEFINE_BOOL(enable_popcnt, true,
             "enable use of POPCNT instruction if available")
-DEFINE_BOOL(enable_vfp3, ENABLE_VFP3_DEFAULT,
-            "enable use of VFP3 instructions if available")
-DEFINE_BOOL(enable_armv7, ENABLE_ARMV7_DEFAULT,
-            "enable use of ARMv7 instructions if available (ARM only)")
-DEFINE_BOOL(enable_armv8, ENABLE_ARMV8_DEFAULT,
-            "enable use of ARMv8 instructions if available (ARM 32-bit only)")
-DEFINE_BOOL(enable_neon, ENABLE_NEON_DEFAULT,
-            "enable use of NEON instructions if available (ARM only)")
-DEFINE_BOOL(enable_sudiv, true,
-            "enable use of SDIV and UDIV instructions if available (ARM only)")
-DEFINE_BOOL(enable_movw_movt, false,
-            "enable loading 32-bit constant by means of movw/movt "
-            "instruction pairs (ARM only)")
-DEFINE_BOOL(enable_32dregs, ENABLE_32DREGS_DEFAULT,
-            "enable use of d16-d31 registers on ARM - this requires VFP3")
+DEFINE_STRING(arm_arch, ARM_ARCH_DEFAULT,
+              "generate instructions for the selected ARM architecture if "
+              "available: armv6, armv7, armv7+sudiv or armv8")
 DEFINE_BOOL(enable_vldr_imm, false,
             "enable use of constant pools for double immediate (ARM only)")
 DEFINE_BOOL(force_long_branches, false,
             "force all emitted branches to be in long mode (MIPS/PPC only)")
 DEFINE_STRING(mcpu, "auto", "enable optimization for specific cpu")
 
+// Deprecated ARM flags (replaced by arm_arch).
+DEFINE_MAYBE_BOOL(enable_armv7, "deprecated (use --arm_arch instead)")
+DEFINE_MAYBE_BOOL(enable_vfp3, "deprecated (use --arm_arch instead)")
+DEFINE_MAYBE_BOOL(enable_32dregs, "deprecated (use --arm_arch instead)")
+DEFINE_MAYBE_BOOL(enable_neon, "deprecated (use --arm_arch instead)")
+DEFINE_MAYBE_BOOL(enable_sudiv, "deprecated (use --arm_arch instead)")
+DEFINE_MAYBE_BOOL(enable_armv8, "deprecated (use --arm_arch instead)")
+
 // regexp-macro-assembler-*.cc
 DEFINE_BOOL(enable_regexp_unaligned_accesses, true,
             "enable unaligned accesses for the regexp engine")
 
-DEFINE_IMPLICATION(enable_armv8, enable_vfp3)
-DEFINE_IMPLICATION(enable_armv8, enable_neon)
-DEFINE_IMPLICATION(enable_armv8, enable_32dregs)
-DEFINE_IMPLICATION(enable_armv8, enable_sudiv)
+// api.cc
+DEFINE_BOOL(script_streaming, true, "enable parsing on background")
 
 // bootstrapper.cc
 DEFINE_STRING(expose_natives_as, NULL, "expose natives in global object")
@@ -711,8 +716,6 @@
             "print one trace line following each idle notification")
 DEFINE_BOOL(trace_idle_notification_verbose, false,
             "prints the heap state used by the idle notification")
-DEFINE_BOOL(print_cumulative_gc_stat, false,
-            "print cumulative GC statistics in name=value format on exit")
 DEFINE_BOOL(print_max_heap_committed, false,
             "print statistics of the maximum memory committed for the heap "
             "in name=value format on exit")
@@ -736,7 +739,7 @@
             "track un-executed functions to age code and flush only "
             "old code (required for code flushing)")
 DEFINE_BOOL(incremental_marking, true, "use incremental marking")
-DEFINE_BOOL(incremental_marking_wrappers, true,
+DEFINE_BOOL(incremental_marking_wrappers, false,
             "use incremental marking for marking wrappers")
 DEFINE_INT(min_progress_during_incremental_marking_finalization, 32,
            "keep finalizing incremental marking as long as we discover at "
@@ -800,6 +803,7 @@
 DEFINE_BOOL(use_ic, true, "use inline caching")
 DEFINE_BOOL(trace_ic, false, "trace inline cache state transitions")
 DEFINE_BOOL(tf_load_ic_stub, true, "use TF LoadIC stub")
+DEFINE_BOOL(tf_store_ic_stub, true, "use TF StoreIC stub")
 
 // macro-assembler-ia32.cc
 DEFINE_BOOL(native_code_counters, false,
@@ -835,6 +839,7 @@
 // parser.cc
 DEFINE_BOOL(allow_natives_syntax, false, "allow natives syntax")
 DEFINE_BOOL(trace_parse, false, "trace parsing and preparsing")
+DEFINE_BOOL(lazy_inner_functions, false, "enable lazy parsing inner functions")
 
 // simulator-arm.cc, simulator-arm64.cc and simulator-mips.cc
 DEFINE_BOOL(trace_sim, false, "Trace simulator execution")
@@ -868,6 +873,10 @@
             "print stack trace when an illegal exception is thrown")
 DEFINE_BOOL(abort_on_uncaught_exception, false,
             "abort program (dump core) when an uncaught exception is thrown")
+DEFINE_BOOL(abort_on_stack_overflow, false,
+            "Abort program when stack overflow (as opposed to throwing "
+            "RangeError). This is useful for fuzzing where the spec behaviour "
+            "would introduce nondeterminism.")
 DEFINE_BOOL(randomize_hashes, true,
             "randomize hashes to avoid predictable hash collisions "
             "(with snapshots this option cannot override the baked-in seed)")
@@ -925,11 +934,6 @@
             "Test mode only flag. It allows an unit test to select evacuation "
             "candidates pages (requires --stress_compaction).")
 
-// api.cc
-DEFINE_INT(external_allocation_limit_incremental_time, 1,
-           "Time spent in incremental marking steps (in ms) once the external "
-           "allocation limit is reached")
-
 DEFINE_BOOL(disable_old_api_accessors, false,
             "Disable old-style API accessors whose setters trigger through the "
             "prototype chain")
@@ -1036,6 +1040,10 @@
 // Debugger
 DEFINE_BOOL(print_break_location, false, "print source location on debug break")
 
+// wasm instance management
+DEFINE_BOOL(trace_wasm_instances, false,
+            "trace creation and collection of wasm instances")
+
 //
 // Logging and profiling flags
 //
@@ -1126,6 +1134,7 @@
 // codegen-ia32.cc / codegen-arm.cc
 DEFINE_BOOL(print_code, false, "print generated code")
 DEFINE_BOOL(print_opt_code, false, "print optimized code")
+DEFINE_STRING(print_opt_code_filter, "*", "filter for printing optimized code")
 DEFINE_BOOL(print_unopt_code, false,
             "print unoptimized code before "
             "printing optimized code based on it")
@@ -1188,8 +1197,6 @@
             "enable in-object double fields unboxing (64-bit only)")
 DEFINE_IMPLICATION(unbox_double_fields, track_double_fields)
 
-DEFINE_BOOL(global_var_shortcuts, false, "use ic-less global loads and stores")
-
 
 // Cleanup...
 #undef FLAG_FULL
diff --git a/src/frames-inl.h b/src/frames-inl.h
index 77784b8..61d0dcd 100644
--- a/src/frames-inl.h
+++ b/src/frames-inl.h
@@ -63,6 +63,8 @@
 
 
 inline Code* StackFrame::LookupCode() const {
+  // TODO(jgruber): This should really check that pc is within the returned
+  // code's instruction range [instruction_start(), instruction_end()[.
   return GetContainingCode(isolate(), pc());
 }
 
diff --git a/src/frames.cc b/src/frames.cc
index f0fa58d..c67fdc2 100644
--- a/src/frames.cc
+++ b/src/frames.cc
@@ -1461,9 +1461,9 @@
 }
 
 Object* WasmFrame::wasm_obj() const {
-  FixedArray* deopt_data = LookupCode()->deoptimization_data();
-  DCHECK(deopt_data->length() == 2);
-  return deopt_data->get(0);
+  Object* ret = wasm::GetOwningWasmInstance(LookupCode());
+  if (ret == nullptr) ret = *(isolate()->factory()->undefined_value());
+  return ret;
 }
 
 uint32_t WasmFrame::function_index() const {
@@ -1478,6 +1478,15 @@
   return wasm::WasmDebugInfo::GetFunctionScript(debug_info, function_index());
 }
 
+int WasmFrame::LookupExceptionHandlerInTable(int* stack_slots) {
+  DCHECK_NOT_NULL(stack_slots);
+  Code* code = LookupCode();
+  HandlerTable* table = HandlerTable::cast(code->handler_table());
+  int pc_offset = static_cast<int>(pc() - code->entry());
+  *stack_slots = code->stack_slots();
+  return table->LookupReturn(pc_offset);
+}
+
 namespace {
 
 
diff --git a/src/frames.h b/src/frames.h
index 1277023..373f4de 100644
--- a/src/frames.h
+++ b/src/frames.h
@@ -525,6 +525,8 @@
 
   Isolate* isolate() const { return isolate_; }
 
+  void operator=(const StackFrame& original) = delete;
+
  protected:
   inline explicit StackFrame(StackFrameIteratorBase* iterator);
   virtual ~StackFrame() { }
@@ -563,9 +565,6 @@
   friend class StackFrameIteratorBase;
   friend class StackHandlerIterator;
   friend class SafeStackFrameIterator;
-
- private:
-  void operator=(const StackFrame& original);
 };
 
 
@@ -1057,6 +1056,10 @@
   void Print(StringStream* accumulator, PrintMode mode,
              int index) const override;
 
+  // Lookup exception handler for current {pc}, returns -1 if none found. Also
+  // returns the stack slot count of the entire frame.
+  int LookupExceptionHandlerInTable(int* data);
+
   // Determine the code for the frame.
   Code* unchecked_code() const override;
 
diff --git a/src/full-codegen/arm/full-codegen-arm.cc b/src/full-codegen/arm/full-codegen-arm.cc
index 7887d32..e8eeb8e 100644
--- a/src/full-codegen/arm/full-codegen-arm.cc
+++ b/src/full-codegen/arm/full-codegen-arm.cc
@@ -4,14 +4,16 @@
 
 #if V8_TARGET_ARCH_ARM
 
+#include "src/full-codegen/full-codegen.h"
+#include "src/ast/compile-time-value.h"
 #include "src/ast/scopes.h"
 #include "src/code-factory.h"
 #include "src/code-stubs.h"
 #include "src/codegen.h"
+#include "src/compilation-info.h"
+#include "src/compiler.h"
 #include "src/debug/debug.h"
-#include "src/full-codegen/full-codegen.h"
 #include "src/ic/ic.h"
-#include "src/parsing/parser.h"
 
 #include "src/arm/code-stubs-arm.h"
 #include "src/arm/macro-assembler-arm.h"
@@ -126,6 +128,20 @@
   info->set_prologue_offset(masm_->pc_offset());
   __ Prologue(info->GeneratePreagedPrologue());
 
+  // Increment invocation count for the function.
+  {
+    Comment cmnt(masm_, "[ Increment invocation count");
+    __ ldr(r2, FieldMemOperand(r1, JSFunction::kLiteralsOffset));
+    __ ldr(r2, FieldMemOperand(r2, LiteralsArray::kFeedbackVectorOffset));
+    __ ldr(r9, FieldMemOperand(r2, TypeFeedbackVector::kInvocationCountIndex *
+                                           kPointerSize +
+                                       TypeFeedbackVector::kHeaderSize));
+    __ add(r9, r9, Operand(Smi::FromInt(1)));
+    __ str(r9, FieldMemOperand(r2, TypeFeedbackVector::kInvocationCountIndex *
+                                           kPointerSize +
+                                       TypeFeedbackVector::kHeaderSize));
+  }
+
   { Comment cmnt(masm_, "[ Allocate locals");
     int locals_count = info->scope()->num_stack_slots();
     // Generators allocate locals, if any, in context slots.
@@ -167,14 +183,14 @@
   bool function_in_register_r1 = true;
 
   // Possibly allocate a local context.
-  if (info->scope()->num_heap_slots() > 0) {
+  if (info->scope()->NeedsContext()) {
     // Argument to NewContext is the function, which is still in r1.
     Comment cmnt(masm_, "[ Allocate context");
     bool need_write_barrier = true;
     int slots = info->scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
     if (info->scope()->is_script_scope()) {
       __ push(r1);
-      __ Push(info->scope()->GetScopeInfo(info->isolate()));
+      __ Push(info->scope()->scope_info());
       __ CallRuntime(Runtime::kNewScriptContext);
       PrepareForBailoutForId(BailoutId::ScriptContext(),
                              BailoutState::TOS_REGISTER);
@@ -259,9 +275,8 @@
   }
 
   // Possibly allocate RestParameters
-  int rest_index;
-  Variable* rest_param = info->scope()->rest_parameter(&rest_index);
-  if (rest_param) {
+  Variable* rest_param = info->scope()->rest_parameter();
+  if (rest_param != nullptr) {
     Comment cmnt(masm_, "[ Allocate rest parameter array");
     if (!function_in_register_r1) {
       __ ldr(r1, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
@@ -770,7 +785,6 @@
   VariableProxy* proxy = declaration->proxy();
   Variable* variable = proxy->var();
   switch (variable->location()) {
-    case VariableLocation::GLOBAL:
     case VariableLocation::UNALLOCATED: {
       DCHECK(!variable->binding_needs_init());
       FeedbackVectorSlot slot = proxy->VariableFeedbackSlot();
@@ -821,7 +835,6 @@
   VariableProxy* proxy = declaration->proxy();
   Variable* variable = proxy->var();
   switch (variable->location()) {
-    case VariableLocation::GLOBAL:
     case VariableLocation::UNALLOCATED: {
       FeedbackVectorSlot slot = proxy->VariableFeedbackSlot();
       DCHECK(!slot.IsInvalid());
@@ -1138,6 +1151,7 @@
   // Generate code for the going to the next element by incrementing
   // the index (smi) stored on top of the stack.
   __ bind(loop_statement.continue_label());
+  PrepareForBailoutForId(stmt->IncrementId(), BailoutState::NO_REGISTERS);
   __ pop(r0);
   __ add(r0, r0, Operand(Smi::FromInt(1)));
   __ push(r0);
@@ -1160,12 +1174,9 @@
                                           FeedbackVectorSlot slot) {
   DCHECK(NeedsHomeObject(initializer));
   __ ldr(StoreDescriptor::ReceiverRegister(), MemOperand(sp));
-  __ mov(StoreDescriptor::NameRegister(),
-         Operand(isolate()->factory()->home_object_symbol()));
   __ ldr(StoreDescriptor::ValueRegister(),
          MemOperand(sp, offset * kPointerSize));
-  EmitLoadStoreICSlot(slot);
-  CallStoreIC();
+  CallStoreIC(slot, isolate()->factory()->home_object_symbol());
 }
 
 
@@ -1174,12 +1185,9 @@
                                                      FeedbackVectorSlot slot) {
   DCHECK(NeedsHomeObject(initializer));
   __ Move(StoreDescriptor::ReceiverRegister(), r0);
-  __ mov(StoreDescriptor::NameRegister(),
-         Operand(isolate()->factory()->home_object_symbol()));
   __ ldr(StoreDescriptor::ValueRegister(),
          MemOperand(sp, offset * kPointerSize));
-  EmitLoadStoreICSlot(slot);
-  CallStoreIC();
+  CallStoreIC(slot, isolate()->factory()->home_object_symbol());
 }
 
 
@@ -1219,7 +1227,7 @@
   Register temp = r4;
 
   for (Scope* s = scope(); s != var->scope(); s = s->outer_scope()) {
-    if (s->num_heap_slots() > 0) {
+    if (s->NeedsContext()) {
       if (s->calls_sloppy_eval()) {
         // Check that extension is "the hole".
         __ ldr(temp, ContextMemOperand(context, Context::EXTENSION_INDEX));
@@ -1268,20 +1276,6 @@
   }
 }
 
-
-void FullCodeGenerator::EmitGlobalVariableLoad(VariableProxy* proxy,
-                                               TypeofMode typeof_mode) {
-#ifdef DEBUG
-  Variable* var = proxy->var();
-  DCHECK(var->IsUnallocatedOrGlobalSlot() ||
-         (var->IsLookupSlot() && var->mode() == DYNAMIC_GLOBAL));
-#endif
-  __ mov(LoadGlobalDescriptor::SlotRegister(),
-         Operand(SmiFromSlot(proxy->VariableFeedbackSlot())));
-  CallLoadGlobalIC(typeof_mode);
-}
-
-
 void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy,
                                          TypeofMode typeof_mode) {
   // Record position before possible IC call.
@@ -1292,7 +1286,6 @@
   // Three cases: global variables, lookup variables, and all other types of
   // variables.
   switch (var->location()) {
-    case VariableLocation::GLOBAL:
     case VariableLocation::UNALLOCATED: {
       Comment cmnt(masm_, "[ Global variable");
       EmitGlobalVariableLoad(proxy, typeof_mode);
@@ -1415,10 +1408,8 @@
           if (property->emit_store()) {
             VisitForAccumulatorValue(value);
             DCHECK(StoreDescriptor::ValueRegister().is(r0));
-            __ mov(StoreDescriptor::NameRegister(), Operand(key->value()));
             __ ldr(StoreDescriptor::ReceiverRegister(), MemOperand(sp));
-            EmitLoadStoreICSlot(property->GetSlot(0));
-            CallStoreIC();
+            CallStoreIC(property->GetSlot(0), key->value());
             PrepareForBailoutForId(key->id(), BailoutState::NO_REGISTERS);
 
             if (NeedsHomeObject(value)) {
@@ -1590,6 +1581,7 @@
   } else {
     FastCloneShallowArrayStub stub(isolate(), allocation_site_mode);
     __ CallStub(&stub);
+    RestoreContext();
   }
   PrepareForBailoutForId(expr->CreateLiteralId(), BailoutState::TOS_REGISTER);
 
@@ -1599,8 +1591,7 @@
 
   // Emit code to evaluate all the non-constant subexpressions and to store
   // them into the newly cloned array.
-  int array_index = 0;
-  for (; array_index < length; array_index++) {
+  for (int array_index = 0; array_index < length; array_index++) {
     Expression* subexpr = subexprs->at(array_index);
     DCHECK(!subexpr->IsSpread());
 
@@ -1616,31 +1607,7 @@
 
     __ mov(StoreDescriptor::NameRegister(), Operand(Smi::FromInt(array_index)));
     __ ldr(StoreDescriptor::ReceiverRegister(), MemOperand(sp, 0));
-    EmitLoadStoreICSlot(expr->LiteralFeedbackSlot());
-    Handle<Code> ic =
-        CodeFactory::KeyedStoreIC(isolate(), language_mode()).code();
-    CallIC(ic);
-
-    PrepareForBailoutForId(expr->GetIdForElement(array_index),
-                           BailoutState::NO_REGISTERS);
-  }
-
-  // In case the array literal contains spread expressions it has two parts. The
-  // first part is  the "static" array which has a literal index is  handled
-  // above. The second part is the part after the first spread expression
-  // (inclusive) and these elements gets appended to the array. Note that the
-  // number elements an iterable produces is unknown ahead of time.
-  if (array_index < length && result_saved) {
-    PopOperand(r0);
-    result_saved = false;
-  }
-  for (; array_index < length; array_index++) {
-    Expression* subexpr = subexprs->at(array_index);
-
-    PushOperand(r0);
-    DCHECK(!subexpr->IsSpread());
-    VisitForStackValue(subexpr);
-    CallRuntimeWithOperands(Runtime::kAppendElement);
+    CallKeyedStoreIC(expr->LiteralFeedbackSlot());
 
     PrepareForBailoutForId(expr->GetIdForElement(array_index),
                            BailoutState::NO_REGISTERS);
@@ -1992,7 +1959,7 @@
 
 void FullCodeGenerator::EmitClassDefineProperties(ClassLiteral* lit) {
   for (int i = 0; i < lit->properties()->length(); i++) {
-    ObjectLiteral::Property* property = lit->properties()->at(i);
+    ClassLiteral::Property* property = lit->properties()->at(i);
     Expression* value = property->value();
 
     Register scratch = r1;
@@ -2019,26 +1986,23 @@
     }
 
     switch (property->kind()) {
-      case ObjectLiteral::Property::CONSTANT:
-      case ObjectLiteral::Property::MATERIALIZED_LITERAL:
-      case ObjectLiteral::Property::PROTOTYPE:
-        UNREACHABLE();
-      case ObjectLiteral::Property::COMPUTED:
+      case ClassLiteral::Property::METHOD:
         PushOperand(Smi::FromInt(DONT_ENUM));
         PushOperand(Smi::FromInt(property->NeedsSetFunctionName()));
         CallRuntimeWithOperands(Runtime::kDefineDataPropertyInLiteral);
         break;
 
-      case ObjectLiteral::Property::GETTER:
+      case ClassLiteral::Property::GETTER:
         PushOperand(Smi::FromInt(DONT_ENUM));
         CallRuntimeWithOperands(Runtime::kDefineGetterPropertyUnchecked);
         break;
 
-      case ObjectLiteral::Property::SETTER:
+      case ClassLiteral::Property::SETTER:
         PushOperand(Smi::FromInt(DONT_ENUM));
         CallRuntimeWithOperands(Runtime::kDefineSetterPropertyUnchecked);
         break;
 
+      case ClassLiteral::Property::FIELD:
       default:
         UNREACHABLE();
     }
@@ -2075,10 +2039,7 @@
       VisitForAccumulatorValue(prop->obj());
       __ Move(StoreDescriptor::ReceiverRegister(), r0);
       PopOperand(StoreDescriptor::ValueRegister());  // Restore value.
-      __ mov(StoreDescriptor::NameRegister(),
-             Operand(prop->key()->AsLiteral()->value()));
-      EmitLoadStoreICSlot(slot);
-      CallStoreIC();
+      CallStoreIC(slot, prop->key()->AsLiteral()->value());
       break;
     }
     case NAMED_SUPER_PROPERTY: {
@@ -2125,10 +2086,7 @@
       __ Move(StoreDescriptor::NameRegister(), r0);
       PopOperands(StoreDescriptor::ValueRegister(),
                   StoreDescriptor::ReceiverRegister());
-      EmitLoadStoreICSlot(slot);
-      Handle<Code> ic =
-          CodeFactory::KeyedStoreIC(isolate(), language_mode()).code();
-      CallIC(ic);
+      CallKeyedStoreIC(slot);
       break;
     }
   }
@@ -2153,10 +2111,8 @@
                                                FeedbackVectorSlot slot) {
   if (var->IsUnallocated()) {
     // Global var, const, or let.
-    __ mov(StoreDescriptor::NameRegister(), Operand(var->name()));
     __ LoadGlobalObject(StoreDescriptor::ReceiverRegister());
-    EmitLoadStoreICSlot(slot);
-    CallStoreIC();
+    CallStoreIC(slot, var->name());
 
   } else if (IsLexicalVariableMode(var->mode()) && op != Token::INIT) {
     DCHECK(!var->IsLookupSlot());
@@ -2173,10 +2129,10 @@
       __ CallRuntime(Runtime::kThrowReferenceError);
       __ bind(&assign);
     }
-    if (var->mode() == CONST) {
-      __ CallRuntime(Runtime::kThrowConstAssignError);
-    } else {
+    if (var->mode() != CONST) {
       EmitStoreToStackLocalOrContextSlot(var, location);
+    } else if (var->throw_on_const_assignment(language_mode())) {
+      __ CallRuntime(Runtime::kThrowConstAssignError);
     }
   } else if (var->is_this() && var->mode() == CONST && op == Token::INIT) {
     // Initializing assignment to const {this} needs a write barrier.
@@ -2192,7 +2148,8 @@
     __ bind(&uninitialized_this);
     EmitStoreToStackLocalOrContextSlot(var, location);
 
-  } else if (!var->is_const_mode() || op == Token::INIT) {
+  } else {
+    DCHECK(var->mode() != CONST || op == Token::INIT);
     if (var->IsLookupSlot()) {
       // Assignment to var.
       __ Push(var->name());
@@ -2213,13 +2170,6 @@
       }
       EmitStoreToStackLocalOrContextSlot(var, location);
     }
-
-  } else {
-    DCHECK(var->mode() == CONST_LEGACY && op != Token::INIT);
-    if (is_strict(language_mode())) {
-      __ CallRuntime(Runtime::kThrowConstAssignError);
-    }
-    // Silently ignore store in sloppy mode.
   }
 }
 
@@ -2230,11 +2180,8 @@
   DCHECK(prop != NULL);
   DCHECK(prop->key()->IsLiteral());
 
-  __ mov(StoreDescriptor::NameRegister(),
-         Operand(prop->key()->AsLiteral()->value()));
   PopOperand(StoreDescriptor::ReceiverRegister());
-  EmitLoadStoreICSlot(expr->AssignmentSlot());
-  CallStoreIC();
+  CallStoreIC(expr->AssignmentSlot(), prop->key()->AsLiteral()->value());
 
   PrepareForBailoutForId(expr->AssignmentId(), BailoutState::TOS_REGISTER);
   context()->Plug(r0);
@@ -2276,10 +2223,7 @@
               StoreDescriptor::NameRegister());
   DCHECK(StoreDescriptor::ValueRegister().is(r0));
 
-  Handle<Code> ic =
-      CodeFactory::KeyedStoreIC(isolate(), language_mode()).code();
-  EmitLoadStoreICSlot(expr->AssignmentSlot());
-  CallIC(ic);
+  CallKeyedStoreIC(expr->AssignmentSlot());
 
   PrepareForBailoutForId(expr->AssignmentId(), BailoutState::TOS_REGISTER);
   context()->Plug(r0);
@@ -2839,24 +2783,6 @@
 }
 
 
-void FullCodeGenerator::EmitStringCharFromCode(CallRuntime* expr) {
-  ZoneList<Expression*>* args = expr->arguments();
-  DCHECK(args->length() == 1);
-  VisitForAccumulatorValue(args->at(0));
-
-  Label done;
-  StringCharFromCodeGenerator generator(r0, r1);
-  generator.GenerateFast(masm_);
-  __ jmp(&done);
-
-  NopRuntimeCallHelper call_helper;
-  generator.GenerateSlow(masm_, call_helper);
-
-  __ bind(&done);
-  context()->Plug(r1);
-}
-
-
 void FullCodeGenerator::EmitStringCharCodeAt(CallRuntime* expr) {
   ZoneList<Expression*>* args = expr->arguments();
   DCHECK(args->length() == 2);
@@ -3048,7 +2974,7 @@
         // "delete this" is allowed.
         bool is_this = var->is_this();
         DCHECK(is_sloppy(language_mode()) || is_this);
-        if (var->IsUnallocatedOrGlobalSlot()) {
+        if (var->IsUnallocated()) {
           __ LoadGlobalObject(r2);
           __ mov(r1, Operand(var->name()));
           __ Push(r2, r1);
@@ -3333,11 +3259,8 @@
       }
       break;
     case NAMED_PROPERTY: {
-      __ mov(StoreDescriptor::NameRegister(),
-             Operand(prop->key()->AsLiteral()->value()));
       PopOperand(StoreDescriptor::ReceiverRegister());
-      EmitLoadStoreICSlot(expr->CountSlot());
-      CallStoreIC();
+      CallStoreIC(expr->CountSlot(), prop->key()->AsLiteral()->value());
       PrepareForBailoutForId(expr->AssignmentId(), BailoutState::TOS_REGISTER);
       if (expr->is_postfix()) {
         if (!context()->IsEffect()) {
@@ -3375,10 +3298,7 @@
     case KEYED_PROPERTY: {
       PopOperands(StoreDescriptor::ReceiverRegister(),
                   StoreDescriptor::NameRegister());
-      Handle<Code> ic =
-          CodeFactory::KeyedStoreIC(isolate(), language_mode()).code();
-      EmitLoadStoreICSlot(expr->CountSlot());
-      CallIC(ic);
+      CallKeyedStoreIC(expr->CountSlot());
       PrepareForBailoutForId(expr->AssignmentId(), BailoutState::TOS_REGISTER);
       if (expr->is_postfix()) {
         if (!context()->IsEffect()) {
diff --git a/src/full-codegen/arm64/full-codegen-arm64.cc b/src/full-codegen/arm64/full-codegen-arm64.cc
index a4f32da..1854f10 100644
--- a/src/full-codegen/arm64/full-codegen-arm64.cc
+++ b/src/full-codegen/arm64/full-codegen-arm64.cc
@@ -4,14 +4,16 @@
 
 #if V8_TARGET_ARCH_ARM64
 
+#include "src/full-codegen/full-codegen.h"
+#include "src/ast/compile-time-value.h"
 #include "src/ast/scopes.h"
 #include "src/code-factory.h"
 #include "src/code-stubs.h"
 #include "src/codegen.h"
+#include "src/compilation-info.h"
+#include "src/compiler.h"
 #include "src/debug/debug.h"
-#include "src/full-codegen/full-codegen.h"
 #include "src/ic/ic.h"
-#include "src/parsing/parser.h"
 
 #include "src/arm64/code-stubs-arm64.h"
 #include "src/arm64/frames-arm64.h"
@@ -130,6 +132,20 @@
   info->set_prologue_offset(masm_->pc_offset());
   __ Prologue(info->GeneratePreagedPrologue());
 
+  // Increment invocation count for the function.
+  {
+    Comment cmnt(masm_, "[ Increment invocation count");
+    __ Ldr(x11, FieldMemOperand(x1, JSFunction::kLiteralsOffset));
+    __ Ldr(x11, FieldMemOperand(x11, LiteralsArray::kFeedbackVectorOffset));
+    __ Ldr(x10, FieldMemOperand(x11, TypeFeedbackVector::kInvocationCountIndex *
+                                             kPointerSize +
+                                         TypeFeedbackVector::kHeaderSize));
+    __ Add(x10, x10, Operand(Smi::FromInt(1)));
+    __ Str(x10, FieldMemOperand(x11, TypeFeedbackVector::kInvocationCountIndex *
+                                             kPointerSize +
+                                         TypeFeedbackVector::kHeaderSize));
+  }
+
   // Reserve space on the stack for locals.
   { Comment cmnt(masm_, "[ Allocate locals");
     int locals_count = info->scope()->num_stack_slots();
@@ -170,13 +186,13 @@
 
   bool function_in_register_x1 = true;
 
-  if (info->scope()->num_heap_slots() > 0) {
+  if (info->scope()->NeedsContext()) {
     // Argument to NewContext is the function, which is still in x1.
     Comment cmnt(masm_, "[ Allocate context");
     bool need_write_barrier = true;
     int slots = info->scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
     if (info->scope()->is_script_scope()) {
-      __ Mov(x10, Operand(info->scope()->GetScopeInfo(info->isolate())));
+      __ Mov(x10, Operand(info->scope()->scope_info()));
       __ Push(x1, x10);
       __ CallRuntime(Runtime::kNewScriptContext);
       PrepareForBailoutForId(BailoutId::ScriptContext(),
@@ -261,9 +277,8 @@
   }
 
   // Possibly allocate RestParameters
-  int rest_index;
-  Variable* rest_param = info->scope()->rest_parameter(&rest_index);
-  if (rest_param) {
+  Variable* rest_param = info->scope()->rest_parameter();
+  if (rest_param != nullptr) {
     Comment cmnt(masm_, "[ Allocate rest parameter array");
     if (!function_in_register_x1) {
       __ Ldr(x1, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
@@ -765,7 +780,6 @@
   VariableProxy* proxy = declaration->proxy();
   Variable* variable = proxy->var();
   switch (variable->location()) {
-    case VariableLocation::GLOBAL:
     case VariableLocation::UNALLOCATED: {
       DCHECK(!variable->binding_needs_init());
       FeedbackVectorSlot slot = proxy->VariableFeedbackSlot();
@@ -816,7 +830,6 @@
   VariableProxy* proxy = declaration->proxy();
   Variable* variable = proxy->var();
   switch (variable->location()) {
-    case VariableLocation::GLOBAL:
     case VariableLocation::UNALLOCATED: {
       FeedbackVectorSlot slot = proxy->VariableFeedbackSlot();
       DCHECK(!slot.IsInvalid());
@@ -1126,6 +1139,7 @@
   // Generate code for going to the next element by incrementing
   // the index (smi) stored on top of the stack.
   __ Bind(loop_statement.continue_label());
+  PrepareForBailoutForId(stmt->IncrementId(), BailoutState::NO_REGISTERS);
   // TODO(all): We could use a callee saved register to avoid popping.
   __ Pop(x0);
   __ Add(x0, x0, Smi::FromInt(1));
@@ -1149,11 +1163,8 @@
                                           FeedbackVectorSlot slot) {
   DCHECK(NeedsHomeObject(initializer));
   __ Peek(StoreDescriptor::ReceiverRegister(), 0);
-  __ Mov(StoreDescriptor::NameRegister(),
-         Operand(isolate()->factory()->home_object_symbol()));
   __ Peek(StoreDescriptor::ValueRegister(), offset * kPointerSize);
-  EmitLoadStoreICSlot(slot);
-  CallStoreIC();
+  CallStoreIC(slot, isolate()->factory()->home_object_symbol());
 }
 
 
@@ -1162,11 +1173,8 @@
                                                      FeedbackVectorSlot slot) {
   DCHECK(NeedsHomeObject(initializer));
   __ Move(StoreDescriptor::ReceiverRegister(), x0);
-  __ Mov(StoreDescriptor::NameRegister(),
-         Operand(isolate()->factory()->home_object_symbol()));
   __ Peek(StoreDescriptor::ValueRegister(), offset * kPointerSize);
-  EmitLoadStoreICSlot(slot);
-  CallStoreIC();
+  CallStoreIC(slot, isolate()->factory()->home_object_symbol());
 }
 
 
@@ -1206,7 +1214,7 @@
   Register temp = x11;
 
   for (Scope* s = scope(); s != var->scope(); s = s->outer_scope()) {
-    if (s->num_heap_slots() > 0) {
+    if (s->NeedsContext()) {
       if (s->calls_sloppy_eval()) {
         // Check that extension is "the hole".
         __ Ldr(temp, ContextMemOperand(context, Context::EXTENSION_INDEX));
@@ -1254,20 +1262,6 @@
   }
 }
 
-
-void FullCodeGenerator::EmitGlobalVariableLoad(VariableProxy* proxy,
-                                               TypeofMode typeof_mode) {
-#ifdef DEBUG
-  Variable* var = proxy->var();
-  DCHECK(var->IsUnallocatedOrGlobalSlot() ||
-         (var->IsLookupSlot() && var->mode() == DYNAMIC_GLOBAL));
-#endif
-  __ Mov(LoadGlobalDescriptor::SlotRegister(),
-         SmiFromSlot(proxy->VariableFeedbackSlot()));
-  CallLoadGlobalIC(typeof_mode);
-}
-
-
 void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy,
                                          TypeofMode typeof_mode) {
   // Record position before possible IC call.
@@ -1278,7 +1272,6 @@
   // Three cases: global variables, lookup variables, and all other types of
   // variables.
   switch (var->location()) {
-    case VariableLocation::GLOBAL:
     case VariableLocation::UNALLOCATED: {
       Comment cmnt(masm_, "Global variable");
       EmitGlobalVariableLoad(proxy, typeof_mode);
@@ -1401,10 +1394,8 @@
           if (property->emit_store()) {
             VisitForAccumulatorValue(value);
             DCHECK(StoreDescriptor::ValueRegister().is(x0));
-            __ Mov(StoreDescriptor::NameRegister(), Operand(key->value()));
             __ Peek(StoreDescriptor::ReceiverRegister(), 0);
-            EmitLoadStoreICSlot(property->GetSlot(0));
-            CallStoreIC();
+            CallStoreIC(property->GetSlot(0), key->value());
             PrepareForBailoutForId(key->id(), BailoutState::NO_REGISTERS);
 
             if (NeedsHomeObject(value)) {
@@ -1572,6 +1563,7 @@
   } else {
     FastCloneShallowArrayStub stub(isolate(), allocation_site_mode);
     __ CallStub(&stub);
+    RestoreContext();
   }
   PrepareForBailoutForId(expr->CreateLiteralId(), BailoutState::TOS_REGISTER);
 
@@ -1581,8 +1573,7 @@
 
   // Emit code to evaluate all the non-constant subexpressions and to store
   // them into the newly cloned array.
-  int array_index = 0;
-  for (; array_index < length; array_index++) {
+  for (int array_index = 0; array_index < length; array_index++) {
     Expression* subexpr = subexprs->at(array_index);
     DCHECK(!subexpr->IsSpread());
 
@@ -1598,31 +1589,7 @@
 
     __ Mov(StoreDescriptor::NameRegister(), Smi::FromInt(array_index));
     __ Peek(StoreDescriptor::ReceiverRegister(), 0);
-    EmitLoadStoreICSlot(expr->LiteralFeedbackSlot());
-    Handle<Code> ic =
-        CodeFactory::KeyedStoreIC(isolate(), language_mode()).code();
-    CallIC(ic);
-
-    PrepareForBailoutForId(expr->GetIdForElement(array_index),
-                           BailoutState::NO_REGISTERS);
-  }
-
-  // In case the array literal contains spread expressions it has two parts. The
-  // first part is  the "static" array which has a literal index is  handled
-  // above. The second part is the part after the first spread expression
-  // (inclusive) and these elements gets appended to the array. Note that the
-  // number elements an iterable produces is unknown ahead of time.
-  if (array_index < length && result_saved) {
-    PopOperand(x0);
-    result_saved = false;
-  }
-  for (; array_index < length; array_index++) {
-    Expression* subexpr = subexprs->at(array_index);
-
-    PushOperand(x0);
-    DCHECK(!subexpr->IsSpread());
-    VisitForStackValue(subexpr);
-    CallRuntimeWithOperands(Runtime::kAppendElement);
+    CallKeyedStoreIC(expr->LiteralFeedbackSlot());
 
     PrepareForBailoutForId(expr->GetIdForElement(array_index),
                            BailoutState::NO_REGISTERS);
@@ -1892,7 +1859,7 @@
 
 void FullCodeGenerator::EmitClassDefineProperties(ClassLiteral* lit) {
   for (int i = 0; i < lit->properties()->length(); i++) {
-    ObjectLiteral::Property* property = lit->properties()->at(i);
+    ClassLiteral::Property* property = lit->properties()->at(i);
     Expression* value = property->value();
 
     Register scratch = x1;
@@ -1919,26 +1886,23 @@
     }
 
     switch (property->kind()) {
-      case ObjectLiteral::Property::CONSTANT:
-      case ObjectLiteral::Property::MATERIALIZED_LITERAL:
-      case ObjectLiteral::Property::PROTOTYPE:
-        UNREACHABLE();
-      case ObjectLiteral::Property::COMPUTED:
+      case ClassLiteral::Property::METHOD:
         PushOperand(Smi::FromInt(DONT_ENUM));
         PushOperand(Smi::FromInt(property->NeedsSetFunctionName()));
         CallRuntimeWithOperands(Runtime::kDefineDataPropertyInLiteral);
         break;
 
-      case ObjectLiteral::Property::GETTER:
+      case ClassLiteral::Property::GETTER:
         PushOperand(Smi::FromInt(DONT_ENUM));
         CallRuntimeWithOperands(Runtime::kDefineGetterPropertyUnchecked);
         break;
 
-      case ObjectLiteral::Property::SETTER:
+      case ClassLiteral::Property::SETTER:
         PushOperand(Smi::FromInt(DONT_ENUM));
         CallRuntimeWithOperands(Runtime::kDefineSetterPropertyUnchecked);
         break;
 
+      case ClassLiteral::Property::FIELD:
       default:
         UNREACHABLE();
     }
@@ -1967,10 +1931,7 @@
       // this copy.
       __ Mov(StoreDescriptor::ReceiverRegister(), x0);
       PopOperand(StoreDescriptor::ValueRegister());  // Restore value.
-      __ Mov(StoreDescriptor::NameRegister(),
-             Operand(prop->key()->AsLiteral()->value()));
-      EmitLoadStoreICSlot(slot);
-      CallStoreIC();
+      CallStoreIC(slot, prop->key()->AsLiteral()->value());
       break;
     }
     case NAMED_SUPER_PROPERTY: {
@@ -2017,10 +1978,7 @@
       __ Mov(StoreDescriptor::NameRegister(), x0);
       PopOperands(StoreDescriptor::ReceiverRegister(),
                   StoreDescriptor::ValueRegister());
-      EmitLoadStoreICSlot(slot);
-      Handle<Code> ic =
-          CodeFactory::KeyedStoreIC(isolate(), language_mode()).code();
-      CallIC(ic);
+      CallKeyedStoreIC(slot);
       break;
     }
   }
@@ -2046,10 +2004,8 @@
   ASM_LOCATION("FullCodeGenerator::EmitVariableAssignment");
   if (var->IsUnallocated()) {
     // Global var, const, or let.
-    __ Mov(StoreDescriptor::NameRegister(), Operand(var->name()));
     __ LoadGlobalObject(StoreDescriptor::ReceiverRegister());
-    EmitLoadStoreICSlot(slot);
-    CallStoreIC();
+    CallStoreIC(slot, var->name());
 
   } else if (IsLexicalVariableMode(var->mode()) && op != Token::INIT) {
     DCHECK(!var->IsLookupSlot());
@@ -2065,10 +2021,10 @@
       __ CallRuntime(Runtime::kThrowReferenceError);
       __ Bind(&assign);
     }
-    if (var->mode() == CONST) {
-      __ CallRuntime(Runtime::kThrowConstAssignError);
-    } else {
+    if (var->mode() != CONST) {
       EmitStoreToStackLocalOrContextSlot(var, location);
+    } else if (var->throw_on_const_assignment(language_mode())) {
+      __ CallRuntime(Runtime::kThrowConstAssignError);
     }
   } else if (var->is_this() && var->mode() == CONST && op == Token::INIT) {
     // Initializing assignment to const {this} needs a write barrier.
@@ -2083,7 +2039,8 @@
     __ bind(&uninitialized_this);
     EmitStoreToStackLocalOrContextSlot(var, location);
 
-  } else if (!var->is_const_mode() || op == Token::INIT) {
+  } else {
+    DCHECK(var->mode() != CONST || op == Token::INIT);
     if (var->IsLookupSlot()) {
       // Assignment to var.
       __ Push(var->name());
@@ -2103,13 +2060,6 @@
       }
       EmitStoreToStackLocalOrContextSlot(var, location);
     }
-
-  } else {
-    DCHECK(var->mode() == CONST_LEGACY && op != Token::INIT);
-    if (is_strict(language_mode())) {
-      __ CallRuntime(Runtime::kThrowConstAssignError);
-    }
-    // Silently ignore store in sloppy mode.
   }
 }
 
@@ -2121,11 +2071,8 @@
   DCHECK(prop != NULL);
   DCHECK(prop->key()->IsLiteral());
 
-  __ Mov(StoreDescriptor::NameRegister(),
-         Operand(prop->key()->AsLiteral()->value()));
   PopOperand(StoreDescriptor::ReceiverRegister());
-  EmitLoadStoreICSlot(expr->AssignmentSlot());
-  CallStoreIC();
+  CallStoreIC(expr->AssignmentSlot(), prop->key()->AsLiteral()->value());
 
   PrepareForBailoutForId(expr->AssignmentId(), BailoutState::TOS_REGISTER);
   context()->Plug(x0);
@@ -2170,10 +2117,7 @@
               StoreDescriptor::ReceiverRegister());
   DCHECK(StoreDescriptor::ValueRegister().is(x0));
 
-  Handle<Code> ic =
-      CodeFactory::KeyedStoreIC(isolate(), language_mode()).code();
-  EmitLoadStoreICSlot(expr->AssignmentSlot());
-  CallIC(ic);
+  CallKeyedStoreIC(expr->AssignmentSlot());
 
   PrepareForBailoutForId(expr->AssignmentId(), BailoutState::TOS_REGISTER);
   context()->Plug(x0);
@@ -2746,28 +2690,6 @@
 }
 
 
-void FullCodeGenerator::EmitStringCharFromCode(CallRuntime* expr) {
-  ZoneList<Expression*>* args = expr->arguments();
-  DCHECK(args->length() == 1);
-
-  VisitForAccumulatorValue(args->at(0));
-
-  Label done;
-  Register code = x0;
-  Register result = x1;
-
-  StringCharFromCodeGenerator generator(code, result);
-  generator.GenerateFast(masm_);
-  __ B(&done);
-
-  NopRuntimeCallHelper call_helper;
-  generator.GenerateSlow(masm_, call_helper);
-
-  __ Bind(&done);
-  context()->Plug(result);
-}
-
-
 void FullCodeGenerator::EmitStringCharCodeAt(CallRuntime* expr) {
   ZoneList<Expression*>* args = expr->arguments();
   DCHECK(args->length() == 2);
@@ -2971,7 +2893,7 @@
         // "delete this" is allowed.
         bool is_this = var->is_this();
         DCHECK(is_sloppy(language_mode()) || is_this);
-        if (var->IsUnallocatedOrGlobalSlot()) {
+        if (var->IsUnallocated()) {
           __ LoadGlobalObject(x12);
           __ Mov(x11, Operand(var->name()));
           __ Push(x12, x11);
@@ -3254,11 +3176,8 @@
       }
       break;
     case NAMED_PROPERTY: {
-      __ Mov(StoreDescriptor::NameRegister(),
-             Operand(prop->key()->AsLiteral()->value()));
       PopOperand(StoreDescriptor::ReceiverRegister());
-      EmitLoadStoreICSlot(expr->CountSlot());
-      CallStoreIC();
+      CallStoreIC(expr->CountSlot(), prop->key()->AsLiteral()->value());
       PrepareForBailoutForId(expr->AssignmentId(), BailoutState::TOS_REGISTER);
       if (expr->is_postfix()) {
         if (!context()->IsEffect()) {
@@ -3296,10 +3215,7 @@
     case KEYED_PROPERTY: {
       PopOperand(StoreDescriptor::NameRegister());
       PopOperand(StoreDescriptor::ReceiverRegister());
-      Handle<Code> ic =
-          CodeFactory::KeyedStoreIC(isolate(), language_mode()).code();
-      EmitLoadStoreICSlot(expr->CountSlot());
-      CallIC(ic);
+      CallKeyedStoreIC(expr->CountSlot());
       PrepareForBailoutForId(expr->AssignmentId(), BailoutState::TOS_REGISTER);
       if (expr->is_postfix()) {
         if (!context()->IsEffect()) {
diff --git a/src/full-codegen/full-codegen.cc b/src/full-codegen/full-codegen.cc
index d83a23b..25d7f92 100644
--- a/src/full-codegen/full-codegen.cc
+++ b/src/full-codegen/full-codegen.cc
@@ -10,6 +10,7 @@
 #include "src/ast/scopes.h"
 #include "src/code-factory.h"
 #include "src/codegen.h"
+#include "src/compilation-info.h"
 #include "src/compiler.h"
 #include "src/debug/debug.h"
 #include "src/debug/liveedit.h"
@@ -25,15 +26,69 @@
 
 #define __ ACCESS_MASM(masm())
 
+class FullCodegenCompilationJob final : public CompilationJob {
+ public:
+  explicit FullCodegenCompilationJob(CompilationInfo* info)
+      : CompilationJob(info->isolate(), info, "Full-Codegen") {}
+
+  bool can_execute_on_background_thread() const override { return false; }
+
+  CompilationJob::Status PrepareJobImpl() final { return SUCCEEDED; }
+
+  CompilationJob::Status ExecuteJobImpl() final {
+    DCHECK(ThreadId::Current().Equals(isolate()->thread_id()));
+    return FullCodeGenerator::MakeCode(info(), stack_limit()) ? SUCCEEDED
+                                                              : FAILED;
+  }
+
+  CompilationJob::Status FinalizeJobImpl() final { return SUCCEEDED; }
+};
+
+FullCodeGenerator::FullCodeGenerator(MacroAssembler* masm,
+                                     CompilationInfo* info,
+                                     uintptr_t stack_limit)
+    : masm_(masm),
+      info_(info),
+      isolate_(info->isolate()),
+      zone_(info->zone()),
+      scope_(info->scope()),
+      nesting_stack_(NULL),
+      loop_depth_(0),
+      operand_stack_depth_(0),
+      globals_(NULL),
+      context_(NULL),
+      bailout_entries_(info->HasDeoptimizationSupport()
+                           ? info->literal()->ast_node_count()
+                           : 0,
+                       info->zone()),
+      back_edges_(2, info->zone()),
+      handler_table_(info->zone()),
+      source_position_table_builder_(info->zone(),
+                                     info->SourcePositionRecordingMode()),
+      ic_total_count_(0) {
+  DCHECK(!info->IsStub());
+  Initialize(stack_limit);
+}
+
+// static
+CompilationJob* FullCodeGenerator::NewCompilationJob(CompilationInfo* info) {
+  return new FullCodegenCompilationJob(info);
+}
+
+// static
 bool FullCodeGenerator::MakeCode(CompilationInfo* info) {
+  return MakeCode(info, info->isolate()->stack_guard()->real_climit());
+}
+
+// static
+bool FullCodeGenerator::MakeCode(CompilationInfo* info, uintptr_t stack_limit) {
   Isolate* isolate = info->isolate();
 
   DCHECK(!FLAG_minimal);
   RuntimeCallTimerScope runtimeTimer(isolate,
                                      &RuntimeCallStats::CompileFullCode);
   TimerEventScope<TimerEventCompileFullCode> timer(info->isolate());
-  TRACE_EVENT_RUNTIME_CALL_STATS_TRACING_SCOPED(
-      isolate, &tracing::TraceEventStatsTable::CompileFullCode);
+  TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"), "V8.CompileFullCode");
 
   Handle<Script> script = info->script();
   if (!script->IsUndefined(isolate) &&
@@ -47,7 +102,7 @@
                       CodeObjectRequired::kYes);
   if (info->will_serialize()) masm.enable_serializer();
 
-  FullCodeGenerator cgen(&masm, info);
+  FullCodeGenerator cgen(&masm, info, stack_limit);
   cgen.Generate();
   if (cgen.HasStackOverflow()) {
     DCHECK(!isolate->has_pending_exception());
@@ -157,9 +212,8 @@
          expr->values()->length() > JSArray::kInitialMaxFastElementArray;
 }
 
-
-void FullCodeGenerator::Initialize() {
-  InitializeAstVisitor(info_->isolate());
+void FullCodeGenerator::Initialize(uintptr_t stack_limit) {
+  InitializeAstVisitor(stack_limit);
   masm_->set_emit_debug_code(FLAG_debug_code);
   masm_->set_predictable_code_size(true);
 }
@@ -169,23 +223,52 @@
   PrepareForBailoutForId(node->id(), state);
 }
 
-void FullCodeGenerator::CallLoadIC(TypeFeedbackId id) {
+void FullCodeGenerator::CallLoadIC(FeedbackVectorSlot slot, Handle<Object> name,
+                                   TypeFeedbackId id) {
+  DCHECK(name->IsName());
+  __ Move(LoadDescriptor::NameRegister(), name);
+
+  EmitLoadSlot(LoadDescriptor::SlotRegister(), slot);
+
   Handle<Code> ic = CodeFactory::LoadIC(isolate()).code();
   CallIC(ic, id);
   if (FLAG_tf_load_ic_stub) RestoreContext();
 }
 
-void FullCodeGenerator::CallLoadGlobalIC(TypeofMode typeof_mode,
-                                         TypeFeedbackId id) {
-  Handle<Code> ic = CodeFactory::LoadGlobalIC(isolate(), typeof_mode).code();
-  CallIC(ic, id);
-}
+void FullCodeGenerator::CallStoreIC(FeedbackVectorSlot slot,
+                                    Handle<Object> name, TypeFeedbackId id) {
+  DCHECK(name->IsName());
+  __ Move(StoreDescriptor::NameRegister(), name);
 
-void FullCodeGenerator::CallStoreIC(TypeFeedbackId id) {
+  STATIC_ASSERT(!StoreDescriptor::kPassLastArgsOnStack ||
+                StoreDescriptor::kStackArgumentsCount == 2);
+  if (StoreDescriptor::kPassLastArgsOnStack) {
+    __ Push(StoreDescriptor::ValueRegister());
+    EmitPushSlot(slot);
+  } else {
+    EmitLoadSlot(StoreDescriptor::SlotRegister(), slot);
+  }
+
   Handle<Code> ic = CodeFactory::StoreIC(isolate(), language_mode()).code();
   CallIC(ic, id);
+  RestoreContext();
 }
 
+void FullCodeGenerator::CallKeyedStoreIC(FeedbackVectorSlot slot) {
+  STATIC_ASSERT(!StoreDescriptor::kPassLastArgsOnStack ||
+                StoreDescriptor::kStackArgumentsCount == 2);
+  if (StoreDescriptor::kPassLastArgsOnStack) {
+    __ Push(StoreDescriptor::ValueRegister());
+    EmitPushSlot(slot);
+  } else {
+    EmitLoadSlot(StoreDescriptor::SlotRegister(), slot);
+  }
+
+  Handle<Code> ic =
+      CodeFactory::KeyedStoreIC(isolate(), language_mode()).code();
+  CallIC(ic);
+  RestoreContext();
+}
 
 void FullCodeGenerator::RecordJSReturnSite(Call* call) {
   // We record the offset of the function return so we can rebuild the frame
@@ -411,6 +494,18 @@
   EmitVariableLoad(expr);
 }
 
+void FullCodeGenerator::EmitGlobalVariableLoad(VariableProxy* proxy,
+                                               TypeofMode typeof_mode) {
+#ifdef DEBUG
+  Variable* var = proxy->var();
+  DCHECK(var->IsUnallocated() ||
+         (var->IsLookupSlot() && var->mode() == DYNAMIC_GLOBAL));
+#endif
+  EmitLoadSlot(LoadGlobalDescriptor::SlotRegister(),
+               proxy->VariableFeedbackSlot());
+  Handle<Code> ic = CodeFactory::LoadGlobalIC(isolate(), typeof_mode).code();
+  CallIC(ic);
+}
 
 void FullCodeGenerator::VisitSloppyBlockFunctionStatement(
     SloppyBlockFunctionStatement* declaration) {
@@ -473,6 +568,7 @@
   VisitForStackValue(args->at(1));
   VisitForStackValue(args->at(2));
   __ CallStub(&stub);
+  RestoreContext();
   OperandStackDepthDecrement(3);
   context()->Plug(result_register());
 }
@@ -816,8 +912,8 @@
   DCHECK(!context()->IsEffect());
   DCHECK(!context()->IsTest());
 
-  if (proxy != NULL && (proxy->var()->IsUnallocatedOrGlobalSlot() ||
-                        proxy->var()->IsLookupSlot())) {
+  if (proxy != NULL &&
+      (proxy->var()->IsUnallocated() || proxy->var()->IsLookupSlot())) {
     EmitVariableLoad(proxy, INSIDE_TYPEOF);
     PrepareForBailout(proxy, BailoutState::TOS_REGISTER);
   } else {
@@ -896,6 +992,7 @@
   // accumulator on the stack.
   ClearAccumulator();
   while (!current->IsContinueTarget(target)) {
+    if (HasStackOverflow()) return;
     if (current->IsTryFinally()) {
       Comment cmnt(masm(), "[ Deferred continue through finally");
       current->Exit(&context_length);
@@ -936,6 +1033,7 @@
   // accumulator on the stack.
   ClearAccumulator();
   while (!current->IsBreakTarget(target)) {
+    if (HasStackOverflow()) return;
     if (current->IsTryFinally()) {
       Comment cmnt(masm(), "[ Deferred break through finally");
       current->Exit(&context_length);
@@ -971,6 +1069,7 @@
   NestedStatement* current = nesting_stack_;
   int context_length = 0;
   while (current != NULL) {
+    if (HasStackOverflow()) return;
     if (current->IsTryFinally()) {
       Comment cmnt(masm(), "[ Deferred return through finally");
       current->Exit(&context_length);
@@ -1008,10 +1107,7 @@
   DCHECK(!key->value()->IsSmi());
   DCHECK(!prop->IsSuperAccess());
 
-  __ Move(LoadDescriptor::NameRegister(), key->value());
-  __ Move(LoadDescriptor::SlotRegister(),
-          SmiFromSlot(prop->PropertyFeedbackSlot()));
-  CallLoadIC();
+  CallLoadIC(prop->PropertyFeedbackSlot(), key->value());
 }
 
 void FullCodeGenerator::EmitNamedSuperPropertyLoad(Property* prop) {
@@ -1027,11 +1123,12 @@
 
 void FullCodeGenerator::EmitKeyedPropertyLoad(Property* prop) {
   SetExpressionPosition(prop);
+
+  EmitLoadSlot(LoadDescriptor::SlotRegister(), prop->PropertyFeedbackSlot());
+
   Handle<Code> ic = CodeFactory::KeyedLoadIC(isolate()).code();
-  __ Move(LoadDescriptor::SlotRegister(),
-          SmiFromSlot(prop->PropertyFeedbackSlot()));
   CallIC(ic);
-  if (FLAG_tf_load_ic_stub) RestoreContext();
+  RestoreContext();
 }
 
 void FullCodeGenerator::EmitKeyedSuperPropertyLoad(Property* prop) {
@@ -1040,7 +1137,7 @@
   CallRuntimeWithOperands(Runtime::kLoadKeyedFromSuper);
 }
 
-void FullCodeGenerator::EmitPropertyKey(ObjectLiteralProperty* property,
+void FullCodeGenerator::EmitPropertyKey(LiteralProperty* property,
                                         BailoutId bailout_id) {
   VisitForStackValue(property->key());
   CallRuntimeWithOperands(Runtime::kToName);
@@ -1048,9 +1145,14 @@
   PushOperand(result_register());
 }
 
-void FullCodeGenerator::EmitLoadStoreICSlot(FeedbackVectorSlot slot) {
+void FullCodeGenerator::EmitLoadSlot(Register destination,
+                                     FeedbackVectorSlot slot) {
   DCHECK(!slot.IsInvalid());
-  __ Move(StoreDescriptor::SlotRegister(), SmiFromSlot(slot));
+  __ Move(destination, SmiFromSlot(slot));
+}
+
+void FullCodeGenerator::EmitPushSlot(FeedbackVectorSlot slot) {
+  __ Push(SmiFromSlot(slot));
 }
 
 void FullCodeGenerator::VisitReturnStatement(ReturnStatement* stmt) {
@@ -1073,6 +1175,7 @@
   RestoreContext();
   PrepareForBailoutForId(stmt->ToObjectId(), BailoutState::TOS_REGISTER);
   PushOperand(result_register());
+  PushOperand(stmt->scope()->scope_info());
   PushFunctionArgumentForContextAllocation();
   CallRuntimeWithOperands(Runtime::kPushWithContext);
   StoreToFrameField(StandardFrameConstants::kContextOffset, context_register());
@@ -1274,6 +1377,7 @@
   { Comment cmnt(masm_, "[ Extend catch context");
     PushOperand(stmt->variable()->name());
     PushOperand(result_register());
+    PushOperand(stmt->scope()->scope_info());
     PushFunctionArgumentForContextAllocation();
     CallRuntimeWithOperands(Runtime::kPushCatchContext);
     StoreToFrameField(StandardFrameConstants::kContextOffset,
@@ -1466,9 +1570,7 @@
 
   // Load the "prototype" from the constructor.
   __ Move(LoadDescriptor::ReceiverRegister(), result_register());
-  __ LoadRoot(LoadDescriptor::NameRegister(), Heap::kprototype_stringRootIndex);
-  __ Move(LoadDescriptor::SlotRegister(), SmiFromSlot(lit->PrototypeSlot()));
-  CallLoadIC();
+  CallLoadIC(lit->PrototypeSlot(), isolate()->factory()->prototype_string());
   PrepareForBailoutForId(lit->PrototypeId(), BailoutState::TOS_REGISTER);
   PushOperand(result_register());
 
@@ -1847,7 +1949,7 @@
     {
       if (needs_block_context_) {
         Comment cmnt(masm(), "[ Extend block context");
-        codegen_->PushOperand(scope->GetScopeInfo(codegen->isolate()));
+        codegen_->PushOperand(scope->scope_info());
         codegen_->PushFunctionArgumentForContextAllocation();
         codegen_->CallRuntimeWithOperands(Runtime::kPushBlockContext);
 
@@ -1939,6 +2041,17 @@
          var->initializer_position() >= proxy->position();
 }
 
+Handle<Script> FullCodeGenerator::script() { return info_->script(); }
+
+LanguageMode FullCodeGenerator::language_mode() {
+  return scope()->language_mode();
+}
+
+bool FullCodeGenerator::has_simple_parameters() {
+  return info_->has_simple_parameters();
+}
+
+FunctionLiteral* FullCodeGenerator::literal() const { return info_->literal(); }
 
 #undef __
 
diff --git a/src/full-codegen/full-codegen.h b/src/full-codegen/full-codegen.h
index 71f065b..2a4eb9d 100644
--- a/src/full-codegen/full-codegen.h
+++ b/src/full-codegen/full-codegen.h
@@ -13,7 +13,6 @@
 #include "src/code-factory.h"
 #include "src/code-stubs.h"
 #include "src/codegen.h"
-#include "src/compiler.h"
 #include "src/deoptimizer.h"
 #include "src/globals.h"
 #include "src/objects.h"
@@ -22,39 +21,24 @@
 namespace internal {
 
 // Forward declarations.
+class CompilationInfo;
+class CompilationJob;
 class JumpPatchSite;
+class Scope;
 
 // -----------------------------------------------------------------------------
 // Full code generator.
 
 class FullCodeGenerator final : public AstVisitor<FullCodeGenerator> {
  public:
-  FullCodeGenerator(MacroAssembler* masm, CompilationInfo* info)
-      : masm_(masm),
-        info_(info),
-        isolate_(info->isolate()),
-        zone_(info->zone()),
-        scope_(info->scope()),
-        nesting_stack_(NULL),
-        loop_depth_(0),
-        operand_stack_depth_(0),
-        globals_(NULL),
-        context_(NULL),
-        bailout_entries_(info->HasDeoptimizationSupport()
-                             ? info->literal()->ast_node_count()
-                             : 0,
-                         info->zone()),
-        back_edges_(2, info->zone()),
-        handler_table_(info->zone()),
-        source_position_table_builder_(info->zone(),
-                                       info->SourcePositionRecordingMode()),
-        ic_total_count_(0) {
-    DCHECK(!info->IsStub());
-    Initialize();
-  }
+  FullCodeGenerator(MacroAssembler* masm, CompilationInfo* info,
+                    uintptr_t stack_limit);
 
-  void Initialize();
+  void Initialize(uintptr_t stack_limit);
 
+  static CompilationJob* NewCompilationJob(CompilationInfo* info);
+
+  static bool MakeCode(CompilationInfo* info, uintptr_t stack_limit);
   static bool MakeCode(CompilationInfo* info);
 
   // Encode bailout state and pc-offset as a BitField<type, start, size>.
@@ -493,7 +477,6 @@
   F(IsJSProxy)                          \
   F(Call)                               \
   F(NewObject)                          \
-  F(StringCharFromCode)                 \
   F(IsJSReceiver)                       \
   F(HasCachedArrayIndex)                \
   F(GetCachedArrayIndex)                \
@@ -572,7 +555,7 @@
   void EmitClassDefineProperties(ClassLiteral* lit);
 
   // Pushes the property key as a Name on the stack.
-  void EmitPropertyKey(ObjectLiteralProperty* property, BailoutId bailout_id);
+  void EmitPropertyKey(LiteralProperty* property, BailoutId bailout_id);
 
   // Apply the compound assignment operator. Expects the left operand on top
   // of the stack and the right one in the accumulator.
@@ -629,16 +612,19 @@
   void EmitSetHomeObjectAccumulator(Expression* initializer, int offset,
                                     FeedbackVectorSlot slot);
 
-  void EmitLoadStoreICSlot(FeedbackVectorSlot slot);
+  // Platform-specific code for loading a slot to a register.
+  void EmitLoadSlot(Register destination, FeedbackVectorSlot slot);
+  // Platform-specific code for pushing a slot to the stack.
+  void EmitPushSlot(FeedbackVectorSlot slot);
 
   void CallIC(Handle<Code> code,
               TypeFeedbackId id = TypeFeedbackId::None());
 
-  void CallLoadIC(TypeFeedbackId id = TypeFeedbackId::None());
-  // Inside typeof reference errors are never thrown.
-  void CallLoadGlobalIC(TypeofMode typeof_mode,
-                        TypeFeedbackId id = TypeFeedbackId::None());
-  void CallStoreIC(TypeFeedbackId id = TypeFeedbackId::None());
+  void CallLoadIC(FeedbackVectorSlot slot, Handle<Object> name,
+                  TypeFeedbackId id = TypeFeedbackId::None());
+  void CallStoreIC(FeedbackVectorSlot slot, Handle<Object> name,
+                   TypeFeedbackId id = TypeFeedbackId::None());
+  void CallKeyedStoreIC(FeedbackVectorSlot slot);
 
   void SetFunctionPosition(FunctionLiteral* fun);
   void SetReturnPosition(FunctionLiteral* fun);
@@ -695,10 +681,10 @@
 
   Isolate* isolate() const { return isolate_; }
   Zone* zone() const { return zone_; }
-  Handle<Script> script() { return info_->script(); }
-  LanguageMode language_mode() { return scope()->language_mode(); }
-  bool has_simple_parameters() { return info_->has_simple_parameters(); }
-  FunctionLiteral* literal() const { return info_->literal(); }
+  Handle<Script> script();
+  LanguageMode language_mode();
+  bool has_simple_parameters();
+  FunctionLiteral* literal() const;
   Scope* scope() { return scope_; }
 
   static Register context_register();
diff --git a/src/full-codegen/ia32/full-codegen-ia32.cc b/src/full-codegen/ia32/full-codegen-ia32.cc
index 3571948..e5f66cd 100644
--- a/src/full-codegen/ia32/full-codegen-ia32.cc
+++ b/src/full-codegen/ia32/full-codegen-ia32.cc
@@ -4,15 +4,17 @@
 
 #if V8_TARGET_ARCH_IA32
 
+#include "src/full-codegen/full-codegen.h"
+#include "src/ast/compile-time-value.h"
 #include "src/ast/scopes.h"
 #include "src/code-factory.h"
 #include "src/code-stubs.h"
 #include "src/codegen.h"
+#include "src/compilation-info.h"
+#include "src/compiler.h"
 #include "src/debug/debug.h"
-#include "src/full-codegen/full-codegen.h"
 #include "src/ia32/frames-ia32.h"
 #include "src/ic/ic.h"
-#include "src/parsing/parser.h"
 
 namespace v8 {
 namespace internal {
@@ -115,6 +117,17 @@
   info->set_prologue_offset(masm_->pc_offset());
   __ Prologue(info->GeneratePreagedPrologue());
 
+  // Increment invocation count for the function.
+  {
+    Comment cmnt(masm_, "[ Increment invocation count");
+    __ mov(ecx, FieldOperand(edi, JSFunction::kLiteralsOffset));
+    __ mov(ecx, FieldOperand(ecx, LiteralsArray::kFeedbackVectorOffset));
+    __ add(FieldOperand(
+               ecx, TypeFeedbackVector::kInvocationCountIndex * kPointerSize +
+                        TypeFeedbackVector::kHeaderSize),
+           Immediate(Smi::FromInt(1)));
+  }
+
   { Comment cmnt(masm_, "[ Allocate locals");
     int locals_count = info->scope()->num_stack_slots();
     // Generators allocate locals, if any, in context slots.
@@ -159,14 +172,14 @@
   bool function_in_register = true;
 
   // Possibly allocate a local context.
-  if (info->scope()->num_heap_slots() > 0) {
+  if (info->scope()->NeedsContext()) {
     Comment cmnt(masm_, "[ Allocate context");
     bool need_write_barrier = true;
     int slots = info->scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
     // Argument to NewContext is the function, which is still in edi.
     if (info->scope()->is_script_scope()) {
       __ push(edi);
-      __ Push(info->scope()->GetScopeInfo(info->isolate()));
+      __ Push(info->scope()->scope_info());
       __ CallRuntime(Runtime::kNewScriptContext);
       PrepareForBailoutForId(BailoutId::ScriptContext(),
                              BailoutState::TOS_REGISTER);
@@ -254,9 +267,8 @@
   }
 
   // Possibly allocate RestParameters
-  int rest_index;
-  Variable* rest_param = info->scope()->rest_parameter(&rest_index);
-  if (rest_param) {
+  Variable* rest_param = info->scope()->rest_parameter();
+  if (rest_param != nullptr) {
     Comment cmnt(masm_, "[ Allocate rest parameter array");
     if (!function_in_register) {
       __ mov(edi, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
@@ -717,7 +729,6 @@
   VariableProxy* proxy = declaration->proxy();
   Variable* variable = proxy->var();
   switch (variable->location()) {
-    case VariableLocation::GLOBAL:
     case VariableLocation::UNALLOCATED: {
       DCHECK(!variable->binding_needs_init());
       FeedbackVectorSlot slot = proxy->VariableFeedbackSlot();
@@ -767,7 +778,6 @@
   VariableProxy* proxy = declaration->proxy();
   Variable* variable = proxy->var();
   switch (variable->location()) {
-    case VariableLocation::GLOBAL:
     case VariableLocation::UNALLOCATED: {
       FeedbackVectorSlot slot = proxy->VariableFeedbackSlot();
       DCHECK(!slot.IsInvalid());
@@ -1066,6 +1076,7 @@
   // Generate code for going to the next element by incrementing the
   // index (smi) stored on top of the stack.
   __ bind(loop_statement.continue_label());
+  PrepareForBailoutForId(stmt->IncrementId(), BailoutState::NO_REGISTERS);
   __ add(Operand(esp, 0 * kPointerSize), Immediate(Smi::FromInt(1)));
 
   EmitBackEdgeBookkeeping(stmt, &loop);
@@ -1086,11 +1097,8 @@
                                           FeedbackVectorSlot slot) {
   DCHECK(NeedsHomeObject(initializer));
   __ mov(StoreDescriptor::ReceiverRegister(), Operand(esp, 0));
-  __ mov(StoreDescriptor::NameRegister(),
-         Immediate(isolate()->factory()->home_object_symbol()));
   __ mov(StoreDescriptor::ValueRegister(), Operand(esp, offset * kPointerSize));
-  EmitLoadStoreICSlot(slot);
-  CallStoreIC();
+  CallStoreIC(slot, isolate()->factory()->home_object_symbol());
 }
 
 
@@ -1099,11 +1107,8 @@
                                                      FeedbackVectorSlot slot) {
   DCHECK(NeedsHomeObject(initializer));
   __ mov(StoreDescriptor::ReceiverRegister(), eax);
-  __ mov(StoreDescriptor::NameRegister(),
-         Immediate(isolate()->factory()->home_object_symbol()));
   __ mov(StoreDescriptor::ValueRegister(), Operand(esp, offset * kPointerSize));
-  EmitLoadStoreICSlot(slot);
-  CallStoreIC();
+  CallStoreIC(slot, isolate()->factory()->home_object_symbol());
 }
 
 
@@ -1141,7 +1146,7 @@
   Register temp = ebx;
 
   for (Scope* s = scope(); s != var->scope(); s = s->outer_scope()) {
-    if (s->num_heap_slots() > 0) {
+    if (s->NeedsContext()) {
       if (s->calls_sloppy_eval()) {
         // Check that extension is "the hole".
         __ JumpIfNotRoot(ContextOperand(context, Context::EXTENSION_INDEX),
@@ -1189,20 +1194,6 @@
   }
 }
 
-
-void FullCodeGenerator::EmitGlobalVariableLoad(VariableProxy* proxy,
-                                               TypeofMode typeof_mode) {
-#ifdef DEBUG
-  Variable* var = proxy->var();
-  DCHECK(var->IsUnallocatedOrGlobalSlot() ||
-         (var->IsLookupSlot() && var->mode() == DYNAMIC_GLOBAL));
-#endif
-  __ mov(LoadGlobalDescriptor::SlotRegister(),
-         Immediate(SmiFromSlot(proxy->VariableFeedbackSlot())));
-  CallLoadGlobalIC(typeof_mode);
-}
-
-
 void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy,
                                          TypeofMode typeof_mode) {
   SetExpressionPosition(proxy);
@@ -1212,7 +1203,6 @@
   // Three cases: global variables, lookup variables, and all other types of
   // variables.
   switch (var->location()) {
-    case VariableLocation::GLOBAL:
     case VariableLocation::UNALLOCATED: {
       Comment cmnt(masm_, "[ Global variable");
       EmitGlobalVariableLoad(proxy, typeof_mode);
@@ -1339,10 +1329,8 @@
           if (property->emit_store()) {
             VisitForAccumulatorValue(value);
             DCHECK(StoreDescriptor::ValueRegister().is(eax));
-            __ mov(StoreDescriptor::NameRegister(), Immediate(key->value()));
             __ mov(StoreDescriptor::ReceiverRegister(), Operand(esp, 0));
-            EmitLoadStoreICSlot(property->GetSlot(0));
-            CallStoreIC();
+            CallStoreIC(property->GetSlot(0), key->value());
             PrepareForBailoutForId(key->id(), BailoutState::NO_REGISTERS);
             if (NeedsHomeObject(value)) {
               EmitSetHomeObjectAccumulator(value, 0, property->GetSlot(1));
@@ -1506,6 +1494,7 @@
     __ mov(ecx, Immediate(constant_elements));
     FastCloneShallowArrayStub stub(isolate(), allocation_site_mode);
     __ CallStub(&stub);
+    RestoreContext();
   }
   PrepareForBailoutForId(expr->CreateLiteralId(), BailoutState::TOS_REGISTER);
 
@@ -1515,8 +1504,7 @@
 
   // Emit code to evaluate all the non-constant subexpressions and to store
   // them into the newly cloned array.
-  int array_index = 0;
-  for (; array_index < length; array_index++) {
+  for (int array_index = 0; array_index < length; array_index++) {
     Expression* subexpr = subexprs->at(array_index);
     DCHECK(!subexpr->IsSpread());
 
@@ -1533,31 +1521,7 @@
     __ mov(StoreDescriptor::NameRegister(),
            Immediate(Smi::FromInt(array_index)));
     __ mov(StoreDescriptor::ReceiverRegister(), Operand(esp, 0));
-    EmitLoadStoreICSlot(expr->LiteralFeedbackSlot());
-    Handle<Code> ic =
-        CodeFactory::KeyedStoreIC(isolate(), language_mode()).code();
-    CallIC(ic);
-    PrepareForBailoutForId(expr->GetIdForElement(array_index),
-                           BailoutState::NO_REGISTERS);
-  }
-
-  // In case the array literal contains spread expressions it has two parts. The
-  // first part is  the "static" array which has a literal index is  handled
-  // above. The second part is the part after the first spread expression
-  // (inclusive) and these elements gets appended to the array. Note that the
-  // number elements an iterable produces is unknown ahead of time.
-  if (array_index < length && result_saved) {
-    PopOperand(eax);
-    result_saved = false;
-  }
-  for (; array_index < length; array_index++) {
-    Expression* subexpr = subexprs->at(array_index);
-
-    PushOperand(eax);
-    DCHECK(!subexpr->IsSpread());
-    VisitForStackValue(subexpr);
-    CallRuntimeWithOperands(Runtime::kAppendElement);
-
+    CallKeyedStoreIC(expr->LiteralFeedbackSlot());
     PrepareForBailoutForId(expr->GetIdForElement(array_index),
                            BailoutState::NO_REGISTERS);
   }
@@ -1902,7 +1866,7 @@
 
 void FullCodeGenerator::EmitClassDefineProperties(ClassLiteral* lit) {
   for (int i = 0; i < lit->properties()->length(); i++) {
-    ObjectLiteral::Property* property = lit->properties()->at(i);
+    ClassLiteral::Property* property = lit->properties()->at(i);
     Expression* value = property->value();
 
     if (property->is_static()) {
@@ -1927,25 +1891,25 @@
     }
 
     switch (property->kind()) {
-      case ObjectLiteral::Property::CONSTANT:
-      case ObjectLiteral::Property::MATERIALIZED_LITERAL:
-      case ObjectLiteral::Property::PROTOTYPE:
-        UNREACHABLE();
-      case ObjectLiteral::Property::COMPUTED:
+      case ClassLiteral::Property::METHOD:
         PushOperand(Smi::FromInt(DONT_ENUM));
         PushOperand(Smi::FromInt(property->NeedsSetFunctionName()));
         CallRuntimeWithOperands(Runtime::kDefineDataPropertyInLiteral);
         break;
 
-      case ObjectLiteral::Property::GETTER:
+      case ClassLiteral::Property::GETTER:
         PushOperand(Smi::FromInt(DONT_ENUM));
         CallRuntimeWithOperands(Runtime::kDefineGetterPropertyUnchecked);
         break;
 
-      case ObjectLiteral::Property::SETTER:
+      case ClassLiteral::Property::SETTER:
         PushOperand(Smi::FromInt(DONT_ENUM));
         CallRuntimeWithOperands(Runtime::kDefineSetterPropertyUnchecked);
         break;
+
+      case ClassLiteral::Property::FIELD:
+        UNREACHABLE();
+        break;
     }
   }
 }
@@ -1980,10 +1944,7 @@
       VisitForAccumulatorValue(prop->obj());
       __ Move(StoreDescriptor::ReceiverRegister(), eax);
       PopOperand(StoreDescriptor::ValueRegister());  // Restore value.
-      __ mov(StoreDescriptor::NameRegister(),
-             prop->key()->AsLiteral()->value());
-      EmitLoadStoreICSlot(slot);
-      CallStoreIC();
+      CallStoreIC(slot, prop->key()->AsLiteral()->value());
       break;
     }
     case NAMED_SUPER_PROPERTY: {
@@ -2030,10 +1991,7 @@
       __ Move(StoreDescriptor::NameRegister(), eax);
       PopOperand(StoreDescriptor::ReceiverRegister());  // Receiver.
       PopOperand(StoreDescriptor::ValueRegister());     // Restore value.
-      EmitLoadStoreICSlot(slot);
-      Handle<Code> ic =
-          CodeFactory::KeyedStoreIC(isolate(), language_mode()).code();
-      CallIC(ic);
+      CallKeyedStoreIC(slot);
       break;
     }
   }
@@ -2056,13 +2014,11 @@
                                                FeedbackVectorSlot slot) {
   if (var->IsUnallocated()) {
     // Global var, const, or let.
-    __ mov(StoreDescriptor::NameRegister(), var->name());
     __ mov(StoreDescriptor::ReceiverRegister(), NativeContextOperand());
     __ mov(StoreDescriptor::ReceiverRegister(),
            ContextOperand(StoreDescriptor::ReceiverRegister(),
                           Context::EXTENSION_INDEX));
-    EmitLoadStoreICSlot(slot);
-    CallStoreIC();
+    CallStoreIC(slot, var->name());
 
   } else if (IsLexicalVariableMode(var->mode()) && op != Token::INIT) {
     DCHECK(!var->IsLookupSlot());
@@ -2078,10 +2034,10 @@
       __ CallRuntime(Runtime::kThrowReferenceError);
       __ bind(&assign);
     }
-    if (var->mode() == CONST) {
-      __ CallRuntime(Runtime::kThrowConstAssignError);
-    } else {
+    if (var->mode() != CONST) {
       EmitStoreToStackLocalOrContextSlot(var, location);
+    } else if (var->throw_on_const_assignment(language_mode())) {
+      __ CallRuntime(Runtime::kThrowConstAssignError);
     }
   } else if (var->is_this() && var->mode() == CONST && op == Token::INIT) {
     // Initializing assignment to const {this} needs a write barrier.
@@ -2096,7 +2052,8 @@
     __ bind(&uninitialized_this);
     EmitStoreToStackLocalOrContextSlot(var, location);
 
-  } else if (!var->is_const_mode() || op == Token::INIT) {
+  } else {
+    DCHECK(var->mode() != CONST || op == Token::INIT);
     if (var->IsLookupSlot()) {
       // Assignment to var.
       __ Push(Immediate(var->name()));
@@ -2117,13 +2074,6 @@
       }
       EmitStoreToStackLocalOrContextSlot(var, location);
     }
-
-  } else {
-    DCHECK(var->mode() == CONST_LEGACY && op != Token::INIT);
-    if (is_strict(language_mode())) {
-      __ CallRuntime(Runtime::kThrowConstAssignError);
-    }
-    // Silently ignore store in sloppy mode.
   }
 }
 
@@ -2136,10 +2086,8 @@
   DCHECK(prop != NULL);
   DCHECK(prop->key()->IsLiteral());
 
-  __ mov(StoreDescriptor::NameRegister(), prop->key()->AsLiteral()->value());
   PopOperand(StoreDescriptor::ReceiverRegister());
-  EmitLoadStoreICSlot(expr->AssignmentSlot());
-  CallStoreIC();
+  CallStoreIC(expr->AssignmentSlot(), prop->key()->AsLiteral()->value());
   PrepareForBailoutForId(expr->AssignmentId(), BailoutState::TOS_REGISTER);
   context()->Plug(eax);
 }
@@ -2182,10 +2130,7 @@
   PopOperand(StoreDescriptor::NameRegister());  // Key.
   PopOperand(StoreDescriptor::ReceiverRegister());
   DCHECK(StoreDescriptor::ValueRegister().is(eax));
-  Handle<Code> ic =
-      CodeFactory::KeyedStoreIC(isolate(), language_mode()).code();
-  EmitLoadStoreICSlot(expr->AssignmentSlot());
-  CallIC(ic);
+  CallKeyedStoreIC(expr->AssignmentSlot());
   PrepareForBailoutForId(expr->AssignmentId(), BailoutState::TOS_REGISTER);
   context()->Plug(eax);
 }
@@ -2723,25 +2668,6 @@
 }
 
 
-void FullCodeGenerator::EmitStringCharFromCode(CallRuntime* expr) {
-  ZoneList<Expression*>* args = expr->arguments();
-  DCHECK(args->length() == 1);
-
-  VisitForAccumulatorValue(args->at(0));
-
-  Label done;
-  StringCharFromCodeGenerator generator(eax, ebx);
-  generator.GenerateFast(masm_);
-  __ jmp(&done);
-
-  NopRuntimeCallHelper call_helper;
-  generator.GenerateSlow(masm_, call_helper);
-
-  __ bind(&done);
-  context()->Plug(ebx);
-}
-
-
 void FullCodeGenerator::EmitStringCharCodeAt(CallRuntime* expr) {
   ZoneList<Expression*>* args = expr->arguments();
   DCHECK(args->length() == 2);
@@ -2936,7 +2862,7 @@
         // "delete this" is allowed.
         bool is_this = var->is_this();
         DCHECK(is_sloppy(language_mode()) || is_this);
-        if (var->IsUnallocatedOrGlobalSlot()) {
+        if (var->IsUnallocated()) {
           __ mov(eax, NativeContextOperand());
           __ push(ContextOperand(eax, Context::EXTENSION_INDEX));
           __ push(Immediate(var->name()));
@@ -3230,11 +3156,8 @@
       }
       break;
     case NAMED_PROPERTY: {
-      __ mov(StoreDescriptor::NameRegister(),
-             prop->key()->AsLiteral()->value());
       PopOperand(StoreDescriptor::ReceiverRegister());
-      EmitLoadStoreICSlot(expr->CountSlot());
-      CallStoreIC();
+      CallStoreIC(expr->CountSlot(), prop->key()->AsLiteral()->value());
       PrepareForBailoutForId(expr->AssignmentId(), BailoutState::TOS_REGISTER);
       if (expr->is_postfix()) {
         if (!context()->IsEffect()) {
@@ -3272,10 +3195,7 @@
     case KEYED_PROPERTY: {
       PopOperand(StoreDescriptor::NameRegister());
       PopOperand(StoreDescriptor::ReceiverRegister());
-      Handle<Code> ic =
-          CodeFactory::KeyedStoreIC(isolate(), language_mode()).code();
-      EmitLoadStoreICSlot(expr->CountSlot());
-      CallIC(ic);
+      CallKeyedStoreIC(expr->CountSlot());
       PrepareForBailoutForId(expr->AssignmentId(), BailoutState::TOS_REGISTER);
       if (expr->is_postfix()) {
         // Result is on the stack
diff --git a/src/full-codegen/mips/full-codegen-mips.cc b/src/full-codegen/mips/full-codegen-mips.cc
index 67598d0..7f97686 100644
--- a/src/full-codegen/mips/full-codegen-mips.cc
+++ b/src/full-codegen/mips/full-codegen-mips.cc
@@ -12,14 +12,16 @@
 // places where we have to move a previous result in v0 to a0 for the
 // next call: mov(a0, v0). This is not needed on the other architectures.
 
+#include "src/full-codegen/full-codegen.h"
+#include "src/ast/compile-time-value.h"
 #include "src/ast/scopes.h"
 #include "src/code-factory.h"
 #include "src/code-stubs.h"
 #include "src/codegen.h"
+#include "src/compilation-info.h"
+#include "src/compiler.h"
 #include "src/debug/debug.h"
-#include "src/full-codegen/full-codegen.h"
 #include "src/ic/ic.h"
-#include "src/parsing/parser.h"
 
 #include "src/mips/code-stubs-mips.h"
 #include "src/mips/macro-assembler-mips.h"
@@ -135,6 +137,20 @@
   info->set_prologue_offset(masm_->pc_offset());
   __ Prologue(info->GeneratePreagedPrologue());
 
+  // Increment invocation count for the function.
+  {
+    Comment cmnt(masm_, "[ Increment invocation count");
+    __ lw(a0, FieldMemOperand(a1, JSFunction::kLiteralsOffset));
+    __ lw(a0, FieldMemOperand(a0, LiteralsArray::kFeedbackVectorOffset));
+    __ lw(t0, FieldMemOperand(
+                  a0, TypeFeedbackVector::kInvocationCountIndex * kPointerSize +
+                          TypeFeedbackVector::kHeaderSize));
+    __ Addu(t0, t0, Operand(Smi::FromInt(1)));
+    __ sw(t0, FieldMemOperand(
+                  a0, TypeFeedbackVector::kInvocationCountIndex * kPointerSize +
+                          TypeFeedbackVector::kHeaderSize));
+  }
+
   { Comment cmnt(masm_, "[ Allocate locals");
     int locals_count = info->scope()->num_stack_slots();
     // Generators allocate locals, if any, in context slots.
@@ -177,14 +193,14 @@
   bool function_in_register_a1 = true;
 
   // Possibly allocate a local context.
-  if (info->scope()->num_heap_slots() > 0) {
+  if (info->scope()->NeedsContext()) {
     Comment cmnt(masm_, "[ Allocate context");
     // Argument to NewContext is the function, which is still in a1.
     bool need_write_barrier = true;
     int slots = info->scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
     if (info->scope()->is_script_scope()) {
       __ push(a1);
-      __ Push(info->scope()->GetScopeInfo(info->isolate()));
+      __ Push(info->scope()->scope_info());
       __ CallRuntime(Runtime::kNewScriptContext);
       PrepareForBailoutForId(BailoutId::ScriptContext(),
                              BailoutState::TOS_REGISTER);
@@ -269,9 +285,8 @@
   }
 
   // Possibly allocate RestParameters
-  int rest_index;
-  Variable* rest_param = info->scope()->rest_parameter(&rest_index);
-  if (rest_param) {
+  Variable* rest_param = info->scope()->rest_parameter();
+  if (rest_param != nullptr) {
     Comment cmnt(masm_, "[ Allocate rest parameter array");
     if (!function_in_register_a1) {
       __ lw(a1, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
@@ -765,7 +780,6 @@
   VariableProxy* proxy = declaration->proxy();
   Variable* variable = proxy->var();
   switch (variable->location()) {
-    case VariableLocation::GLOBAL:
     case VariableLocation::UNALLOCATED: {
       DCHECK(!variable->binding_needs_init());
       FeedbackVectorSlot slot = proxy->VariableFeedbackSlot();
@@ -816,7 +830,6 @@
   VariableProxy* proxy = declaration->proxy();
   Variable* variable = proxy->var();
   switch (variable->location()) {
-    case VariableLocation::GLOBAL:
     case VariableLocation::UNALLOCATED: {
       FeedbackVectorSlot slot = proxy->VariableFeedbackSlot();
       DCHECK(!slot.IsInvalid());
@@ -1133,6 +1146,7 @@
   // Generate code for the going to the next element by incrementing
   // the index (smi) stored on top of the stack.
   __ bind(loop_statement.continue_label());
+  PrepareForBailoutForId(stmt->IncrementId(), BailoutState::NO_REGISTERS);
   __ pop(a0);
   __ Addu(a0, a0, Operand(Smi::FromInt(1)));
   __ push(a0);
@@ -1155,12 +1169,9 @@
                                           FeedbackVectorSlot slot) {
   DCHECK(NeedsHomeObject(initializer));
   __ lw(StoreDescriptor::ReceiverRegister(), MemOperand(sp));
-  __ li(StoreDescriptor::NameRegister(),
-        Operand(isolate()->factory()->home_object_symbol()));
   __ lw(StoreDescriptor::ValueRegister(),
         MemOperand(sp, offset * kPointerSize));
-  EmitLoadStoreICSlot(slot);
-  CallStoreIC();
+  CallStoreIC(slot, isolate()->factory()->home_object_symbol());
 }
 
 
@@ -1169,12 +1180,9 @@
                                                      FeedbackVectorSlot slot) {
   DCHECK(NeedsHomeObject(initializer));
   __ Move(StoreDescriptor::ReceiverRegister(), v0);
-  __ li(StoreDescriptor::NameRegister(),
-        Operand(isolate()->factory()->home_object_symbol()));
   __ lw(StoreDescriptor::ValueRegister(),
         MemOperand(sp, offset * kPointerSize));
-  EmitLoadStoreICSlot(slot);
-  CallStoreIC();
+  CallStoreIC(slot, isolate()->factory()->home_object_symbol());
 }
 
 
@@ -1214,7 +1222,7 @@
   Register temp = t0;
 
   for (Scope* s = scope(); s != var->scope(); s = s->outer_scope()) {
-    if (s->num_heap_slots() > 0) {
+    if (s->NeedsContext()) {
       if (s->calls_sloppy_eval()) {
         // Check that extension is "the hole".
         __ lw(temp, ContextMemOperand(context, Context::EXTENSION_INDEX));
@@ -1264,20 +1272,6 @@
   }
 }
 
-
-void FullCodeGenerator::EmitGlobalVariableLoad(VariableProxy* proxy,
-                                               TypeofMode typeof_mode) {
-#ifdef DEBUG
-  Variable* var = proxy->var();
-  DCHECK(var->IsUnallocatedOrGlobalSlot() ||
-         (var->IsLookupSlot() && var->mode() == DYNAMIC_GLOBAL));
-#endif
-  __ li(LoadGlobalDescriptor::SlotRegister(),
-        Operand(SmiFromSlot(proxy->VariableFeedbackSlot())));
-  CallLoadGlobalIC(typeof_mode);
-}
-
-
 void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy,
                                          TypeofMode typeof_mode) {
   // Record position before possible IC call.
@@ -1288,7 +1282,6 @@
   // Three cases: global variables, lookup variables, and all other types of
   // variables.
   switch (var->location()) {
-    case VariableLocation::GLOBAL:
     case VariableLocation::UNALLOCATED: {
       Comment cmnt(masm_, "[ Global variable");
       EmitGlobalVariableLoad(proxy, typeof_mode);
@@ -1412,10 +1405,8 @@
             VisitForAccumulatorValue(value);
             __ mov(StoreDescriptor::ValueRegister(), result_register());
             DCHECK(StoreDescriptor::ValueRegister().is(a0));
-            __ li(StoreDescriptor::NameRegister(), Operand(key->value()));
             __ lw(StoreDescriptor::ReceiverRegister(), MemOperand(sp));
-            EmitLoadStoreICSlot(property->GetSlot(0));
-            CallStoreIC();
+            CallStoreIC(property->GetSlot(0), key->value());
             PrepareForBailoutForId(key->id(), BailoutState::NO_REGISTERS);
 
             if (NeedsHomeObject(value)) {
@@ -1585,6 +1576,7 @@
   } else {
     FastCloneShallowArrayStub stub(isolate(), allocation_site_mode);
     __ CallStub(&stub);
+    RestoreContext();
   }
   PrepareForBailoutForId(expr->CreateLiteralId(), BailoutState::TOS_REGISTER);
 
@@ -1594,8 +1586,7 @@
 
   // Emit code to evaluate all the non-constant subexpressions and to store
   // them into the newly cloned array.
-  int array_index = 0;
-  for (; array_index < length; array_index++) {
+  for (int array_index = 0; array_index < length; array_index++) {
     Expression* subexpr = subexprs->at(array_index);
     DCHECK(!subexpr->IsSpread());
 
@@ -1613,31 +1604,7 @@
     __ li(StoreDescriptor::NameRegister(), Operand(Smi::FromInt(array_index)));
     __ lw(StoreDescriptor::ReceiverRegister(), MemOperand(sp, 0));
     __ mov(StoreDescriptor::ValueRegister(), result_register());
-    EmitLoadStoreICSlot(expr->LiteralFeedbackSlot());
-    Handle<Code> ic =
-        CodeFactory::KeyedStoreIC(isolate(), language_mode()).code();
-    CallIC(ic);
-
-    PrepareForBailoutForId(expr->GetIdForElement(array_index),
-                           BailoutState::NO_REGISTERS);
-  }
-
-  // In case the array literal contains spread expressions it has two parts. The
-  // first part is  the "static" array which has a literal index is  handled
-  // above. The second part is the part after the first spread expression
-  // (inclusive) and these elements gets appended to the array. Note that the
-  // number elements an iterable produces is unknown ahead of time.
-  if (array_index < length && result_saved) {
-    PopOperand(v0);
-    result_saved = false;
-  }
-  for (; array_index < length; array_index++) {
-    Expression* subexpr = subexprs->at(array_index);
-
-    PushOperand(v0);
-    DCHECK(!subexpr->IsSpread());
-    VisitForStackValue(subexpr);
-    CallRuntimeWithOperands(Runtime::kAppendElement);
+    CallKeyedStoreIC(expr->LiteralFeedbackSlot());
 
     PrepareForBailoutForId(expr->GetIdForElement(array_index),
                            BailoutState::NO_REGISTERS);
@@ -1995,7 +1962,7 @@
 
 void FullCodeGenerator::EmitClassDefineProperties(ClassLiteral* lit) {
   for (int i = 0; i < lit->properties()->length(); i++) {
-    ObjectLiteral::Property* property = lit->properties()->at(i);
+    ClassLiteral::Property* property = lit->properties()->at(i);
     Expression* value = property->value();
 
     Register scratch = a1;
@@ -2022,26 +1989,23 @@
     }
 
     switch (property->kind()) {
-      case ObjectLiteral::Property::CONSTANT:
-      case ObjectLiteral::Property::MATERIALIZED_LITERAL:
-      case ObjectLiteral::Property::PROTOTYPE:
-        UNREACHABLE();
-      case ObjectLiteral::Property::COMPUTED:
+      case ClassLiteral::Property::METHOD:
         PushOperand(Smi::FromInt(DONT_ENUM));
         PushOperand(Smi::FromInt(property->NeedsSetFunctionName()));
         CallRuntimeWithOperands(Runtime::kDefineDataPropertyInLiteral);
         break;
 
-      case ObjectLiteral::Property::GETTER:
+      case ClassLiteral::Property::GETTER:
         PushOperand(Smi::FromInt(DONT_ENUM));
         CallRuntimeWithOperands(Runtime::kDefineGetterPropertyUnchecked);
         break;
 
-      case ObjectLiteral::Property::SETTER:
+      case ClassLiteral::Property::SETTER:
         PushOperand(Smi::FromInt(DONT_ENUM));
         CallRuntimeWithOperands(Runtime::kDefineSetterPropertyUnchecked);
         break;
 
+      case ClassLiteral::Property::FIELD:
       default:
         UNREACHABLE();
     }
@@ -2079,10 +2043,7 @@
       VisitForAccumulatorValue(prop->obj());
       __ mov(StoreDescriptor::ReceiverRegister(), result_register());
       PopOperand(StoreDescriptor::ValueRegister());  // Restore value.
-      __ li(StoreDescriptor::NameRegister(),
-            Operand(prop->key()->AsLiteral()->value()));
-      EmitLoadStoreICSlot(slot);
-      CallStoreIC();
+      CallStoreIC(slot, prop->key()->AsLiteral()->value());
       break;
     }
     case NAMED_SUPER_PROPERTY: {
@@ -2129,10 +2090,7 @@
       __ mov(StoreDescriptor::NameRegister(), result_register());
       PopOperands(StoreDescriptor::ValueRegister(),
                   StoreDescriptor::ReceiverRegister());
-      EmitLoadStoreICSlot(slot);
-      Handle<Code> ic =
-          CodeFactory::KeyedStoreIC(isolate(), language_mode()).code();
-      CallIC(ic);
+      CallKeyedStoreIC(slot);
       break;
     }
   }
@@ -2158,10 +2116,8 @@
   if (var->IsUnallocated()) {
     // Global var, const, or let.
     __ mov(StoreDescriptor::ValueRegister(), result_register());
-    __ li(StoreDescriptor::NameRegister(), Operand(var->name()));
     __ LoadGlobalObject(StoreDescriptor::ReceiverRegister());
-    EmitLoadStoreICSlot(slot);
-    CallStoreIC();
+    CallStoreIC(slot, var->name());
 
   } else if (IsLexicalVariableMode(var->mode()) && op != Token::INIT) {
     DCHECK(!var->IsLookupSlot());
@@ -2178,10 +2134,10 @@
       __ CallRuntime(Runtime::kThrowReferenceError);
       __ bind(&assign);
     }
-    if (var->mode() == CONST) {
-      __ CallRuntime(Runtime::kThrowConstAssignError);
-    } else {
+    if (var->mode() != CONST) {
       EmitStoreToStackLocalOrContextSlot(var, location);
+    } else if (var->throw_on_const_assignment(language_mode())) {
+      __ CallRuntime(Runtime::kThrowConstAssignError);
     }
   } else if (var->is_this() && var->mode() == CONST && op == Token::INIT) {
     // Initializing assignment to const {this} needs a write barrier.
@@ -2197,7 +2153,8 @@
     __ bind(&uninitialized_this);
     EmitStoreToStackLocalOrContextSlot(var, location);
 
-  } else if (!var->is_const_mode() || op == Token::INIT) {
+  } else {
+    DCHECK(var->mode() != CONST || op == Token::INIT);
     if (var->IsLookupSlot()) {
       // Assignment to var.
       __ Push(var->name());
@@ -2218,13 +2175,6 @@
       }
       EmitStoreToStackLocalOrContextSlot(var, location);
     }
-
-  } else {
-    DCHECK(var->mode() == CONST_LEGACY && op != Token::INIT);
-    if (is_strict(language_mode())) {
-      __ CallRuntime(Runtime::kThrowConstAssignError);
-    }
-    // Silently ignore store in sloppy mode.
   }
 }
 
@@ -2236,11 +2186,8 @@
   DCHECK(prop->key()->IsLiteral());
 
   __ mov(StoreDescriptor::ValueRegister(), result_register());
-  __ li(StoreDescriptor::NameRegister(),
-        Operand(prop->key()->AsLiteral()->value()));
   PopOperand(StoreDescriptor::ReceiverRegister());
-  EmitLoadStoreICSlot(expr->AssignmentSlot());
-  CallStoreIC();
+  CallStoreIC(expr->AssignmentSlot(), prop->key()->AsLiteral()->value());
 
   PrepareForBailoutForId(expr->AssignmentId(), BailoutState::TOS_REGISTER);
   context()->Plug(v0);
@@ -2288,10 +2235,7 @@
               StoreDescriptor::NameRegister());
   DCHECK(StoreDescriptor::ValueRegister().is(a0));
 
-  Handle<Code> ic =
-      CodeFactory::KeyedStoreIC(isolate(), language_mode()).code();
-  EmitLoadStoreICSlot(expr->AssignmentSlot());
-  CallIC(ic);
+  CallKeyedStoreIC(expr->AssignmentSlot());
 
   PrepareForBailoutForId(expr->AssignmentId(), BailoutState::TOS_REGISTER);
   context()->Plug(v0);
@@ -2844,25 +2788,6 @@
 }
 
 
-void FullCodeGenerator::EmitStringCharFromCode(CallRuntime* expr) {
-  ZoneList<Expression*>* args = expr->arguments();
-  DCHECK(args->length() == 1);
-
-  VisitForAccumulatorValue(args->at(0));
-
-  Label done;
-  StringCharFromCodeGenerator generator(v0, a1);
-  generator.GenerateFast(masm_);
-  __ jmp(&done);
-
-  NopRuntimeCallHelper call_helper;
-  generator.GenerateSlow(masm_, call_helper);
-
-  __ bind(&done);
-  context()->Plug(a1);
-}
-
-
 void FullCodeGenerator::EmitStringCharCodeAt(CallRuntime* expr) {
   ZoneList<Expression*>* args = expr->arguments();
   DCHECK(args->length() == 2);
@@ -3056,7 +2981,7 @@
         // "delete this" is allowed.
         bool is_this = var->is_this();
         DCHECK(is_sloppy(language_mode()) || is_this);
-        if (var->IsUnallocatedOrGlobalSlot()) {
+        if (var->IsUnallocated()) {
           __ LoadGlobalObject(a2);
           __ li(a1, Operand(var->name()));
           __ Push(a2, a1);
@@ -3339,11 +3264,8 @@
       break;
     case NAMED_PROPERTY: {
       __ mov(StoreDescriptor::ValueRegister(), result_register());
-      __ li(StoreDescriptor::NameRegister(),
-            Operand(prop->key()->AsLiteral()->value()));
       PopOperand(StoreDescriptor::ReceiverRegister());
-      EmitLoadStoreICSlot(expr->CountSlot());
-      CallStoreIC();
+      CallStoreIC(expr->CountSlot(), prop->key()->AsLiteral()->value());
       PrepareForBailoutForId(expr->AssignmentId(), BailoutState::TOS_REGISTER);
       if (expr->is_postfix()) {
         if (!context()->IsEffect()) {
@@ -3382,10 +3304,7 @@
       __ mov(StoreDescriptor::ValueRegister(), result_register());
       PopOperands(StoreDescriptor::ReceiverRegister(),
                   StoreDescriptor::NameRegister());
-      Handle<Code> ic =
-          CodeFactory::KeyedStoreIC(isolate(), language_mode()).code();
-      EmitLoadStoreICSlot(expr->CountSlot());
-      CallIC(ic);
+      CallKeyedStoreIC(expr->CountSlot());
       PrepareForBailoutForId(expr->AssignmentId(), BailoutState::TOS_REGISTER);
       if (expr->is_postfix()) {
         if (!context()->IsEffect()) {
diff --git a/src/full-codegen/mips64/full-codegen-mips64.cc b/src/full-codegen/mips64/full-codegen-mips64.cc
index c149f13..660adb1 100644
--- a/src/full-codegen/mips64/full-codegen-mips64.cc
+++ b/src/full-codegen/mips64/full-codegen-mips64.cc
@@ -12,14 +12,16 @@
 // places where we have to move a previous result in v0 to a0 for the
 // next call: mov(a0, v0). This is not needed on the other architectures.
 
+#include "src/full-codegen/full-codegen.h"
+#include "src/ast/compile-time-value.h"
 #include "src/ast/scopes.h"
 #include "src/code-factory.h"
 #include "src/code-stubs.h"
 #include "src/codegen.h"
+#include "src/compilation-info.h"
+#include "src/compiler.h"
 #include "src/debug/debug.h"
-#include "src/full-codegen/full-codegen.h"
 #include "src/ic/ic.h"
-#include "src/parsing/parser.h"
 
 #include "src/mips64/code-stubs-mips64.h"
 #include "src/mips64/macro-assembler-mips64.h"
@@ -134,6 +136,20 @@
   info->set_prologue_offset(masm_->pc_offset());
   __ Prologue(info->GeneratePreagedPrologue());
 
+  // Increment invocation count for the function.
+  {
+    Comment cmnt(masm_, "[ Increment invocation count");
+    __ ld(a0, FieldMemOperand(a1, JSFunction::kLiteralsOffset));
+    __ ld(a0, FieldMemOperand(a0, LiteralsArray::kFeedbackVectorOffset));
+    __ ld(a4, FieldMemOperand(
+                  a0, TypeFeedbackVector::kInvocationCountIndex * kPointerSize +
+                          TypeFeedbackVector::kHeaderSize));
+    __ Daddu(a4, a4, Operand(Smi::FromInt(1)));
+    __ sd(a4, FieldMemOperand(
+                  a0, TypeFeedbackVector::kInvocationCountIndex * kPointerSize +
+                          TypeFeedbackVector::kHeaderSize));
+  }
+
   { Comment cmnt(masm_, "[ Allocate locals");
     int locals_count = info->scope()->num_stack_slots();
     // Generators allocate locals, if any, in context slots.
@@ -176,14 +192,14 @@
   bool function_in_register_a1 = true;
 
   // Possibly allocate a local context.
-  if (info->scope()->num_heap_slots() > 0) {
+  if (info->scope()->NeedsContext()) {
     Comment cmnt(masm_, "[ Allocate context");
     // Argument to NewContext is the function, which is still in a1.
     bool need_write_barrier = true;
     int slots = info->scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
     if (info->scope()->is_script_scope()) {
       __ push(a1);
-      __ Push(info->scope()->GetScopeInfo(info->isolate()));
+      __ Push(info->scope()->scope_info());
       __ CallRuntime(Runtime::kNewScriptContext);
       PrepareForBailoutForId(BailoutId::ScriptContext(),
                              BailoutState::TOS_REGISTER);
@@ -267,9 +283,8 @@
   }
 
   // Possibly allocate RestParameters
-  int rest_index;
-  Variable* rest_param = info->scope()->rest_parameter(&rest_index);
-  if (rest_param) {
+  Variable* rest_param = info->scope()->rest_parameter();
+  if (rest_param != nullptr) {
     Comment cmnt(masm_, "[ Allocate rest parameter array");
     if (!function_in_register_a1) {
       __ ld(a1, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
@@ -764,7 +779,6 @@
   VariableProxy* proxy = declaration->proxy();
   Variable* variable = proxy->var();
   switch (variable->location()) {
-    case VariableLocation::GLOBAL:
     case VariableLocation::UNALLOCATED: {
       DCHECK(!variable->binding_needs_init());
       FeedbackVectorSlot slot = proxy->VariableFeedbackSlot();
@@ -815,7 +829,6 @@
   VariableProxy* proxy = declaration->proxy();
   Variable* variable = proxy->var();
   switch (variable->location()) {
-    case VariableLocation::GLOBAL:
     case VariableLocation::UNALLOCATED: {
       FeedbackVectorSlot slot = proxy->VariableFeedbackSlot();
       DCHECK(!slot.IsInvalid());
@@ -1134,6 +1147,7 @@
   // Generate code for the going to the next element by incrementing
   // the index (smi) stored on top of the stack.
   __ bind(loop_statement.continue_label());
+  PrepareForBailoutForId(stmt->IncrementId(), BailoutState::NO_REGISTERS);
   __ pop(a0);
   __ Daddu(a0, a0, Operand(Smi::FromInt(1)));
   __ push(a0);
@@ -1156,12 +1170,9 @@
                                           FeedbackVectorSlot slot) {
   DCHECK(NeedsHomeObject(initializer));
   __ ld(StoreDescriptor::ReceiverRegister(), MemOperand(sp));
-  __ li(StoreDescriptor::NameRegister(),
-        Operand(isolate()->factory()->home_object_symbol()));
   __ ld(StoreDescriptor::ValueRegister(),
         MemOperand(sp, offset * kPointerSize));
-  EmitLoadStoreICSlot(slot);
-  CallStoreIC();
+  CallStoreIC(slot, isolate()->factory()->home_object_symbol());
 }
 
 
@@ -1170,12 +1181,9 @@
                                                      FeedbackVectorSlot slot) {
   DCHECK(NeedsHomeObject(initializer));
   __ Move(StoreDescriptor::ReceiverRegister(), v0);
-  __ li(StoreDescriptor::NameRegister(),
-        Operand(isolate()->factory()->home_object_symbol()));
   __ ld(StoreDescriptor::ValueRegister(),
         MemOperand(sp, offset * kPointerSize));
-  EmitLoadStoreICSlot(slot);
-  CallStoreIC();
+  CallStoreIC(slot, isolate()->factory()->home_object_symbol());
 }
 
 
@@ -1215,7 +1223,7 @@
   Register temp = a4;
 
   for (Scope* s = scope(); s != var->scope(); s = s->outer_scope()) {
-    if (s->num_heap_slots() > 0) {
+    if (s->NeedsContext()) {
       if (s->calls_sloppy_eval()) {
         // Check that extension is "the hole".
         __ ld(temp, ContextMemOperand(context, Context::EXTENSION_INDEX));
@@ -1265,20 +1273,6 @@
   }
 }
 
-
-void FullCodeGenerator::EmitGlobalVariableLoad(VariableProxy* proxy,
-                                               TypeofMode typeof_mode) {
-#ifdef DEBUG
-  Variable* var = proxy->var();
-  DCHECK(var->IsUnallocatedOrGlobalSlot() ||
-         (var->IsLookupSlot() && var->mode() == DYNAMIC_GLOBAL));
-#endif
-  __ li(LoadGlobalDescriptor::SlotRegister(),
-        Operand(SmiFromSlot(proxy->VariableFeedbackSlot())));
-  CallLoadGlobalIC(typeof_mode);
-}
-
-
 void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy,
                                          TypeofMode typeof_mode) {
   // Record position before possible IC call.
@@ -1289,7 +1283,6 @@
   // Three cases: global variables, lookup variables, and all other types of
   // variables.
   switch (var->location()) {
-    case VariableLocation::GLOBAL:
     case VariableLocation::UNALLOCATED: {
       Comment cmnt(masm_, "[ Global variable");
       EmitGlobalVariableLoad(proxy, typeof_mode);
@@ -1413,10 +1406,8 @@
             VisitForAccumulatorValue(value);
             __ mov(StoreDescriptor::ValueRegister(), result_register());
             DCHECK(StoreDescriptor::ValueRegister().is(a0));
-            __ li(StoreDescriptor::NameRegister(), Operand(key->value()));
             __ ld(StoreDescriptor::ReceiverRegister(), MemOperand(sp));
-            EmitLoadStoreICSlot(property->GetSlot(0));
-            CallStoreIC();
+            CallStoreIC(property->GetSlot(0), key->value());
             PrepareForBailoutForId(key->id(), BailoutState::NO_REGISTERS);
 
             if (NeedsHomeObject(value)) {
@@ -1586,6 +1577,7 @@
   } else {
     FastCloneShallowArrayStub stub(isolate(), allocation_site_mode);
     __ CallStub(&stub);
+    RestoreContext();
   }
   PrepareForBailoutForId(expr->CreateLiteralId(), BailoutState::TOS_REGISTER);
 
@@ -1595,8 +1587,7 @@
 
   // Emit code to evaluate all the non-constant subexpressions and to store
   // them into the newly cloned array.
-  int array_index = 0;
-  for (; array_index < length; array_index++) {
+  for (int array_index = 0; array_index < length; array_index++) {
     Expression* subexpr = subexprs->at(array_index);
     DCHECK(!subexpr->IsSpread());
 
@@ -1614,31 +1605,7 @@
     __ li(StoreDescriptor::NameRegister(), Operand(Smi::FromInt(array_index)));
     __ ld(StoreDescriptor::ReceiverRegister(), MemOperand(sp, 0));
     __ mov(StoreDescriptor::ValueRegister(), result_register());
-    EmitLoadStoreICSlot(expr->LiteralFeedbackSlot());
-    Handle<Code> ic =
-        CodeFactory::KeyedStoreIC(isolate(), language_mode()).code();
-    CallIC(ic);
-
-    PrepareForBailoutForId(expr->GetIdForElement(array_index),
-                           BailoutState::NO_REGISTERS);
-  }
-
-  // In case the array literal contains spread expressions it has two parts. The
-  // first part is  the "static" array which has a literal index is  handled
-  // above. The second part is the part after the first spread expression
-  // (inclusive) and these elements gets appended to the array. Note that the
-  // number elements an iterable produces is unknown ahead of time.
-  if (array_index < length && result_saved) {
-    PopOperand(v0);
-    result_saved = false;
-  }
-  for (; array_index < length; array_index++) {
-    Expression* subexpr = subexprs->at(array_index);
-
-    PushOperand(v0);
-    DCHECK(!subexpr->IsSpread());
-    VisitForStackValue(subexpr);
-    CallRuntimeWithOperands(Runtime::kAppendElement);
+    CallKeyedStoreIC(expr->LiteralFeedbackSlot());
 
     PrepareForBailoutForId(expr->GetIdForElement(array_index),
                            BailoutState::NO_REGISTERS);
@@ -1995,7 +1962,7 @@
 
 void FullCodeGenerator::EmitClassDefineProperties(ClassLiteral* lit) {
   for (int i = 0; i < lit->properties()->length(); i++) {
-    ObjectLiteral::Property* property = lit->properties()->at(i);
+    ClassLiteral::Property* property = lit->properties()->at(i);
     Expression* value = property->value();
 
     Register scratch = a1;
@@ -2022,26 +1989,23 @@
     }
 
     switch (property->kind()) {
-      case ObjectLiteral::Property::CONSTANT:
-      case ObjectLiteral::Property::MATERIALIZED_LITERAL:
-      case ObjectLiteral::Property::PROTOTYPE:
-        UNREACHABLE();
-      case ObjectLiteral::Property::COMPUTED:
+      case ClassLiteral::Property::METHOD:
         PushOperand(Smi::FromInt(DONT_ENUM));
         PushOperand(Smi::FromInt(property->NeedsSetFunctionName()));
         CallRuntimeWithOperands(Runtime::kDefineDataPropertyInLiteral);
         break;
 
-      case ObjectLiteral::Property::GETTER:
+      case ClassLiteral::Property::GETTER:
         PushOperand(Smi::FromInt(DONT_ENUM));
         CallRuntimeWithOperands(Runtime::kDefineGetterPropertyUnchecked);
         break;
 
-      case ObjectLiteral::Property::SETTER:
+      case ClassLiteral::Property::SETTER:
         PushOperand(Smi::FromInt(DONT_ENUM));
         CallRuntimeWithOperands(Runtime::kDefineSetterPropertyUnchecked);
         break;
 
+      case ClassLiteral::Property::FIELD:
       default:
         UNREACHABLE();
     }
@@ -2079,10 +2043,7 @@
       VisitForAccumulatorValue(prop->obj());
       __ mov(StoreDescriptor::ReceiverRegister(), result_register());
       PopOperand(StoreDescriptor::ValueRegister());  // Restore value.
-      __ li(StoreDescriptor::NameRegister(),
-            Operand(prop->key()->AsLiteral()->value()));
-      EmitLoadStoreICSlot(slot);
-      CallStoreIC();
+      CallStoreIC(slot, prop->key()->AsLiteral()->value());
       break;
     }
     case NAMED_SUPER_PROPERTY: {
@@ -2129,10 +2090,7 @@
       __ Move(StoreDescriptor::NameRegister(), result_register());
       PopOperands(StoreDescriptor::ValueRegister(),
                   StoreDescriptor::ReceiverRegister());
-      EmitLoadStoreICSlot(slot);
-      Handle<Code> ic =
-          CodeFactory::KeyedStoreIC(isolate(), language_mode()).code();
-      CallIC(ic);
+      CallKeyedStoreIC(slot);
       break;
     }
   }
@@ -2158,10 +2116,8 @@
   if (var->IsUnallocated()) {
     // Global var, const, or let.
     __ mov(StoreDescriptor::ValueRegister(), result_register());
-    __ li(StoreDescriptor::NameRegister(), Operand(var->name()));
     __ LoadGlobalObject(StoreDescriptor::ReceiverRegister());
-    EmitLoadStoreICSlot(slot);
-    CallStoreIC();
+    CallStoreIC(slot, var->name());
 
   } else if (IsLexicalVariableMode(var->mode()) && op != Token::INIT) {
     DCHECK(!var->IsLookupSlot());
@@ -2178,10 +2134,10 @@
       __ CallRuntime(Runtime::kThrowReferenceError);
       __ bind(&assign);
     }
-    if (var->mode() == CONST) {
-      __ CallRuntime(Runtime::kThrowConstAssignError);
-    } else {
+    if (var->mode() != CONST) {
       EmitStoreToStackLocalOrContextSlot(var, location);
+    } else if (var->throw_on_const_assignment(language_mode())) {
+      __ CallRuntime(Runtime::kThrowConstAssignError);
     }
   } else if (var->is_this() && var->mode() == CONST && op == Token::INIT) {
     // Initializing assignment to const {this} needs a write barrier.
@@ -2197,7 +2153,8 @@
     __ bind(&uninitialized_this);
     EmitStoreToStackLocalOrContextSlot(var, location);
 
-  } else if (!var->is_const_mode() || op == Token::INIT) {
+  } else {
+    DCHECK(var->mode() != CONST || op == Token::INIT);
     if (var->IsLookupSlot()) {
       __ Push(var->name());
       __ Push(v0);
@@ -2217,13 +2174,6 @@
       }
       EmitStoreToStackLocalOrContextSlot(var, location);
     }
-
-  } else {
-    DCHECK(var->mode() == CONST_LEGACY && op != Token::INIT);
-    if (is_strict(language_mode())) {
-      __ CallRuntime(Runtime::kThrowConstAssignError);
-    }
-    // Silently ignore store in sloppy mode.
   }
 }
 
@@ -2235,11 +2185,8 @@
   DCHECK(prop->key()->IsLiteral());
 
   __ mov(StoreDescriptor::ValueRegister(), result_register());
-  __ li(StoreDescriptor::NameRegister(),
-        Operand(prop->key()->AsLiteral()->value()));
   PopOperand(StoreDescriptor::ReceiverRegister());
-  EmitLoadStoreICSlot(expr->AssignmentSlot());
-  CallStoreIC();
+  CallStoreIC(expr->AssignmentSlot(), prop->key()->AsLiteral()->value());
 
   PrepareForBailoutForId(expr->AssignmentId(), BailoutState::TOS_REGISTER);
   context()->Plug(v0);
@@ -2287,10 +2234,7 @@
               StoreDescriptor::NameRegister());
   DCHECK(StoreDescriptor::ValueRegister().is(a0));
 
-  Handle<Code> ic =
-      CodeFactory::KeyedStoreIC(isolate(), language_mode()).code();
-  EmitLoadStoreICSlot(expr->AssignmentSlot());
-  CallIC(ic);
+  CallKeyedStoreIC(expr->AssignmentSlot());
 
   PrepareForBailoutForId(expr->AssignmentId(), BailoutState::TOS_REGISTER);
   context()->Plug(v0);
@@ -2843,25 +2787,6 @@
 }
 
 
-void FullCodeGenerator::EmitStringCharFromCode(CallRuntime* expr) {
-  ZoneList<Expression*>* args = expr->arguments();
-  DCHECK(args->length() == 1);
-
-  VisitForAccumulatorValue(args->at(0));
-
-  Label done;
-  StringCharFromCodeGenerator generator(v0, a1);
-  generator.GenerateFast(masm_);
-  __ jmp(&done);
-
-  NopRuntimeCallHelper call_helper;
-  generator.GenerateSlow(masm_, call_helper);
-
-  __ bind(&done);
-  context()->Plug(a1);
-}
-
-
 void FullCodeGenerator::EmitStringCharCodeAt(CallRuntime* expr) {
   ZoneList<Expression*>* args = expr->arguments();
   DCHECK(args->length() == 2);
@@ -3055,7 +2980,7 @@
         // "delete this" is allowed.
         bool is_this = var->is_this();
         DCHECK(is_sloppy(language_mode()) || is_this);
-        if (var->IsUnallocatedOrGlobalSlot()) {
+        if (var->IsUnallocated()) {
           __ LoadGlobalObject(a2);
           __ li(a1, Operand(var->name()));
           __ Push(a2, a1);
@@ -3339,11 +3264,8 @@
       break;
     case NAMED_PROPERTY: {
       __ mov(StoreDescriptor::ValueRegister(), result_register());
-      __ li(StoreDescriptor::NameRegister(),
-            Operand(prop->key()->AsLiteral()->value()));
       PopOperand(StoreDescriptor::ReceiverRegister());
-      EmitLoadStoreICSlot(expr->CountSlot());
-      CallStoreIC();
+      CallStoreIC(expr->CountSlot(), prop->key()->AsLiteral()->value());
       PrepareForBailoutForId(expr->AssignmentId(), BailoutState::TOS_REGISTER);
       if (expr->is_postfix()) {
         if (!context()->IsEffect()) {
@@ -3382,10 +3304,7 @@
       __ mov(StoreDescriptor::ValueRegister(), result_register());
       PopOperands(StoreDescriptor::ReceiverRegister(),
                   StoreDescriptor::NameRegister());
-      Handle<Code> ic =
-          CodeFactory::KeyedStoreIC(isolate(), language_mode()).code();
-      EmitLoadStoreICSlot(expr->CountSlot());
-      CallIC(ic);
+      CallKeyedStoreIC(expr->CountSlot());
       PrepareForBailoutForId(expr->AssignmentId(), BailoutState::TOS_REGISTER);
       if (expr->is_postfix()) {
         if (!context()->IsEffect()) {
diff --git a/src/full-codegen/ppc/full-codegen-ppc.cc b/src/full-codegen/ppc/full-codegen-ppc.cc
index 6813069..de9a8f4 100644
--- a/src/full-codegen/ppc/full-codegen-ppc.cc
+++ b/src/full-codegen/ppc/full-codegen-ppc.cc
@@ -4,14 +4,16 @@
 
 #if V8_TARGET_ARCH_PPC
 
+#include "src/full-codegen/full-codegen.h"
+#include "src/ast/compile-time-value.h"
 #include "src/ast/scopes.h"
 #include "src/code-factory.h"
 #include "src/code-stubs.h"
 #include "src/codegen.h"
+#include "src/compilation-info.h"
+#include "src/compiler.h"
 #include "src/debug/debug.h"
-#include "src/full-codegen/full-codegen.h"
 #include "src/ic/ic.h"
-#include "src/parsing/parser.h"
 
 #include "src/ppc/code-stubs-ppc.h"
 #include "src/ppc/macro-assembler-ppc.h"
@@ -131,6 +133,22 @@
   info->set_prologue_offset(prologue_offset);
   __ Prologue(info->GeneratePreagedPrologue(), ip, prologue_offset);
 
+  // Increment invocation count for the function.
+  {
+    Comment cmnt(masm_, "[ Increment invocation count");
+    __ LoadP(r7, FieldMemOperand(r4, JSFunction::kLiteralsOffset));
+    __ LoadP(r7, FieldMemOperand(r7, LiteralsArray::kFeedbackVectorOffset));
+    __ LoadP(r8, FieldMemOperand(r7, TypeFeedbackVector::kInvocationCountIndex *
+                                             kPointerSize +
+                                         TypeFeedbackVector::kHeaderSize));
+    __ AddSmiLiteral(r8, r8, Smi::FromInt(1), r0);
+    __ StoreP(r8,
+              FieldMemOperand(
+                  r7, TypeFeedbackVector::kInvocationCountIndex * kPointerSize +
+                          TypeFeedbackVector::kHeaderSize),
+              r0);
+  }
+
   {
     Comment cmnt(masm_, "[ Allocate locals");
     int locals_count = info->scope()->num_stack_slots();
@@ -173,14 +191,14 @@
   bool function_in_register_r4 = true;
 
   // Possibly allocate a local context.
-  if (info->scope()->num_heap_slots() > 0) {
+  if (info->scope()->NeedsContext()) {
     // Argument to NewContext is the function, which is still in r4.
     Comment cmnt(masm_, "[ Allocate context");
     bool need_write_barrier = true;
     int slots = info->scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
     if (info->scope()->is_script_scope()) {
       __ push(r4);
-      __ Push(info->scope()->GetScopeInfo(info->isolate()));
+      __ Push(info->scope()->scope_info());
       __ CallRuntime(Runtime::kNewScriptContext);
       PrepareForBailoutForId(BailoutId::ScriptContext(),
                              BailoutState::TOS_REGISTER);
@@ -265,9 +283,8 @@
   }
 
   // Possibly allocate RestParameters
-  int rest_index;
-  Variable* rest_param = info->scope()->rest_parameter(&rest_index);
-  if (rest_param) {
+  Variable* rest_param = info->scope()->rest_parameter();
+  if (rest_param != nullptr) {
     Comment cmnt(masm_, "[ Allocate rest parameter array");
     if (!function_in_register_r4) {
       __ LoadP(r4, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
@@ -732,7 +749,6 @@
   VariableProxy* proxy = declaration->proxy();
   Variable* variable = proxy->var();
   switch (variable->location()) {
-    case VariableLocation::GLOBAL:
     case VariableLocation::UNALLOCATED: {
       DCHECK(!variable->binding_needs_init());
       FeedbackVectorSlot slot = proxy->VariableFeedbackSlot();
@@ -783,7 +799,6 @@
   VariableProxy* proxy = declaration->proxy();
   Variable* variable = proxy->var();
   switch (variable->location()) {
-    case VariableLocation::GLOBAL:
     case VariableLocation::UNALLOCATED: {
       FeedbackVectorSlot slot = proxy->VariableFeedbackSlot();
       DCHECK(!slot.IsInvalid());
@@ -1102,6 +1117,7 @@
   // Generate code for the going to the next element by incrementing
   // the index (smi) stored on top of the stack.
   __ bind(loop_statement.continue_label());
+  PrepareForBailoutForId(stmt->IncrementId(), BailoutState::NO_REGISTERS);
   __ pop(r3);
   __ AddSmiLiteral(r3, r3, Smi::FromInt(1), r0);
   __ push(r3);
@@ -1124,12 +1140,9 @@
                                           FeedbackVectorSlot slot) {
   DCHECK(NeedsHomeObject(initializer));
   __ LoadP(StoreDescriptor::ReceiverRegister(), MemOperand(sp));
-  __ mov(StoreDescriptor::NameRegister(),
-         Operand(isolate()->factory()->home_object_symbol()));
   __ LoadP(StoreDescriptor::ValueRegister(),
            MemOperand(sp, offset * kPointerSize));
-  EmitLoadStoreICSlot(slot);
-  CallStoreIC();
+  CallStoreIC(slot, isolate()->factory()->home_object_symbol());
 }
 
 
@@ -1138,12 +1151,9 @@
                                                      FeedbackVectorSlot slot) {
   DCHECK(NeedsHomeObject(initializer));
   __ Move(StoreDescriptor::ReceiverRegister(), r3);
-  __ mov(StoreDescriptor::NameRegister(),
-         Operand(isolate()->factory()->home_object_symbol()));
   __ LoadP(StoreDescriptor::ValueRegister(),
            MemOperand(sp, offset * kPointerSize));
-  EmitLoadStoreICSlot(slot);
-  CallStoreIC();
+  CallStoreIC(slot, isolate()->factory()->home_object_symbol());
 }
 
 
@@ -1183,7 +1193,7 @@
   Register temp = r7;
 
   for (Scope* s = scope(); s != var->scope(); s = s->outer_scope()) {
-    if (s->num_heap_slots() > 0) {
+    if (s->NeedsContext()) {
       if (s->calls_sloppy_eval()) {
         // Check that extension is "the hole".
         __ LoadP(temp, ContextMemOperand(context, Context::EXTENSION_INDEX));
@@ -1232,20 +1242,6 @@
   }
 }
 
-
-void FullCodeGenerator::EmitGlobalVariableLoad(VariableProxy* proxy,
-                                               TypeofMode typeof_mode) {
-#ifdef DEBUG
-  Variable* var = proxy->var();
-  DCHECK(var->IsUnallocatedOrGlobalSlot() ||
-         (var->IsLookupSlot() && var->mode() == DYNAMIC_GLOBAL));
-#endif
-  __ mov(LoadGlobalDescriptor::SlotRegister(),
-         Operand(SmiFromSlot(proxy->VariableFeedbackSlot())));
-  CallLoadGlobalIC(typeof_mode);
-}
-
-
 void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy,
                                          TypeofMode typeof_mode) {
   // Record position before possible IC call.
@@ -1256,7 +1252,6 @@
   // Three cases: global variables, lookup variables, and all other types of
   // variables.
   switch (var->location()) {
-    case VariableLocation::GLOBAL:
     case VariableLocation::UNALLOCATED: {
       Comment cmnt(masm_, "[ Global variable");
       EmitGlobalVariableLoad(proxy, typeof_mode);
@@ -1379,10 +1374,8 @@
           if (property->emit_store()) {
             VisitForAccumulatorValue(value);
             DCHECK(StoreDescriptor::ValueRegister().is(r3));
-            __ mov(StoreDescriptor::NameRegister(), Operand(key->value()));
             __ LoadP(StoreDescriptor::ReceiverRegister(), MemOperand(sp));
-            EmitLoadStoreICSlot(property->GetSlot(0));
-            CallStoreIC();
+            CallStoreIC(property->GetSlot(0), key->value());
             PrepareForBailoutForId(key->id(), BailoutState::NO_REGISTERS);
 
             if (NeedsHomeObject(value)) {
@@ -1552,6 +1545,7 @@
   } else {
     FastCloneShallowArrayStub stub(isolate(), allocation_site_mode);
     __ CallStub(&stub);
+    RestoreContext();
   }
   PrepareForBailoutForId(expr->CreateLiteralId(), BailoutState::TOS_REGISTER);
 
@@ -1561,8 +1555,7 @@
 
   // Emit code to evaluate all the non-constant subexpressions and to store
   // them into the newly cloned array.
-  int array_index = 0;
-  for (; array_index < length; array_index++) {
+  for (int array_index = 0; array_index < length; array_index++) {
     Expression* subexpr = subexprs->at(array_index);
     DCHECK(!subexpr->IsSpread());
     // If the subexpression is a literal or a simple materialized literal it
@@ -1578,31 +1571,7 @@
     __ LoadSmiLiteral(StoreDescriptor::NameRegister(),
                       Smi::FromInt(array_index));
     __ LoadP(StoreDescriptor::ReceiverRegister(), MemOperand(sp, 0));
-    EmitLoadStoreICSlot(expr->LiteralFeedbackSlot());
-    Handle<Code> ic =
-        CodeFactory::KeyedStoreIC(isolate(), language_mode()).code();
-    CallIC(ic);
-
-    PrepareForBailoutForId(expr->GetIdForElement(array_index),
-                           BailoutState::NO_REGISTERS);
-  }
-
-  // In case the array literal contains spread expressions it has two parts. The
-  // first part is  the "static" array which has a literal index is  handled
-  // above. The second part is the part after the first spread expression
-  // (inclusive) and these elements gets appended to the array. Note that the
-  // number elements an iterable produces is unknown ahead of time.
-  if (array_index < length && result_saved) {
-    PopOperand(r3);
-    result_saved = false;
-  }
-  for (; array_index < length; array_index++) {
-    Expression* subexpr = subexprs->at(array_index);
-
-    PushOperand(r3);
-    DCHECK(!subexpr->IsSpread());
-    VisitForStackValue(subexpr);
-    CallRuntimeWithOperands(Runtime::kAppendElement);
+    CallKeyedStoreIC(expr->LiteralFeedbackSlot());
 
     PrepareForBailoutForId(expr->GetIdForElement(array_index),
                            BailoutState::NO_REGISTERS);
@@ -1998,7 +1967,7 @@
 
 void FullCodeGenerator::EmitClassDefineProperties(ClassLiteral* lit) {
   for (int i = 0; i < lit->properties()->length(); i++) {
-    ObjectLiteral::Property* property = lit->properties()->at(i);
+    ClassLiteral::Property* property = lit->properties()->at(i);
     Expression* value = property->value();
 
     Register scratch = r4;
@@ -2025,26 +1994,23 @@
     }
 
     switch (property->kind()) {
-      case ObjectLiteral::Property::CONSTANT:
-      case ObjectLiteral::Property::MATERIALIZED_LITERAL:
-      case ObjectLiteral::Property::PROTOTYPE:
-        UNREACHABLE();
-      case ObjectLiteral::Property::COMPUTED:
+      case ClassLiteral::Property::METHOD:
         PushOperand(Smi::FromInt(DONT_ENUM));
         PushOperand(Smi::FromInt(property->NeedsSetFunctionName()));
         CallRuntimeWithOperands(Runtime::kDefineDataPropertyInLiteral);
         break;
 
-      case ObjectLiteral::Property::GETTER:
+      case ClassLiteral::Property::GETTER:
         PushOperand(Smi::FromInt(DONT_ENUM));
         CallRuntimeWithOperands(Runtime::kDefineGetterPropertyUnchecked);
         break;
 
-      case ObjectLiteral::Property::SETTER:
+      case ClassLiteral::Property::SETTER:
         PushOperand(Smi::FromInt(DONT_ENUM));
         CallRuntimeWithOperands(Runtime::kDefineSetterPropertyUnchecked);
         break;
 
+      case ClassLiteral::Property::FIELD:
       default:
         UNREACHABLE();
     }
@@ -2081,10 +2047,7 @@
       VisitForAccumulatorValue(prop->obj());
       __ Move(StoreDescriptor::ReceiverRegister(), r3);
       PopOperand(StoreDescriptor::ValueRegister());  // Restore value.
-      __ mov(StoreDescriptor::NameRegister(),
-             Operand(prop->key()->AsLiteral()->value()));
-      EmitLoadStoreICSlot(slot);
-      CallStoreIC();
+      CallStoreIC(slot, prop->key()->AsLiteral()->value());
       break;
     }
     case NAMED_SUPER_PROPERTY: {
@@ -2131,10 +2094,7 @@
       __ Move(StoreDescriptor::NameRegister(), r3);
       PopOperands(StoreDescriptor::ValueRegister(),
                   StoreDescriptor::ReceiverRegister());
-      EmitLoadStoreICSlot(slot);
-      Handle<Code> ic =
-          CodeFactory::KeyedStoreIC(isolate(), language_mode()).code();
-      CallIC(ic);
+      CallKeyedStoreIC(slot);
       break;
     }
   }
@@ -2159,10 +2119,8 @@
                                                FeedbackVectorSlot slot) {
   if (var->IsUnallocated()) {
     // Global var, const, or let.
-    __ mov(StoreDescriptor::NameRegister(), Operand(var->name()));
     __ LoadGlobalObject(StoreDescriptor::ReceiverRegister());
-    EmitLoadStoreICSlot(slot);
-    CallStoreIC();
+    CallStoreIC(slot, var->name());
 
   } else if (IsLexicalVariableMode(var->mode()) && op != Token::INIT) {
     DCHECK(!var->IsLookupSlot());
@@ -2179,10 +2137,10 @@
       __ CallRuntime(Runtime::kThrowReferenceError);
       __ bind(&assign);
     }
-    if (var->mode() == CONST) {
-      __ CallRuntime(Runtime::kThrowConstAssignError);
-    } else {
+    if (var->mode() != CONST) {
       EmitStoreToStackLocalOrContextSlot(var, location);
+    } else if (var->throw_on_const_assignment(language_mode())) {
+      __ CallRuntime(Runtime::kThrowConstAssignError);
     }
   } else if (var->is_this() && var->mode() == CONST && op == Token::INIT) {
     // Initializing assignment to const {this} needs a write barrier.
@@ -2198,7 +2156,8 @@
     __ bind(&uninitialized_this);
     EmitStoreToStackLocalOrContextSlot(var, location);
 
-  } else if (!var->is_const_mode() || op == Token::INIT) {
+  } else {
+    DCHECK(var->mode() != CONST || op == Token::INIT);
     if (var->IsLookupSlot()) {
       // Assignment to var.
       __ Push(var->name());
@@ -2219,12 +2178,6 @@
       }
       EmitStoreToStackLocalOrContextSlot(var, location);
     }
-  } else {
-    DCHECK(var->mode() == CONST_LEGACY && op != Token::INIT);
-    if (is_strict(language_mode())) {
-      __ CallRuntime(Runtime::kThrowConstAssignError);
-    }
-    // Silently ignore store in sloppy mode.
   }
 }
 
@@ -2235,11 +2188,8 @@
   DCHECK(prop != NULL);
   DCHECK(prop->key()->IsLiteral());
 
-  __ mov(StoreDescriptor::NameRegister(),
-         Operand(prop->key()->AsLiteral()->value()));
   PopOperand(StoreDescriptor::ReceiverRegister());
-  EmitLoadStoreICSlot(expr->AssignmentSlot());
-  CallStoreIC();
+  CallStoreIC(expr->AssignmentSlot(), prop->key()->AsLiteral()->value());
 
   PrepareForBailoutForId(expr->AssignmentId(), BailoutState::TOS_REGISTER);
   context()->Plug(r3);
@@ -2281,10 +2231,7 @@
               StoreDescriptor::NameRegister());
   DCHECK(StoreDescriptor::ValueRegister().is(r3));
 
-  Handle<Code> ic =
-      CodeFactory::KeyedStoreIC(isolate(), language_mode()).code();
-  EmitLoadStoreICSlot(expr->AssignmentSlot());
-  CallIC(ic);
+  CallKeyedStoreIC(expr->AssignmentSlot());
 
   PrepareForBailoutForId(expr->AssignmentId(), BailoutState::TOS_REGISTER);
   context()->Plug(r3);
@@ -2838,24 +2785,6 @@
 }
 
 
-void FullCodeGenerator::EmitStringCharFromCode(CallRuntime* expr) {
-  ZoneList<Expression*>* args = expr->arguments();
-  DCHECK(args->length() == 1);
-  VisitForAccumulatorValue(args->at(0));
-
-  Label done;
-  StringCharFromCodeGenerator generator(r3, r4);
-  generator.GenerateFast(masm_);
-  __ b(&done);
-
-  NopRuntimeCallHelper call_helper;
-  generator.GenerateSlow(masm_, call_helper);
-
-  __ bind(&done);
-  context()->Plug(r4);
-}
-
-
 void FullCodeGenerator::EmitStringCharCodeAt(CallRuntime* expr) {
   ZoneList<Expression*>* args = expr->arguments();
   DCHECK(args->length() == 2);
@@ -3048,7 +2977,7 @@
         // "delete this" is allowed.
         bool is_this = var->is_this();
         DCHECK(is_sloppy(language_mode()) || is_this);
-        if (var->IsUnallocatedOrGlobalSlot()) {
+        if (var->IsUnallocated()) {
           __ LoadGlobalObject(r5);
           __ mov(r4, Operand(var->name()));
           __ Push(r5, r4);
@@ -3328,11 +3257,8 @@
       }
       break;
     case NAMED_PROPERTY: {
-      __ mov(StoreDescriptor::NameRegister(),
-             Operand(prop->key()->AsLiteral()->value()));
       PopOperand(StoreDescriptor::ReceiverRegister());
-      EmitLoadStoreICSlot(expr->CountSlot());
-      CallStoreIC();
+      CallStoreIC(expr->CountSlot(), prop->key()->AsLiteral()->value());
       PrepareForBailoutForId(expr->AssignmentId(), BailoutState::TOS_REGISTER);
       if (expr->is_postfix()) {
         if (!context()->IsEffect()) {
@@ -3370,10 +3296,7 @@
     case KEYED_PROPERTY: {
       PopOperands(StoreDescriptor::ReceiverRegister(),
                   StoreDescriptor::NameRegister());
-      Handle<Code> ic =
-          CodeFactory::KeyedStoreIC(isolate(), language_mode()).code();
-      EmitLoadStoreICSlot(expr->CountSlot());
-      CallIC(ic);
+      CallKeyedStoreIC(expr->CountSlot());
       PrepareForBailoutForId(expr->AssignmentId(), BailoutState::TOS_REGISTER);
       if (expr->is_postfix()) {
         if (!context()->IsEffect()) {
diff --git a/src/full-codegen/s390/full-codegen-s390.cc b/src/full-codegen/s390/full-codegen-s390.cc
index bd1509b..dfe6527 100644
--- a/src/full-codegen/s390/full-codegen-s390.cc
+++ b/src/full-codegen/s390/full-codegen-s390.cc
@@ -4,14 +4,16 @@
 
 #if V8_TARGET_ARCH_S390
 
+#include "src/full-codegen/full-codegen.h"
+#include "src/ast/compile-time-value.h"
 #include "src/ast/scopes.h"
 #include "src/code-factory.h"
 #include "src/code-stubs.h"
 #include "src/codegen.h"
+#include "src/compilation-info.h"
+#include "src/compiler.h"
 #include "src/debug/debug.h"
-#include "src/full-codegen/full-codegen.h"
 #include "src/ic/ic.h"
-#include "src/parsing/parser.h"
 
 #include "src/s390/code-stubs-s390.h"
 #include "src/s390/macro-assembler-s390.h"
@@ -131,6 +133,21 @@
   info->set_prologue_offset(prologue_offset);
   __ Prologue(info->GeneratePreagedPrologue(), ip, prologue_offset);
 
+  // Increment invocation count for the function.
+  {
+    Comment cmnt(masm_, "[ Increment invocation count");
+    __ LoadP(r6, FieldMemOperand(r3, JSFunction::kLiteralsOffset));
+    __ LoadP(r6, FieldMemOperand(r6, LiteralsArray::kFeedbackVectorOffset));
+    __ LoadP(r1, FieldMemOperand(r6, TypeFeedbackVector::kInvocationCountIndex *
+                                             kPointerSize +
+                                         TypeFeedbackVector::kHeaderSize));
+    __ AddSmiLiteral(r1, r1, Smi::FromInt(1), r0);
+    __ StoreP(r1,
+              FieldMemOperand(
+                  r6, TypeFeedbackVector::kInvocationCountIndex * kPointerSize +
+                          TypeFeedbackVector::kHeaderSize));
+  }
+
   {
     Comment cmnt(masm_, "[ Allocate locals");
     int locals_count = info->scope()->num_stack_slots();
@@ -178,14 +195,14 @@
   bool function_in_register_r3 = true;
 
   // Possibly allocate a local context.
-  if (info->scope()->num_heap_slots() > 0) {
+  if (info->scope()->NeedsContext()) {
     // Argument to NewContext is the function, which is still in r3.
     Comment cmnt(masm_, "[ Allocate context");
     bool need_write_barrier = true;
     int slots = info->scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
     if (info->scope()->is_script_scope()) {
       __ push(r3);
-      __ Push(info->scope()->GetScopeInfo(info->isolate()));
+      __ Push(info->scope()->scope_info());
       __ CallRuntime(Runtime::kNewScriptContext);
       PrepareForBailoutForId(BailoutId::ScriptContext(),
                              BailoutState::TOS_REGISTER);
@@ -270,9 +287,8 @@
   }
 
   // Possibly allocate RestParameters
-  int rest_index;
-  Variable* rest_param = info->scope()->rest_parameter(&rest_index);
-  if (rest_param) {
+  Variable* rest_param = info->scope()->rest_parameter();
+  if (rest_param != nullptr) {
     Comment cmnt(masm_, "[ Allocate rest parameter array");
 
     if (!function_in_register_r3) {
@@ -708,7 +724,6 @@
   VariableProxy* proxy = declaration->proxy();
   Variable* variable = proxy->var();
   switch (variable->location()) {
-    case VariableLocation::GLOBAL:
     case VariableLocation::UNALLOCATED: {
       DCHECK(!variable->binding_needs_init());
       FeedbackVectorSlot slot = proxy->VariableFeedbackSlot();
@@ -758,7 +773,6 @@
   VariableProxy* proxy = declaration->proxy();
   Variable* variable = proxy->var();
   switch (variable->location()) {
-    case VariableLocation::GLOBAL:
     case VariableLocation::UNALLOCATED: {
       FeedbackVectorSlot slot = proxy->VariableFeedbackSlot();
       DCHECK(!slot.IsInvalid());
@@ -1072,6 +1086,7 @@
   // Generate code for the going to the next element by incrementing
   // the index (smi) stored on top of the stack.
   __ bind(loop_statement.continue_label());
+  PrepareForBailoutForId(stmt->IncrementId(), BailoutState::NO_REGISTERS);
   __ pop(r2);
   __ AddSmiLiteral(r2, r2, Smi::FromInt(1), r0);
   __ push(r2);
@@ -1093,12 +1108,9 @@
                                           FeedbackVectorSlot slot) {
   DCHECK(NeedsHomeObject(initializer));
   __ LoadP(StoreDescriptor::ReceiverRegister(), MemOperand(sp));
-  __ mov(StoreDescriptor::NameRegister(),
-         Operand(isolate()->factory()->home_object_symbol()));
   __ LoadP(StoreDescriptor::ValueRegister(),
            MemOperand(sp, offset * kPointerSize));
-  EmitLoadStoreICSlot(slot);
-  CallStoreIC();
+  CallStoreIC(slot, isolate()->factory()->home_object_symbol());
 }
 
 void FullCodeGenerator::EmitSetHomeObjectAccumulator(Expression* initializer,
@@ -1106,12 +1118,9 @@
                                                      FeedbackVectorSlot slot) {
   DCHECK(NeedsHomeObject(initializer));
   __ Move(StoreDescriptor::ReceiverRegister(), r2);
-  __ mov(StoreDescriptor::NameRegister(),
-         Operand(isolate()->factory()->home_object_symbol()));
   __ LoadP(StoreDescriptor::ValueRegister(),
            MemOperand(sp, offset * kPointerSize));
-  EmitLoadStoreICSlot(slot);
-  CallStoreIC();
+  CallStoreIC(slot, isolate()->factory()->home_object_symbol());
 }
 
 void FullCodeGenerator::EmitLoadGlobalCheckExtensions(VariableProxy* proxy,
@@ -1149,7 +1158,7 @@
   Register temp = r6;
 
   for (Scope* s = scope(); s != var->scope(); s = s->outer_scope()) {
-    if (s->num_heap_slots() > 0) {
+    if (s->NeedsContext()) {
       if (s->calls_sloppy_eval()) {
         // Check that extension is "the hole".
         __ LoadP(temp, ContextMemOperand(context, Context::EXTENSION_INDEX));
@@ -1197,18 +1206,6 @@
   }
 }
 
-void FullCodeGenerator::EmitGlobalVariableLoad(VariableProxy* proxy,
-                                               TypeofMode typeof_mode) {
-#ifdef DEBUG
-  Variable* var = proxy->var();
-  DCHECK(var->IsUnallocatedOrGlobalSlot() ||
-         (var->IsLookupSlot() && var->mode() == DYNAMIC_GLOBAL));
-#endif
-  __ mov(LoadGlobalDescriptor::SlotRegister(),
-         Operand(SmiFromSlot(proxy->VariableFeedbackSlot())));
-  CallLoadGlobalIC(typeof_mode);
-}
-
 void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy,
                                          TypeofMode typeof_mode) {
   // Record position before possible IC call.
@@ -1219,7 +1216,6 @@
   // Three cases: global variables, lookup variables, and all other types of
   // variables.
   switch (var->location()) {
-    case VariableLocation::GLOBAL:
     case VariableLocation::UNALLOCATED: {
       Comment cmnt(masm_, "[ Global variable");
       EmitGlobalVariableLoad(proxy, typeof_mode);
@@ -1340,10 +1336,8 @@
           if (property->emit_store()) {
             VisitForAccumulatorValue(value);
             DCHECK(StoreDescriptor::ValueRegister().is(r2));
-            __ mov(StoreDescriptor::NameRegister(), Operand(key->value()));
             __ LoadP(StoreDescriptor::ReceiverRegister(), MemOperand(sp));
-            EmitLoadStoreICSlot(property->GetSlot(0));
-            CallStoreIC();
+            CallStoreIC(property->GetSlot(0), key->value());
             PrepareForBailoutForId(key->id(), BailoutState::NO_REGISTERS);
 
             if (NeedsHomeObject(value)) {
@@ -1512,6 +1506,7 @@
   } else {
     FastCloneShallowArrayStub stub(isolate(), allocation_site_mode);
     __ CallStub(&stub);
+    RestoreContext();
   }
   PrepareForBailoutForId(expr->CreateLiteralId(), BailoutState::TOS_REGISTER);
 
@@ -1521,8 +1516,7 @@
 
   // Emit code to evaluate all the non-constant subexpressions and to store
   // them into the newly cloned array.
-  int array_index = 0;
-  for (; array_index < length; array_index++) {
+  for (int array_index = 0; array_index < length; array_index++) {
     Expression* subexpr = subexprs->at(array_index);
     DCHECK(!subexpr->IsSpread());
     // If the subexpression is a literal or a simple materialized literal it
@@ -1538,31 +1532,7 @@
     __ LoadSmiLiteral(StoreDescriptor::NameRegister(),
                       Smi::FromInt(array_index));
     __ LoadP(StoreDescriptor::ReceiverRegister(), MemOperand(sp, 0));
-    EmitLoadStoreICSlot(expr->LiteralFeedbackSlot());
-    Handle<Code> ic =
-        CodeFactory::KeyedStoreIC(isolate(), language_mode()).code();
-    CallIC(ic);
-
-    PrepareForBailoutForId(expr->GetIdForElement(array_index),
-                           BailoutState::NO_REGISTERS);
-  }
-
-  // In case the array literal contains spread expressions it has two parts. The
-  // first part is  the "static" array which has a literal index is  handled
-  // above. The second part is the part after the first spread expression
-  // (inclusive) and these elements gets appended to the array. Note that the
-  // number elements an iterable produces is unknown ahead of time.
-  if (array_index < length && result_saved) {
-    PopOperand(r2);
-    result_saved = false;
-  }
-  for (; array_index < length; array_index++) {
-    Expression* subexpr = subexprs->at(array_index);
-
-    PushOperand(r2);
-    DCHECK(!subexpr->IsSpread());
-    VisitForStackValue(subexpr);
-    CallRuntimeWithOperands(Runtime::kAppendElement);
+    CallKeyedStoreIC(expr->LiteralFeedbackSlot());
 
     PrepareForBailoutForId(expr->GetIdForElement(array_index),
                            BailoutState::NO_REGISTERS);
@@ -1956,7 +1926,7 @@
 
 void FullCodeGenerator::EmitClassDefineProperties(ClassLiteral* lit) {
   for (int i = 0; i < lit->properties()->length(); i++) {
-    ObjectLiteral::Property* property = lit->properties()->at(i);
+    ClassLiteral::Property* property = lit->properties()->at(i);
     Expression* value = property->value();
 
     Register scratch = r3;
@@ -1983,26 +1953,23 @@
     }
 
     switch (property->kind()) {
-      case ObjectLiteral::Property::CONSTANT:
-      case ObjectLiteral::Property::MATERIALIZED_LITERAL:
-      case ObjectLiteral::Property::PROTOTYPE:
-        UNREACHABLE();
-      case ObjectLiteral::Property::COMPUTED:
+      case ClassLiteral::Property::METHOD:
         PushOperand(Smi::FromInt(DONT_ENUM));
         PushOperand(Smi::FromInt(property->NeedsSetFunctionName()));
         CallRuntimeWithOperands(Runtime::kDefineDataPropertyInLiteral);
         break;
 
-      case ObjectLiteral::Property::GETTER:
+      case ClassLiteral::Property::GETTER:
         PushOperand(Smi::FromInt(DONT_ENUM));
         CallRuntimeWithOperands(Runtime::kDefineGetterPropertyUnchecked);
         break;
 
-      case ObjectLiteral::Property::SETTER:
+      case ClassLiteral::Property::SETTER:
         PushOperand(Smi::FromInt(DONT_ENUM));
         CallRuntimeWithOperands(Runtime::kDefineSetterPropertyUnchecked);
         break;
 
+      case ClassLiteral::Property::FIELD:
       default:
         UNREACHABLE();
     }
@@ -2037,10 +2004,7 @@
       VisitForAccumulatorValue(prop->obj());
       __ Move(StoreDescriptor::ReceiverRegister(), r2);
       PopOperand(StoreDescriptor::ValueRegister());  // Restore value.
-      __ mov(StoreDescriptor::NameRegister(),
-             Operand(prop->key()->AsLiteral()->value()));
-      EmitLoadStoreICSlot(slot);
-      CallStoreIC();
+      CallStoreIC(slot, prop->key()->AsLiteral()->value());
       break;
     }
     case NAMED_SUPER_PROPERTY: {
@@ -2087,10 +2051,7 @@
       __ Move(StoreDescriptor::NameRegister(), r2);
       PopOperands(StoreDescriptor::ValueRegister(),
                   StoreDescriptor::ReceiverRegister());
-      EmitLoadStoreICSlot(slot);
-      Handle<Code> ic =
-          CodeFactory::KeyedStoreIC(isolate(), language_mode()).code();
-      CallIC(ic);
+      CallKeyedStoreIC(slot);
       break;
     }
   }
@@ -2113,10 +2074,8 @@
                                                FeedbackVectorSlot slot) {
   if (var->IsUnallocated()) {
     // Global var, const, or let.
-    __ mov(StoreDescriptor::NameRegister(), Operand(var->name()));
     __ LoadGlobalObject(StoreDescriptor::ReceiverRegister());
-    EmitLoadStoreICSlot(slot);
-    CallStoreIC();
+    CallStoreIC(slot, var->name());
 
   } else if (IsLexicalVariableMode(var->mode()) && op != Token::INIT) {
     // Non-initializing assignment to let variable needs a write barrier.
@@ -2134,10 +2093,10 @@
       __ CallRuntime(Runtime::kThrowReferenceError);
       __ bind(&assign);
     }
-    if (var->mode() == CONST) {
-      __ CallRuntime(Runtime::kThrowConstAssignError);
-    } else {
+    if (var->mode() != CONST) {
       EmitStoreToStackLocalOrContextSlot(var, location);
+    } else if (var->throw_on_const_assignment(language_mode())) {
+      __ CallRuntime(Runtime::kThrowConstAssignError);
     }
   } else if (var->is_this() && var->mode() == CONST && op == Token::INIT) {
     // Initializing assignment to const {this} needs a write barrier.
@@ -2152,8 +2111,8 @@
     __ CallRuntime(Runtime::kThrowReferenceError);
     __ bind(&uninitialized_this);
     EmitStoreToStackLocalOrContextSlot(var, location);
-
-  } else if (!var->is_const_mode() || op == Token::INIT) {
+  } else {
+    DCHECK(var->mode() != CONST || op == Token::INIT);
     if (var->IsLookupSlot()) {
       // Assignment to var.
       __ Push(var->name());
@@ -2174,12 +2133,6 @@
       }
       EmitStoreToStackLocalOrContextSlot(var, location);
     }
-  } else {
-    DCHECK(var->mode() == CONST_LEGACY && op != Token::INIT);
-    if (is_strict(language_mode())) {
-      __ CallRuntime(Runtime::kThrowConstAssignError);
-    }
-    // Silently ignore store in sloppy mode.
   }
 }
 
@@ -2189,11 +2142,8 @@
   DCHECK(prop != NULL);
   DCHECK(prop->key()->IsLiteral());
 
-  __ mov(StoreDescriptor::NameRegister(),
-         Operand(prop->key()->AsLiteral()->value()));
   PopOperand(StoreDescriptor::ReceiverRegister());
-  EmitLoadStoreICSlot(expr->AssignmentSlot());
-  CallStoreIC();
+  CallStoreIC(expr->AssignmentSlot(), prop->key()->AsLiteral()->value());
 
   PrepareForBailoutForId(expr->AssignmentId(), BailoutState::TOS_REGISTER);
   context()->Plug(r2);
@@ -2232,10 +2182,7 @@
               StoreDescriptor::NameRegister());
   DCHECK(StoreDescriptor::ValueRegister().is(r2));
 
-  Handle<Code> ic =
-      CodeFactory::KeyedStoreIC(isolate(), language_mode()).code();
-  EmitLoadStoreICSlot(expr->AssignmentSlot());
-  CallIC(ic);
+  CallKeyedStoreIC(expr->AssignmentSlot());
 
   PrepareForBailoutForId(expr->AssignmentId(), BailoutState::TOS_REGISTER);
   context()->Plug(r2);
@@ -2770,23 +2717,6 @@
   context()->Plug(r2);
 }
 
-void FullCodeGenerator::EmitStringCharFromCode(CallRuntime* expr) {
-  ZoneList<Expression*>* args = expr->arguments();
-  DCHECK(args->length() == 1);
-  VisitForAccumulatorValue(args->at(0));
-
-  Label done;
-  StringCharFromCodeGenerator generator(r2, r3);
-  generator.GenerateFast(masm_);
-  __ b(&done);
-
-  NopRuntimeCallHelper call_helper;
-  generator.GenerateSlow(masm_, call_helper);
-
-  __ bind(&done);
-  context()->Plug(r3);
-}
-
 void FullCodeGenerator::EmitStringCharCodeAt(CallRuntime* expr) {
   ZoneList<Expression*>* args = expr->arguments();
   DCHECK(args->length() == 2);
@@ -2969,7 +2899,7 @@
         // "delete this" is allowed.
         bool is_this = var->is_this();
         DCHECK(is_sloppy(language_mode()) || is_this);
-        if (var->IsUnallocatedOrGlobalSlot()) {
+        if (var->IsUnallocated()) {
           __ LoadGlobalObject(r4);
           __ mov(r3, Operand(var->name()));
           __ Push(r4, r3);
@@ -3248,11 +3178,8 @@
       }
       break;
     case NAMED_PROPERTY: {
-      __ mov(StoreDescriptor::NameRegister(),
-             Operand(prop->key()->AsLiteral()->value()));
       PopOperand(StoreDescriptor::ReceiverRegister());
-      EmitLoadStoreICSlot(expr->CountSlot());
-      CallStoreIC();
+      CallStoreIC(expr->CountSlot(), prop->key()->AsLiteral()->value());
       PrepareForBailoutForId(expr->AssignmentId(), BailoutState::TOS_REGISTER);
       if (expr->is_postfix()) {
         if (!context()->IsEffect()) {
@@ -3290,10 +3217,7 @@
     case KEYED_PROPERTY: {
       PopOperands(StoreDescriptor::ReceiverRegister(),
                   StoreDescriptor::NameRegister());
-      Handle<Code> ic =
-          CodeFactory::KeyedStoreIC(isolate(), language_mode()).code();
-      EmitLoadStoreICSlot(expr->CountSlot());
-      CallIC(ic);
+      CallKeyedStoreIC(expr->CountSlot());
       PrepareForBailoutForId(expr->AssignmentId(), BailoutState::TOS_REGISTER);
       if (expr->is_postfix()) {
         if (!context()->IsEffect()) {
diff --git a/src/full-codegen/x64/full-codegen-x64.cc b/src/full-codegen/x64/full-codegen-x64.cc
index ce94a99..525319f 100644
--- a/src/full-codegen/x64/full-codegen-x64.cc
+++ b/src/full-codegen/x64/full-codegen-x64.cc
@@ -4,14 +4,16 @@
 
 #if V8_TARGET_ARCH_X64
 
+#include "src/full-codegen/full-codegen.h"
+#include "src/ast/compile-time-value.h"
 #include "src/ast/scopes.h"
 #include "src/code-factory.h"
 #include "src/code-stubs.h"
 #include "src/codegen.h"
+#include "src/compilation-info.h"
+#include "src/compiler.h"
 #include "src/debug/debug.h"
-#include "src/full-codegen/full-codegen.h"
 #include "src/ic/ic.h"
-#include "src/parsing/parser.h"
 
 namespace v8 {
 namespace internal {
@@ -115,6 +117,18 @@
   info->set_prologue_offset(masm_->pc_offset());
   __ Prologue(info->GeneratePreagedPrologue());
 
+  // Increment invocation count for the function.
+  {
+    Comment cmnt(masm_, "[ Increment invocation count");
+    __ movp(rcx, FieldOperand(rdi, JSFunction::kLiteralsOffset));
+    __ movp(rcx, FieldOperand(rcx, LiteralsArray::kFeedbackVectorOffset));
+    __ SmiAddConstant(
+        FieldOperand(rcx,
+                     TypeFeedbackVector::kInvocationCountIndex * kPointerSize +
+                         TypeFeedbackVector::kHeaderSize),
+        Smi::FromInt(1));
+  }
+
   { Comment cmnt(masm_, "[ Allocate locals");
     int locals_count = info->scope()->num_stack_slots();
     // Generators allocate locals, if any, in context slots.
@@ -158,14 +172,14 @@
   bool function_in_register = true;
 
   // Possibly allocate a local context.
-  if (info->scope()->num_heap_slots() > 0) {
+  if (info->scope()->NeedsContext()) {
     Comment cmnt(masm_, "[ Allocate context");
     bool need_write_barrier = true;
     int slots = info->scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
     // Argument to NewContext is the function, which is still in rdi.
     if (info->scope()->is_script_scope()) {
       __ Push(rdi);
-      __ Push(info->scope()->GetScopeInfo(info->isolate()));
+      __ Push(info->scope()->scope_info());
       __ CallRuntime(Runtime::kNewScriptContext);
       PrepareForBailoutForId(BailoutId::ScriptContext(),
                              BailoutState::TOS_REGISTER);
@@ -249,9 +263,8 @@
   }
 
   // Possibly allocate RestParameters
-  int rest_index;
-  Variable* rest_param = info->scope()->rest_parameter(&rest_index);
-  if (rest_param) {
+  Variable* rest_param = info->scope()->rest_parameter();
+  if (rest_param != nullptr) {
     Comment cmnt(masm_, "[ Allocate rest parameter array");
     if (!function_in_register) {
       __ movp(rdi, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
@@ -730,7 +743,6 @@
   VariableProxy* proxy = declaration->proxy();
   Variable* variable = proxy->var();
   switch (variable->location()) {
-    case VariableLocation::GLOBAL:
     case VariableLocation::UNALLOCATED: {
       DCHECK(!variable->binding_needs_init());
       FeedbackVectorSlot slot = proxy->VariableFeedbackSlot();
@@ -780,7 +792,6 @@
   VariableProxy* proxy = declaration->proxy();
   Variable* variable = proxy->var();
   switch (variable->location()) {
-    case VariableLocation::GLOBAL:
     case VariableLocation::UNALLOCATED: {
       FeedbackVectorSlot slot = proxy->VariableFeedbackSlot();
       DCHECK(!slot.IsInvalid());
@@ -1092,6 +1103,7 @@
   // Generate code for going to the next element by incrementing the
   // index (smi) stored on top of the stack.
   __ bind(loop_statement.continue_label());
+  PrepareForBailoutForId(stmt->IncrementId(), BailoutState::NO_REGISTERS);
   __ SmiAddConstant(Operand(rsp, 0 * kPointerSize), Smi::FromInt(1));
 
   EmitBackEdgeBookkeeping(stmt, &loop);
@@ -1112,12 +1124,9 @@
                                           FeedbackVectorSlot slot) {
   DCHECK(NeedsHomeObject(initializer));
   __ movp(StoreDescriptor::ReceiverRegister(), Operand(rsp, 0));
-  __ Move(StoreDescriptor::NameRegister(),
-          isolate()->factory()->home_object_symbol());
   __ movp(StoreDescriptor::ValueRegister(),
           Operand(rsp, offset * kPointerSize));
-  EmitLoadStoreICSlot(slot);
-  CallStoreIC();
+  CallStoreIC(slot, isolate()->factory()->home_object_symbol());
 }
 
 
@@ -1126,12 +1135,9 @@
                                                      FeedbackVectorSlot slot) {
   DCHECK(NeedsHomeObject(initializer));
   __ movp(StoreDescriptor::ReceiverRegister(), rax);
-  __ Move(StoreDescriptor::NameRegister(),
-          isolate()->factory()->home_object_symbol());
   __ movp(StoreDescriptor::ValueRegister(),
           Operand(rsp, offset * kPointerSize));
-  EmitLoadStoreICSlot(slot);
-  CallStoreIC();
+  CallStoreIC(slot, isolate()->factory()->home_object_symbol());
 }
 
 
@@ -1169,7 +1175,7 @@
   Register temp = rbx;
 
   for (Scope* s = scope(); s != var->scope(); s = s->outer_scope()) {
-    if (s->num_heap_slots() > 0) {
+    if (s->NeedsContext()) {
       if (s->calls_sloppy_eval()) {
         // Check that extension is "the hole".
         __ JumpIfNotRoot(ContextOperand(context, Context::EXTENSION_INDEX),
@@ -1217,20 +1223,6 @@
   }
 }
 
-
-void FullCodeGenerator::EmitGlobalVariableLoad(VariableProxy* proxy,
-                                               TypeofMode typeof_mode) {
-#ifdef DEBUG
-  Variable* var = proxy->var();
-  DCHECK(var->IsUnallocatedOrGlobalSlot() ||
-         (var->IsLookupSlot() && var->mode() == DYNAMIC_GLOBAL));
-#endif
-  __ Move(LoadGlobalDescriptor::SlotRegister(),
-          SmiFromSlot(proxy->VariableFeedbackSlot()));
-  CallLoadGlobalIC(typeof_mode);
-}
-
-
 void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy,
                                          TypeofMode typeof_mode) {
   // Record position before possible IC call.
@@ -1241,7 +1233,6 @@
   // Three cases: global variables, lookup variables, and all other types of
   // variables.
   switch (var->location()) {
-    case VariableLocation::GLOBAL:
     case VariableLocation::UNALLOCATED: {
       Comment cmnt(masm_, "[ Global variable");
       EmitGlobalVariableLoad(proxy, typeof_mode);
@@ -1367,10 +1358,8 @@
           if (property->emit_store()) {
             VisitForAccumulatorValue(value);
             DCHECK(StoreDescriptor::ValueRegister().is(rax));
-            __ Move(StoreDescriptor::NameRegister(), key->value());
             __ movp(StoreDescriptor::ReceiverRegister(), Operand(rsp, 0));
-            EmitLoadStoreICSlot(property->GetSlot(0));
-            CallStoreIC();
+            CallStoreIC(property->GetSlot(0), key->value());
             PrepareForBailoutForId(key->id(), BailoutState::NO_REGISTERS);
 
             if (NeedsHomeObject(value)) {
@@ -1533,6 +1522,7 @@
     __ Move(rcx, constant_elements);
     FastCloneShallowArrayStub stub(isolate(), allocation_site_mode);
     __ CallStub(&stub);
+    RestoreContext();
   }
   PrepareForBailoutForId(expr->CreateLiteralId(), BailoutState::TOS_REGISTER);
 
@@ -1542,8 +1532,7 @@
 
   // Emit code to evaluate all the non-constant subexpressions and to store
   // them into the newly cloned array.
-  int array_index = 0;
-  for (; array_index < length; array_index++) {
+  for (int array_index = 0; array_index < length; array_index++) {
     Expression* subexpr = subexprs->at(array_index);
     DCHECK(!subexpr->IsSpread());
 
@@ -1559,31 +1548,7 @@
 
     __ Move(StoreDescriptor::NameRegister(), Smi::FromInt(array_index));
     __ movp(StoreDescriptor::ReceiverRegister(), Operand(rsp, 0));
-    EmitLoadStoreICSlot(expr->LiteralFeedbackSlot());
-    Handle<Code> ic =
-        CodeFactory::KeyedStoreIC(isolate(), language_mode()).code();
-    CallIC(ic);
-
-    PrepareForBailoutForId(expr->GetIdForElement(array_index),
-                           BailoutState::NO_REGISTERS);
-  }
-
-  // In case the array literal contains spread expressions it has two parts. The
-  // first part is  the "static" array which has a literal index is  handled
-  // above. The second part is the part after the first spread expression
-  // (inclusive) and these elements gets appended to the array. Note that the
-  // number elements an iterable produces is unknown ahead of time.
-  if (array_index < length && result_saved) {
-    PopOperand(rax);
-    result_saved = false;
-  }
-  for (; array_index < length; array_index++) {
-    Expression* subexpr = subexprs->at(array_index);
-
-    PushOperand(rax);
-    DCHECK(!subexpr->IsSpread());
-    VisitForStackValue(subexpr);
-    CallRuntimeWithOperands(Runtime::kAppendElement);
+    CallKeyedStoreIC(expr->LiteralFeedbackSlot());
 
     PrepareForBailoutForId(expr->GetIdForElement(array_index),
                            BailoutState::NO_REGISTERS);
@@ -1893,7 +1858,7 @@
 
 void FullCodeGenerator::EmitClassDefineProperties(ClassLiteral* lit) {
   for (int i = 0; i < lit->properties()->length(); i++) {
-    ObjectLiteral::Property* property = lit->properties()->at(i);
+    ClassLiteral::Property* property = lit->properties()->at(i);
     Expression* value = property->value();
 
     if (property->is_static()) {
@@ -1918,26 +1883,23 @@
     }
 
     switch (property->kind()) {
-      case ObjectLiteral::Property::CONSTANT:
-      case ObjectLiteral::Property::MATERIALIZED_LITERAL:
-      case ObjectLiteral::Property::PROTOTYPE:
-        UNREACHABLE();
-      case ObjectLiteral::Property::COMPUTED:
+      case ClassLiteral::Property::METHOD:
         PushOperand(Smi::FromInt(DONT_ENUM));
         PushOperand(Smi::FromInt(property->NeedsSetFunctionName()));
         CallRuntimeWithOperands(Runtime::kDefineDataPropertyInLiteral);
         break;
 
-      case ObjectLiteral::Property::GETTER:
+      case ClassLiteral::Property::GETTER:
         PushOperand(Smi::FromInt(DONT_ENUM));
         CallRuntimeWithOperands(Runtime::kDefineGetterPropertyUnchecked);
         break;
 
-      case ObjectLiteral::Property::SETTER:
+      case ClassLiteral::Property::SETTER:
         PushOperand(Smi::FromInt(DONT_ENUM));
         CallRuntimeWithOperands(Runtime::kDefineSetterPropertyUnchecked);
         break;
 
+      case ClassLiteral::Property::FIELD:
       default:
         UNREACHABLE();
     }
@@ -1974,10 +1936,7 @@
       VisitForAccumulatorValue(prop->obj());
       __ Move(StoreDescriptor::ReceiverRegister(), rax);
       PopOperand(StoreDescriptor::ValueRegister());  // Restore value.
-      __ Move(StoreDescriptor::NameRegister(),
-              prop->key()->AsLiteral()->value());
-      EmitLoadStoreICSlot(slot);
-      CallStoreIC();
+      CallStoreIC(slot, prop->key()->AsLiteral()->value());
       break;
     }
     case NAMED_SUPER_PROPERTY: {
@@ -2024,10 +1983,7 @@
       __ Move(StoreDescriptor::NameRegister(), rax);
       PopOperand(StoreDescriptor::ReceiverRegister());
       PopOperand(StoreDescriptor::ValueRegister());  // Restore value.
-      EmitLoadStoreICSlot(slot);
-      Handle<Code> ic =
-          CodeFactory::KeyedStoreIC(isolate(), language_mode()).code();
-      CallIC(ic);
+      CallKeyedStoreIC(slot);
       break;
     }
   }
@@ -2050,10 +2006,8 @@
                                                FeedbackVectorSlot slot) {
   if (var->IsUnallocated()) {
     // Global var, const, or let.
-    __ Move(StoreDescriptor::NameRegister(), var->name());
     __ LoadGlobalObject(StoreDescriptor::ReceiverRegister());
-    EmitLoadStoreICSlot(slot);
-    CallStoreIC();
+    CallStoreIC(slot, var->name());
 
   } else if (IsLexicalVariableMode(var->mode()) && op != Token::INIT) {
     DCHECK(!var->IsLookupSlot());
@@ -2069,10 +2023,10 @@
       __ CallRuntime(Runtime::kThrowReferenceError);
       __ bind(&assign);
     }
-    if (var->mode() == CONST) {
-      __ CallRuntime(Runtime::kThrowConstAssignError);
-    } else {
+    if (var->mode() != CONST) {
       EmitStoreToStackLocalOrContextSlot(var, location);
+    } else if (var->throw_on_const_assignment(language_mode())) {
+      __ CallRuntime(Runtime::kThrowConstAssignError);
     }
 
   } else if (var->is_this() && var->mode() == CONST && op == Token::INIT) {
@@ -2088,7 +2042,8 @@
     __ bind(&uninitialized_this);
     EmitStoreToStackLocalOrContextSlot(var, location);
 
-  } else if (!var->is_const_mode() || op == Token::INIT) {
+  } else {
+    DCHECK(var->mode() != CONST || op == Token::INIT);
     if (var->IsLookupSlot()) {
       // Assignment to var.
       __ Push(var->name());
@@ -2109,13 +2064,6 @@
       }
       EmitStoreToStackLocalOrContextSlot(var, location);
     }
-
-  } else {
-    DCHECK(var->mode() == CONST_LEGACY && op != Token::INIT);
-    if (is_strict(language_mode())) {
-      __ CallRuntime(Runtime::kThrowConstAssignError);
-    }
-    // Silently ignore store in sloppy mode.
   }
 }
 
@@ -2126,10 +2074,8 @@
   DCHECK(prop != NULL);
   DCHECK(prop->key()->IsLiteral());
 
-  __ Move(StoreDescriptor::NameRegister(), prop->key()->AsLiteral()->value());
   PopOperand(StoreDescriptor::ReceiverRegister());
-  EmitLoadStoreICSlot(expr->AssignmentSlot());
-  CallStoreIC();
+  CallStoreIC(expr->AssignmentSlot(), prop->key()->AsLiteral()->value());
 
   PrepareForBailoutForId(expr->AssignmentId(), BailoutState::TOS_REGISTER);
   context()->Plug(rax);
@@ -2170,10 +2116,7 @@
   PopOperand(StoreDescriptor::NameRegister());  // Key.
   PopOperand(StoreDescriptor::ReceiverRegister());
   DCHECK(StoreDescriptor::ValueRegister().is(rax));
-  Handle<Code> ic =
-      CodeFactory::KeyedStoreIC(isolate(), language_mode()).code();
-  EmitLoadStoreICSlot(expr->AssignmentSlot());
-  CallIC(ic);
+  CallKeyedStoreIC(expr->AssignmentSlot());
 
   PrepareForBailoutForId(expr->AssignmentId(), BailoutState::TOS_REGISTER);
   context()->Plug(rax);
@@ -2716,25 +2659,6 @@
 }
 
 
-void FullCodeGenerator::EmitStringCharFromCode(CallRuntime* expr) {
-  ZoneList<Expression*>* args = expr->arguments();
-  DCHECK(args->length() == 1);
-
-  VisitForAccumulatorValue(args->at(0));
-
-  Label done;
-  StringCharFromCodeGenerator generator(rax, rbx);
-  generator.GenerateFast(masm_);
-  __ jmp(&done);
-
-  NopRuntimeCallHelper call_helper;
-  generator.GenerateSlow(masm_, call_helper);
-
-  __ bind(&done);
-  context()->Plug(rbx);
-}
-
-
 void FullCodeGenerator::EmitStringCharCodeAt(CallRuntime* expr) {
   ZoneList<Expression*>* args = expr->arguments();
   DCHECK(args->length() == 2);
@@ -2929,7 +2853,7 @@
         // "delete this" is allowed.
         bool is_this = var->is_this();
         DCHECK(is_sloppy(language_mode()) || is_this);
-        if (var->IsUnallocatedOrGlobalSlot()) {
+        if (var->IsUnallocated()) {
           __ movp(rax, NativeContextOperand());
           __ Push(ContextOperand(rax, Context::EXTENSION_INDEX));
           __ Push(var->name());
@@ -3221,11 +3145,8 @@
       }
       break;
     case NAMED_PROPERTY: {
-      __ Move(StoreDescriptor::NameRegister(),
-              prop->key()->AsLiteral()->value());
       PopOperand(StoreDescriptor::ReceiverRegister());
-      EmitLoadStoreICSlot(expr->CountSlot());
-      CallStoreIC();
+      CallStoreIC(expr->CountSlot(), prop->key()->AsLiteral()->value());
       PrepareForBailoutForId(expr->AssignmentId(), BailoutState::TOS_REGISTER);
       if (expr->is_postfix()) {
         if (!context()->IsEffect()) {
@@ -3263,10 +3184,7 @@
     case KEYED_PROPERTY: {
       PopOperand(StoreDescriptor::NameRegister());
       PopOperand(StoreDescriptor::ReceiverRegister());
-      Handle<Code> ic =
-          CodeFactory::KeyedStoreIC(isolate(), language_mode()).code();
-      EmitLoadStoreICSlot(expr->CountSlot());
-      CallIC(ic);
+      CallKeyedStoreIC(expr->CountSlot());
       PrepareForBailoutForId(expr->AssignmentId(), BailoutState::TOS_REGISTER);
       if (expr->is_postfix()) {
         if (!context()->IsEffect()) {
diff --git a/src/full-codegen/x87/full-codegen-x87.cc b/src/full-codegen/x87/full-codegen-x87.cc
index 28c8960..47be8b0 100644
--- a/src/full-codegen/x87/full-codegen-x87.cc
+++ b/src/full-codegen/x87/full-codegen-x87.cc
@@ -4,14 +4,16 @@
 
 #if V8_TARGET_ARCH_X87
 
+#include "src/full-codegen/full-codegen.h"
+#include "src/ast/compile-time-value.h"
 #include "src/ast/scopes.h"
 #include "src/code-factory.h"
 #include "src/code-stubs.h"
 #include "src/codegen.h"
+#include "src/compilation-info.h"
+#include "src/compiler.h"
 #include "src/debug/debug.h"
-#include "src/full-codegen/full-codegen.h"
 #include "src/ic/ic.h"
-#include "src/parsing/parser.h"
 #include "src/x87/frames-x87.h"
 
 namespace v8 {
@@ -115,6 +117,17 @@
   info->set_prologue_offset(masm_->pc_offset());
   __ Prologue(info->GeneratePreagedPrologue());
 
+  // Increment invocation count for the function.
+  {
+    Comment cmnt(masm_, "[ Increment invocation count");
+    __ mov(ecx, FieldOperand(edi, JSFunction::kLiteralsOffset));
+    __ mov(ecx, FieldOperand(ecx, LiteralsArray::kFeedbackVectorOffset));
+    __ add(FieldOperand(
+               ecx, TypeFeedbackVector::kInvocationCountIndex * kPointerSize +
+                        TypeFeedbackVector::kHeaderSize),
+           Immediate(Smi::FromInt(1)));
+  }
+
   { Comment cmnt(masm_, "[ Allocate locals");
     int locals_count = info->scope()->num_stack_slots();
     // Generators allocate locals, if any, in context slots.
@@ -159,14 +172,14 @@
   bool function_in_register = true;
 
   // Possibly allocate a local context.
-  if (info->scope()->num_heap_slots() > 0) {
+  if (info->scope()->NeedsContext()) {
     Comment cmnt(masm_, "[ Allocate context");
     bool need_write_barrier = true;
     int slots = info->scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
     // Argument to NewContext is the function, which is still in edi.
     if (info->scope()->is_script_scope()) {
       __ push(edi);
-      __ Push(info->scope()->GetScopeInfo(info->isolate()));
+      __ Push(info->scope()->scope_info());
       __ CallRuntime(Runtime::kNewScriptContext);
       PrepareForBailoutForId(BailoutId::ScriptContext(),
                              BailoutState::TOS_REGISTER);
@@ -251,9 +264,8 @@
   }
 
   // Possibly allocate RestParameters
-  int rest_index;
-  Variable* rest_param = info->scope()->rest_parameter(&rest_index);
-  if (rest_param) {
+  Variable* rest_param = info->scope()->rest_parameter();
+  if (rest_param != nullptr) {
     Comment cmnt(masm_, "[ Allocate rest parameter array");
     if (!function_in_register) {
       __ mov(edi, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
@@ -714,7 +726,6 @@
   VariableProxy* proxy = declaration->proxy();
   Variable* variable = proxy->var();
   switch (variable->location()) {
-    case VariableLocation::GLOBAL:
     case VariableLocation::UNALLOCATED: {
       DCHECK(!variable->binding_needs_init());
       FeedbackVectorSlot slot = proxy->VariableFeedbackSlot();
@@ -763,7 +774,6 @@
   VariableProxy* proxy = declaration->proxy();
   Variable* variable = proxy->var();
   switch (variable->location()) {
-    case VariableLocation::GLOBAL:
     case VariableLocation::UNALLOCATED: {
       FeedbackVectorSlot slot = proxy->VariableFeedbackSlot();
       DCHECK(!slot.IsInvalid());
@@ -1058,6 +1068,7 @@
   // Generate code for going to the next element by incrementing the
   // index (smi) stored on top of the stack.
   __ bind(loop_statement.continue_label());
+  PrepareForBailoutForId(stmt->IncrementId(), BailoutState::NO_REGISTERS);
   __ add(Operand(esp, 0 * kPointerSize), Immediate(Smi::FromInt(1)));
 
   EmitBackEdgeBookkeeping(stmt, &loop);
@@ -1078,11 +1089,8 @@
                                           FeedbackVectorSlot slot) {
   DCHECK(NeedsHomeObject(initializer));
   __ mov(StoreDescriptor::ReceiverRegister(), Operand(esp, 0));
-  __ mov(StoreDescriptor::NameRegister(),
-         Immediate(isolate()->factory()->home_object_symbol()));
   __ mov(StoreDescriptor::ValueRegister(), Operand(esp, offset * kPointerSize));
-  EmitLoadStoreICSlot(slot);
-  CallStoreIC();
+  CallStoreIC(slot, isolate()->factory()->home_object_symbol());
 }
 
 
@@ -1091,11 +1099,8 @@
                                                      FeedbackVectorSlot slot) {
   DCHECK(NeedsHomeObject(initializer));
   __ mov(StoreDescriptor::ReceiverRegister(), eax);
-  __ mov(StoreDescriptor::NameRegister(),
-         Immediate(isolate()->factory()->home_object_symbol()));
   __ mov(StoreDescriptor::ValueRegister(), Operand(esp, offset * kPointerSize));
-  EmitLoadStoreICSlot(slot);
-  CallStoreIC();
+  CallStoreIC(slot, isolate()->factory()->home_object_symbol());
 }
 
 
@@ -1133,7 +1138,7 @@
   Register temp = ebx;
 
   for (Scope* s = scope(); s != var->scope(); s = s->outer_scope()) {
-    if (s->num_heap_slots() > 0) {
+    if (s->NeedsContext()) {
       if (s->calls_sloppy_eval()) {
         // Check that extension is "the hole".
         __ JumpIfNotRoot(ContextOperand(context, Context::EXTENSION_INDEX),
@@ -1181,20 +1186,6 @@
   }
 }
 
-
-void FullCodeGenerator::EmitGlobalVariableLoad(VariableProxy* proxy,
-                                               TypeofMode typeof_mode) {
-#ifdef DEBUG
-  Variable* var = proxy->var();
-  DCHECK(var->IsUnallocatedOrGlobalSlot() ||
-         (var->IsLookupSlot() && var->mode() == DYNAMIC_GLOBAL));
-#endif
-  __ mov(LoadGlobalDescriptor::SlotRegister(),
-         Immediate(SmiFromSlot(proxy->VariableFeedbackSlot())));
-  CallLoadGlobalIC(typeof_mode);
-}
-
-
 void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy,
                                          TypeofMode typeof_mode) {
   SetExpressionPosition(proxy);
@@ -1204,7 +1195,6 @@
   // Three cases: global variables, lookup variables, and all other types of
   // variables.
   switch (var->location()) {
-    case VariableLocation::GLOBAL:
     case VariableLocation::UNALLOCATED: {
       Comment cmnt(masm_, "[ Global variable");
       EmitGlobalVariableLoad(proxy, typeof_mode);
@@ -1331,10 +1321,8 @@
           if (property->emit_store()) {
             VisitForAccumulatorValue(value);
             DCHECK(StoreDescriptor::ValueRegister().is(eax));
-            __ mov(StoreDescriptor::NameRegister(), Immediate(key->value()));
             __ mov(StoreDescriptor::ReceiverRegister(), Operand(esp, 0));
-            EmitLoadStoreICSlot(property->GetSlot(0));
-            CallStoreIC();
+            CallStoreIC(property->GetSlot(0), key->value());
             PrepareForBailoutForId(key->id(), BailoutState::NO_REGISTERS);
             if (NeedsHomeObject(value)) {
               EmitSetHomeObjectAccumulator(value, 0, property->GetSlot(1));
@@ -1498,6 +1486,7 @@
     __ mov(ecx, Immediate(constant_elements));
     FastCloneShallowArrayStub stub(isolate(), allocation_site_mode);
     __ CallStub(&stub);
+    RestoreContext();
   }
   PrepareForBailoutForId(expr->CreateLiteralId(), BailoutState::TOS_REGISTER);
 
@@ -1507,8 +1496,7 @@
 
   // Emit code to evaluate all the non-constant subexpressions and to store
   // them into the newly cloned array.
-  int array_index = 0;
-  for (; array_index < length; array_index++) {
+  for (int array_index = 0; array_index < length; array_index++) {
     Expression* subexpr = subexprs->at(array_index);
     DCHECK(!subexpr->IsSpread());
 
@@ -1525,31 +1513,7 @@
     __ mov(StoreDescriptor::NameRegister(),
            Immediate(Smi::FromInt(array_index)));
     __ mov(StoreDescriptor::ReceiverRegister(), Operand(esp, 0));
-    EmitLoadStoreICSlot(expr->LiteralFeedbackSlot());
-    Handle<Code> ic =
-        CodeFactory::KeyedStoreIC(isolate(), language_mode()).code();
-    CallIC(ic);
-    PrepareForBailoutForId(expr->GetIdForElement(array_index),
-                           BailoutState::NO_REGISTERS);
-  }
-
-  // In case the array literal contains spread expressions it has two parts. The
-  // first part is  the "static" array which has a literal index is  handled
-  // above. The second part is the part after the first spread expression
-  // (inclusive) and these elements gets appended to the array. Note that the
-  // number elements an iterable produces is unknown ahead of time.
-  if (array_index < length && result_saved) {
-    PopOperand(eax);
-    result_saved = false;
-  }
-  for (; array_index < length; array_index++) {
-    Expression* subexpr = subexprs->at(array_index);
-
-    PushOperand(eax);
-    DCHECK(!subexpr->IsSpread());
-    VisitForStackValue(subexpr);
-    CallRuntimeWithOperands(Runtime::kAppendElement);
-
+    CallKeyedStoreIC(expr->LiteralFeedbackSlot());
     PrepareForBailoutForId(expr->GetIdForElement(array_index),
                            BailoutState::NO_REGISTERS);
   }
@@ -1894,7 +1858,7 @@
 
 void FullCodeGenerator::EmitClassDefineProperties(ClassLiteral* lit) {
   for (int i = 0; i < lit->properties()->length(); i++) {
-    ObjectLiteral::Property* property = lit->properties()->at(i);
+    ClassLiteral::Property* property = lit->properties()->at(i);
     Expression* value = property->value();
 
     if (property->is_static()) {
@@ -1919,25 +1883,25 @@
     }
 
     switch (property->kind()) {
-      case ObjectLiteral::Property::CONSTANT:
-      case ObjectLiteral::Property::MATERIALIZED_LITERAL:
-      case ObjectLiteral::Property::PROTOTYPE:
-        UNREACHABLE();
-      case ObjectLiteral::Property::COMPUTED:
+      case ClassLiteral::Property::METHOD:
         PushOperand(Smi::FromInt(DONT_ENUM));
         PushOperand(Smi::FromInt(property->NeedsSetFunctionName()));
         CallRuntimeWithOperands(Runtime::kDefineDataPropertyInLiteral);
         break;
 
-      case ObjectLiteral::Property::GETTER:
+      case ClassLiteral::Property::GETTER:
         PushOperand(Smi::FromInt(DONT_ENUM));
         CallRuntimeWithOperands(Runtime::kDefineGetterPropertyUnchecked);
         break;
 
-      case ObjectLiteral::Property::SETTER:
+      case ClassLiteral::Property::SETTER:
         PushOperand(Smi::FromInt(DONT_ENUM));
         CallRuntimeWithOperands(Runtime::kDefineSetterPropertyUnchecked);
         break;
+
+      case ClassLiteral::Property::FIELD:
+        UNREACHABLE();
+        break;
     }
   }
 }
@@ -1972,10 +1936,7 @@
       VisitForAccumulatorValue(prop->obj());
       __ Move(StoreDescriptor::ReceiverRegister(), eax);
       PopOperand(StoreDescriptor::ValueRegister());  // Restore value.
-      __ mov(StoreDescriptor::NameRegister(),
-             prop->key()->AsLiteral()->value());
-      EmitLoadStoreICSlot(slot);
-      CallStoreIC();
+      CallStoreIC(slot, prop->key()->AsLiteral()->value());
       break;
     }
     case NAMED_SUPER_PROPERTY: {
@@ -2022,10 +1983,7 @@
       __ Move(StoreDescriptor::NameRegister(), eax);
       PopOperand(StoreDescriptor::ReceiverRegister());  // Receiver.
       PopOperand(StoreDescriptor::ValueRegister());     // Restore value.
-      EmitLoadStoreICSlot(slot);
-      Handle<Code> ic =
-          CodeFactory::KeyedStoreIC(isolate(), language_mode()).code();
-      CallIC(ic);
+      CallKeyedStoreIC(slot);
       break;
     }
   }
@@ -2048,13 +2006,11 @@
                                                FeedbackVectorSlot slot) {
   if (var->IsUnallocated()) {
     // Global var, const, or let.
-    __ mov(StoreDescriptor::NameRegister(), var->name());
     __ mov(StoreDescriptor::ReceiverRegister(), NativeContextOperand());
     __ mov(StoreDescriptor::ReceiverRegister(),
            ContextOperand(StoreDescriptor::ReceiverRegister(),
                           Context::EXTENSION_INDEX));
-    EmitLoadStoreICSlot(slot);
-    CallStoreIC();
+    CallStoreIC(slot, var->name());
 
   } else if (IsLexicalVariableMode(var->mode()) && op != Token::INIT) {
     DCHECK(!var->IsLookupSlot());
@@ -2070,10 +2026,10 @@
       __ CallRuntime(Runtime::kThrowReferenceError);
       __ bind(&assign);
     }
-    if (var->mode() == CONST) {
-      __ CallRuntime(Runtime::kThrowConstAssignError);
-    } else {
+    if (var->mode() != CONST) {
       EmitStoreToStackLocalOrContextSlot(var, location);
+    } else if (var->throw_on_const_assignment(language_mode())) {
+      __ CallRuntime(Runtime::kThrowConstAssignError);
     }
   } else if (var->is_this() && var->mode() == CONST && op == Token::INIT) {
     // Initializing assignment to const {this} needs a write barrier.
@@ -2088,7 +2044,8 @@
     __ bind(&uninitialized_this);
     EmitStoreToStackLocalOrContextSlot(var, location);
 
-  } else if (!var->is_const_mode() || op == Token::INIT) {
+  } else {
+    DCHECK(var->mode() != CONST || op == Token::INIT);
     if (var->IsLookupSlot()) {
       // Assignment to var.
       __ Push(Immediate(var->name()));
@@ -2109,13 +2066,6 @@
       }
       EmitStoreToStackLocalOrContextSlot(var, location);
     }
-
-  } else {
-    DCHECK(var->mode() == CONST_LEGACY && op != Token::INIT);
-    if (is_strict(language_mode())) {
-      __ CallRuntime(Runtime::kThrowConstAssignError);
-    }
-    // Silently ignore store in sloppy mode.
   }
 }
 
@@ -2128,10 +2078,8 @@
   DCHECK(prop != NULL);
   DCHECK(prop->key()->IsLiteral());
 
-  __ mov(StoreDescriptor::NameRegister(), prop->key()->AsLiteral()->value());
   PopOperand(StoreDescriptor::ReceiverRegister());
-  EmitLoadStoreICSlot(expr->AssignmentSlot());
-  CallStoreIC();
+  CallStoreIC(expr->AssignmentSlot(), prop->key()->AsLiteral()->value());
   PrepareForBailoutForId(expr->AssignmentId(), BailoutState::TOS_REGISTER);
   context()->Plug(eax);
 }
@@ -2174,10 +2122,7 @@
   PopOperand(StoreDescriptor::NameRegister());  // Key.
   PopOperand(StoreDescriptor::ReceiverRegister());
   DCHECK(StoreDescriptor::ValueRegister().is(eax));
-  Handle<Code> ic =
-      CodeFactory::KeyedStoreIC(isolate(), language_mode()).code();
-  EmitLoadStoreICSlot(expr->AssignmentSlot());
-  CallIC(ic);
+  CallKeyedStoreIC(expr->AssignmentSlot());
   PrepareForBailoutForId(expr->AssignmentId(), BailoutState::TOS_REGISTER);
   context()->Plug(eax);
 }
@@ -2715,25 +2660,6 @@
 }
 
 
-void FullCodeGenerator::EmitStringCharFromCode(CallRuntime* expr) {
-  ZoneList<Expression*>* args = expr->arguments();
-  DCHECK(args->length() == 1);
-
-  VisitForAccumulatorValue(args->at(0));
-
-  Label done;
-  StringCharFromCodeGenerator generator(eax, ebx);
-  generator.GenerateFast(masm_);
-  __ jmp(&done);
-
-  NopRuntimeCallHelper call_helper;
-  generator.GenerateSlow(masm_, call_helper);
-
-  __ bind(&done);
-  context()->Plug(ebx);
-}
-
-
 void FullCodeGenerator::EmitStringCharCodeAt(CallRuntime* expr) {
   ZoneList<Expression*>* args = expr->arguments();
   DCHECK(args->length() == 2);
@@ -2928,7 +2854,7 @@
         // "delete this" is allowed.
         bool is_this = var->is_this();
         DCHECK(is_sloppy(language_mode()) || is_this);
-        if (var->IsUnallocatedOrGlobalSlot()) {
+        if (var->IsUnallocated()) {
           __ mov(eax, NativeContextOperand());
           __ push(ContextOperand(eax, Context::EXTENSION_INDEX));
           __ push(Immediate(var->name()));
@@ -3222,11 +3148,8 @@
       }
       break;
     case NAMED_PROPERTY: {
-      __ mov(StoreDescriptor::NameRegister(),
-             prop->key()->AsLiteral()->value());
       PopOperand(StoreDescriptor::ReceiverRegister());
-      EmitLoadStoreICSlot(expr->CountSlot());
-      CallStoreIC();
+      CallStoreIC(expr->CountSlot(), prop->key()->AsLiteral()->value());
       PrepareForBailoutForId(expr->AssignmentId(), BailoutState::TOS_REGISTER);
       if (expr->is_postfix()) {
         if (!context()->IsEffect()) {
@@ -3264,10 +3187,7 @@
     case KEYED_PROPERTY: {
       PopOperand(StoreDescriptor::NameRegister());
       PopOperand(StoreDescriptor::ReceiverRegister());
-      Handle<Code> ic =
-          CodeFactory::KeyedStoreIC(isolate(), language_mode()).code();
-      EmitLoadStoreICSlot(expr->CountSlot());
-      CallIC(ic);
+      CallKeyedStoreIC(expr->CountSlot());
       PrepareForBailoutForId(expr->AssignmentId(), BailoutState::TOS_REGISTER);
       if (expr->is_postfix()) {
         // Result is on the stack
diff --git a/src/gdb-jit.cc b/src/gdb-jit.cc
index a3af184..4e73981 100644
--- a/src/gdb-jit.cc
+++ b/src/gdb-jit.cc
@@ -9,7 +9,6 @@
 #include "src/base/bits.h"
 #include "src/base/platform/platform.h"
 #include "src/bootstrapper.h"
-#include "src/compiler.h"
 #include "src/frames-inl.h"
 #include "src/frames.h"
 #include "src/global-handles.h"
@@ -2017,7 +2016,7 @@
 static base::HashMap* GetLineMap() {
   static base::HashMap* line_map = NULL;
   if (line_map == NULL) {
-    line_map = new base::HashMap(&base::HashMap::PointersMatch);
+    line_map = new base::HashMap();
   }
   return line_map;
 }
diff --git a/src/globals.h b/src/globals.h
index 0d02f77..03c5b1d 100644
--- a/src/globals.h
+++ b/src/globals.h
@@ -14,6 +14,32 @@
 #include "src/base/logging.h"
 #include "src/base/macros.h"
 
+#ifdef V8_OS_WIN
+
+// Setup for Windows shared library export.
+#ifdef BUILDING_V8_SHARED
+#define V8_EXPORT_PRIVATE __declspec(dllexport)
+#elif USING_V8_SHARED
+#define V8_EXPORT_PRIVATE __declspec(dllimport)
+#else
+#define V8_EXPORT_PRIVATE
+#endif  // BUILDING_V8_SHARED
+
+#else  // V8_OS_WIN
+
+// Setup for Linux shared library export.
+#if V8_HAS_ATTRIBUTE_VISIBILITY
+#ifdef BUILDING_V8_SHARED
+#define V8_EXPORT_PRIVATE __attribute__((visibility("default")))
+#else
+#define V8_EXPORT_PRIVATE
+#endif
+#else
+#define V8_EXPORT_PRIVATE
+#endif
+
+#endif  // V8_OS_WIN
+
 // Unfortunately, the INFINITY macro cannot be used with the '-pedantic'
 // warning flag and certain versions of GCC due to a bug:
 // http://gcc.gnu.org/bugzilla/show_bug.cgi?id=11931
@@ -161,10 +187,6 @@
 #if V8_OS_WIN
 const size_t kMinimumCodeRangeSize = 4 * MB;
 const size_t kReservedCodeRangePages = 1;
-// On PPC Linux PageSize is 4MB
-#elif V8_HOST_ARCH_PPC && V8_TARGET_ARCH_PPC && V8_OS_LINUX
-const size_t kMinimumCodeRangeSize = 12 * MB;
-const size_t kReservedCodeRangePages = 0;
 #else
 const size_t kMinimumCodeRangeSize = 3 * MB;
 const size_t kReservedCodeRangePages = 0;
@@ -193,9 +215,17 @@
 const size_t kReservedCodeRangePages = 0;
 #endif
 
-// The external allocation limit should be below 256 MB on all architectures
-// to avoid that resource-constrained embedders run low on memory.
-const int kExternalAllocationLimit = 192 * 1024 * 1024;
+// Trigger an incremental GCs once the external memory reaches this limit.
+const int kExternalAllocationSoftLimit = 64 * MB;
+
+// Maximum object size that gets allocated into regular pages. Objects larger
+// than that size are allocated in large object space and are never moved in
+// memory. This also applies to new space allocation, since objects are never
+// migrated from new space to large object space. Takes double alignment into
+// account.
+//
+// Current value: Page::kAllocatableMemory (on 32-bit arch) - 512 (slack).
+const int kMaxRegularHeapObjectSize = 507136;
 
 STATIC_ASSERT(kPointerSize == (1 << kPointerSizeLog2));
 
@@ -722,6 +752,7 @@
 enum CpuFeature {
   // x86
   SSE4_1,
+  SSSE3,
   SSE3,
   SAHF,
   AVX,
@@ -732,13 +763,10 @@
   POPCNT,
   ATOM,
   // ARM
-  VFP3,
-  ARMv7,
-  ARMv8,
-  SUDIV,
-  MOVW_MOVT_IMMEDIATE_LOADS,
-  VFP32DREGS,
-  NEON,
+  // - Standard configurations. The baseline is ARMv6+VFPv2.
+  ARMv7,        // ARMv7-A + VFPv3-D32 + NEON
+  ARMv7_SUDIV,  // ARMv7-A + VFPv4-D32 + NEON + SUDIV
+  ARMv8,        // ARMv8-A (+ all of the above)
   // MIPS, MIPS64
   FPU,
   FP64FPU,
@@ -755,10 +783,14 @@
   DISTINCT_OPS,
   GENERAL_INSTR_EXT,
   FLOATING_POINT_EXT,
-  // PPC/S390
-  UNALIGNED_ACCESSES,
 
-  NUMBER_OF_CPU_FEATURES
+  NUMBER_OF_CPU_FEATURES,
+
+  // ARM feature aliases (based on the standard configurations above).
+  VFPv3 = ARMv7,
+  NEON = ARMv7,
+  VFP32DREGS = ARMv7,
+  SUDIV = ARMv7_SUDIV
 };
 
 // Defines hints about receiver values based on structural knowledge.
@@ -840,8 +872,7 @@
   DO_SMI_CHECK
 };
 
-
-enum ScopeType {
+enum ScopeType : uint8_t {
   EVAL_SCOPE,      // The top-level scope for an eval source.
   FUNCTION_SCOPE,  // The top-level scope for a function.
   MODULE_SCOPE,    // The scope introduced by a module literal
@@ -878,12 +909,10 @@
 
 
 // The order of this enum has to be kept in sync with the predicates below.
-enum VariableMode {
+enum VariableMode : uint8_t {
   // User declared variables:
   VAR,  // declared via 'var', and 'function' declarations
 
-  CONST_LEGACY,  // declared via legacy 'const' declarations
-
   LET,  // declared via 'let' declarations (first lexical)
 
   CONST,  // declared via 'const' declarations (last lexical)
@@ -899,10 +928,44 @@
                    // variable is global unless it has been shadowed
                    // by an eval-introduced variable
 
-  DYNAMIC_LOCAL  // requires dynamic lookup, but we know that the
-                 // variable is local and where it is unless it
-                 // has been shadowed by an eval-introduced
-                 // variable
+  DYNAMIC_LOCAL,  // requires dynamic lookup, but we know that the
+                  // variable is local and where it is unless it
+                  // has been shadowed by an eval-introduced
+                  // variable
+
+  kLastVariableMode = DYNAMIC_LOCAL
+};
+
+// Printing support
+#ifdef DEBUG
+inline const char* VariableMode2String(VariableMode mode) {
+  switch (mode) {
+    case VAR:
+      return "VAR";
+    case LET:
+      return "LET";
+    case CONST:
+      return "CONST";
+    case DYNAMIC:
+      return "DYNAMIC";
+    case DYNAMIC_GLOBAL:
+      return "DYNAMIC_GLOBAL";
+    case DYNAMIC_LOCAL:
+      return "DYNAMIC_LOCAL";
+    case TEMPORARY:
+      return "TEMPORARY";
+  }
+  UNREACHABLE();
+  return NULL;
+}
+#endif
+
+enum VariableKind : uint8_t {
+  NORMAL_VARIABLE,
+  FUNCTION_VARIABLE,
+  THIS_VARIABLE,
+  SLOPPY_FUNCTION_NAME_VARIABLE,
+  kLastKind = SLOPPY_FUNCTION_NAME_VARIABLE
 };
 
 inline bool IsDynamicVariableMode(VariableMode mode) {
@@ -911,7 +974,8 @@
 
 
 inline bool IsDeclaredVariableMode(VariableMode mode) {
-  return mode >= VAR && mode <= CONST;
+  STATIC_ASSERT(VAR == 0);  // Implies that mode >= VAR.
+  return mode <= CONST;
 }
 
 
@@ -919,12 +983,7 @@
   return mode >= LET && mode <= CONST;
 }
 
-
-inline bool IsImmutableVariableMode(VariableMode mode) {
-  return mode == CONST || mode == CONST_LEGACY;
-}
-
-enum class VariableLocation {
+enum VariableLocation : uint8_t {
   // Before and during variable allocation, a variable whose location is
   // not yet determined.  After allocation, a variable looked up as a
   // property on the global object (and possibly absent).  name() is the
@@ -945,19 +1004,15 @@
   // corresponding scope.
   CONTEXT,
 
-  // An indexed slot in a script context that contains a respective global
-  // property cell.  name() is the variable name, index() is the variable
-  // index in the context object on the heap, starting at 0.  scope() is the
-  // corresponding script scope.
-  GLOBAL,
-
   // A named slot in a heap context.  name() is the variable name in the
   // context object on the heap, with lookup starting at the current
   // context.  index() is invalid.
   LOOKUP,
 
   // A named slot in a module's export table.
-  MODULE
+  MODULE,
+
+  kLastVariableLocation = MODULE
 };
 
 // ES6 Draft Rev3 10.2 specifies declarative environment records with mutable
@@ -991,14 +1046,9 @@
 // The following enum specifies a flag that indicates if the binding needs a
 // distinct initialization step (kNeedsInitialization) or if the binding is
 // immediately initialized upon creation (kCreatedInitialized).
-enum InitializationFlag {
-  kNeedsInitialization,
-  kCreatedInitialized
-};
+enum InitializationFlag : uint8_t { kNeedsInitialization, kCreatedInitialized };
 
-
-enum MaybeAssignedFlag { kNotAssigned, kMaybeAssigned };
-
+enum MaybeAssignedFlag : uint8_t { kNotAssigned, kMaybeAssigned };
 
 // Serialized in PreparseData, so numeric values should not be changed.
 enum ParseErrorType { kSyntaxError = 0, kReferenceError = 1 };
@@ -1024,6 +1074,7 @@
   kGetterFunction = 1 << 6,
   kSetterFunction = 1 << 7,
   kAsyncFunction = 1 << 8,
+  kModule = 1 << 9,
   kAccessorFunction = kGetterFunction | kSetterFunction,
   kDefaultBaseConstructor = kDefaultConstructor | kBaseConstructor,
   kDefaultSubclassConstructor = kDefaultConstructor | kSubclassConstructor,
@@ -1037,6 +1088,7 @@
   return kind == FunctionKind::kNormalFunction ||
          kind == FunctionKind::kArrowFunction ||
          kind == FunctionKind::kGeneratorFunction ||
+         kind == FunctionKind::kModule ||
          kind == FunctionKind::kConciseMethod ||
          kind == FunctionKind::kConciseGeneratorMethod ||
          kind == FunctionKind::kGetterFunction ||
@@ -1063,13 +1115,18 @@
   return kind & FunctionKind::kGeneratorFunction;
 }
 
+inline bool IsModule(FunctionKind kind) {
+  DCHECK(IsValidFunctionKind(kind));
+  return kind & FunctionKind::kModule;
+}
+
 inline bool IsAsyncFunction(FunctionKind kind) {
   DCHECK(IsValidFunctionKind(kind));
   return kind & FunctionKind::kAsyncFunction;
 }
 
 inline bool IsResumableFunction(FunctionKind kind) {
-  return IsGeneratorFunction(kind) || IsAsyncFunction(kind);
+  return IsGeneratorFunction(kind) || IsAsyncFunction(kind) || IsModule(kind);
 }
 
 inline bool IsConciseMethod(FunctionKind kind) {
@@ -1152,11 +1209,59 @@
 // at different points by performing an 'OR' operation. Type feedback moves
 // to a more generic type when we combine feedback.
 // kSignedSmall -> kNumber  -> kAny
+//                 kString  -> kAny
 class BinaryOperationFeedback {
  public:
+  enum {
+    kNone = 0x0,
+    kSignedSmall = 0x1,
+    kNumber = 0x3,
+    kString = 0x4,
+    kAny = 0xF
+  };
+};
+
+// TODO(epertoso): consider unifying this with BinaryOperationFeedback.
+class CompareOperationFeedback {
+ public:
   enum { kNone = 0x00, kSignedSmall = 0x01, kNumber = 0x3, kAny = 0x7 };
 };
 
+// Describes how exactly a frame has been dropped from stack.
+enum LiveEditFrameDropMode {
+  // No frame has been dropped.
+  LIVE_EDIT_FRAMES_UNTOUCHED,
+  // The top JS frame had been calling debug break slot stub. Patch the
+  // address this stub jumps to in the end.
+  LIVE_EDIT_FRAME_DROPPED_IN_DEBUG_SLOT_CALL,
+  // The top JS frame had been calling some C++ function. The return address
+  // gets patched automatically.
+  LIVE_EDIT_FRAME_DROPPED_IN_DIRECT_CALL,
+  LIVE_EDIT_FRAME_DROPPED_IN_RETURN_CALL,
+  LIVE_EDIT_CURRENTLY_SET_MODE
+};
+
+enum class UnicodeEncoding : uint8_t {
+  // Different unicode encodings in a |word32|:
+  UTF16,  // hi 16bits -> trailing surrogate or 0, low 16bits -> lead surrogate
+  UTF32,  // full UTF32 code unit / Unicode codepoint
+};
+
+inline size_t hash_value(UnicodeEncoding encoding) {
+  return static_cast<uint8_t>(encoding);
+}
+
+inline std::ostream& operator<<(std::ostream& os, UnicodeEncoding encoding) {
+  switch (encoding) {
+    case UnicodeEncoding::UTF16:
+      return os << "UTF16";
+    case UnicodeEncoding::UTF32:
+      return os << "UTF32";
+  }
+  UNREACHABLE();
+  return os;
+}
+
 }  // namespace internal
 }  // namespace v8
 
diff --git a/src/handles.h b/src/handles.h
index a7cd0e2..3587d85 100644
--- a/src/handles.h
+++ b/src/handles.h
@@ -10,7 +10,7 @@
 #include "src/base/macros.h"
 #include "src/checks.h"
 #include "src/globals.h"
-#include "src/zone.h"
+#include "src/zone/zone.h"
 
 namespace v8 {
 namespace internal {
@@ -63,10 +63,12 @@
 
   enum DereferenceCheckMode { INCLUDE_DEFERRED_CHECK, NO_DEFERRED_CHECK };
 #ifdef DEBUG
-  bool IsDereferenceAllowed(DereferenceCheckMode mode) const;
+  bool V8_EXPORT_PRIVATE IsDereferenceAllowed(DereferenceCheckMode mode) const;
 #else
   V8_INLINE
-  bool IsDereferenceAllowed(DereferenceCheckMode mode) const { return true; }
+  bool V8_EXPORT_PRIVATE IsDereferenceAllowed(DereferenceCheckMode mode) const {
+    return true;
+  }
 #endif  // DEBUG
 
   Object** location_;
@@ -206,6 +208,10 @@
     USE(a);
   }
 
+  template <typename S>
+  V8_INLINE MaybeHandle(S* object, Isolate* isolate)
+      : MaybeHandle(handle(object, isolate)) {}
+
   V8_INLINE void Assert() const { DCHECK_NOT_NULL(location_); }
   V8_INLINE void Check() const { CHECK_NOT_NULL(location_); }
 
@@ -262,7 +268,7 @@
   inline ~HandleScope();
 
   // Counts the number of allocated handles.
-  static int NumberOfHandles(Isolate* isolate);
+  V8_EXPORT_PRIVATE static int NumberOfHandles(Isolate* isolate);
 
   // Create a new handle or lookup a canonical handle.
   V8_INLINE static Object** GetHandle(Isolate* isolate, Object* value);
@@ -271,7 +277,7 @@
   V8_INLINE static Object** CreateHandle(Isolate* isolate, Object* value);
 
   // Deallocates any extensions used by the current scope.
-  static void DeleteExtensions(Isolate* isolate);
+  V8_EXPORT_PRIVATE static void DeleteExtensions(Isolate* isolate);
 
   static Address current_next_address(Isolate* isolate);
   static Address current_limit_address(Isolate* isolate);
@@ -293,8 +299,6 @@
 
  private:
   // Prevent heap allocation or illegal handle scopes.
-  HandleScope(const HandleScope&);
-  void operator=(const HandleScope&);
   void* operator new(size_t size);
   void operator delete(void* size_t);
 
@@ -308,11 +312,11 @@
                                 Object** prev_limit);
 
   // Extend the handle scope making room for more handles.
-  static Object** Extend(Isolate* isolate);
+  V8_EXPORT_PRIVATE static Object** Extend(Isolate* isolate);
 
 #ifdef ENABLE_HANDLE_ZAPPING
   // Zaps the handles in the half-open interval [start, end).
-  static void ZapRange(Object** start, Object** end);
+  V8_EXPORT_PRIVATE static void ZapRange(Object** start, Object** end);
 #endif
 
   friend class v8::HandleScope;
@@ -320,6 +324,8 @@
   friend class DeferredHandleScope;
   friend class HandleScopeImplementer;
   friend class Isolate;
+
+  DISALLOW_COPY_AND_ASSIGN(HandleScope);
 };
 
 
@@ -340,7 +346,7 @@
   ~CanonicalHandleScope();
 
  private:
-  Object** Lookup(Object* object);
+  V8_EXPORT_PRIVATE Object** Lookup(Object* object);
 
   Isolate* isolate_;
   Zone zone_;
diff --git a/src/heap-symbols.h b/src/heap-symbols.h
index d83f63f..c7b3370 100644
--- a/src/heap-symbols.h
+++ b/src/heap-symbols.h
@@ -8,11 +8,11 @@
 #define INTERNALIZED_STRING_LIST(V)                                \
   V(anonymous_string, "anonymous")                                 \
   V(apply_string, "apply")                                         \
-  V(assign_string, "assign")                                       \
   V(arguments_string, "arguments")                                 \
   V(Arguments_string, "Arguments")                                 \
-  V(Array_string, "Array")                                         \
   V(arguments_to_string, "[object Arguments]")                     \
+  V(Array_string, "Array")                                         \
+  V(assign_string, "assign")                                       \
   V(array_to_string, "[object Array]")                             \
   V(boolean_to_string, "[object Boolean]")                         \
   V(date_to_string, "[object Date]")                               \
@@ -48,6 +48,8 @@
   V(construct_string, "construct")                                 \
   V(create_string, "create")                                       \
   V(Date_string, "Date")                                           \
+  V(dayperiod_string, "dayperiod")                                 \
+  V(day_string, "day")                                             \
   V(default_string, "default")                                     \
   V(defineProperty_string, "defineProperty")                       \
   V(deleteProperty_string, "deleteProperty")                       \
@@ -57,10 +59,12 @@
   V(dot_string, ".")                                               \
   V(entries_string, "entries")                                     \
   V(enumerable_string, "enumerable")                               \
+  V(era_string, "era")                                             \
   V(Error_string, "Error")                                         \
   V(eval_string, "eval")                                           \
   V(EvalError_string, "EvalError")                                 \
   V(false_string, "false")                                         \
+  V(flags_string, "flags")                                         \
   V(float32x4_string, "float32x4")                                 \
   V(Float32x4_string, "Float32x4")                                 \
   V(for_api_string, "for_api")                                     \
@@ -74,6 +78,8 @@
   V(get_string, "get")                                             \
   V(global_string, "global")                                       \
   V(has_string, "has")                                             \
+  V(hour_string, "hour")                                           \
+  V(ignoreCase_string, "ignoreCase")                               \
   V(illegal_access_string, "illegal access")                       \
   V(illegal_argument_string, "illegal argument")                   \
   V(index_string, "index")                                         \
@@ -92,10 +98,14 @@
   V(last_index_string, "lastIndex")                                \
   V(length_string, "length")                                       \
   V(line_string, "line")                                           \
+  V(literal_string, "literal")                                     \
   V(Map_string, "Map")                                             \
   V(message_string, "message")                                     \
   V(minus_infinity_string, "-Infinity")                            \
   V(minus_zero_string, "-0")                                       \
+  V(minute_string, "minute")                                       \
+  V(month_string, "month")                                         \
+  V(multiline_string, "multiline")                                 \
   V(name_string, "name")                                           \
   V(nan_string, "NaN")                                             \
   V(next_string, "next")                                           \
@@ -120,6 +130,7 @@
   V(ReferenceError_string, "ReferenceError")                       \
   V(RegExp_string, "RegExp")                                       \
   V(script_string, "script")                                       \
+  V(second_string, "second")                                       \
   V(setPrototypeOf_string, "setPrototypeOf")                       \
   V(set_string, "set")                                             \
   V(Set_string, "Set")                                             \
@@ -128,6 +139,7 @@
   V(sourceText_string, "sourceText")                               \
   V(source_url_string, "source_url")                               \
   V(stack_string, "stack")                                         \
+  V(stackTraceLimit_string, "stackTraceLimit")                     \
   V(strict_compare_ic_string, "===")                               \
   V(string_string, "string")                                       \
   V(String_string, "String")                                       \
@@ -137,10 +149,12 @@
   V(this_string, "this")                                           \
   V(throw_string, "throw")                                         \
   V(timed_out, "timed-out")                                        \
+  V(timeZoneName_string, "timeZoneName")                           \
   V(toJSON_string, "toJSON")                                       \
   V(toString_string, "toString")                                   \
   V(true_string, "true")                                           \
   V(TypeError_string, "TypeError")                                 \
+  V(type_string, "type")                                           \
   V(uint16x8_string, "uint16x8")                                   \
   V(Uint16x8_string, "Uint16x8")                                   \
   V(uint32x4_string, "uint32x4")                                   \
@@ -155,19 +169,16 @@
   V(value_string, "value")                                         \
   V(WeakMap_string, "WeakMap")                                     \
   V(WeakSet_string, "WeakSet")                                     \
-  V(writable_string, "writable")
+  V(weekday_string, "weekday")                                     \
+  V(writable_string, "writable")                                   \
+  V(year_string, "year")
 
 #define PRIVATE_SYMBOL_LIST(V)              \
   V(array_iteration_kind_symbol)            \
   V(array_iterator_next_symbol)             \
   V(array_iterator_object_symbol)           \
-  V(call_site_constructor_symbol)           \
-  V(call_site_function_symbol)              \
-  V(call_site_position_symbol)              \
-  V(call_site_receiver_symbol)              \
-  V(call_site_strict_symbol)                \
-  V(call_site_wasm_obj_symbol)              \
-  V(call_site_wasm_func_index_symbol)       \
+  V(call_site_frame_array_symbol)           \
+  V(call_site_frame_index_symbol)           \
   V(class_end_position_symbol)              \
   V(class_start_position_symbol)            \
   V(detailed_stack_trace_symbol)            \
@@ -189,10 +200,13 @@
   V(normal_ic_symbol)                       \
   V(not_mapped_symbol)                      \
   V(premonomorphic_symbol)                  \
-  V(promise_combined_deferred_symbol)       \
+  V(promise_async_stack_id_symbol)          \
   V(promise_debug_marker_symbol)            \
   V(promise_deferred_reactions_symbol)      \
+  V(promise_forwarding_handler_symbol)      \
   V(promise_fulfill_reactions_symbol)       \
+  V(promise_handled_by_symbol)              \
+  V(promise_handled_hint_symbol)            \
   V(promise_has_handler_symbol)             \
   V(promise_raw_symbol)                     \
   V(promise_reject_reactions_symbol)        \
diff --git a/src/heap/gc-tracer.cc b/src/heap/gc-tracer.cc
index 695a259..8049ce4 100644
--- a/src/heap/gc-tracer.cc
+++ b/src/heap/gc-tracer.cc
@@ -23,11 +23,16 @@
 
 GCTracer::Scope::Scope(GCTracer* tracer, ScopeId scope)
     : tracer_(tracer), scope_(scope) {
+  // All accesses to incremental_marking_scope assume that incremental marking
+  // scopes come first.
+  STATIC_ASSERT(FIRST_INCREMENTAL_SCOPE == 0);
   start_time_ = tracer_->heap_->MonotonicallyIncreasingTimeInMs();
   // TODO(cbruni): remove once we fully moved to a trace-based system.
-  if (FLAG_runtime_call_stats) {
-    RuntimeCallStats::Enter(tracer_->heap_->isolate(), &timer_,
-                            &RuntimeCallStats::GC);
+  if (TRACE_EVENT_RUNTIME_CALL_STATS_TRACING_ENABLED() ||
+      FLAG_runtime_call_stats) {
+    RuntimeCallStats::Enter(
+        tracer_->heap_->isolate()->counters()->runtime_call_stats(), &timer_,
+        &RuntimeCallStats::GC);
   }
 }
 
@@ -35,8 +40,10 @@
   tracer_->AddScopeSample(
       scope_, tracer_->heap_->MonotonicallyIncreasingTimeInMs() - start_time_);
   // TODO(cbruni): remove once we fully moved to a trace-based system.
-  if (FLAG_runtime_call_stats) {
-    RuntimeCallStats::Leave(tracer_->heap_->isolate(), &timer_);
+  if (TRACE_EVENT_RUNTIME_CALL_STATS_TRACING_ENABLED() ||
+      FLAG_runtime_call_stats) {
+    RuntimeCallStats::Leave(
+        tracer_->heap_->isolate()->counters()->runtime_call_stats(), &timer_);
   }
 }
 
@@ -53,7 +60,7 @@
   return "(unknown)";
 }
 
-GCTracer::Event::Event(Type type, const char* gc_reason,
+GCTracer::Event::Event(Type type, GarbageCollectionReason gc_reason,
                        const char* collector_reason)
     : type(type),
       gc_reason(gc_reason),
@@ -69,10 +76,8 @@
       end_holes_size(0),
       new_space_object_size(0),
       survived_new_space_object_size(0),
-      cumulative_incremental_marking_bytes(0),
       incremental_marking_bytes(0),
-      cumulative_pure_incremental_marking_duration(0.0),
-      pure_incremental_marking_duration(0.0) {
+      incremental_marking_duration(0.0) {
   for (int i = 0; i < Scope::NUMBER_OF_SCOPES; i++) {
     scopes[i] = 0;
   }
@@ -106,14 +111,11 @@
 
 GCTracer::GCTracer(Heap* heap)
     : heap_(heap),
-      current_(Event::START, nullptr, nullptr),
+      current_(Event::START, GarbageCollectionReason::kUnknown, nullptr),
       previous_(current_),
-      previous_incremental_mark_compactor_event_(current_),
-      cumulative_incremental_marking_bytes_(0),
-      cumulative_incremental_marking_duration_(0.0),
-      cumulative_pure_incremental_marking_duration_(0.0),
-      cumulative_marking_duration_(0.0),
-      cumulative_sweeping_duration_(0.0),
+      incremental_marking_bytes_(0),
+      incremental_marking_duration_(0.0),
+      recorded_incremental_marking_speed_(0.0),
       allocation_time_ms_(0.0),
       new_space_allocation_counter_bytes_(0),
       old_generation_allocation_counter_bytes_(0),
@@ -126,19 +128,10 @@
 }
 
 void GCTracer::ResetForTesting() {
-  current_ = Event(Event::START, NULL, NULL);
+  current_ = Event(Event::START, GarbageCollectionReason::kTesting, nullptr);
   current_.end_time = heap_->MonotonicallyIncreasingTimeInMs();
-  previous_ = previous_incremental_mark_compactor_event_ = current_;
-  cumulative_incremental_marking_bytes_ = 0.0;
-  cumulative_incremental_marking_duration_ = 0.0;
-  cumulative_pure_incremental_marking_duration_ = 0.0;
-  cumulative_marking_duration_ = 0.0;
-  for (int i = 0; i < Scope::NUMBER_OF_INCREMENTAL_SCOPES; i++) {
-    incremental_marking_scopes_[i].cumulative_duration = 0.0;
-    incremental_marking_scopes_[i].steps = 0;
-    incremental_marking_scopes_[i].longest_step = 0.0;
-  }
-  cumulative_sweeping_duration_ = 0.0;
+  previous_ = current_;
+  ResetIncrementalMarkingCounters();
   allocation_time_ms_ = 0.0;
   new_space_allocation_counter_bytes_ = 0.0;
   old_generation_allocation_counter_bytes_ = 0.0;
@@ -158,7 +151,8 @@
   start_counter_ = 0;
 }
 
-void GCTracer::Start(GarbageCollector collector, const char* gc_reason,
+void GCTracer::Start(GarbageCollector collector,
+                     GarbageCollectionReason gc_reason,
                      const char* collector_reason) {
   start_counter_++;
   if (start_counter_ != 1) return;
@@ -167,8 +161,6 @@
   double start_time = heap_->MonotonicallyIncreasingTimeInMs();
   SampleAllocation(start_time, heap_->NewSpaceAllocationCounter(),
                    heap_->OldGenerationAllocationCounter());
-  if (current_.type == Event::INCREMENTAL_MARK_COMPACTOR)
-    previous_incremental_mark_compactor_event_ = current_;
 
   if (collector == SCAVENGER) {
     current_ = Event(Event::SCAVENGER, gc_reason, collector_reason);
@@ -189,10 +181,8 @@
   current_.new_space_object_size =
       heap_->new_space()->top() - heap_->new_space()->bottom();
 
-  current_.cumulative_incremental_marking_bytes =
-      cumulative_incremental_marking_bytes_;
-  current_.cumulative_pure_incremental_marking_duration =
-      cumulative_pure_incremental_marking_duration_;
+  current_.incremental_marking_bytes = 0;
+  current_.incremental_marking_duration = 0;
 
   for (int i = 0; i < Scope::NUMBER_OF_SCOPES; i++) {
     current_.scopes[i] = 0;
@@ -200,37 +190,40 @@
 
   int committed_memory = static_cast<int>(heap_->CommittedMemory() / KB);
   int used_memory = static_cast<int>(current_.start_object_size / KB);
-  heap_->isolate()->counters()->aggregated_memory_heap_committed()->AddSample(
-      start_time, committed_memory);
-  heap_->isolate()->counters()->aggregated_memory_heap_used()->AddSample(
-      start_time, used_memory);
+
+  Counters* counters = heap_->isolate()->counters();
+
+  if (collector == SCAVENGER) {
+    counters->scavenge_reason()->AddSample(static_cast<int>(gc_reason));
+  } else {
+    counters->mark_compact_reason()->AddSample(static_cast<int>(gc_reason));
+  }
+  counters->aggregated_memory_heap_committed()->AddSample(start_time,
+                                                          committed_memory);
+  counters->aggregated_memory_heap_used()->AddSample(start_time, used_memory);
   // TODO(cbruni): remove once we fully moved to a trace-based system.
-  if (FLAG_runtime_call_stats) {
-    RuntimeCallStats::Enter(heap_->isolate(), &timer_, &RuntimeCallStats::GC);
+  if (TRACE_EVENT_RUNTIME_CALL_STATS_TRACING_ENABLED() ||
+      FLAG_runtime_call_stats) {
+    RuntimeCallStats::Enter(heap_->isolate()->counters()->runtime_call_stats(),
+                            &timer_, &RuntimeCallStats::GC);
   }
 }
 
-void GCTracer::MergeBaseline(const Event& baseline) {
-  current_.incremental_marking_bytes =
-      current_.cumulative_incremental_marking_bytes -
-      baseline.cumulative_incremental_marking_bytes;
-  current_.pure_incremental_marking_duration =
-      current_.cumulative_pure_incremental_marking_duration -
-      baseline.cumulative_pure_incremental_marking_duration;
-  for (int i = Scope::FIRST_INCREMENTAL_SCOPE;
-       i <= Scope::LAST_INCREMENTAL_SCOPE; i++) {
-    current_.scopes[i] =
-        current_.incremental_marking_scopes[i].cumulative_duration -
-        baseline.incremental_marking_scopes[i].cumulative_duration;
+void GCTracer::ResetIncrementalMarkingCounters() {
+  incremental_marking_bytes_ = 0;
+  incremental_marking_duration_ = 0;
+  for (int i = 0; i < Scope::NUMBER_OF_INCREMENTAL_SCOPES; i++) {
+    incremental_marking_scopes_[i].ResetCurrentCycle();
   }
 }
 
 void GCTracer::Stop(GarbageCollector collector) {
   start_counter_--;
   if (start_counter_ != 0) {
-    PrintIsolate(heap_->isolate(), "[Finished reentrant %s during %s.]\n",
-                 collector == SCAVENGER ? "Scavenge" : "Mark-sweep",
-                 current_.TypeName(false));
+    heap_->isolate()->PrintWithTimestamp(
+        "[Finished reentrant %s during %s.]\n",
+        collector == SCAVENGER ? "Scavenge" : "Mark-sweep",
+        current_.TypeName(false));
     return;
   }
 
@@ -240,11 +233,6 @@
           (current_.type == Event::MARK_COMPACTOR ||
            current_.type == Event::INCREMENTAL_MARK_COMPACTOR)));
 
-  for (int i = Scope::FIRST_INCREMENTAL_SCOPE;
-       i <= Scope::LAST_INCREMENTAL_SCOPE; i++) {
-    current_.incremental_marking_scopes[i] = incremental_marking_scopes_[i];
-  }
-
   current_.end_time = heap_->MonotonicallyIncreasingTimeInMs();
   current_.end_object_size = heap_->SizeOfObjects();
   current_.end_memory_size = heap_->memory_allocator()->Size();
@@ -263,36 +251,33 @@
   double duration = current_.end_time - current_.start_time;
 
   if (current_.type == Event::SCAVENGER) {
-    MergeBaseline(previous_);
     recorded_scavenges_total_.Push(
         MakeBytesAndDuration(current_.new_space_object_size, duration));
     recorded_scavenges_survived_.Push(MakeBytesAndDuration(
         current_.survived_new_space_object_size, duration));
   } else if (current_.type == Event::INCREMENTAL_MARK_COMPACTOR) {
-    MergeBaseline(previous_incremental_mark_compactor_event_);
-    recorded_incremental_marking_steps_.Push(
-        MakeBytesAndDuration(current_.incremental_marking_bytes,
-                             current_.pure_incremental_marking_duration));
+    current_.incremental_marking_bytes = incremental_marking_bytes_;
+    current_.incremental_marking_duration = incremental_marking_duration_;
+    for (int i = 0; i < Scope::NUMBER_OF_INCREMENTAL_SCOPES; i++) {
+      current_.incremental_marking_scopes[i] = incremental_marking_scopes_[i];
+      current_.scopes[i] = incremental_marking_scopes_[i].duration;
+    }
+    RecordIncrementalMarkingSpeed(current_.incremental_marking_bytes,
+                                  current_.incremental_marking_duration);
     recorded_incremental_mark_compacts_.Push(
         MakeBytesAndDuration(current_.start_object_size, duration));
+    ResetIncrementalMarkingCounters();
     combined_mark_compact_speed_cache_ = 0.0;
-    for (int i = 0; i < Scope::NUMBER_OF_INCREMENTAL_SCOPES; i++) {
-      incremental_marking_scopes_[i].ResetCurrentCycle();
-    }
   } else {
-    DCHECK(current_.incremental_marking_bytes == 0);
-    DCHECK(current_.pure_incremental_marking_duration == 0);
+    DCHECK_EQ(0, current_.incremental_marking_bytes);
+    DCHECK_EQ(0, current_.incremental_marking_duration);
     recorded_mark_compacts_.Push(
         MakeBytesAndDuration(current_.start_object_size, duration));
+    ResetIncrementalMarkingCounters();
     combined_mark_compact_speed_cache_ = 0.0;
-    for (int i = 0; i < Scope::NUMBER_OF_INCREMENTAL_SCOPES; i++) {
-      incremental_marking_scopes_[i].ResetCurrentCycle();
-    }
   }
 
-  double spent_in_mutator = Max(current_.start_time - previous_.end_time, 0.0);
-  heap_->UpdateCumulativeGCStatistics(duration, spent_in_mutator,
-                                      current_.scopes[Scope::MC_MARK]);
+  heap_->UpdateTotalGCTime(duration);
 
   if (current_.type == Event::SCAVENGER && FLAG_trace_gc_ignore_scavenger)
     return;
@@ -308,8 +293,10 @@
   }
 
   // TODO(cbruni): remove once we fully moved to a trace-based system.
-  if (FLAG_runtime_call_stats) {
-    RuntimeCallStats::Leave(heap_->isolate(), &timer_);
+  if (TRACE_EVENT_RUNTIME_CALL_STATS_TRACING_ENABLED() ||
+      FLAG_runtime_call_stats) {
+    RuntimeCallStats::Leave(heap_->isolate()->counters()->runtime_call_stats(),
+                            &timer_);
   }
 }
 
@@ -375,11 +362,9 @@
 
 
 void GCTracer::AddIncrementalMarkingStep(double duration, intptr_t bytes) {
-  cumulative_incremental_marking_bytes_ += bytes;
-  cumulative_incremental_marking_duration_ += duration;
-  cumulative_marking_duration_ += duration;
   if (bytes > 0) {
-    cumulative_pure_incremental_marking_duration_ += duration;
+    incremental_marking_bytes_ += bytes;
+    incremental_marking_duration_ += duration;
   }
 }
 
@@ -402,29 +387,20 @@
   heap_->AddToRingBuffer(buffer.start());
 }
 
-
 void GCTracer::Print() const {
   double duration = current_.end_time - current_.start_time;
   const size_t kIncrementalStatsSize = 128;
   char incremental_buffer[kIncrementalStatsSize] = {0};
 
-  if (current_.incremental_marking_scopes[Scope::MC_INCREMENTAL].steps > 0) {
-    if (current_.type == Event::SCAVENGER) {
-      base::OS::SNPrintF(
-          incremental_buffer, kIncrementalStatsSize,
-          " (+ %.1f ms in %d steps since last GC)",
-          current_.scopes[Scope::MC_INCREMENTAL],
-          current_.incremental_marking_scopes[Scope::MC_INCREMENTAL].steps);
-    } else {
-      base::OS::SNPrintF(
-          incremental_buffer, kIncrementalStatsSize,
-          " (+ %.1f ms in %d steps since start of marking, "
-          "biggest step %.1f ms)",
-          current_.scopes[Scope::MC_INCREMENTAL],
-          current_.incremental_marking_scopes[Scope::MC_INCREMENTAL].steps,
-          current_.incremental_marking_scopes[Scope::MC_INCREMENTAL]
-              .longest_step);
-    }
+  if (current_.type == Event::INCREMENTAL_MARK_COMPACTOR) {
+    base::OS::SNPrintF(
+        incremental_buffer, kIncrementalStatsSize,
+        " (+ %.1f ms in %d steps since start of marking, "
+        "biggest step %.1f ms, walltime since start of marking %.f ms)",
+        current_.scopes[Scope::MC_INCREMENTAL],
+        current_.incremental_marking_scopes[Scope::MC_INCREMENTAL].steps,
+        current_.incremental_marking_scopes[Scope::MC_INCREMENTAL].longest_step,
+        current_.end_time - incremental_marking_start_time_);
   }
 
   // Avoid PrintF as Output also appends the string to the tracing ring buffer
@@ -442,7 +418,7 @@
       static_cast<double>(current_.end_object_size) / MB,
       static_cast<double>(current_.end_memory_size) / MB, duration,
       TotalExternalTime(), incremental_buffer,
-      current_.gc_reason != nullptr ? current_.gc_reason : "",
+      Heap::GarbageCollectionReasonToString(current_.gc_reason),
       current_.collector_reason != nullptr ? current_.collector_reason : "");
 }
 
@@ -453,11 +429,16 @@
   intptr_t allocated_since_last_gc =
       current_.start_object_size - previous_.end_object_size;
 
+  double incremental_walltime_duration = 0;
+
+  if (current_.type == Event::INCREMENTAL_MARK_COMPACTOR) {
+    incremental_walltime_duration =
+        current_.end_time - incremental_marking_start_time_;
+  }
+
   switch (current_.type) {
     case Event::SCAVENGER:
-      PrintIsolate(
-          heap_->isolate(),
-          "%8.0f ms: "
+      heap_->isolate()->PrintWithTimestamp(
           "pause=%.1f "
           "mutator=%.1f "
           "gc=%s "
@@ -498,9 +479,8 @@
           "semi_space_copy_rate=%.1f%% "
           "new_space_allocation_throughput=%.1f "
           "context_disposal_rate=%.1f\n",
-          heap_->isolate()->time_millis_since_init(), duration,
-          spent_in_mutator, current_.TypeName(true), current_.reduce_memory,
-          current_.scopes[Scope::SCAVENGER_SCAVENGE],
+          duration, spent_in_mutator, current_.TypeName(true),
+          current_.reduce_memory, current_.scopes[Scope::SCAVENGER_SCAVENGE],
           current_.scopes[Scope::SCAVENGER_OLD_TO_NEW_POINTERS],
           current_.scopes[Scope::SCAVENGER_WEAK],
           current_.scopes[Scope::SCAVENGER_ROOTS],
@@ -527,9 +507,7 @@
       break;
     case Event::MARK_COMPACTOR:
     case Event::INCREMENTAL_MARK_COMPACTOR:
-      PrintIsolate(
-          heap_->isolate(),
-          "%8.0f ms: "
+      heap_->isolate()->PrintWithTimestamp(
           "pause=%.1f "
           "mutator=%.1f "
           "gc=%s "
@@ -580,6 +558,7 @@
           "incremental.finalize.external.prologue=%.1f "
           "incremental.finalize.external.epilogue=%.1f "
           "incremental.finalize.object_grouping=%.1f "
+          "incremental.sweeping=%.1f "
           "incremental.wrapper_prologue=%.1f "
           "incremental.wrapper_tracing=%.1f "
           "incremental_wrapper_tracing_longest_step=%.1f "
@@ -588,6 +567,7 @@
           "incremental_longest_step=%.1f "
           "incremental_steps_count=%d "
           "incremental_marking_throughput=%.f "
+          "incremental_walltime_duration=%.f "
           "total_size_before=%" V8PRIdPTR
           " "
           "total_size_after=%" V8PRIdPTR
@@ -612,9 +592,8 @@
           "new_space_allocation_throughput=%.1f "
           "context_disposal_rate=%.1f "
           "compaction_speed=%.f\n",
-          heap_->isolate()->time_millis_since_init(), duration,
-          spent_in_mutator, current_.TypeName(true), current_.reduce_memory,
-          current_.scopes[Scope::MC_CLEAR],
+          duration, spent_in_mutator, current_.TypeName(true),
+          current_.reduce_memory, current_.scopes[Scope::MC_CLEAR],
           current_.scopes[Scope::MC_CLEAR_CODE_FLUSH],
           current_.scopes[Scope::MC_CLEAR_DEPENDENT_CODE],
           current_.scopes[Scope::MC_CLEAR_GLOBAL_HANDLES],
@@ -659,6 +638,7 @@
           current_.scopes[Scope::MC_INCREMENTAL_EXTERNAL_PROLOGUE],
           current_.scopes[Scope::MC_INCREMENTAL_EXTERNAL_EPILOGUE],
           current_.scopes[Scope::MC_INCREMENTAL_FINALIZE_OBJECT_GROUPING],
+          current_.scopes[Scope::MC_INCREMENTAL_SWEEPING],
           current_.scopes[Scope::MC_INCREMENTAL_WRAPPER_PROLOGUE],
           current_.scopes[Scope::MC_INCREMENTAL_WRAPPER_TRACING],
           current_
@@ -674,9 +654,10 @@
               .longest_step,
           current_.incremental_marking_scopes[Scope::MC_INCREMENTAL].steps,
           IncrementalMarkingSpeedInBytesPerMillisecond(),
-          current_.start_object_size, current_.end_object_size,
-          current_.start_holes_size, current_.end_holes_size,
-          allocated_since_last_gc, heap_->promoted_objects_size(),
+          incremental_walltime_duration, current_.start_object_size,
+          current_.end_object_size, current_.start_holes_size,
+          current_.end_holes_size, allocated_since_last_gc,
+          heap_->promoted_objects_size(),
           heap_->semi_space_copied_object_size(),
           heap_->nodes_died_in_new_space_, heap_->nodes_copied_in_new_space_,
           heap_->nodes_promoted_, heap_->promotion_ratio_,
@@ -716,15 +697,26 @@
   return AverageSpeed(buffer, MakeBytesAndDuration(0, 0), 0);
 }
 
-double GCTracer::IncrementalMarkingSpeedInBytesPerMillisecond() const {
-  if (cumulative_incremental_marking_duration_ == 0.0) return 0;
-  // We haven't completed an entire round of incremental marking, yet.
-  // Use data from GCTracer instead of data from event buffers.
-  if (recorded_incremental_marking_steps_.Count() == 0) {
-    return cumulative_incremental_marking_bytes_ /
-           cumulative_pure_incremental_marking_duration_;
+void GCTracer::RecordIncrementalMarkingSpeed(intptr_t bytes, double duration) {
+  if (duration == 0 || bytes == 0) return;
+  double current_speed = bytes / duration;
+  if (recorded_incremental_marking_speed_ == 0) {
+    recorded_incremental_marking_speed_ = current_speed;
+  } else {
+    recorded_incremental_marking_speed_ =
+        (recorded_incremental_marking_speed_ + current_speed) / 2;
   }
-  return AverageSpeed(recorded_incremental_marking_steps_);
+}
+
+double GCTracer::IncrementalMarkingSpeedInBytesPerMillisecond() const {
+  const int kConservativeSpeedInBytesPerMillisecond = 128 * KB;
+  if (recorded_incremental_marking_speed_ != 0) {
+    return recorded_incremental_marking_speed_;
+  }
+  if (incremental_marking_duration_ != 0.0) {
+    return incremental_marking_bytes_ / incremental_marking_duration_;
+  }
+  return kConservativeSpeedInBytesPerMillisecond;
 }
 
 double GCTracer::ScavengeSpeedInBytesPerMillisecond(
@@ -821,5 +813,10 @@
 }
 
 void GCTracer::ResetSurvivalEvents() { recorded_survival_ratios_.Reset(); }
+
+void GCTracer::NotifyIncrementalMarkingStart() {
+  incremental_marking_start_time_ = heap_->MonotonicallyIncreasingTimeInMs();
+}
+
 }  // namespace internal
 }  // namespace v8
diff --git a/src/heap/gc-tracer.h b/src/heap/gc-tracer.h
index a11823e..e8c72c1 100644
--- a/src/heap/gc-tracer.h
+++ b/src/heap/gc-tracer.h
@@ -63,6 +63,7 @@
 #define INCREMENTAL_SCOPES(F)                                      \
   /* MC_INCREMENTAL is the top-level incremental marking scope. */ \
   F(MC_INCREMENTAL)                                                \
+  F(MC_INCREMENTAL_SWEEPING)                                       \
   F(MC_INCREMENTAL_WRAPPER_PROLOGUE)                               \
   F(MC_INCREMENTAL_WRAPPER_TRACING)                                \
   F(MC_INCREMENTAL_FINALIZE)                                       \
@@ -134,23 +135,23 @@
 class GCTracer {
  public:
   struct IncrementalMarkingInfos {
-    IncrementalMarkingInfos()
-        : cumulative_duration(0), longest_step(0), steps(0) {}
+    IncrementalMarkingInfos() : duration(0), longest_step(0), steps(0) {}
 
     void Update(double duration) {
       steps++;
-      cumulative_duration += duration;
+      this->duration += duration;
       if (duration > longest_step) {
         longest_step = duration;
       }
     }
 
     void ResetCurrentCycle() {
+      duration = 0;
       longest_step = 0;
       steps = 0;
     }
 
-    double cumulative_duration;
+    double duration;
     double longest_step;
     int steps;
   };
@@ -192,7 +193,8 @@
       START = 3
     };
 
-    Event(Type type, const char* gc_reason, const char* collector_reason);
+    Event(Type type, GarbageCollectionReason gc_reason,
+          const char* collector_reason);
 
     // Returns a string describing the event type.
     const char* TypeName(bool short_name) const;
@@ -200,7 +202,7 @@
     // Type of event
     Type type;
 
-    const char* gc_reason;
+    GarbageCollectionReason gc_reason;
     const char* collector_reason;
 
     // Timestamp set in the constructor.
@@ -219,10 +221,10 @@
     intptr_t end_object_size;
 
     // Size of memory allocated from OS set in constructor.
-    intptr_t start_memory_size;
+    size_t start_memory_size;
 
     // Size of memory allocated from OS set in destructor.
-    intptr_t end_memory_size;
+    size_t end_memory_size;
 
     // Total amount of space either wasted or contained in one of free lists
     // before the current GC.
@@ -241,21 +243,11 @@
     // Bytes marked since creation of tracer (value at start of event).
     intptr_t cumulative_incremental_marking_bytes;
 
-    // Bytes marked since
-    // - last event for SCAVENGER events
-    // - last INCREMENTAL_MARK_COMPACTOR event for INCREMENTAL_MARK_COMPACTOR
-    // events
+    // Bytes marked incrementally for INCREMENTAL_MARK_COMPACTOR
     intptr_t incremental_marking_bytes;
 
-    // Cumulative pure duration of incremental marking steps since creation of
-    // tracer. (value at start of event)
-    double cumulative_pure_incremental_marking_duration;
-
-    // Duration of pure incremental marking steps since
-    // - last event for SCAVENGER events
-    // - last INCREMENTAL_MARK_COMPACTOR event for INCREMENTAL_MARK_COMPACTOR
-    // events
-    double pure_incremental_marking_duration;
+    // Duration of incremental marking steps for INCREMENTAL_MARK_COMPACTOR.
+    double incremental_marking_duration;
 
     // Amounts of time spent in different scopes during GC.
     double scopes[Scope::NUMBER_OF_SCOPES];
@@ -270,7 +262,7 @@
   explicit GCTracer(Heap* heap);
 
   // Start collecting data.
-  void Start(GarbageCollector collector, const char* gc_reason,
+  void Start(GarbageCollector collector, GarbageCollectionReason gc_reason,
              const char* collector_reason);
 
   // Stop collecting data and print results.
@@ -292,26 +284,6 @@
   // Log an incremental marking step.
   void AddIncrementalMarkingStep(double duration, intptr_t bytes);
 
-  // Log time spent in marking.
-  void AddMarkingTime(double duration) {
-    cumulative_marking_duration_ += duration;
-  }
-
-  // Time spent in marking.
-  double cumulative_marking_duration() const {
-    return cumulative_marking_duration_;
-  }
-
-  // Log time spent in sweeping on main thread.
-  void AddSweepingTime(double duration) {
-    cumulative_sweeping_duration_ += duration;
-  }
-
-  // Time spent in sweeping on main thread.
-  double cumulative_sweeping_duration() const {
-    return cumulative_sweeping_duration_;
-  }
-
   // Compute the average incremental marking speed in bytes/millisecond.
   // Returns 0 if no events have been recorded.
   double IncrementalMarkingSpeedInBytesPerMillisecond() const;
@@ -381,11 +353,14 @@
   // Discard all recorded survival events.
   void ResetSurvivalEvents();
 
+  void NotifyIncrementalMarkingStart();
+
   V8_INLINE void AddScopeSample(Scope::ScopeId scope, double duration) {
     DCHECK(scope < Scope::NUMBER_OF_SCOPES);
     if (scope >= Scope::FIRST_INCREMENTAL_SCOPE &&
         scope <= Scope::LAST_INCREMENTAL_SCOPE) {
-      incremental_marking_scopes_[scope].Update(duration);
+      incremental_marking_scopes_[scope - Scope::FIRST_INCREMENTAL_SCOPE]
+          .Update(duration);
     } else {
       current_.scopes[scope] += duration;
     }
@@ -400,6 +375,7 @@
   FRIEND_TEST(GCTracerTest, RegularScope);
   FRIEND_TEST(GCTracerTest, IncrementalMarkingDetails);
   FRIEND_TEST(GCTracerTest, IncrementalScope);
+  FRIEND_TEST(GCTracerTest, IncrementalMarkingSpeed);
 
   // Returns the average speed of the events in the buffer.
   // If the buffer is empty, the result is 0.
@@ -408,9 +384,9 @@
   static double AverageSpeed(const RingBuffer<BytesAndDuration>& buffer,
                              const BytesAndDuration& initial, double time_ms);
 
-  void MergeBaseline(const Event& baseline);
-
   void ResetForTesting();
+  void ResetIncrementalMarkingCounters();
+  void RecordIncrementalMarkingSpeed(intptr_t bytes, double duration);
 
   // Print one detailed trace line in name=value format.
   // TODO(ernstm): Move to Heap.
@@ -444,37 +420,23 @@
   // Previous tracer event.
   Event previous_;
 
-  // Previous INCREMENTAL_MARK_COMPACTOR event.
-  Event previous_incremental_mark_compactor_event_;
+  // Size of incremental marking steps (in bytes) accumulated since the end of
+  // the last mark compact GC.
+  intptr_t incremental_marking_bytes_;
 
-  // Cumulative size of incremental marking steps (in bytes) since creation of
-  // tracer.
-  intptr_t cumulative_incremental_marking_bytes_;
+  // Duration of incremental marking steps since the end of the last mark-
+  // compact event.
+  double incremental_marking_duration_;
 
-  // Cumulative duration of incremental marking steps since creation of tracer.
-  double cumulative_incremental_marking_duration_;
+  double incremental_marking_start_time_;
 
-  // Cumulative duration of pure incremental marking steps since creation of
-  // tracer.
-  double cumulative_pure_incremental_marking_duration_;
-
-  // Total marking time.
-  // This timer is precise when run with --print-cumulative-gc-stat
-  double cumulative_marking_duration_;
+  double recorded_incremental_marking_speed_;
 
   // Incremental scopes carry more information than just the duration. The infos
   // here are merged back upon starting/stopping the GC tracer.
   IncrementalMarkingInfos
       incremental_marking_scopes_[Scope::NUMBER_OF_INCREMENTAL_SCOPES];
 
-  // Total sweeping time on the main thread.
-  // This timer is precise when run with --print-cumulative-gc-stat
-  // TODO(hpayer): Account for sweeping time on sweeper threads. Add a
-  // different field for that.
-  // TODO(hpayer): This timer right now just holds the sweeping time
-  // of the initial atomic sweeping pause. Make sure that it accumulates
-  // all sweeping operations performed on the main thread.
-  double cumulative_sweeping_duration_;
 
   // Timestamp and allocation counter at the last sampled allocation event.
   double allocation_time_ms_;
@@ -494,12 +456,11 @@
   // Separate timer used for --runtime_call_stats
   RuntimeCallTimer timer_;
 
-  RingBuffer<BytesAndDuration> recorded_incremental_marking_steps_;
   RingBuffer<BytesAndDuration> recorded_scavenges_total_;
   RingBuffer<BytesAndDuration> recorded_scavenges_survived_;
   RingBuffer<BytesAndDuration> recorded_compactions_;
-  RingBuffer<BytesAndDuration> recorded_mark_compacts_;
   RingBuffer<BytesAndDuration> recorded_incremental_mark_compacts_;
+  RingBuffer<BytesAndDuration> recorded_mark_compacts_;
   RingBuffer<BytesAndDuration> recorded_new_generation_allocations_;
   RingBuffer<BytesAndDuration> recorded_old_generation_allocations_;
   RingBuffer<double> recorded_context_disposal_times_;
diff --git a/src/heap/heap-inl.h b/src/heap/heap-inl.h
index 21f465f..23e1712 100644
--- a/src/heap/heap-inl.h
+++ b/src/heap/heap-inl.h
@@ -8,7 +8,7 @@
 #include <cmath>
 
 #include "src/base/platform/platform.h"
-#include "src/counters.h"
+#include "src/counters-inl.h"
 #include "src/heap/heap.h"
 #include "src/heap/incremental-marking-inl.h"
 #include "src/heap/mark-compact.h"
@@ -25,6 +25,16 @@
 namespace v8 {
 namespace internal {
 
+AllocationSpace AllocationResult::RetrySpace() {
+  DCHECK(IsRetry());
+  return static_cast<AllocationSpace>(Smi::cast(object_)->value());
+}
+
+HeapObject* AllocationResult::ToObjectChecked() {
+  CHECK(!IsRetry());
+  return HeapObject::cast(object_);
+}
+
 void PromotionQueue::insert(HeapObject* target, int32_t size,
                             bool was_marked_black) {
   if (emergency_stack_ != NULL) {
@@ -50,6 +60,62 @@
 #endif
 }
 
+void PromotionQueue::remove(HeapObject** target, int32_t* size,
+                            bool* was_marked_black) {
+  DCHECK(!is_empty());
+  if (front_ == rear_) {
+    Entry e = emergency_stack_->RemoveLast();
+    *target = e.obj_;
+    *size = e.size_;
+    *was_marked_black = e.was_marked_black_;
+    return;
+  }
+
+  struct Entry* entry = reinterpret_cast<struct Entry*>(--front_);
+  *target = entry->obj_;
+  *size = entry->size_;
+  *was_marked_black = entry->was_marked_black_;
+
+  // Assert no underflow.
+  SemiSpace::AssertValidRange(reinterpret_cast<Address>(rear_),
+                              reinterpret_cast<Address>(front_));
+}
+
+Page* PromotionQueue::GetHeadPage() {
+  return Page::FromAllocationAreaAddress(reinterpret_cast<Address>(rear_));
+}
+
+void PromotionQueue::SetNewLimit(Address limit) {
+  // If we are already using an emergency stack, we can ignore it.
+  if (emergency_stack_) return;
+
+  // If the limit is not on the same page, we can ignore it.
+  if (Page::FromAllocationAreaAddress(limit) != GetHeadPage()) return;
+
+  limit_ = reinterpret_cast<struct Entry*>(limit);
+
+  if (limit_ <= rear_) {
+    return;
+  }
+
+  RelocateQueueHead();
+}
+
+bool PromotionQueue::IsBelowPromotionQueue(Address to_space_top) {
+  // If an emergency stack is used, the to-space address cannot interfere
+  // with the promotion queue.
+  if (emergency_stack_) return true;
+
+  // If the given to-space top pointer and the head of the promotion queue
+  // are not on the same page, then the to-space objects are below the
+  // promotion queue.
+  if (GetHeadPage() != Page::FromAddress(to_space_top)) {
+    return true;
+  }
+  // If the to space top pointer is smaller or equal than the promotion
+  // queue head, then the to-space objects are below the promotion queue.
+  return reinterpret_cast<struct Entry*>(to_space_top) <= rear_;
+}
 
 #define ROOT_ACCESSOR(type, name, camel_name) \
   type* Heap::name() { return type::cast(roots_[k##camel_name##RootIndex]); }
@@ -89,6 +155,37 @@
 ROOT_LIST(ROOT_ACCESSOR)
 #undef ROOT_ACCESSOR
 
+PagedSpace* Heap::paged_space(int idx) {
+  DCHECK_NE(idx, LO_SPACE);
+  DCHECK_NE(idx, NEW_SPACE);
+  return static_cast<PagedSpace*>(space_[idx]);
+}
+
+Space* Heap::space(int idx) { return space_[idx]; }
+
+Address* Heap::NewSpaceAllocationTopAddress() {
+  return new_space_->allocation_top_address();
+}
+
+Address* Heap::NewSpaceAllocationLimitAddress() {
+  return new_space_->allocation_limit_address();
+}
+
+Address* Heap::OldSpaceAllocationTopAddress() {
+  return old_space_->allocation_top_address();
+}
+
+Address* Heap::OldSpaceAllocationLimitAddress() {
+  return old_space_->allocation_limit_address();
+}
+
+void Heap::UpdateNewSpaceAllocationCounter() {
+  new_space_allocation_counter_ = NewSpaceAllocationCounter();
+}
+
+size_t Heap::NewSpaceAllocationCounter() {
+  return new_space_allocation_counter_ + new_space()->AllocatedSinceLastGC();
+}
 
 template <>
 bool inline Heap::IsOneByte(Vector<const char> str, int chars) {
@@ -209,14 +306,14 @@
   isolate_->counters()->objs_since_last_young()->Increment();
 #endif
 
-  bool large_object = size_in_bytes > Page::kMaxRegularHeapObjectSize;
+  bool large_object = size_in_bytes > kMaxRegularHeapObjectSize;
   HeapObject* object = nullptr;
   AllocationResult allocation;
   if (NEW_SPACE == space) {
     if (large_object) {
       space = LO_SPACE;
     } else {
-      allocation = new_space_.AllocateRaw(size_in_bytes, alignment);
+      allocation = new_space_->AllocateRaw(size_in_bytes, alignment);
       if (allocation.To(&object)) {
         OnAllocationEvent(object, size_in_bytes);
       }
@@ -248,8 +345,6 @@
   }
   if (allocation.To(&object)) {
     OnAllocationEvent(object, size_in_bytes);
-  } else {
-    old_gen_exhausted_ = true;
   }
 
   return allocation;
@@ -355,9 +450,17 @@
   }
 }
 
+Address Heap::NewSpaceTop() { return new_space_->top(); }
+
+bool Heap::DeoptMaybeTenuredAllocationSites() {
+  return new_space_->IsAtMaximumCapacity() && maximum_size_scavenges_ == 0;
+}
 
 bool Heap::InNewSpace(Object* object) {
-  bool result = new_space_.Contains(object);
+  // Inlined check from NewSpace::Contains.
+  bool result =
+      object->IsHeapObject() &&
+      Page::FromAddress(HeapObject::cast(object)->address())->InNewSpace();
   DCHECK(!result ||                 // Either not in new space
          gc_state_ != NOT_IN_GC ||  // ... or in the middle of GC
          InToSpace(object));        // ... or in to-space (where we allocate).
@@ -365,35 +468,32 @@
 }
 
 bool Heap::InFromSpace(Object* object) {
-  return new_space_.FromSpaceContains(object);
+  return object->IsHeapObject() &&
+         MemoryChunk::FromAddress(HeapObject::cast(object)->address())
+             ->IsFlagSet(Page::IN_FROM_SPACE);
 }
 
 
 bool Heap::InToSpace(Object* object) {
-  return new_space_.ToSpaceContains(object);
+  return object->IsHeapObject() &&
+         MemoryChunk::FromAddress(HeapObject::cast(object)->address())
+             ->IsFlagSet(Page::IN_TO_SPACE);
 }
 
 bool Heap::InOldSpace(Object* object) { return old_space_->Contains(object); }
 
 bool Heap::InNewSpaceSlow(Address address) {
-  return new_space_.ContainsSlow(address);
+  return new_space_->ContainsSlow(address);
 }
 
 bool Heap::InOldSpaceSlow(Address address) {
   return old_space_->ContainsSlow(address);
 }
 
-bool Heap::OldGenerationAllocationLimitReached() {
-  if (!incremental_marking()->IsStopped() && !ShouldOptimizeForMemoryUsage()) {
-    return false;
-  }
-  return OldGenerationSpaceAvailable() < 0;
-}
-
 template <PromotionMode promotion_mode>
 bool Heap::ShouldBePromoted(Address old_address, int object_size) {
   Page* page = Page::FromAddress(old_address);
-  Address age_mark = new_space_.age_mark();
+  Address age_mark = new_space_->age_mark();
 
   if (promotion_mode == PROMOTE_MARKED) {
     MarkBit mark_bit = ObjectMarking::MarkBitFrom(old_address);
@@ -587,8 +687,8 @@
       site, static_cast<uint32_t>(bit_cast<uintptr_t>(site)));
 }
 
-
-bool Heap::CollectGarbage(AllocationSpace space, const char* gc_reason,
+bool Heap::CollectGarbage(AllocationSpace space,
+                          GarbageCollectionReason gc_reason,
                           const v8::GCCallbackFlags callbackFlags) {
   const char* collector_reason = NULL;
   GarbageCollector collector = SelectGarbageCollector(space, &collector_reason);
@@ -659,35 +759,6 @@
 #endif
 }
 
-// static
-int DescriptorLookupCache::Hash(Object* source, Name* name) {
-  DCHECK(name->IsUniqueName());
-  // Uses only lower 32 bits if pointers are larger.
-  uint32_t source_hash =
-      static_cast<uint32_t>(reinterpret_cast<uintptr_t>(source)) >>
-      kPointerSizeLog2;
-  uint32_t name_hash = name->hash_field();
-  return (source_hash ^ name_hash) % kLength;
-}
-
-int DescriptorLookupCache::Lookup(Map* source, Name* name) {
-  int index = Hash(source, name);
-  Key& key = keys_[index];
-  if ((key.source == source) && (key.name == name)) return results_[index];
-  return kAbsent;
-}
-
-
-void DescriptorLookupCache::Update(Map* source, Name* name, int result) {
-  DCHECK(result != kAbsent);
-  int index = Hash(source, name);
-  Key& key = keys_[index];
-  key.source = source;
-  key.name = name;
-  results_[index] = result;
-}
-
-
 void Heap::ClearInstanceofCache() {
   set_instanceof_cache_function(Smi::FromInt(0));
 }
diff --git a/src/heap/heap.cc b/src/heap/heap.cc
index 7eb5af3..54b8589 100644
--- a/src/heap/heap.cc
+++ b/src/heap/heap.cc
@@ -71,14 +71,14 @@
 
 Heap::Heap()
     : external_memory_(0),
-      external_memory_limit_(kExternalAllocationLimit),
+      external_memory_limit_(kExternalAllocationSoftLimit),
       external_memory_at_last_mark_compact_(0),
       isolate_(nullptr),
       code_range_size_(0),
       // semispace_size_ should be a power of 2 and old_generation_size_ should
       // be a multiple of Page::kPageSize.
       max_semi_space_size_(8 * (kPointerSize / 4) * MB),
-      initial_semispace_size_(Page::kPageSize),
+      initial_semispace_size_(MB),
       max_old_generation_size_(700ul * (kPointerSize / 4) * MB),
       initial_old_generation_size_(max_old_generation_size_ /
                                    kInitalOldGenerationLimitFactor),
@@ -96,7 +96,7 @@
       contexts_disposed_(0),
       number_of_disposed_maps_(0),
       global_ic_age_(0),
-      new_space_(this),
+      new_space_(nullptr),
       old_space_(NULL),
       code_space_(NULL),
       map_space_(NULL),
@@ -112,11 +112,9 @@
       allocation_timeout_(0),
 #endif  // DEBUG
       old_generation_allocation_limit_(initial_old_generation_size_),
-      old_gen_exhausted_(false),
       inline_allocation_disabled_(false),
       total_regexp_code_generated_(0),
       tracer_(nullptr),
-      high_survival_rate_period_length_(0),
       promoted_objects_size_(0),
       promotion_ratio_(0),
       semi_space_copied_object_size_(0),
@@ -126,12 +124,6 @@
       nodes_copied_in_new_space_(0),
       nodes_promoted_(0),
       maximum_size_scavenges_(0),
-      max_gc_pause_(0.0),
-      total_gc_time_ms_(0.0),
-      max_alive_after_gc_(0),
-      min_in_mutator_(kMaxInt),
-      marking_time_(0.0),
-      sweeping_time_(0.0),
       last_idle_notification_time_(0.0),
       last_gc_time_(0.0),
       scavenge_collector_(nullptr),
@@ -148,7 +140,7 @@
       full_codegen_bytes_generated_(0),
       crankshaft_codegen_bytes_generated_(0),
       new_space_allocation_counter_(0),
-      old_generation_allocation_counter_(0),
+      old_generation_allocation_counter_at_last_gc_(0),
       old_generation_size_at_last_gc_(0),
       gcs_since_last_deopt_(0),
       global_pretenuring_feedback_(nullptr),
@@ -163,6 +155,8 @@
       deserialization_complete_(false),
       strong_roots_list_(NULL),
       heap_iterator_depth_(0),
+      embedder_heap_tracer_(nullptr),
+      embedder_reference_reporter_(new TracePossibleWrapperReporter(this)),
       force_oom_(false) {
 // Allow build-time customization of the max semispace size. Building
 // V8 with snapshots and a non-default max semispace size is much
@@ -189,7 +183,7 @@
 intptr_t Heap::Capacity() {
   if (!HasBeenSetUp()) return 0;
 
-  return new_space_.Capacity() + OldGenerationCapacity();
+  return new_space_->Capacity() + OldGenerationCapacity();
 }
 
 intptr_t Heap::OldGenerationCapacity() {
@@ -199,44 +193,41 @@
          map_space_->Capacity() + lo_space_->SizeOfObjects();
 }
 
-
-intptr_t Heap::CommittedOldGenerationMemory() {
+size_t Heap::CommittedOldGenerationMemory() {
   if (!HasBeenSetUp()) return 0;
 
   return old_space_->CommittedMemory() + code_space_->CommittedMemory() +
          map_space_->CommittedMemory() + lo_space_->Size();
 }
 
-
-intptr_t Heap::CommittedMemory() {
+size_t Heap::CommittedMemory() {
   if (!HasBeenSetUp()) return 0;
 
-  return new_space_.CommittedMemory() + CommittedOldGenerationMemory();
+  return new_space_->CommittedMemory() + CommittedOldGenerationMemory();
 }
 
 
 size_t Heap::CommittedPhysicalMemory() {
   if (!HasBeenSetUp()) return 0;
 
-  return new_space_.CommittedPhysicalMemory() +
+  return new_space_->CommittedPhysicalMemory() +
          old_space_->CommittedPhysicalMemory() +
          code_space_->CommittedPhysicalMemory() +
          map_space_->CommittedPhysicalMemory() +
          lo_space_->CommittedPhysicalMemory();
 }
 
-
-intptr_t Heap::CommittedMemoryExecutable() {
+size_t Heap::CommittedMemoryExecutable() {
   if (!HasBeenSetUp()) return 0;
 
-  return memory_allocator()->SizeExecutable();
+  return static_cast<size_t>(memory_allocator()->SizeExecutable());
 }
 
 
 void Heap::UpdateMaximumCommitted() {
   if (!HasBeenSetUp()) return;
 
-  intptr_t current_committed_memory = CommittedMemory();
+  const size_t current_committed_memory = CommittedMemory();
   if (current_committed_memory > maximum_committed_) {
     maximum_committed_ = current_committed_memory;
   }
@@ -275,22 +266,6 @@
     return MARK_COMPACTOR;
   }
 
-  // Is enough data promoted to justify a global GC?
-  if (OldGenerationAllocationLimitReached()) {
-    isolate_->counters()->gc_compactor_caused_by_promoted_data()->Increment();
-    *reason = "promotion limit reached";
-    return MARK_COMPACTOR;
-  }
-
-  // Have allocation in OLD and LO failed?
-  if (old_gen_exhausted_) {
-    isolate_->counters()
-        ->gc_compactor_caused_by_oldspace_exhaustion()
-        ->Increment();
-    *reason = "old generations exhausted";
-    return MARK_COMPACTOR;
-  }
-
   // Is there enough space left in OLD to guarantee that a scavenge can
   // succeed?
   //
@@ -300,7 +275,8 @@
   // and does not count available bytes already in the old space or code
   // space.  Undercounting is safe---we may get an unrequested full GC when
   // a scavenge would have succeeded.
-  if (memory_allocator()->MaxAvailable() <= new_space_.Size()) {
+  if (static_cast<intptr_t>(memory_allocator()->MaxAvailable()) <=
+      new_space_->Size()) {
     isolate_->counters()
         ->gc_compactor_caused_by_oldspace_exhaustion()
         ->Increment();
@@ -321,18 +297,18 @@
 // compiled --log-gc is set.  The following logic is used to avoid
 // double logging.
 #ifdef DEBUG
-  if (FLAG_heap_stats || FLAG_log_gc) new_space_.CollectStatistics();
+  if (FLAG_heap_stats || FLAG_log_gc) new_space_->CollectStatistics();
   if (FLAG_heap_stats) {
     ReportHeapStatistics("Before GC");
   } else if (FLAG_log_gc) {
-    new_space_.ReportStatistics();
+    new_space_->ReportStatistics();
   }
-  if (FLAG_heap_stats || FLAG_log_gc) new_space_.ClearHistograms();
+  if (FLAG_heap_stats || FLAG_log_gc) new_space_->ClearHistograms();
 #else
   if (FLAG_log_gc) {
-    new_space_.CollectStatistics();
-    new_space_.ReportStatistics();
-    new_space_.ClearHistograms();
+    new_space_->CollectStatistics();
+    new_space_->ReportStatistics();
+    new_space_->ClearHistograms();
   }
 #endif  // DEBUG
 }
@@ -340,50 +316,51 @@
 
 void Heap::PrintShortHeapStatistics() {
   if (!FLAG_trace_gc_verbose) return;
-  PrintIsolate(isolate_, "Memory allocator,   used: %6" V8PRIdPTR
-                         " KB, available: %6" V8PRIdPTR " KB\n",
+  PrintIsolate(isolate_,
+               "Memory allocator,   used: %6zu KB,"
+               " available: %6zu KB\n",
                memory_allocator()->Size() / KB,
                memory_allocator()->Available() / KB);
   PrintIsolate(isolate_, "New space,          used: %6" V8PRIdPTR
                          " KB"
                          ", available: %6" V8PRIdPTR
                          " KB"
-                         ", committed: %6" V8PRIdPTR " KB\n",
-               new_space_.Size() / KB, new_space_.Available() / KB,
-               new_space_.CommittedMemory() / KB);
+                         ", committed: %6zu KB\n",
+               new_space_->Size() / KB, new_space_->Available() / KB,
+               new_space_->CommittedMemory() / KB);
   PrintIsolate(isolate_, "Old space,          used: %6" V8PRIdPTR
                          " KB"
                          ", available: %6" V8PRIdPTR
                          " KB"
-                         ", committed: %6" V8PRIdPTR " KB\n",
+                         ", committed: %6zu KB\n",
                old_space_->SizeOfObjects() / KB, old_space_->Available() / KB,
                old_space_->CommittedMemory() / KB);
   PrintIsolate(isolate_, "Code space,         used: %6" V8PRIdPTR
                          " KB"
                          ", available: %6" V8PRIdPTR
                          " KB"
-                         ", committed: %6" V8PRIdPTR " KB\n",
+                         ", committed: %6zu KB\n",
                code_space_->SizeOfObjects() / KB, code_space_->Available() / KB,
                code_space_->CommittedMemory() / KB);
   PrintIsolate(isolate_, "Map space,          used: %6" V8PRIdPTR
                          " KB"
                          ", available: %6" V8PRIdPTR
                          " KB"
-                         ", committed: %6" V8PRIdPTR " KB\n",
+                         ", committed: %6zu KB\n",
                map_space_->SizeOfObjects() / KB, map_space_->Available() / KB,
                map_space_->CommittedMemory() / KB);
   PrintIsolate(isolate_, "Large object space, used: %6" V8PRIdPTR
                          " KB"
                          ", available: %6" V8PRIdPTR
                          " KB"
-                         ", committed: %6" V8PRIdPTR " KB\n",
+                         ", committed: %6zu KB\n",
                lo_space_->SizeOfObjects() / KB, lo_space_->Available() / KB,
                lo_space_->CommittedMemory() / KB);
   PrintIsolate(isolate_, "All spaces,         used: %6" V8PRIdPTR
                          " KB"
                          ", available: %6" V8PRIdPTR
                          " KB"
-                         ", committed: %6" V8PRIdPTR " KB\n",
+                         ", committed: %6zu KB\n",
                this->SizeOfObjects() / KB, this->Available() / KB,
                this->CommittedMemory() / KB);
   PrintIsolate(isolate_, "External memory reported: %6" V8PRIdPTR " KB\n",
@@ -399,13 +376,13 @@
 // NewSpace statistics are logged exactly once when --log-gc is turned on.
 #if defined(DEBUG)
   if (FLAG_heap_stats) {
-    new_space_.CollectStatistics();
+    new_space_->CollectStatistics();
     ReportHeapStatistics("After GC");
   } else if (FLAG_log_gc) {
-    new_space_.ReportStatistics();
+    new_space_->ReportStatistics();
   }
 #else
-  if (FLAG_log_gc) new_space_.ReportStatistics();
+  if (FLAG_log_gc) new_space_->ReportStatistics();
 #endif  // DEBUG
   for (int i = 0; i < static_cast<int>(v8::Isolate::kUseCounterFeatureCount);
        ++i) {
@@ -423,6 +400,7 @@
   deferred_counters_[feature]++;
 }
 
+bool Heap::UncommitFromSpace() { return new_space_->UncommitFromSpace(); }
 
 void Heap::GarbageCollectionPrologue() {
   {
@@ -454,7 +432,7 @@
   ReportStatisticsBeforeGC();
 #endif  // DEBUG
 
-  if (new_space_.IsAtMaximumCapacity()) {
+  if (new_space_->IsAtMaximumCapacity()) {
     maximum_size_scavenges_++;
   } else {
     maximum_size_scavenges_ = 0;
@@ -534,8 +512,8 @@
 class Heap::PretenuringScope {
  public:
   explicit PretenuringScope(Heap* heap) : heap_(heap) {
-    heap_->global_pretenuring_feedback_ = new base::HashMap(
-        base::HashMap::PointersMatch, kInitialFeedbackCapacity);
+    heap_->global_pretenuring_feedback_ =
+        new base::HashMap(kInitialFeedbackCapacity);
   }
 
   ~PretenuringScope() {
@@ -789,14 +767,16 @@
   } else if (incremental_marking()->request_type() ==
              IncrementalMarking::COMPLETE_MARKING) {
     incremental_marking()->reset_request_type();
-    CollectAllGarbage(current_gc_flags_, "GC interrupt",
+    CollectAllGarbage(current_gc_flags_,
+                      GarbageCollectionReason::kFinalizeMarkingViaStackGuard,
                       current_gc_callback_flags_);
   } else if (incremental_marking()->request_type() ==
                  IncrementalMarking::FINALIZATION &&
              incremental_marking()->IsMarking() &&
              !incremental_marking()->finalize_marking_completed()) {
     incremental_marking()->reset_request_type();
-    FinalizeIncrementalMarking("GC interrupt: finalize incremental marking");
+    FinalizeIncrementalMarking(
+        GarbageCollectionReason::kFinalizeMarkingViaStackGuard);
   }
 }
 
@@ -805,10 +785,11 @@
   scavenge_job_->ScheduleIdleTaskIfNeeded(this, bytes_allocated);
 }
 
-
-void Heap::FinalizeIncrementalMarking(const char* gc_reason) {
+void Heap::FinalizeIncrementalMarking(GarbageCollectionReason gc_reason) {
   if (FLAG_trace_incremental_marking) {
-    PrintF("[IncrementalMarking] (%s).\n", gc_reason);
+    isolate()->PrintWithTimestamp(
+        "[IncrementalMarking] (%s).\n",
+        Heap::GarbageCollectionReasonToString(gc_reason));
   }
 
   HistogramTimerScope incremental_marking_scope(
@@ -856,7 +837,7 @@
   }
 }
 
-void Heap::CollectAllGarbage(int flags, const char* gc_reason,
+void Heap::CollectAllGarbage(int flags, GarbageCollectionReason gc_reason,
                              const v8::GCCallbackFlags gc_callback_flags) {
   // Since we are ignoring the return value, the exact choice of space does
   // not matter, so long as we do not specify NEW_SPACE, which would not
@@ -866,8 +847,7 @@
   set_current_gc_flags(kNoGCFlags);
 }
 
-
-void Heap::CollectAllAvailableGarbage(const char* gc_reason) {
+void Heap::CollectAllAvailableGarbage(GarbageCollectionReason gc_reason) {
   // Since we are ignoring the return value, the exact choice of space does
   // not matter, so long as we do not specify NEW_SPACE, which would not
   // cause a full GC.
@@ -897,36 +877,46 @@
     }
   }
   set_current_gc_flags(kNoGCFlags);
-  new_space_.Shrink();
+  new_space_->Shrink();
   UncommitFromSpace();
 }
 
-
-void Heap::ReportExternalMemoryPressure(const char* gc_reason) {
+void Heap::ReportExternalMemoryPressure() {
+  if (external_memory_ >
+      (external_memory_at_last_mark_compact_ + external_memory_hard_limit())) {
+    CollectAllGarbage(
+        kReduceMemoryFootprintMask | kFinalizeIncrementalMarkingMask,
+        GarbageCollectionReason::kExternalMemoryPressure,
+        static_cast<GCCallbackFlags>(kGCCallbackFlagCollectAllAvailableGarbage |
+                                     kGCCallbackFlagCollectAllExternalMemory));
+    return;
+  }
   if (incremental_marking()->IsStopped()) {
     if (incremental_marking()->CanBeActivated()) {
       StartIncrementalMarking(
-          i::Heap::kNoGCFlags,
+          i::Heap::kNoGCFlags, GarbageCollectionReason::kExternalMemoryPressure,
           static_cast<GCCallbackFlags>(
               kGCCallbackFlagSynchronousPhantomCallbackProcessing |
-              kGCCallbackFlagCollectAllExternalMemory),
-          gc_reason);
+              kGCCallbackFlagCollectAllExternalMemory));
     } else {
-      CollectAllGarbage(i::Heap::kNoGCFlags, gc_reason,
+      CollectAllGarbage(i::Heap::kNoGCFlags,
+                        GarbageCollectionReason::kExternalMemoryPressure,
                         kGCCallbackFlagSynchronousPhantomCallbackProcessing);
     }
   } else {
     // Incremental marking is turned on an has already been started.
-
-    // TODO(mlippautz): Compute the time slice for incremental marking based on
-    // memory pressure.
-    double deadline = MonotonicallyIncreasingTimeInMs() +
-                      FLAG_external_allocation_limit_incremental_time;
+    const double pressure =
+        static_cast<double>(external_memory_ -
+                            external_memory_at_last_mark_compact_ -
+                            kExternalAllocationSoftLimit) /
+        external_memory_hard_limit();
+    DCHECK_GE(1, pressure);
+    const double kMaxStepSizeOnExternalLimit = 25;
+    const double deadline = MonotonicallyIncreasingTimeInMs() +
+                            pressure * kMaxStepSizeOnExternalLimit;
     incremental_marking()->AdvanceIncrementalMarking(
-        deadline,
-        IncrementalMarking::StepActions(IncrementalMarking::GC_VIA_STACK_GUARD,
-                                        IncrementalMarking::FORCE_MARKING,
-                                        IncrementalMarking::FORCE_COMPLETION));
+        deadline, IncrementalMarking::GC_VIA_STACK_GUARD,
+        IncrementalMarking::FORCE_COMPLETION, StepOrigin::kV8);
   }
 }
 
@@ -936,7 +926,7 @@
   // evacuation of a non-full new space (or if we are on the last page) there
   // may be uninitialized memory behind top. We fill the remainder of the page
   // with a filler.
-  Address to_top = new_space_.top();
+  Address to_top = new_space_->top();
   Page* page = Page::FromAddress(to_top - kPointerSize);
   if (page->Contains(to_top)) {
     int remaining_in_page = static_cast<int>(page->area_end() - to_top);
@@ -944,8 +934,8 @@
   }
 }
 
-
-bool Heap::CollectGarbage(GarbageCollector collector, const char* gc_reason,
+bool Heap::CollectGarbage(GarbageCollector collector,
+                          GarbageCollectionReason gc_reason,
                           const char* collector_reason,
                           const v8::GCCallbackFlags gc_callback_flags) {
   // The VM is in the GC state until exiting this function.
@@ -964,19 +954,21 @@
 
   if (collector == SCAVENGER && !incremental_marking()->IsStopped()) {
     if (FLAG_trace_incremental_marking) {
-      PrintF("[IncrementalMarking] Scavenge during marking.\n");
+      isolate()->PrintWithTimestamp(
+          "[IncrementalMarking] Scavenge during marking.\n");
     }
   }
 
   if (collector == MARK_COMPACTOR && !ShouldFinalizeIncrementalMarking() &&
       !ShouldAbortIncrementalMarking() && !incremental_marking()->IsStopped() &&
       !incremental_marking()->should_hurry() && FLAG_incremental_marking &&
-      OldGenerationAllocationLimitReached()) {
+      OldGenerationSpaceAvailable() <= 0) {
     if (!incremental_marking()->IsComplete() &&
-        !mark_compact_collector()->marking_deque_.IsEmpty() &&
+        !mark_compact_collector()->marking_deque()->IsEmpty() &&
         !FLAG_gc_global) {
       if (FLAG_trace_incremental_marking) {
-        PrintF("[IncrementalMarking] Delaying MarkSweep.\n");
+        isolate()->PrintWithTimestamp(
+            "[IncrementalMarking] Delaying MarkSweep.\n");
       }
       collector = SCAVENGER;
       collector_reason = "incremental marking delaying mark-sweep";
@@ -1041,9 +1033,11 @@
 
   // Start incremental marking for the next cycle. The heap snapshot
   // generator needs incremental marking to stay off after it aborted.
-  if (!ShouldAbortIncrementalMarking() && incremental_marking()->IsStopped() &&
-      incremental_marking()->ShouldActivateEvenWithoutIdleNotification()) {
-    StartIncrementalMarking(kNoGCFlags, kNoGCCallbackFlags, "GC epilogue");
+  // We do this only for scavenger to avoid a loop where mark-compact
+  // causes another mark-compact.
+  if (collector == SCAVENGER && !ShouldAbortIncrementalMarking()) {
+    StartIncrementalMarkingIfAllocationLimitIsReached(kNoGCFlags,
+                                                      kNoGCCallbackFlags);
   }
 
   return next_gc_likely_to_collect_more;
@@ -1069,21 +1063,33 @@
   return ++contexts_disposed_;
 }
 
-
 void Heap::StartIncrementalMarking(int gc_flags,
-                                   const GCCallbackFlags gc_callback_flags,
-                                   const char* reason) {
+                                   GarbageCollectionReason gc_reason,
+                                   GCCallbackFlags gc_callback_flags) {
   DCHECK(incremental_marking()->IsStopped());
   set_current_gc_flags(gc_flags);
   current_gc_callback_flags_ = gc_callback_flags;
-  incremental_marking()->Start(reason);
+  incremental_marking()->Start(gc_reason);
 }
 
+void Heap::StartIncrementalMarkingIfAllocationLimitIsReached(
+    int gc_flags, const GCCallbackFlags gc_callback_flags) {
+  if (incremental_marking()->IsStopped()) {
+    IncrementalMarkingLimit reached_limit = IncrementalMarkingLimitReached();
+    if (reached_limit == IncrementalMarkingLimit::kSoftLimit) {
+      incremental_marking()->incremental_marking_job()->ScheduleTask(this);
+    } else if (reached_limit == IncrementalMarkingLimit::kHardLimit) {
+      StartIncrementalMarking(gc_flags,
+                              GarbageCollectionReason::kAllocationLimit,
+                              gc_callback_flags);
+    }
+  }
+}
 
-void Heap::StartIdleIncrementalMarking() {
+void Heap::StartIdleIncrementalMarking(GarbageCollectionReason gc_reason) {
   gc_idle_time_handler_->ResetNoProgressCounter();
-  StartIncrementalMarking(kReduceMemoryFootprintMask, kNoGCCallbackFlags,
-                          "idle");
+  StartIncrementalMarking(kReduceMemoryFootprintMask, gc_reason,
+                          kNoGCCallbackFlags);
 }
 
 
@@ -1192,17 +1198,15 @@
       }
       if (perform_gc) {
         if (space == NEW_SPACE) {
-          CollectGarbage(NEW_SPACE, "failed to reserve space in the new space");
+          CollectGarbage(NEW_SPACE, GarbageCollectionReason::kDeserializer);
         } else {
           if (counter > 1) {
             CollectAllGarbage(
                 kReduceMemoryFootprintMask | kAbortIncrementalMarkingMask,
-                "failed to reserve space in paged or large "
-                "object space, trying to reduce memory footprint");
+                GarbageCollectionReason::kDeserializer);
           } else {
-            CollectAllGarbage(
-                kAbortIncrementalMarkingMask,
-                "failed to reserve space in paged or large object space");
+            CollectAllGarbage(kAbortIncrementalMarkingMask,
+                              GarbageCollectionReason::kDeserializer);
           }
         }
         gc_performed = true;
@@ -1216,7 +1220,7 @@
 
 
 void Heap::EnsureFromSpaceIsCommitted() {
-  if (new_space_.CommitFromSpaceIfNeeded()) return;
+  if (new_space_->CommitFromSpaceIfNeeded()) return;
 
   // Committing memory to from space failed.
   // Memory is exhausted and we will die.
@@ -1264,11 +1268,6 @@
 
   double survival_rate = promotion_ratio_ + semi_space_copied_rate_;
   tracer()->AddSurvivalRatio(survival_rate);
-  if (survival_rate > kYoungSurvivalRateHighThreshold) {
-    high_survival_rate_period_length_++;
-  } else {
-    high_survival_rate_period_length_ = 0;
-  }
 }
 
 bool Heap::PerformGarbageCollection(
@@ -1303,14 +1302,7 @@
 
   EnsureFromSpaceIsCommitted();
 
-  int start_new_space_size = Heap::new_space()->SizeAsInt();
-
-  if (IsHighSurvivalRate()) {
-    // We speed up the incremental marker if it is running so that it
-    // does not fall behind the rate of promotion, which would cause a
-    // constantly growing old space.
-    incremental_marking()->NotifyOfHighPromotionRate();
-  }
+  int start_new_space_size = static_cast<int>(Heap::new_space()->Size());
 
   {
     Heap::PretenuringScope pretenuring_scope(this);
@@ -1319,11 +1311,10 @@
       UpdateOldGenerationAllocationCounter();
       // Perform mark-sweep with optional compaction.
       MarkCompact();
-      old_gen_exhausted_ = false;
       old_generation_size_configured_ = true;
       // This should be updated before PostGarbageCollectionProcessing, which
       // can cause another GC. Take into account the objects promoted during GC.
-      old_generation_allocation_counter_ +=
+      old_generation_allocation_counter_at_last_gc_ +=
           static_cast<size_t>(promoted_objects_size_);
       old_generation_size_at_last_gc_ = PromotedSpaceSizeOfObjects();
     } else {
@@ -1360,7 +1351,7 @@
   if (collector == MARK_COMPACTOR) {
     // Register the amount of external allocated memory.
     external_memory_at_last_mark_compact_ = external_memory_;
-    external_memory_limit_ = external_memory_ + kExternalAllocationLimit;
+    external_memory_limit_ = external_memory_ + kExternalAllocationSoftLimit;
     SetOldGenerationAllocationLimit(old_gen_size, gc_speed, mutator_speed);
   } else if (HasLowYoungGenerationAllocationRate() &&
              old_generation_size_configured_) {
@@ -1491,18 +1482,18 @@
 
 void Heap::CheckNewSpaceExpansionCriteria() {
   if (FLAG_experimental_new_space_growth_heuristic) {
-    if (new_space_.TotalCapacity() < new_space_.MaximumCapacity() &&
-        survived_last_scavenge_ * 100 / new_space_.TotalCapacity() >= 10) {
+    if (new_space_->TotalCapacity() < new_space_->MaximumCapacity() &&
+        survived_last_scavenge_ * 100 / new_space_->TotalCapacity() >= 10) {
       // Grow the size of new space if there is room to grow, and more than 10%
       // have survived the last scavenge.
-      new_space_.Grow();
+      new_space_->Grow();
       survived_since_last_expansion_ = 0;
     }
-  } else if (new_space_.TotalCapacity() < new_space_.MaximumCapacity() &&
-             survived_since_last_expansion_ > new_space_.TotalCapacity()) {
+  } else if (new_space_->TotalCapacity() < new_space_->MaximumCapacity() &&
+             survived_since_last_expansion_ > new_space_->TotalCapacity()) {
     // Grow the size of new space if there is room to grow, and enough data
     // has survived scavenge since the last expansion.
-    new_space_.Grow();
+    new_space_->Grow();
     survived_since_last_expansion_ = 0;
   }
 }
@@ -1541,6 +1532,11 @@
   emergency_stack_ = NULL;
 }
 
+void PromotionQueue::Destroy() {
+  DCHECK(is_empty());
+  delete emergency_stack_;
+  emergency_stack_ = NULL;
+}
 
 void PromotionQueue::RelocateQueueHead() {
   DCHECK(emergency_stack_ == NULL);
@@ -1615,13 +1611,13 @@
     // Register found wrappers with embedder so it can add them to its marking
     // deque and correctly manage the case when v8 scavenger collects the
     // wrappers by either keeping wrappables alive, or cleaning marking deque.
-    mark_compact_collector()->RegisterWrappersWithEmbedderHeapTracer();
+    RegisterWrappersWithEmbedderHeapTracer();
   }
 
   // Flip the semispaces.  After flipping, to space is empty, from space has
   // live objects.
-  new_space_.Flip();
-  new_space_.ResetAllocationInfo();
+  new_space_->Flip();
+  new_space_->ResetAllocationInfo();
 
   // We need to sweep newly copied objects which can be either in the
   // to space or promoted to the old generation.  For to-space
@@ -1640,7 +1636,7 @@
   // for the addresses of promoted objects: every object promoted
   // frees up its size in bytes from the top of the new space, and
   // objects are at least one pointer in size.
-  Address new_space_front = new_space_.ToSpaceStart();
+  Address new_space_front = new_space_->ToSpaceStart();
   promotion_queue_.Initialize();
 
   PromotionMode promotion_mode = CurrentPromotionMode();
@@ -1737,16 +1733,17 @@
   ScavengeWeakObjectRetainer weak_object_retainer(this);
   ProcessYoungWeakReferences(&weak_object_retainer);
 
-  DCHECK(new_space_front == new_space_.top());
+  DCHECK(new_space_front == new_space_->top());
 
   // Set age mark.
-  new_space_.set_age_mark(new_space_.top());
+  new_space_->set_age_mark(new_space_->top());
 
   ArrayBufferTracker::FreeDeadInNewSpace(this);
 
   // Update how much has survived scavenge.
-  IncrementYoungSurvivorsCounter(static_cast<int>(
-      (PromotedSpaceSizeOfObjects() - survived_watermark) + new_space_.Size()));
+  IncrementYoungSurvivorsCounter(
+      static_cast<int>((PromotedSpaceSizeOfObjects() - survived_watermark) +
+                       new_space_->Size()));
 
   LOG(isolate_, ResourceEvent("scavenge", "end"));
 
@@ -1910,11 +1907,11 @@
                          Address new_space_front,
                          PromotionMode promotion_mode) {
   do {
-    SemiSpace::AssertValidRange(new_space_front, new_space_.top());
+    SemiSpace::AssertValidRange(new_space_front, new_space_->top());
     // The addresses new_space_front and new_space_.top() define a
     // queue of unprocessed copied objects.  Process them until the
     // queue is empty.
-    while (new_space_front != new_space_.top()) {
+    while (new_space_front != new_space_->top()) {
       if (!Page::IsAlignedToPageSize(new_space_front)) {
         HeapObject* object = HeapObject::FromAddress(new_space_front);
         if (promotion_mode == PROMOTE_MARKED) {
@@ -1953,7 +1950,7 @@
 
     // Take another spin if there are now unswept objects in new space
     // (there are currently no more unswept promoted objects).
-  } while (new_space_front != new_space_.top());
+  } while (new_space_front != new_space_->top());
 
   return new_space_front;
 }
@@ -2283,6 +2280,8 @@
     DCHECK_NE(fixed_array_map(), fixed_cow_array_map());
 
     ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, scope_info)
+    ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, module_info_entry)
+    ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, module_info)
     ALLOCATE_PRIMITIVE_MAP(HEAP_NUMBER_TYPE, HeapNumber::kSize, heap_number,
                            Context::NUMBER_FUNCTION_INDEX)
     ALLOCATE_MAP(MUTABLE_HEAP_NUMBER_TYPE, HeapNumber::kSize,
@@ -2391,6 +2390,12 @@
   }
 
   {
+    AllocationResult allocation = AllocateEmptyScopeInfo();
+    if (!allocation.To(&obj)) return false;
+  }
+
+  set_empty_scope_info(ScopeInfo::cast(obj));
+  {
     AllocationResult allocation = Allocate(boolean_map(), OLD_SPACE);
     if (!allocation.To(&obj)) return false;
   }
@@ -2432,7 +2437,7 @@
   // Statically ensure that it is safe to allocate heap numbers in paged
   // spaces.
   int size = HeapNumber::kSize;
-  STATIC_ASSERT(HeapNumber::kSize <= Page::kMaxRegularHeapObjectSize);
+  STATIC_ASSERT(HeapNumber::kSize <= kMaxRegularHeapObjectSize);
 
   AllocationSpace space = SelectSpace(pretenure);
 
@@ -2452,7 +2457,7 @@
   AllocationResult Heap::Allocate##Type(lane_type lanes[lane_count],      \
                                         PretenureFlag pretenure) {        \
     int size = Type::kSize;                                               \
-    STATIC_ASSERT(Type::kSize <= Page::kMaxRegularHeapObjectSize);        \
+    STATIC_ASSERT(Type::kSize <= kMaxRegularHeapObjectSize);              \
                                                                           \
     AllocationSpace space = SelectSpace(pretenure);                       \
                                                                           \
@@ -2476,7 +2481,7 @@
 
 AllocationResult Heap::AllocateCell(Object* value) {
   int size = Cell::kSize;
-  STATIC_ASSERT(Cell::kSize <= Page::kMaxRegularHeapObjectSize);
+  STATIC_ASSERT(Cell::kSize <= kMaxRegularHeapObjectSize);
 
   HeapObject* result = nullptr;
   {
@@ -2488,10 +2493,9 @@
   return result;
 }
 
-
 AllocationResult Heap::AllocatePropertyCell() {
   int size = PropertyCell::kSize;
-  STATIC_ASSERT(PropertyCell::kSize <= Page::kMaxRegularHeapObjectSize);
+  STATIC_ASSERT(PropertyCell::kSize <= kMaxRegularHeapObjectSize);
 
   HeapObject* result = nullptr;
   AllocationResult allocation = AllocateRaw(size, OLD_SPACE);
@@ -2509,7 +2513,7 @@
 
 AllocationResult Heap::AllocateWeakCell(HeapObject* value) {
   int size = WeakCell::kSize;
-  STATIC_ASSERT(WeakCell::kSize <= Page::kMaxRegularHeapObjectSize);
+  STATIC_ASSERT(WeakCell::kSize <= kMaxRegularHeapObjectSize);
   HeapObject* result = nullptr;
   {
     AllocationResult allocation = AllocateRaw(size, OLD_SPACE);
@@ -2729,12 +2733,6 @@
 #undef SYMBOL_INIT
   }
 
-  // Allocate the dictionary of intrinsic function names.
-  Handle<NameDictionary> intrinsic_names =
-      NameDictionary::New(isolate(), Runtime::kNumFunctions, TENURED);
-  Runtime::InitializeIntrinsicFunctionNames(isolate(), intrinsic_names);
-  set_intrinsic_function_names(*intrinsic_names);
-
   Handle<NameDictionary> empty_properties_dictionary =
       NameDictionary::New(isolate(), 0, TENURED);
   empty_properties_dictionary->SetRequiresCopyOnCapacityChange();
@@ -2777,18 +2775,18 @@
 
   {
     StaticFeedbackVectorSpec spec;
-    FeedbackVectorSlot load_ic_slot = spec.AddLoadICSlot();
-    FeedbackVectorSlot keyed_load_ic_slot = spec.AddKeyedLoadICSlot();
-    FeedbackVectorSlot store_ic_slot = spec.AddStoreICSlot();
-    FeedbackVectorSlot keyed_store_ic_slot = spec.AddKeyedStoreICSlot();
+    FeedbackVectorSlot slot = spec.AddLoadICSlot();
+    DCHECK_EQ(slot, FeedbackVectorSlot(TypeFeedbackVector::kDummyLoadICSlot));
 
-    DCHECK_EQ(load_ic_slot,
-              FeedbackVectorSlot(TypeFeedbackVector::kDummyLoadICSlot));
-    DCHECK_EQ(keyed_load_ic_slot,
+    slot = spec.AddKeyedLoadICSlot();
+    DCHECK_EQ(slot,
               FeedbackVectorSlot(TypeFeedbackVector::kDummyKeyedLoadICSlot));
-    DCHECK_EQ(store_ic_slot,
-              FeedbackVectorSlot(TypeFeedbackVector::kDummyStoreICSlot));
-    DCHECK_EQ(keyed_store_ic_slot,
+
+    slot = spec.AddStoreICSlot();
+    DCHECK_EQ(slot, FeedbackVectorSlot(TypeFeedbackVector::kDummyStoreICSlot));
+
+    slot = spec.AddKeyedStoreICSlot();
+    DCHECK_EQ(slot,
               FeedbackVectorSlot(TypeFeedbackVector::kDummyKeyedStoreICSlot));
 
     Handle<TypeFeedbackMetadata> dummy_metadata =
@@ -2796,19 +2794,36 @@
     Handle<TypeFeedbackVector> dummy_vector =
         TypeFeedbackVector::New(isolate(), dummy_metadata);
 
-    Object* megamorphic = *TypeFeedbackVector::MegamorphicSentinel(isolate());
-    dummy_vector->Set(load_ic_slot, megamorphic, SKIP_WRITE_BARRIER);
-    dummy_vector->Set(keyed_load_ic_slot, megamorphic, SKIP_WRITE_BARRIER);
-    dummy_vector->Set(store_ic_slot, megamorphic, SKIP_WRITE_BARRIER);
-    dummy_vector->Set(keyed_store_ic_slot, megamorphic, SKIP_WRITE_BARRIER);
-
     set_dummy_vector(*dummy_vector);
+
+    // Now initialize dummy vector's entries.
+    LoadICNexus(isolate()).ConfigureMegamorphic();
+    StoreICNexus(isolate()).ConfigureMegamorphic();
+    KeyedLoadICNexus(isolate()).ConfigureMegamorphicKeyed(PROPERTY);
+    KeyedStoreICNexus(isolate()).ConfigureMegamorphicKeyed(PROPERTY);
   }
 
   {
+    // Create a canonical empty TypeFeedbackVector, which is shared by all
+    // functions that don't need actual type feedback slots. Note however
+    // that all these functions will share the same invocation count, but
+    // that shouldn't matter since we only use the invocation count to
+    // relativize the absolute call counts, but we can only have call counts
+    // if we have actual feedback slots.
+    Handle<FixedArray> empty_type_feedback_vector = factory->NewFixedArray(
+        TypeFeedbackVector::kReservedIndexCount, TENURED);
+    empty_type_feedback_vector->set(TypeFeedbackVector::kMetadataIndex,
+                                    empty_fixed_array());
+    empty_type_feedback_vector->set(TypeFeedbackVector::kInvocationCountIndex,
+                                    Smi::FromInt(0));
+    set_empty_type_feedback_vector(*empty_type_feedback_vector);
+
+    // We use a canonical empty LiteralsArray for all functions that neither
+    // have literals nor need a TypeFeedbackVector (besides the invocation
+    // count special slot).
     Handle<FixedArray> empty_literals_array =
         factory->NewFixedArray(1, TENURED);
-    empty_literals_array->set(0, *factory->empty_fixed_array());
+    empty_literals_array->set(0, *empty_type_feedback_vector);
     set_empty_literals_array(*empty_literals_array);
   }
 
@@ -2882,6 +2897,10 @@
       handle(Smi::FromInt(Isolate::kArrayProtectorValid), isolate()));
   set_species_protector(*species_cell);
 
+  cell = factory->NewPropertyCell();
+  cell->set_value(Smi::FromInt(Isolate::kArrayProtectorValid));
+  set_string_length_protector(*cell);
+
   set_serialized_templates(empty_fixed_array());
 
   set_weak_stack_trace_list(Smi::FromInt(0));
@@ -3009,7 +3028,7 @@
 AllocationResult Heap::AllocateForeign(Address address,
                                        PretenureFlag pretenure) {
   // Statically ensure that it is safe to allocate foreigns in paged spaces.
-  STATIC_ASSERT(Foreign::kSize <= Page::kMaxRegularHeapObjectSize);
+  STATIC_ASSERT(Foreign::kSize <= kMaxRegularHeapObjectSize);
   AllocationSpace space = (pretenure == TENURED) ? OLD_SPACE : NEW_SPACE;
   Foreign* result = nullptr;
   AllocationResult allocation = Allocate(foreign_map(), space);
@@ -3776,6 +3795,18 @@
   return result;
 }
 
+AllocationResult Heap::AllocateEmptyScopeInfo() {
+  int size = FixedArray::SizeFor(0);
+  HeapObject* result = nullptr;
+  {
+    AllocationResult allocation = AllocateRaw(size, OLD_SPACE);
+    if (!allocation.To(&result)) return allocation;
+  }
+  // Initialize the object.
+  result->set_map_no_write_barrier(scope_info_map());
+  FixedArray::cast(result)->set_length(0);
+  return result;
+}
 
 AllocationResult Heap::CopyAndTenureFixedCOWArray(FixedArray* src) {
   if (!InNewSpace(src)) {
@@ -3908,7 +3939,14 @@
   int size = FixedArray::SizeFor(length);
   AllocationSpace space = SelectSpace(pretenure);
 
-  return AllocateRaw(size, space);
+  AllocationResult result = AllocateRaw(size, space);
+  if (!result.IsRetry() && size > kMaxRegularHeapObjectSize &&
+      FLAG_use_marking_progress_bar) {
+    MemoryChunk* chunk =
+        MemoryChunk::FromAddress(result.ToObjectChecked()->address());
+    chunk->SetFlag(MemoryChunk::HAS_PROGRESS_BAR);
+  }
+  return result;
 }
 
 
@@ -3988,7 +4026,7 @@
 
 AllocationResult Heap::AllocateSymbol() {
   // Statically ensure that it is safe to allocate symbols in paged spaces.
-  STATIC_ASSERT(Symbol::kSize <= Page::kMaxRegularHeapObjectSize);
+  STATIC_ASSERT(Symbol::kSize <= kMaxRegularHeapObjectSize);
 
   HeapObject* result = nullptr;
   AllocationResult allocation = AllocateRaw(Symbol::kSize, OLD_SPACE);
@@ -4049,7 +4087,8 @@
 void Heap::MakeHeapIterable() {
   DCHECK(AllowHeapAllocation::IsAllowed());
   if (!IsHeapIterable()) {
-    CollectAllGarbage(kMakeHeapIterableMask, "Heap::MakeHeapIterable");
+    CollectAllGarbage(kMakeHeapIterableMask,
+                      GarbageCollectionReason::kMakeHeapIterable);
   }
   if (mark_compact_collector()->sweeping_in_progress()) {
     mark_compact_collector()->EnsureSweepingCompleted();
@@ -4081,10 +4120,10 @@
       tracer()->ScavengeSpeedInBytesPerMillisecond(kForSurvivedObjects);
   double result = ComputeMutatorUtilization(mutator_speed, gc_speed);
   if (FLAG_trace_mutator_utilization) {
-    PrintIsolate(isolate(),
-                 "Young generation mutator utilization = %.3f ("
-                 "mutator_speed=%.f, gc_speed=%.f)\n",
-                 result, mutator_speed, gc_speed);
+    isolate()->PrintWithTimestamp(
+        "Young generation mutator utilization = %.3f ("
+        "mutator_speed=%.f, gc_speed=%.f)\n",
+        result, mutator_speed, gc_speed);
   }
   return result;
 }
@@ -4097,10 +4136,10 @@
       tracer()->CombinedMarkCompactSpeedInBytesPerMillisecond());
   double result = ComputeMutatorUtilization(mutator_speed, gc_speed);
   if (FLAG_trace_mutator_utilization) {
-    PrintIsolate(isolate(),
-                 "Old generation mutator utilization = %.3f ("
-                 "mutator_speed=%.f, gc_speed=%.f)\n",
-                 result, mutator_speed, gc_speed);
+    isolate()->PrintWithTimestamp(
+        "Old generation mutator utilization = %.3f ("
+        "mutator_speed=%.f, gc_speed=%.f)\n",
+        result, mutator_speed, gc_speed);
   }
   return result;
 }
@@ -4170,44 +4209,49 @@
   if (ShouldReduceMemory() ||
       ((allocation_throughput != 0) &&
        (allocation_throughput < kLowAllocationThroughput))) {
-    new_space_.Shrink();
+    new_space_->Shrink();
     UncommitFromSpace();
   }
 }
 
+bool Heap::MarkingDequesAreEmpty() {
+  return mark_compact_collector()->marking_deque()->IsEmpty() &&
+         (!UsingEmbedderHeapTracer() ||
+          (wrappers_to_trace() == 0 &&
+           embedder_heap_tracer()->NumberOfWrappersToTrace() == 0));
+}
 
-void Heap::FinalizeIncrementalMarkingIfComplete(const char* comment) {
+void Heap::FinalizeIncrementalMarkingIfComplete(
+    GarbageCollectionReason gc_reason) {
   if (incremental_marking()->IsMarking() &&
       (incremental_marking()->IsReadyToOverApproximateWeakClosure() ||
        (!incremental_marking()->finalize_marking_completed() &&
-        mark_compact_collector()->marking_deque()->IsEmpty()))) {
-    FinalizeIncrementalMarking(comment);
+        MarkingDequesAreEmpty()))) {
+    FinalizeIncrementalMarking(gc_reason);
   } else if (incremental_marking()->IsComplete() ||
              (mark_compact_collector()->marking_deque()->IsEmpty())) {
-    CollectAllGarbage(current_gc_flags_, comment);
+    CollectAllGarbage(current_gc_flags_, gc_reason);
   }
 }
 
-
-bool Heap::TryFinalizeIdleIncrementalMarking(double idle_time_in_ms) {
+bool Heap::TryFinalizeIdleIncrementalMarking(
+    double idle_time_in_ms, GarbageCollectionReason gc_reason) {
   size_t size_of_objects = static_cast<size_t>(SizeOfObjects());
   double final_incremental_mark_compact_speed_in_bytes_per_ms =
       tracer()->FinalIncrementalMarkCompactSpeedInBytesPerMillisecond();
   if (incremental_marking()->IsReadyToOverApproximateWeakClosure() ||
       (!incremental_marking()->finalize_marking_completed() &&
-       mark_compact_collector()->marking_deque()->IsEmpty() &&
+       MarkingDequesAreEmpty() &&
        gc_idle_time_handler_->ShouldDoOverApproximateWeakClosure(
            idle_time_in_ms))) {
-    FinalizeIncrementalMarking(
-        "Idle notification: finalize incremental marking");
+    FinalizeIncrementalMarking(gc_reason);
     return true;
   } else if (incremental_marking()->IsComplete() ||
-             (mark_compact_collector()->marking_deque()->IsEmpty() &&
+             (MarkingDequesAreEmpty() &&
               gc_idle_time_handler_->ShouldDoFinalIncrementalMarkCompact(
                   idle_time_in_ms, size_of_objects,
                   final_incremental_mark_compact_speed_in_bytes_per_ms))) {
-    CollectAllGarbage(current_gc_flags_,
-                      "idle notification: finalize incremental marking");
+    CollectAllGarbage(current_gc_flags_, gc_reason);
     return true;
   }
   return false;
@@ -4267,22 +4311,23 @@
       result = true;
       break;
     case DO_INCREMENTAL_STEP: {
-      if (incremental_marking()->incremental_marking_job()->IdleTaskPending()) {
-        result = true;
-      } else {
-        incremental_marking()
-            ->incremental_marking_job()
-            ->NotifyIdleTaskProgress();
-        result = IncrementalMarkingJob::IdleTask::Step(this, deadline_in_ms) ==
-                 IncrementalMarkingJob::IdleTask::kDone;
+      const double remaining_idle_time_in_ms =
+          incremental_marking()->AdvanceIncrementalMarking(
+              deadline_in_ms, IncrementalMarking::NO_GC_VIA_STACK_GUARD,
+              IncrementalMarking::FORCE_COMPLETION, StepOrigin::kTask);
+      if (remaining_idle_time_in_ms > 0.0) {
+        TryFinalizeIdleIncrementalMarking(
+            remaining_idle_time_in_ms,
+            GarbageCollectionReason::kFinalizeMarkingViaTask);
       }
+      result = incremental_marking()->IsStopped();
       break;
     }
     case DO_FULL_GC: {
       DCHECK(contexts_disposed_ > 0);
       HistogramTimerScope scope(isolate_->counters()->gc_context());
       TRACE_EVENT0("v8", "V8.GCContext");
-      CollectAllGarbage(kNoGCFlags, "idle notification: contexts disposed");
+      CollectAllGarbage(kNoGCFlags, GarbageCollectionReason::kContextDisposal);
       break;
     }
     case DO_NOTHING:
@@ -4328,8 +4373,7 @@
 
   if ((FLAG_trace_idle_notification && action.type > DO_NOTHING) ||
       FLAG_trace_idle_notification_verbose) {
-    PrintIsolate(isolate_, "%8.0f ms: ", isolate()->time_millis_since_init());
-    PrintF(
+    isolate_->PrintWithTimestamp(
         "Idle notification: requested idle time %.2f ms, used idle time %.2f "
         "ms, deadline usage %.2f ms [",
         idle_time_in_ms, idle_time_in_ms - deadline_difference,
@@ -4416,10 +4460,11 @@
     }
   }
   if (memory_pressure_level_.Value() == MemoryPressureLevel::kCritical) {
-    CollectGarbageOnMemoryPressure("memory pressure");
+    CollectGarbageOnMemoryPressure();
   } else if (memory_pressure_level_.Value() == MemoryPressureLevel::kModerate) {
     if (FLAG_incremental_marking && incremental_marking()->IsStopped()) {
-      StartIdleIncrementalMarking();
+      StartIncrementalMarking(kReduceMemoryFootprintMask,
+                              GarbageCollectionReason::kMemoryPressure);
     }
   }
   MemoryReducer::Event event;
@@ -4428,7 +4473,7 @@
   memory_reducer_->NotifyPossibleGarbage(event);
 }
 
-void Heap::CollectGarbageOnMemoryPressure(const char* source) {
+void Heap::CollectGarbageOnMemoryPressure() {
   const int kGarbageThresholdInBytes = 8 * MB;
   const double kGarbageThresholdAsFractionOfTotalMemory = 0.1;
   // This constant is the maximum response time in RAIL performance model.
@@ -4436,7 +4481,8 @@
 
   double start = MonotonicallyIncreasingTimeInMs();
   CollectAllGarbage(kReduceMemoryFootprintMask | kAbortIncrementalMarkingMask,
-                    source, kGCCallbackFlagCollectAllAvailableGarbage);
+                    GarbageCollectionReason::kMemoryPressure,
+                    kGCCallbackFlagCollectAllAvailableGarbage);
   double end = MonotonicallyIncreasingTimeInMs();
 
   // Estimate how much memory we can free.
@@ -4451,11 +4497,13 @@
     // Otherwise, start incremental marking.
     if (end - start < kMaxMemoryPressurePauseMs / 2) {
       CollectAllGarbage(
-          kReduceMemoryFootprintMask | kAbortIncrementalMarkingMask, source,
+          kReduceMemoryFootprintMask | kAbortIncrementalMarkingMask,
+          GarbageCollectionReason::kMemoryPressure,
           kGCCallbackFlagCollectAllAvailableGarbage);
     } else {
       if (FLAG_incremental_marking && incremental_marking()->IsStopped()) {
-        StartIdleIncrementalMarking();
+        StartIncrementalMarking(kReduceMemoryFootprintMask,
+                                GarbageCollectionReason::kMemoryPressure);
       }
     }
   }
@@ -4527,7 +4575,7 @@
   PrintF("Heap statistics : ");
   memory_allocator()->ReportStatistics();
   PrintF("To space : ");
-  new_space_.ReportStatistics();
+  new_space_->ReportStatistics();
   PrintF("Old space : ");
   old_space_->ReportStatistics();
   PrintF("Code space : ");
@@ -4541,12 +4589,64 @@
 
 #endif  // DEBUG
 
+const char* Heap::GarbageCollectionReasonToString(
+    GarbageCollectionReason gc_reason) {
+  switch (gc_reason) {
+    case GarbageCollectionReason::kAllocationFailure:
+      return "allocation failure";
+    case GarbageCollectionReason::kAllocationLimit:
+      return "allocation limit";
+    case GarbageCollectionReason::kContextDisposal:
+      return "context disposal";
+    case GarbageCollectionReason::kCountersExtension:
+      return "counters extension";
+    case GarbageCollectionReason::kDebugger:
+      return "debugger";
+    case GarbageCollectionReason::kDeserializer:
+      return "deserialize";
+    case GarbageCollectionReason::kExternalMemoryPressure:
+      return "external memory pressure";
+    case GarbageCollectionReason::kFinalizeMarkingViaStackGuard:
+      return "finalize incremental marking via stack guard";
+    case GarbageCollectionReason::kFinalizeMarkingViaTask:
+      return "finalize incremental marking via task";
+    case GarbageCollectionReason::kFullHashtable:
+      return "full hash-table";
+    case GarbageCollectionReason::kHeapProfiler:
+      return "heap profiler";
+    case GarbageCollectionReason::kIdleTask:
+      return "idle task";
+    case GarbageCollectionReason::kLastResort:
+      return "last resort";
+    case GarbageCollectionReason::kLowMemoryNotification:
+      return "low memory notification";
+    case GarbageCollectionReason::kMakeHeapIterable:
+      return "make heap iterable";
+    case GarbageCollectionReason::kMemoryPressure:
+      return "memory pressure";
+    case GarbageCollectionReason::kMemoryReducer:
+      return "memory reducer";
+    case GarbageCollectionReason::kRuntime:
+      return "runtime";
+    case GarbageCollectionReason::kSamplingProfiler:
+      return "sampling profiler";
+    case GarbageCollectionReason::kSnapshotCreator:
+      return "snapshot creator";
+    case GarbageCollectionReason::kTesting:
+      return "testing";
+    case GarbageCollectionReason::kUnknown:
+      return "unknown";
+  }
+  UNREACHABLE();
+  return "";
+}
+
 bool Heap::Contains(HeapObject* value) {
   if (memory_allocator()->IsOutsideAllocatedSpace(value->address())) {
     return false;
   }
   return HasBeenSetUp() &&
-         (new_space_.ToSpaceContains(value) || old_space_->Contains(value) ||
+         (new_space_->ToSpaceContains(value) || old_space_->Contains(value) ||
           code_space_->Contains(value) || map_space_->Contains(value) ||
           lo_space_->Contains(value));
 }
@@ -4556,7 +4656,7 @@
     return false;
   }
   return HasBeenSetUp() &&
-         (new_space_.ToSpaceContainsSlow(addr) ||
+         (new_space_->ToSpaceContainsSlow(addr) ||
           old_space_->ContainsSlow(addr) || code_space_->ContainsSlow(addr) ||
           map_space_->ContainsSlow(addr) || lo_space_->ContainsSlow(addr));
 }
@@ -4569,7 +4669,7 @@
 
   switch (space) {
     case NEW_SPACE:
-      return new_space_.ToSpaceContains(value);
+      return new_space_->ToSpaceContains(value);
     case OLD_SPACE:
       return old_space_->Contains(value);
     case CODE_SPACE:
@@ -4591,7 +4691,7 @@
 
   switch (space) {
     case NEW_SPACE:
-      return new_space_.ToSpaceContainsSlow(addr);
+      return new_space_->ToSpaceContainsSlow(addr);
     case OLD_SPACE:
       return old_space_->ContainsSlow(addr);
     case CODE_SPACE:
@@ -4654,7 +4754,7 @@
   VerifySmisVisitor smis_visitor;
   IterateSmiRoots(&smis_visitor);
 
-  new_space_.Verify();
+  new_space_->Verify();
 
   old_space_->Verify(&visitor);
   map_space_->Verify(&visitor);
@@ -4673,9 +4773,9 @@
 
 
 void Heap::ZapFromSpace() {
-  if (!new_space_.IsFromSpaceCommitted()) return;
-  for (Page* page : NewSpacePageRange(new_space_.FromSpaceStart(),
-                                      new_space_.FromSpaceEnd())) {
+  if (!new_space_->IsFromSpaceCommitted()) return;
+  for (Page* page : NewSpacePageRange(new_space_->FromSpaceStart(),
+                                      new_space_->FromSpaceEnd())) {
     for (Address cursor = page->area_start(), limit = page->area_end();
          cursor < limit; cursor += kPointerSize) {
       Memory::Address_at(cursor) = kFromSpaceZapValue;
@@ -4967,7 +5067,7 @@
 
   if (FLAG_stress_compaction) {
     // This will cause more frequent GCs when stressing.
-    max_semi_space_size_ = Page::kPageSize;
+    max_semi_space_size_ = MB;
   }
 
   // The new space size must be a power of two to support single-bit testing
@@ -5018,7 +5118,7 @@
   old_generation_allocation_limit_ = initial_old_generation_size_;
 
   // We rely on being able to allocate new arrays in paged spaces.
-  DCHECK(Page::kMaxRegularHeapObjectSize >=
+  DCHECK(kMaxRegularHeapObjectSize >=
          (JSArray::kSize +
           FixedArray::SizeFor(JSArray::kInitialMaxFastElementArray) +
           AllocationMemento::kSize));
@@ -5060,8 +5160,8 @@
 void Heap::RecordStats(HeapStats* stats, bool take_snapshot) {
   *stats->start_marker = HeapStats::kStartMarker;
   *stats->end_marker = HeapStats::kEndMarker;
-  *stats->new_space_size = new_space_.SizeAsInt();
-  *stats->new_space_capacity = new_space_.Capacity();
+  *stats->new_space_size = new_space_->Size();
+  *stats->new_space_capacity = new_space_->Capacity();
   *stats->old_space_size = old_space_->SizeOfObjects();
   *stats->old_space_capacity = old_space_->Capacity();
   *stats->code_space_size = code_space_->SizeOfObjects();
@@ -5183,11 +5283,19 @@
   CHECK(old_gen_size > 0);
   intptr_t limit = static_cast<intptr_t>(old_gen_size * factor);
   limit = Max(limit, old_gen_size + MinimumAllocationLimitGrowingStep());
-  limit += new_space_.Capacity();
+  limit += new_space_->Capacity();
   intptr_t halfway_to_the_max = (old_gen_size + max_old_generation_size_) / 2;
   return Min(limit, halfway_to_the_max);
 }
 
+intptr_t Heap::MinimumAllocationLimitGrowingStep() {
+  const double kRegularAllocationLimitGrowingStep = 8;
+  const double kLowMemoryAllocationLimitGrowingStep = 2;
+  intptr_t limit = (Page::kPageSize > MB ? Page::kPageSize : MB);
+  return limit * (ShouldOptimizeForMemoryUsage()
+                      ? kLowMemoryAllocationLimitGrowingStep
+                      : kRegularAllocationLimitGrowingStep);
+}
 
 void Heap::SetOldGenerationAllocationLimit(intptr_t old_gen_size,
                                            double gc_speed,
@@ -5195,11 +5303,11 @@
   double factor = HeapGrowingFactor(gc_speed, mutator_speed);
 
   if (FLAG_trace_gc_verbose) {
-    PrintIsolate(isolate_,
-                 "Heap growing factor %.1f based on mu=%.3f, speed_ratio=%.f "
-                 "(gc=%.f, mutator=%.f)\n",
-                 factor, kTargetMutatorUtilization, gc_speed / mutator_speed,
-                 gc_speed, mutator_speed);
+    isolate_->PrintWithTimestamp(
+        "Heap growing factor %.1f based on mu=%.3f, speed_ratio=%.f "
+        "(gc=%.f, mutator=%.f)\n",
+        factor, kTargetMutatorUtilization, gc_speed / mutator_speed, gc_speed,
+        mutator_speed);
   }
 
   if (IsMemoryConstrainedDevice()) {
@@ -5223,14 +5331,13 @@
       CalculateOldGenerationAllocationLimit(factor, old_gen_size);
 
   if (FLAG_trace_gc_verbose) {
-    PrintIsolate(isolate_, "Grow: old size: %" V8PRIdPTR
-                           " KB, new limit: %" V8PRIdPTR " KB (%.1f)\n",
-                 old_gen_size / KB, old_generation_allocation_limit_ / KB,
-                 factor);
+    isolate_->PrintWithTimestamp("Grow: old size: %" V8PRIdPTR
+                                 " KB, new limit: %" V8PRIdPTR " KB (%.1f)\n",
+                                 old_gen_size / KB,
+                                 old_generation_allocation_limit_ / KB, factor);
   }
 }
 
-
 void Heap::DampenOldGenerationAllocationLimit(intptr_t old_gen_size,
                                               double gc_speed,
                                               double mutator_speed) {
@@ -5238,17 +5345,64 @@
   intptr_t limit = CalculateOldGenerationAllocationLimit(factor, old_gen_size);
   if (limit < old_generation_allocation_limit_) {
     if (FLAG_trace_gc_verbose) {
-      PrintIsolate(isolate_,
-                   "Dampen: old size: %" V8PRIdPTR " KB, old limit: %" V8PRIdPTR
-                   " KB, "
-                   "new limit: %" V8PRIdPTR " KB (%.1f)\n",
-                   old_gen_size / KB, old_generation_allocation_limit_ / KB,
-                   limit / KB, factor);
+      isolate_->PrintWithTimestamp(
+          "Dampen: old size: %" V8PRIdPTR " KB, old limit: %" V8PRIdPTR
+          " KB, "
+          "new limit: %" V8PRIdPTR " KB (%.1f)\n",
+          old_gen_size / KB, old_generation_allocation_limit_ / KB, limit / KB,
+          factor);
     }
     old_generation_allocation_limit_ = limit;
   }
 }
 
+// This predicate is called when an old generation space cannot allocate from
+// the free list and is about to add a new page. Returning false will cause a
+// major GC. It happens when the old generation allocation limit is reached and
+// - either we need to optimize for memory usage,
+// - or the incremental marking is not in progress and we cannot start it.
+bool Heap::ShouldExpandOldGenerationOnAllocationFailure() {
+  if (always_allocate() || OldGenerationSpaceAvailable() > 0) return true;
+  // We reached the old generation allocation limit.
+
+  if (ShouldOptimizeForMemoryUsage()) return false;
+
+  if (incremental_marking()->IsStopped() &&
+      IncrementalMarkingLimitReached() == IncrementalMarkingLimit::kNoLimit) {
+    // We cannot start incremental marking.
+    return false;
+  }
+  return true;
+}
+
+// This function returns either kNoLimit, kSoftLimit, or kHardLimit.
+// The kNoLimit means that either incremental marking is disabled or it is too
+// early to start incremental marking.
+// The kSoftLimit means that incremental marking should be started soon.
+// The kHardLimit means that incremental marking should be started immediately.
+Heap::IncrementalMarkingLimit Heap::IncrementalMarkingLimitReached() {
+  if (!incremental_marking()->CanBeActivated() ||
+      PromotedSpaceSizeOfObjects() < IncrementalMarking::kActivationThreshold) {
+    // Incremental marking is disabled or it is too early to start.
+    return IncrementalMarkingLimit::kNoLimit;
+  }
+  if ((FLAG_stress_compaction && (gc_count_ & 1) != 0) ||
+      HighMemoryPressure()) {
+    // If there is high memory pressure or stress testing is enabled, then
+    // start marking immediately.
+    return IncrementalMarkingLimit::kHardLimit;
+  }
+  intptr_t old_generation_space_available = OldGenerationSpaceAvailable();
+  if (old_generation_space_available > new_space_->Capacity()) {
+    return IncrementalMarkingLimit::kNoLimit;
+  }
+  // We are close to the allocation limit.
+  // Choose between the hard and the soft limits.
+  if (old_generation_space_available <= 0 || ShouldOptimizeForMemoryUsage()) {
+    return IncrementalMarkingLimit::kHardLimit;
+  }
+  return IncrementalMarkingLimit::kSoftLimit;
+}
 
 void Heap::EnableInlineAllocation() {
   if (!inline_allocation_disabled_) return;
@@ -5316,33 +5470,30 @@
   // Initialize incremental marking.
   incremental_marking_ = new IncrementalMarking(this);
 
-  // Set up new space.
-  if (!new_space_.SetUp(initial_semispace_size_, max_semi_space_size_)) {
+  for (int i = 0; i <= LAST_SPACE; i++) {
+    space_[i] = nullptr;
+  }
+
+  space_[NEW_SPACE] = new_space_ = new NewSpace(this);
+  if (!new_space_->SetUp(initial_semispace_size_, max_semi_space_size_)) {
     return false;
   }
   new_space_top_after_last_gc_ = new_space()->top();
 
-  // Initialize old space.
-  old_space_ = new OldSpace(this, OLD_SPACE, NOT_EXECUTABLE);
-  if (old_space_ == NULL) return false;
+  space_[OLD_SPACE] = old_space_ =
+      new OldSpace(this, OLD_SPACE, NOT_EXECUTABLE);
   if (!old_space_->SetUp()) return false;
 
-  // Initialize the code space, set its maximum capacity to the old
-  // generation size. It needs executable memory.
-  code_space_ = new OldSpace(this, CODE_SPACE, EXECUTABLE);
-  if (code_space_ == NULL) return false;
+  space_[CODE_SPACE] = code_space_ = new OldSpace(this, CODE_SPACE, EXECUTABLE);
   if (!code_space_->SetUp()) return false;
 
-  // Initialize map space.
-  map_space_ = new MapSpace(this, MAP_SPACE);
-  if (map_space_ == NULL) return false;
+  space_[MAP_SPACE] = map_space_ = new MapSpace(this, MAP_SPACE);
   if (!map_space_->SetUp()) return false;
 
   // The large object code space may contain code or data.  We set the memory
   // to be non-executable here for safety, but this means we need to enable it
   // explicitly when allocating large code objects.
-  lo_space_ = new LargeObjectSpace(this, LO_SPACE);
-  if (lo_space_ == NULL) return false;
+  space_[LO_SPACE] = lo_space_ = new LargeObjectSpace(this, LO_SPACE);
   if (!lo_space_->SetUp()) return false;
 
   // Set up the seed that is used to randomize the string hash function.
@@ -5362,20 +5513,14 @@
   }
 
   tracer_ = new GCTracer(this);
-
   scavenge_collector_ = new Scavenger(this);
-
   mark_compact_collector_ = new MarkCompactCollector(this);
-
   gc_idle_time_handler_ = new GCIdleTimeHandler();
-
   memory_reducer_ = new MemoryReducer(this);
-
   if (FLAG_track_gc_object_stats) {
     live_object_stats_ = new ObjectStats(this);
     dead_object_stats_ = new ObjectStats(this);
   }
-
   scavenge_job_ = new ScavengeJob();
 
   LOG(isolate_, IntPtrTEvent("heap-capacity", Capacity()));
@@ -5435,28 +5580,52 @@
 
 
 void Heap::NotifyDeserializationComplete() {
-  deserialization_complete_ = true;
-#ifdef DEBUG
-  // All pages right after bootstrapping must be marked as never-evacuate.
+  DCHECK_EQ(0, gc_count());
   PagedSpaces spaces(this);
   for (PagedSpace* s = spaces.next(); s != NULL; s = spaces.next()) {
+    if (isolate()->snapshot_available()) s->ShrinkImmortalImmovablePages();
+#ifdef DEBUG
+    // All pages right after bootstrapping must be marked as never-evacuate.
     for (Page* p : *s) {
       CHECK(p->NeverEvacuate());
     }
-  }
 #endif  // DEBUG
+  }
+
+  deserialization_complete_ = true;
 }
 
 void Heap::SetEmbedderHeapTracer(EmbedderHeapTracer* tracer) {
-  mark_compact_collector()->SetEmbedderHeapTracer(tracer);
+  DCHECK_NOT_NULL(tracer);
+  CHECK_NULL(embedder_heap_tracer_);
+  embedder_heap_tracer_ = tracer;
 }
 
-bool Heap::UsingEmbedderHeapTracer() {
-  return mark_compact_collector()->UsingEmbedderHeapTracer();
+void Heap::RegisterWrappersWithEmbedderHeapTracer() {
+  DCHECK(UsingEmbedderHeapTracer());
+  if (wrappers_to_trace_.empty()) {
+    return;
+  }
+  embedder_heap_tracer()->RegisterV8References(wrappers_to_trace_);
+  wrappers_to_trace_.clear();
 }
 
 void Heap::TracePossibleWrapper(JSObject* js_object) {
-  mark_compact_collector()->TracePossibleWrapper(js_object);
+  DCHECK(js_object->WasConstructedFromApiFunction());
+  if (js_object->GetInternalFieldCount() >= 2 &&
+      js_object->GetInternalField(0) &&
+      js_object->GetInternalField(0) != undefined_value() &&
+      js_object->GetInternalField(1) != undefined_value()) {
+    DCHECK(reinterpret_cast<intptr_t>(js_object->GetInternalField(0)) % 2 == 0);
+    wrappers_to_trace_.push_back(std::pair<void*, void*>(
+        reinterpret_cast<void*>(js_object->GetInternalField(0)),
+        reinterpret_cast<void*>(js_object->GetInternalField(1))));
+  }
+}
+
+bool Heap::RequiresImmediateWrapperProcessing() {
+  const size_t kTooManyWrappers = 16000;
+  return wrappers_to_trace_.size() > kTooManyWrappers;
 }
 
 void Heap::RegisterExternallyReferencedObject(Object** object) {
@@ -5480,33 +5649,18 @@
 
   UpdateMaximumCommitted();
 
-  if (FLAG_print_cumulative_gc_stat) {
-    PrintF("\n");
-    PrintF("gc_count=%d ", gc_count_);
-    PrintF("mark_sweep_count=%d ", ms_count_);
-    PrintF("max_gc_pause=%.1f ", get_max_gc_pause());
-    PrintF("total_gc_time=%.1f ", total_gc_time_ms_);
-    PrintF("min_in_mutator=%.1f ", get_min_in_mutator());
-    PrintF("max_alive_after_gc=%" V8PRIdPTR " ", get_max_alive_after_gc());
-    PrintF("total_marking_time=%.1f ", tracer()->cumulative_marking_duration());
-    PrintF("total_sweeping_time=%.1f ",
-           tracer()->cumulative_sweeping_duration());
-    PrintF("\n\n");
-  }
-
   if (FLAG_print_max_heap_committed) {
     PrintF("\n");
-    PrintF("maximum_committed_by_heap=%" V8PRIdPTR " ",
-           MaximumCommittedMemory());
-    PrintF("maximum_committed_by_new_space=%" V8PRIdPTR " ",
-           new_space_.MaximumCommittedMemory());
-    PrintF("maximum_committed_by_old_space=%" V8PRIdPTR " ",
+    PrintF("maximum_committed_by_heap=%" PRIuS " ", MaximumCommittedMemory());
+    PrintF("maximum_committed_by_new_space=%" PRIuS " ",
+           new_space_->MaximumCommittedMemory());
+    PrintF("maximum_committed_by_old_space=%" PRIuS " ",
            old_space_->MaximumCommittedMemory());
-    PrintF("maximum_committed_by_code_space=%" V8PRIdPTR " ",
+    PrintF("maximum_committed_by_code_space=%" PRIuS " ",
            code_space_->MaximumCommittedMemory());
-    PrintF("maximum_committed_by_map_space=%" V8PRIdPTR " ",
+    PrintF("maximum_committed_by_map_space=%" PRIuS " ",
            map_space_->MaximumCommittedMemory());
-    PrintF("maximum_committed_by_lo_space=%" V8PRIdPTR " ",
+    PrintF("maximum_committed_by_lo_space=%" PRIuS " ",
            lo_space_->MaximumCommittedMemory());
     PrintF("\n\n");
   }
@@ -5560,7 +5714,9 @@
   delete tracer_;
   tracer_ = nullptr;
 
-  new_space_.TearDown();
+  new_space_->TearDown();
+  delete new_space_;
+  new_space_ = nullptr;
 
   if (old_space_ != NULL) {
     delete old_space_;
@@ -5599,6 +5755,9 @@
 
   delete memory_allocator_;
   memory_allocator_ = nullptr;
+
+  delete embedder_reference_reporter_;
+  embedder_reference_reporter_ = nullptr;
 }
 
 
@@ -5879,14 +6038,10 @@
   }
 }
 
-
 SpaceIterator::SpaceIterator(Heap* heap)
-    : heap_(heap), current_space_(FIRST_SPACE), iterator_(NULL) {}
-
+    : heap_(heap), current_space_(FIRST_SPACE - 1) {}
 
 SpaceIterator::~SpaceIterator() {
-  // Delete active iterator if any.
-  delete iterator_;
 }
 
 
@@ -5895,48 +6050,9 @@
   return current_space_ != LAST_SPACE;
 }
 
-
-ObjectIterator* SpaceIterator::next() {
-  if (iterator_ != NULL) {
-    delete iterator_;
-    iterator_ = NULL;
-    // Move to the next space
-    current_space_++;
-    if (current_space_ > LAST_SPACE) {
-      return NULL;
-    }
-  }
-
-  // Return iterator for the new current space.
-  return CreateIterator();
-}
-
-
-// Create an iterator for the space to iterate.
-ObjectIterator* SpaceIterator::CreateIterator() {
-  DCHECK(iterator_ == NULL);
-
-  switch (current_space_) {
-    case NEW_SPACE:
-      iterator_ = new SemiSpaceIterator(heap_->new_space());
-      break;
-    case OLD_SPACE:
-      iterator_ = new HeapObjectIterator(heap_->old_space());
-      break;
-    case CODE_SPACE:
-      iterator_ = new HeapObjectIterator(heap_->code_space());
-      break;
-    case MAP_SPACE:
-      iterator_ = new HeapObjectIterator(heap_->map_space());
-      break;
-    case LO_SPACE:
-      iterator_ = new LargeObjectIterator(heap_->lo_space());
-      break;
-  }
-
-  // Return the newly allocated iterator;
-  DCHECK(iterator_ != NULL);
-  return iterator_;
+Space* SpaceIterator::next() {
+  DCHECK(has_next());
+  return heap_->space(++current_space_);
 }
 
 
@@ -6021,7 +6137,7 @@
     default:
       break;
   }
-  object_iterator_ = space_iterator_->next();
+  object_iterator_ = space_iterator_->next()->GetObjectIterator();
 }
 
 
@@ -6034,8 +6150,6 @@
     DCHECK(object_iterator_ == nullptr);
   }
 #endif
-  // Make sure the last iterator is deallocated.
-  delete object_iterator_;
   delete space_iterator_;
   delete filter_;
 }
@@ -6052,22 +6166,22 @@
 
 HeapObject* HeapIterator::NextObject() {
   // No iterator means we are done.
-  if (object_iterator_ == nullptr) return nullptr;
+  if (object_iterator_.get() == nullptr) return nullptr;
 
-  if (HeapObject* obj = object_iterator_->Next()) {
+  if (HeapObject* obj = object_iterator_.get()->Next()) {
     // If the current iterator has more objects we are fine.
     return obj;
   } else {
     // Go though the spaces looking for one that has objects.
     while (space_iterator_->has_next()) {
-      object_iterator_ = space_iterator_->next();
-      if (HeapObject* obj = object_iterator_->Next()) {
+      object_iterator_ = space_iterator_->next()->GetObjectIterator();
+      if (HeapObject* obj = object_iterator_.get()->Next()) {
         return obj;
       }
     }
   }
   // Done with the last space.
-  object_iterator_ = nullptr;
+  object_iterator_.reset(nullptr);
   return nullptr;
 }
 
@@ -6260,95 +6374,10 @@
 }
 #endif
 
-
-void Heap::UpdateCumulativeGCStatistics(double duration,
-                                        double spent_in_mutator,
-                                        double marking_time) {
-  if (FLAG_print_cumulative_gc_stat) {
-    total_gc_time_ms_ += duration;
-    max_gc_pause_ = Max(max_gc_pause_, duration);
-    max_alive_after_gc_ = Max(max_alive_after_gc_, SizeOfObjects());
-    min_in_mutator_ = Min(min_in_mutator_, spent_in_mutator);
-  } else if (FLAG_trace_gc_verbose) {
+void Heap::UpdateTotalGCTime(double duration) {
+  if (FLAG_trace_gc_verbose) {
     total_gc_time_ms_ += duration;
   }
-
-  marking_time_ += marking_time;
-}
-
-
-int KeyedLookupCache::Hash(Handle<Map> map, Handle<Name> name) {
-  DisallowHeapAllocation no_gc;
-  // Uses only lower 32 bits if pointers are larger.
-  uintptr_t addr_hash =
-      static_cast<uint32_t>(reinterpret_cast<uintptr_t>(*map)) >> kMapHashShift;
-  return static_cast<uint32_t>((addr_hash ^ name->Hash()) & kCapacityMask);
-}
-
-
-int KeyedLookupCache::Lookup(Handle<Map> map, Handle<Name> name) {
-  DisallowHeapAllocation no_gc;
-  int index = (Hash(map, name) & kHashMask);
-  for (int i = 0; i < kEntriesPerBucket; i++) {
-    Key& key = keys_[index + i];
-    if ((key.map == *map) && key.name->Equals(*name)) {
-      return field_offsets_[index + i];
-    }
-  }
-  return kNotFound;
-}
-
-
-void KeyedLookupCache::Update(Handle<Map> map, Handle<Name> name,
-                              int field_offset) {
-  DisallowHeapAllocation no_gc;
-  if (!name->IsUniqueName()) {
-    if (!StringTable::InternalizeStringIfExists(
-             name->GetIsolate(), Handle<String>::cast(name)).ToHandle(&name)) {
-      return;
-    }
-  }
-  // This cache is cleared only between mark compact passes, so we expect the
-  // cache to only contain old space names.
-  DCHECK(!map->GetIsolate()->heap()->InNewSpace(*name));
-
-  int index = (Hash(map, name) & kHashMask);
-  // After a GC there will be free slots, so we use them in order (this may
-  // help to get the most frequently used one in position 0).
-  for (int i = 0; i < kEntriesPerBucket; i++) {
-    Key& key = keys_[index];
-    Object* free_entry_indicator = NULL;
-    if (key.map == free_entry_indicator) {
-      key.map = *map;
-      key.name = *name;
-      field_offsets_[index + i] = field_offset;
-      return;
-    }
-  }
-  // No free entry found in this bucket, so we move them all down one and
-  // put the new entry at position zero.
-  for (int i = kEntriesPerBucket - 1; i > 0; i--) {
-    Key& key = keys_[index + i];
-    Key& key2 = keys_[index + i - 1];
-    key = key2;
-    field_offsets_[index + i] = field_offsets_[index + i - 1];
-  }
-
-  // Write the new first entry.
-  Key& key = keys_[index];
-  key.map = *map;
-  key.name = *name;
-  field_offsets_[index] = field_offset;
-}
-
-
-void KeyedLookupCache::Clear() {
-  for (int index = 0; index < kLength; index++) keys_[index].map = NULL;
-}
-
-
-void DescriptorLookupCache::Clear() {
-  for (int index = 0; index < kLength; index++) keys_[index].source = NULL;
 }
 
 void Heap::ExternalStringTable::CleanUp() {
diff --git a/src/heap/heap.h b/src/heap/heap.h
index b9b058c..cce467f 100644
--- a/src/heap/heap.h
+++ b/src/heap/heap.h
@@ -16,9 +16,8 @@
 #include "src/base/atomic-utils.h"
 #include "src/globals.h"
 #include "src/heap-symbols.h"
-// TODO(mstarzinger): One more include to kill!
-#include "src/heap/spaces.h"
 #include "src/list.h"
+#include "src/objects.h"
 
 namespace v8 {
 namespace internal {
@@ -49,6 +48,8 @@
   V(Map, one_byte_string_map, OneByteStringMap)                                \
   V(Map, one_byte_internalized_string_map, OneByteInternalizedStringMap)       \
   V(Map, scope_info_map, ScopeInfoMap)                                         \
+  V(Map, module_info_entry_map, ModuleInfoEntryMap)                            \
+  V(Map, module_info_map, ModuleInfoMap)                                       \
   V(Map, shared_function_info_map, SharedFunctionInfoMap)                      \
   V(Map, code_map, CodeMap)                                                    \
   V(Map, function_context_map, FunctionContextMap)                             \
@@ -59,7 +60,9 @@
   V(Map, heap_number_map, HeapNumberMap)                                       \
   V(Map, transition_array_map, TransitionArrayMap)                             \
   V(FixedArray, empty_literals_array, EmptyLiteralsArray)                      \
+  V(FixedArray, empty_type_feedback_vector, EmptyTypeFeedbackVector)           \
   V(FixedArray, empty_fixed_array, EmptyFixedArray)                            \
+  V(ScopeInfo, empty_scope_info, EmptyScopeInfo)                               \
   V(FixedArray, cleared_optimized_code_map, ClearedOptimizedCodeMap)           \
   V(DescriptorArray, empty_descriptor_array, EmptyDescriptorArray)             \
   /* Entries beyond the first 32                                            */ \
@@ -164,6 +167,7 @@
   V(Cell, is_concat_spreadable_protector, IsConcatSpreadableProtector)         \
   V(PropertyCell, has_instance_protector, HasInstanceProtector)                \
   V(Cell, species_protector, SpeciesProtector)                                 \
+  V(PropertyCell, string_length_protector, StringLengthProtector)              \
   /* Special numbers */                                                        \
   V(HeapNumber, nan_value, NanValue)                                           \
   V(HeapNumber, hole_nan_value, HoleNanValue)                                  \
@@ -185,7 +189,6 @@
   V(FixedArray, experimental_extra_natives_source_cache,                       \
     ExperimentalExtraNativesSourceCache)                                       \
   /* Lists and dictionaries */                                                 \
-  V(NameDictionary, intrinsic_function_names, IntrinsicFunctionNames)          \
   V(NameDictionary, empty_properties_dictionary, EmptyPropertiesDictionary)    \
   V(Object, symbol_registry, SymbolRegistry)                                   \
   V(Object, script_list, ScriptList)                                           \
@@ -275,6 +278,8 @@
   V(FixedArrayMap)                      \
   V(CodeMap)                            \
   V(ScopeInfoMap)                       \
+  V(ModuleInfoEntryMap)                 \
+  V(ModuleInfoMap)                      \
   V(FixedCOWArrayMap)                   \
   V(FixedDoubleArrayMap)                \
   V(WeakCellMap)                        \
@@ -322,100 +327,87 @@
 class HeapStats;
 class HistogramTimer;
 class Isolate;
+class MemoryAllocator;
 class MemoryReducer;
+class ObjectIterator;
 class ObjectStats;
+class Page;
+class PagedSpace;
 class Scavenger;
 class ScavengeJob;
+class Space;
 class StoreBuffer;
+class TracePossibleWrapperReporter;
 class WeakObjectRetainer;
 
-enum PromotionMode { PROMOTE_MARKED, DEFAULT_PROMOTION };
-
 typedef void (*ObjectSlotCallback)(HeapObject** from, HeapObject* to);
 
-// A queue of objects promoted during scavenge. Each object is accompanied
-// by it's size to avoid dereferencing a map pointer for scanning.
-// The last page in to-space is used for the promotion queue. On conflict
-// during scavenge, the promotion queue is allocated externally and all
-// entries are copied to the external queue.
+enum PromotionMode { PROMOTE_MARKED, DEFAULT_PROMOTION };
+
+enum ArrayStorageAllocationMode {
+  DONT_INITIALIZE_ARRAY_ELEMENTS,
+  INITIALIZE_ARRAY_ELEMENTS_WITH_HOLE
+};
+
+enum class ClearRecordedSlots { kYes, kNo };
+
+enum class ClearBlackArea { kYes, kNo };
+
+enum class GarbageCollectionReason {
+  kUnknown = 0,
+  kAllocationFailure = 1,
+  kAllocationLimit = 2,
+  kContextDisposal = 3,
+  kCountersExtension = 4,
+  kDebugger = 5,
+  kDeserializer = 6,
+  kExternalMemoryPressure = 7,
+  kFinalizeMarkingViaStackGuard = 8,
+  kFinalizeMarkingViaTask = 9,
+  kFullHashtable = 10,
+  kHeapProfiler = 11,
+  kIdleTask = 12,
+  kLastResort = 13,
+  kLowMemoryNotification = 14,
+  kMakeHeapIterable = 15,
+  kMemoryPressure = 16,
+  kMemoryReducer = 17,
+  kRuntime = 18,
+  kSamplingProfiler = 19,
+  kSnapshotCreator = 20,
+  kTesting = 21
+  // If you add new items here, then update the incremental_marking_reason,
+  // mark_compact_reason, and scavenge_reason counters in counters.h.
+  // Also update src/tools/metrics/histograms/histograms.xml in chromium.
+};
+
+// A queue of objects promoted during scavenge. Each object is accompanied by
+// its size to avoid dereferencing a map pointer for scanning. The last page in
+// to-space is used for the promotion queue. On conflict during scavenge, the
+// promotion queue is allocated externally and all entries are copied to the
+// external queue.
 class PromotionQueue {
  public:
   explicit PromotionQueue(Heap* heap)
-      : front_(NULL),
-        rear_(NULL),
-        limit_(NULL),
-        emergency_stack_(0),
+      : front_(nullptr),
+        rear_(nullptr),
+        limit_(nullptr),
+        emergency_stack_(nullptr),
         heap_(heap) {}
 
   void Initialize();
+  void Destroy();
 
-  void Destroy() {
-    DCHECK(is_empty());
-    delete emergency_stack_;
-    emergency_stack_ = NULL;
-  }
+  inline void SetNewLimit(Address limit);
+  inline bool IsBelowPromotionQueue(Address to_space_top);
 
-  Page* GetHeadPage() {
-    return Page::FromAllocationAreaAddress(reinterpret_cast<Address>(rear_));
-  }
-
-  void SetNewLimit(Address limit) {
-    // If we are already using an emergency stack, we can ignore it.
-    if (emergency_stack_) return;
-
-    // If the limit is not on the same page, we can ignore it.
-    if (Page::FromAllocationAreaAddress(limit) != GetHeadPage()) return;
-
-    limit_ = reinterpret_cast<struct Entry*>(limit);
-
-    if (limit_ <= rear_) {
-      return;
-    }
-
-    RelocateQueueHead();
-  }
-
-  bool IsBelowPromotionQueue(Address to_space_top) {
-    // If an emergency stack is used, the to-space address cannot interfere
-    // with the promotion queue.
-    if (emergency_stack_) return true;
-
-    // If the given to-space top pointer and the head of the promotion queue
-    // are not on the same page, then the to-space objects are below the
-    // promotion queue.
-    if (GetHeadPage() != Page::FromAddress(to_space_top)) {
-      return true;
-    }
-    // If the to space top pointer is smaller or equal than the promotion
-    // queue head, then the to-space objects are below the promotion queue.
-    return reinterpret_cast<struct Entry*>(to_space_top) <= rear_;
-  }
+  inline void insert(HeapObject* target, int32_t size, bool was_marked_black);
+  inline void remove(HeapObject** target, int32_t* size,
+                     bool* was_marked_black);
 
   bool is_empty() {
     return (front_ == rear_) &&
-           (emergency_stack_ == NULL || emergency_stack_->length() == 0);
-  }
-
-  inline void insert(HeapObject* target, int32_t size, bool was_marked_black);
-
-  void remove(HeapObject** target, int32_t* size, bool* was_marked_black) {
-    DCHECK(!is_empty());
-    if (front_ == rear_) {
-      Entry e = emergency_stack_->RemoveLast();
-      *target = e.obj_;
-      *size = e.size_;
-      *was_marked_black = e.was_marked_black_;
-      return;
-    }
-
-    struct Entry* entry = reinterpret_cast<struct Entry*>(--front_);
-    *target = entry->obj_;
-    *size = entry->size_;
-    *was_marked_black = entry->was_marked_black_;
-
-    // Assert no underflow.
-    SemiSpace::AssertValidRange(reinterpret_cast<Address>(rear_),
-                                reinterpret_cast<Address>(front_));
+           (emergency_stack_ == nullptr || emergency_stack_->length() == 0);
   }
 
  private:
@@ -428,6 +420,8 @@
     bool was_marked_black_ : 1;
   };
 
+  inline Page* GetHeadPage();
+
   void RelocateQueueHead();
 
   // The front of the queue is higher in the memory page chain than the rear.
@@ -436,21 +430,94 @@
   struct Entry* limit_;
 
   List<Entry>* emergency_stack_;
-
   Heap* heap_;
 
   DISALLOW_COPY_AND_ASSIGN(PromotionQueue);
 };
 
+class AllocationResult {
+ public:
+  static inline AllocationResult Retry(AllocationSpace space = NEW_SPACE) {
+    return AllocationResult(space);
+  }
 
-enum ArrayStorageAllocationMode {
-  DONT_INITIALIZE_ARRAY_ELEMENTS,
-  INITIALIZE_ARRAY_ELEMENTS_WITH_HOLE
+  // Implicit constructor from Object*.
+  AllocationResult(Object* object)  // NOLINT
+      : object_(object) {
+    // AllocationResults can't return Smis, which are used to represent
+    // failure and the space to retry in.
+    CHECK(!object->IsSmi());
+  }
+
+  AllocationResult() : object_(Smi::FromInt(NEW_SPACE)) {}
+
+  inline bool IsRetry() { return object_->IsSmi(); }
+  inline HeapObject* ToObjectChecked();
+  inline AllocationSpace RetrySpace();
+
+  template <typename T>
+  bool To(T** obj) {
+    if (IsRetry()) return false;
+    *obj = T::cast(object_);
+    return true;
+  }
+
+ private:
+  explicit AllocationResult(AllocationSpace space)
+      : object_(Smi::FromInt(static_cast<int>(space))) {}
+
+  Object* object_;
 };
 
-enum class ClearRecordedSlots { kYes, kNo };
+STATIC_ASSERT(sizeof(AllocationResult) == kPointerSize);
 
-enum class ClearBlackArea { kYes, kNo };
+#ifdef DEBUG
+struct CommentStatistic {
+  const char* comment;
+  int size;
+  int count;
+  void Clear() {
+    comment = NULL;
+    size = 0;
+    count = 0;
+  }
+  // Must be small, since an iteration is used for lookup.
+  static const int kMaxComments = 64;
+};
+#endif
+
+class NumberAndSizeInfo BASE_EMBEDDED {
+ public:
+  NumberAndSizeInfo() : number_(0), bytes_(0) {}
+
+  int number() const { return number_; }
+  void increment_number(int num) { number_ += num; }
+
+  int bytes() const { return bytes_; }
+  void increment_bytes(int size) { bytes_ += size; }
+
+  void clear() {
+    number_ = 0;
+    bytes_ = 0;
+  }
+
+ private:
+  int number_;
+  int bytes_;
+};
+
+// HistogramInfo class for recording a single "bar" of a histogram.  This
+// class is used for collecting statistics to print to the log file.
+class HistogramInfo : public NumberAndSizeInfo {
+ public:
+  HistogramInfo() : NumberAndSizeInfo(), name_(nullptr) {}
+
+  const char* name() { return name_; }
+  void set_name(const char* name) { name_ = name; }
+
+ private:
+  const char* name_;
+};
 
 class Heap {
  public:
@@ -637,30 +704,10 @@
   // should not happen during deserialization.
   void NotifyDeserializationComplete();
 
-  intptr_t old_generation_allocation_limit() const {
-    return old_generation_allocation_limit_;
-  }
-
-  bool always_allocate() { return always_allocate_scope_count_.Value() != 0; }
-
-  Address* NewSpaceAllocationTopAddress() {
-    return new_space_.allocation_top_address();
-  }
-  Address* NewSpaceAllocationLimitAddress() {
-    return new_space_.allocation_limit_address();
-  }
-
-  Address* OldSpaceAllocationTopAddress() {
-    return old_space_->allocation_top_address();
-  }
-  Address* OldSpaceAllocationLimitAddress() {
-    return old_space_->allocation_limit_address();
-  }
-
-  bool CanExpandOldGeneration(int size) {
-    if (force_oom_) return false;
-    return (OldGenerationCapacity() + size) < MaxOldGenerationSize();
-  }
+  inline Address* NewSpaceAllocationTopAddress();
+  inline Address* NewSpaceAllocationLimitAddress();
+  inline Address* OldSpaceAllocationTopAddress();
+  inline Address* OldSpaceAllocationLimitAddress();
 
   // Clear the Instanceof cache (used when a prototype changes).
   inline void ClearInstanceofCache();
@@ -763,14 +810,6 @@
   // Returns false if not able to reserve.
   bool ReserveSpace(Reservation* reservations, List<Address>* maps);
 
-  void SetEmbedderHeapTracer(EmbedderHeapTracer* tracer);
-
-  bool UsingEmbedderHeapTracer();
-
-  void TracePossibleWrapper(JSObject* js_object);
-
-  void RegisterExternallyReferencedObject(Object** object);
-
   //
   // Support for the API.
   //
@@ -792,18 +831,6 @@
   // Check new space expansion criteria and expand semispaces if it was hit.
   void CheckNewSpaceExpansionCriteria();
 
-  inline bool HeapIsFullEnoughToStartIncrementalMarking(intptr_t limit) {
-    if (FLAG_stress_compaction && (gc_count_ & 1) != 0) return true;
-
-    intptr_t adjusted_allocation_limit = limit - new_space_.Capacity();
-
-    if (PromotedTotalSize() >= adjusted_allocation_limit) return true;
-
-    if (HighMemoryPressure()) return true;
-
-    return false;
-  }
-
   void VisitExternalResources(v8::ExternalResourceVisitor* visitor);
 
   // An object should be promoted if the object has survived a
@@ -817,8 +844,6 @@
 
   void IncrementDeferredCount(v8::Isolate::UseCounterFeature feature);
 
-  inline bool OldGenerationAllocationLimitReached();
-
   // Completely clear the Instanceof cache (to stop it keeping objects alive
   // around a GC).
   inline void CompletelyClearInstanceofCache();
@@ -847,6 +872,8 @@
     global_ic_age_ = (global_ic_age_ + 1) & SharedFunctionInfo::ICAgeBits::kMax;
   }
 
+  int64_t external_memory_hard_limit() { return MaxOldGenerationSize() / 2; }
+
   int64_t external_memory() { return external_memory_; }
   void update_external_memory(int64_t delta) { external_memory_ += delta; }
 
@@ -861,9 +888,7 @@
 
   void DeoptMarkedAllocationSites();
 
-  bool DeoptMaybeTenuredAllocationSites() {
-    return new_space_.IsAtMaximumCapacity() && maximum_size_scavenges_ == 0;
-  }
+  inline bool DeoptMaybeTenuredAllocationSites();
 
   void AddWeakNewSpaceObjectToCodeDependency(Handle<HeapObject> obj,
                                              Handle<WeakCell> code);
@@ -937,39 +962,16 @@
   // Getters for spaces. =======================================================
   // ===========================================================================
 
-  Address NewSpaceTop() { return new_space_.top(); }
+  inline Address NewSpaceTop();
 
-  NewSpace* new_space() { return &new_space_; }
+  NewSpace* new_space() { return new_space_; }
   OldSpace* old_space() { return old_space_; }
   OldSpace* code_space() { return code_space_; }
   MapSpace* map_space() { return map_space_; }
   LargeObjectSpace* lo_space() { return lo_space_; }
 
-  PagedSpace* paged_space(int idx) {
-    switch (idx) {
-      case OLD_SPACE:
-        return old_space();
-      case MAP_SPACE:
-        return map_space();
-      case CODE_SPACE:
-        return code_space();
-      case NEW_SPACE:
-      case LO_SPACE:
-        UNREACHABLE();
-    }
-    return NULL;
-  }
-
-  Space* space(int idx) {
-    switch (idx) {
-      case NEW_SPACE:
-        return new_space();
-      case LO_SPACE:
-        return lo_space();
-      default:
-        return paged_space(idx);
-    }
-  }
+  inline PagedSpace* paged_space(int idx);
+  inline Space* space(int idx);
 
   // Returns name of the space.
   const char* GetSpaceName(int idx);
@@ -1090,22 +1092,22 @@
   // Returns whether there is a chance that another major GC could
   // collect more garbage.
   inline bool CollectGarbage(
-      AllocationSpace space, const char* gc_reason = NULL,
+      AllocationSpace space, GarbageCollectionReason gc_reason,
       const GCCallbackFlags gc_callback_flags = kNoGCCallbackFlags);
 
   // Performs a full garbage collection.  If (flags & kMakeHeapIterableMask) is
   // non-zero, then the slower precise sweeper is used, which leaves the heap
   // in a state where we can iterate over the heap visiting all objects.
   void CollectAllGarbage(
-      int flags = kFinalizeIncrementalMarkingMask, const char* gc_reason = NULL,
+      int flags, GarbageCollectionReason gc_reason,
       const GCCallbackFlags gc_callback_flags = kNoGCCallbackFlags);
 
   // Last hope GC, should try to squeeze as much as possible.
-  void CollectAllAvailableGarbage(const char* gc_reason = NULL);
+  void CollectAllAvailableGarbage(GarbageCollectionReason gc_reason);
 
   // Reports and external memory pressure event, either performs a major GC or
   // completes incremental marking in order to free external resources.
-  void ReportExternalMemoryPressure(const char* gc_reason = NULL);
+  void ReportExternalMemoryPressure();
 
   // Invoked when GC was requested via the stack guard.
   void HandleGCRequest();
@@ -1156,24 +1158,54 @@
 
   // Start incremental marking and ensure that idle time handler can perform
   // incremental steps.
-  void StartIdleIncrementalMarking();
+  void StartIdleIncrementalMarking(GarbageCollectionReason gc_reason);
 
   // Starts incremental marking assuming incremental marking is currently
   // stopped.
-  void StartIncrementalMarking(int gc_flags = kNoGCFlags,
-                               const GCCallbackFlags gc_callback_flags =
-                                   GCCallbackFlags::kNoGCCallbackFlags,
-                               const char* reason = nullptr);
+  void StartIncrementalMarking(
+      int gc_flags, GarbageCollectionReason gc_reason,
+      GCCallbackFlags gc_callback_flags = GCCallbackFlags::kNoGCCallbackFlags);
 
-  void FinalizeIncrementalMarkingIfComplete(const char* comment);
+  void StartIncrementalMarkingIfAllocationLimitIsReached(
+      int gc_flags,
+      GCCallbackFlags gc_callback_flags = GCCallbackFlags::kNoGCCallbackFlags);
 
-  bool TryFinalizeIdleIncrementalMarking(double idle_time_in_ms);
+  void FinalizeIncrementalMarkingIfComplete(GarbageCollectionReason gc_reason);
+
+  bool TryFinalizeIdleIncrementalMarking(double idle_time_in_ms,
+                                         GarbageCollectionReason gc_reason);
 
   void RegisterReservationsForBlackAllocation(Reservation* reservations);
 
   IncrementalMarking* incremental_marking() { return incremental_marking_; }
 
   // ===========================================================================
+  // Embedder heap tracer support. =============================================
+  // ===========================================================================
+
+  void SetEmbedderHeapTracer(EmbedderHeapTracer* tracer);
+
+  bool UsingEmbedderHeapTracer() { return embedder_heap_tracer() != nullptr; }
+
+  void TracePossibleWrapper(JSObject* js_object);
+
+  void RegisterExternallyReferencedObject(Object** object);
+
+  void RegisterWrappersWithEmbedderHeapTracer();
+
+  // In order to avoid running out of memory we force tracing wrappers if there
+  // are too many of them.
+  bool RequiresImmediateWrapperProcessing();
+
+  EmbedderHeapTracer* embedder_heap_tracer() { return embedder_heap_tracer_; }
+
+  EmbedderReachableReferenceReporter* embedder_reachable_reference_reporter() {
+    return embedder_reference_reporter_;
+  }
+
+  size_t wrappers_to_trace() { return wrappers_to_trace_.size(); }
+
+  // ===========================================================================
   // External string table API. ================================================
   // ===========================================================================
 
@@ -1258,19 +1290,19 @@
   intptr_t OldGenerationCapacity();
 
   // Returns the amount of memory currently committed for the heap.
-  intptr_t CommittedMemory();
+  size_t CommittedMemory();
 
   // Returns the amount of memory currently committed for the old space.
-  intptr_t CommittedOldGenerationMemory();
+  size_t CommittedOldGenerationMemory();
 
   // Returns the amount of executable memory currently committed for the heap.
-  intptr_t CommittedMemoryExecutable();
+  size_t CommittedMemoryExecutable();
 
   // Returns the amount of phyical memory currently committed for the heap.
   size_t CommittedPhysicalMemory();
 
   // Returns the maximum amount of memory ever committed for the heap.
-  intptr_t MaximumCommittedMemory() { return maximum_committed_; }
+  size_t MaximumCommittedMemory() { return maximum_committed_; }
 
   // Updates the maximum committed memory for the heap. Should be called
   // whenever a space grows.
@@ -1326,13 +1358,9 @@
     return static_cast<intptr_t>(total);
   }
 
-  void UpdateNewSpaceAllocationCounter() {
-    new_space_allocation_counter_ = NewSpaceAllocationCounter();
-  }
+  inline void UpdateNewSpaceAllocationCounter();
 
-  size_t NewSpaceAllocationCounter() {
-    return new_space_allocation_counter_ + new_space()->AllocatedSinceLastGC();
-  }
+  inline size_t NewSpaceAllocationCounter();
 
   // This should be used only for testing.
   void set_new_space_allocation_counter(size_t new_value) {
@@ -1340,16 +1368,18 @@
   }
 
   void UpdateOldGenerationAllocationCounter() {
-    old_generation_allocation_counter_ = OldGenerationAllocationCounter();
+    old_generation_allocation_counter_at_last_gc_ =
+        OldGenerationAllocationCounter();
   }
 
   size_t OldGenerationAllocationCounter() {
-    return old_generation_allocation_counter_ + PromotedSinceLastGC();
+    return old_generation_allocation_counter_at_last_gc_ +
+           PromotedSinceLastGC();
   }
 
   // This should be used only for testing.
-  void set_old_generation_allocation_counter(size_t new_value) {
-    old_generation_allocation_counter_ = new_value;
+  void set_old_generation_allocation_counter_at_last_gc(size_t new_value) {
+    old_generation_allocation_counter_at_last_gc_ = new_value;
   }
 
   size_t PromotedSinceLastGC() {
@@ -1456,6 +1486,9 @@
   void ReportCodeStatistics(const char* title);
 #endif
 
+  static const char* GarbageCollectionReasonToString(
+      GarbageCollectionReason gc_reason);
+
  private:
   class PretenuringScope;
 
@@ -1588,6 +1621,10 @@
     return current_gc_flags_ & kFinalizeIncrementalMarkingMask;
   }
 
+  // Checks whether both, the internal marking deque, and the embedder provided
+  // one are empty. Avoid in fast path as it potentially calls through the API.
+  bool MarkingDequesAreEmpty();
+
   void PreprocessStackTraces();
 
   // Checks whether a global GC is necessary
@@ -1607,7 +1644,7 @@
   // Returns whether there is a chance that another major GC could
   // collect more garbage.
   bool CollectGarbage(
-      GarbageCollector collector, const char* gc_reason,
+      GarbageCollector collector, GarbageCollectionReason gc_reason,
       const char* collector_reason,
       const GCCallbackFlags gc_callback_flags = kNoGCCallbackFlags);
 
@@ -1646,7 +1683,7 @@
   void EnsureFromSpaceIsCommitted();
 
   // Uncommit unused semi space.
-  bool UncommitFromSpace() { return new_space_.UncommitFromSpace(); }
+  bool UncommitFromSpace();
 
   // Fill in bogus values in from space
   void ZapFromSpace();
@@ -1669,10 +1706,6 @@
   // Flush the number to string cache.
   void FlushNumberStringCache();
 
-  // TODO(hpayer): Allocation site pretenuring may make this method obsolete.
-  // Re-visit incremental marking heuristics.
-  bool IsHighSurvivalRate() { return high_survival_rate_period_length_ > 0; }
-
   void ConfigureInitialOldGenerationSize();
 
   bool HasLowYoungGenerationAllocationRate();
@@ -1682,10 +1715,6 @@
 
   void ReduceNewSpaceSize();
 
-  bool TryFinalizeIdleIncrementalMarking(
-      double idle_time_in_ms, size_t size_of_objects,
-      size_t mark_compact_speed_in_bytes_per_ms);
-
   GCIdleTimeHeapState ComputeHeapState();
 
   bool PerformIdleTimeAction(GCIdleTimeAction action,
@@ -1705,13 +1734,13 @@
 
   void CompactRetainedMaps(ArrayList* retained_maps);
 
-  void CollectGarbageOnMemoryPressure(const char* source);
+  void CollectGarbageOnMemoryPressure();
 
   // Attempt to over-approximate the weak closure by marking object groups and
   // implicit references from global handles, but don't atomically complete
   // marking. If we continue to mark incrementally, we might have marked
   // objects that die later.
-  void FinalizeIncrementalMarking(const char* gc_reason);
+  void FinalizeIncrementalMarking(GarbageCollectionReason gc_reason);
 
   // Returns the timer used for a given GC type.
   // - GCScavenger: young generation GC
@@ -1772,18 +1801,7 @@
     return old_generation_allocation_limit_ - PromotedTotalSize();
   }
 
-  // Returns maximum GC pause.
-  double get_max_gc_pause() { return max_gc_pause_; }
-
-  // Returns maximum size of objects alive after GC.
-  intptr_t get_max_alive_after_gc() { return max_alive_after_gc_; }
-
-  // Returns minimal interval between two subsequent collections.
-  double get_min_in_mutator() { return min_in_mutator_; }
-
-  // Update GC statistics that are tracked on the Heap.
-  void UpdateCumulativeGCStatistics(double duration, double spent_in_mutator,
-                                    double marking_time);
+  void UpdateTotalGCTime(double duration);
 
   bool MaximumSizeScavenge() { return maximum_size_scavenges_ > 0; }
 
@@ -1807,15 +1825,24 @@
   void SetOldGenerationAllocationLimit(intptr_t old_gen_size, double gc_speed,
                                        double mutator_speed);
 
-  intptr_t MinimumAllocationLimitGrowingStep() {
-    const double kRegularAllocationLimitGrowingStep = 8;
-    const double kLowMemoryAllocationLimitGrowingStep = 2;
-    intptr_t limit = (Page::kPageSize > MB ? Page::kPageSize : MB);
-    return limit * (ShouldOptimizeForMemoryUsage()
-                        ? kLowMemoryAllocationLimitGrowingStep
-                        : kRegularAllocationLimitGrowingStep);
+  intptr_t MinimumAllocationLimitGrowingStep();
+
+  intptr_t old_generation_allocation_limit() const {
+    return old_generation_allocation_limit_;
   }
 
+  bool always_allocate() { return always_allocate_scope_count_.Value() != 0; }
+
+  bool CanExpandOldGeneration(int size) {
+    if (force_oom_) return false;
+    return (OldGenerationCapacity() + size) < MaxOldGenerationSize();
+  }
+
+  bool ShouldExpandOldGenerationOnAllocationFailure();
+
+  enum class IncrementalMarkingLimit { kNoLimit, kSoftLimit, kHardLimit };
+  IncrementalMarkingLimit IncrementalMarkingLimitReached();
+
   // ===========================================================================
   // Idle notification. ========================================================
   // ===========================================================================
@@ -2011,6 +2038,9 @@
   // Allocate empty fixed array.
   MUST_USE_RESULT AllocationResult AllocateEmptyFixedArray();
 
+  // Allocate empty scope info.
+  MUST_USE_RESULT AllocationResult AllocateEmptyScopeInfo();
+
   // Allocate empty fixed typed array of given type.
   MUST_USE_RESULT AllocationResult
       AllocateEmptyFixedTypedArray(ExternalArrayType array_type);
@@ -2068,7 +2098,7 @@
   intptr_t initial_old_generation_size_;
   bool old_generation_size_configured_;
   intptr_t max_executable_size_;
-  intptr_t maximum_committed_;
+  size_t maximum_committed_;
 
   // For keeping track of how much data has survived
   // scavenge since last new space expansion.
@@ -2095,11 +2125,13 @@
 
   int global_ic_age_;
 
-  NewSpace new_space_;
+  NewSpace* new_space_;
   OldSpace* old_space_;
   OldSpace* code_space_;
   MapSpace* map_space_;
   LargeObjectSpace* lo_space_;
+  // Map from the space id to the space.
+  Space* space_[LAST_SPACE + 1];
   HeapState gc_state_;
   int gc_post_processing_depth_;
   Address new_space_top_after_last_gc_;
@@ -2136,10 +2168,6 @@
   // generation and on every allocation in large object space.
   intptr_t old_generation_allocation_limit_;
 
-  // Indicates that an allocation has failed in the old generation since the
-  // last GC.
-  bool old_gen_exhausted_;
-
   // Indicates that inline bump-pointer allocation has been globally disabled
   // for all spaces. This is used to disable allocations in generated code.
   bool inline_allocation_disabled_;
@@ -2168,7 +2196,6 @@
 
   GCTracer* tracer_;
 
-  int high_survival_rate_period_length_;
   intptr_t promoted_objects_size_;
   double promotion_ratio_;
   double promotion_rate_;
@@ -2185,24 +2212,9 @@
   // of the allocation site.
   unsigned int maximum_size_scavenges_;
 
-  // Maximum GC pause.
-  double max_gc_pause_;
-
   // Total time spent in GC.
   double total_gc_time_ms_;
 
-  // Maximum size of objects alive after GC.
-  intptr_t max_alive_after_gc_;
-
-  // Minimal interval between two subsequent collections.
-  double min_in_mutator_;
-
-  // Cumulative GC time spent in marking.
-  double marking_time_;
-
-  // Cumulative GC time spent in sweeping.
-  double sweeping_time_;
-
   // Last time an idle notification happened.
   double last_idle_notification_time_;
 
@@ -2242,7 +2254,7 @@
   // This counter is increased before each GC and never reset. To
   // account for the bytes allocated since the last GC, use the
   // OldGenerationAllocationCounter() function.
-  size_t old_generation_allocation_counter_;
+  size_t old_generation_allocation_counter_at_last_gc_;
 
   // The size of objects in old generation after the last MarkCompact GC.
   size_t old_generation_size_at_last_gc_;
@@ -2293,6 +2305,10 @@
   // The depth of HeapIterator nestings.
   int heap_iterator_depth_;
 
+  EmbedderHeapTracer* embedder_heap_tracer_;
+  EmbedderReachableReferenceReporter* embedder_reference_reporter_;
+  std::vector<std::pair<void*, void*>> wrappers_to_trace_;
+
   // Used for testing purposes.
   bool force_oom_;
 
@@ -2303,12 +2319,15 @@
   friend class HeapIterator;
   friend class IdleScavengeObserver;
   friend class IncrementalMarking;
+  friend class IncrementalMarkingJob;
   friend class IteratePromotedObjectsVisitor;
+  friend class LargeObjectSpace;
   friend class MarkCompactCollector;
   friend class MarkCompactMarkingVisitor;
   friend class NewSpace;
   friend class ObjectStatsCollector;
   friend class Page;
+  friend class PagedSpace;
   friend class Scavenger;
   friend class StoreBuffer;
   friend class TestMemoryAllocatorScope;
@@ -2402,7 +2421,7 @@
 
 // Space iterator for iterating over all old spaces of the heap: Old space
 // and code space.  Returns each space in turn, and null when it is done.
-class OldSpaces BASE_EMBEDDED {
+class V8_EXPORT_PRIVATE OldSpaces BASE_EMBEDDED {
  public:
   explicit OldSpaces(Heap* heap) : heap_(heap), counter_(OLD_SPACE) {}
   OldSpace* next();
@@ -2427,23 +2446,17 @@
 };
 
 
-// Space iterator for iterating over all spaces of the heap.
-// For each space an object iterator is provided. The deallocation of the
-// returned object iterators is handled by the space iterator.
 class SpaceIterator : public Malloced {
  public:
   explicit SpaceIterator(Heap* heap);
   virtual ~SpaceIterator();
 
   bool has_next();
-  ObjectIterator* next();
+  Space* next();
 
  private:
-  ObjectIterator* CreateIterator();
-
   Heap* heap_;
   int current_space_;         // from enum AllocationSpace.
-  ObjectIterator* iterator_;  // object iterator for the current space.
 };
 
 
@@ -2489,113 +2502,9 @@
   // Space iterator for iterating all the spaces.
   SpaceIterator* space_iterator_;
   // Object iterator for the space currently being iterated.
-  ObjectIterator* object_iterator_;
+  std::unique_ptr<ObjectIterator> object_iterator_;
 };
 
-
-// Cache for mapping (map, property name) into field offset.
-// Cleared at startup and prior to mark sweep collection.
-class KeyedLookupCache {
- public:
-  // Lookup field offset for (map, name). If absent, -1 is returned.
-  int Lookup(Handle<Map> map, Handle<Name> name);
-
-  // Update an element in the cache.
-  void Update(Handle<Map> map, Handle<Name> name, int field_offset);
-
-  // Clear the cache.
-  void Clear();
-
-  static const int kLength = 256;
-  static const int kCapacityMask = kLength - 1;
-  static const int kMapHashShift = 5;
-  static const int kHashMask = -4;  // Zero the last two bits.
-  static const int kEntriesPerBucket = 4;
-  static const int kEntryLength = 2;
-  static const int kMapIndex = 0;
-  static const int kKeyIndex = 1;
-  static const int kNotFound = -1;
-
-  // kEntriesPerBucket should be a power of 2.
-  STATIC_ASSERT((kEntriesPerBucket & (kEntriesPerBucket - 1)) == 0);
-  STATIC_ASSERT(kEntriesPerBucket == -kHashMask);
-
- private:
-  KeyedLookupCache() {
-    for (int i = 0; i < kLength; ++i) {
-      keys_[i].map = NULL;
-      keys_[i].name = NULL;
-      field_offsets_[i] = kNotFound;
-    }
-  }
-
-  static inline int Hash(Handle<Map> map, Handle<Name> name);
-
-  // Get the address of the keys and field_offsets arrays.  Used in
-  // generated code to perform cache lookups.
-  Address keys_address() { return reinterpret_cast<Address>(&keys_); }
-
-  Address field_offsets_address() {
-    return reinterpret_cast<Address>(&field_offsets_);
-  }
-
-  struct Key {
-    Map* map;
-    Name* name;
-  };
-
-  Key keys_[kLength];
-  int field_offsets_[kLength];
-
-  friend class ExternalReference;
-  friend class Isolate;
-  DISALLOW_COPY_AND_ASSIGN(KeyedLookupCache);
-};
-
-
-// Cache for mapping (map, property name) into descriptor index.
-// The cache contains both positive and negative results.
-// Descriptor index equals kNotFound means the property is absent.
-// Cleared at startup and prior to any gc.
-class DescriptorLookupCache {
- public:
-  // Lookup descriptor index for (map, name).
-  // If absent, kAbsent is returned.
-  inline int Lookup(Map* source, Name* name);
-
-  // Update an element in the cache.
-  inline void Update(Map* source, Name* name, int result);
-
-  // Clear the cache.
-  void Clear();
-
-  static const int kAbsent = -2;
-
- private:
-  DescriptorLookupCache() {
-    for (int i = 0; i < kLength; ++i) {
-      keys_[i].source = NULL;
-      keys_[i].name = NULL;
-      results_[i] = kAbsent;
-    }
-  }
-
-  static inline int Hash(Object* source, Name* name);
-
-  static const int kLength = 64;
-  struct Key {
-    Map* source;
-    Name* name;
-  };
-
-  Key keys_[kLength];
-  int results_[kLength];
-
-  friend class Isolate;
-  DISALLOW_COPY_AND_ASSIGN(DescriptorLookupCache);
-};
-
-
 // Abstract base class for checking whether a weak object should be retained.
 class WeakObjectRetainer {
  public:
@@ -2720,6 +2629,18 @@
   DISALLOW_COPY_AND_ASSIGN(AllocationObserver);
 };
 
+class TracePossibleWrapperReporter : public EmbedderReachableReferenceReporter {
+ public:
+  explicit TracePossibleWrapperReporter(Heap* heap) : heap_(heap) {}
+  void ReportExternalReference(Value* object) override {
+    heap_->RegisterExternallyReferencedObject(
+        reinterpret_cast<Object**>(object));
+  }
+
+ private:
+  Heap* heap_;
+};
+
 }  // namespace internal
 }  // namespace v8
 
diff --git a/src/heap/incremental-marking-inl.h b/src/heap/incremental-marking-inl.h
index fa22da6..ee594b2 100644
--- a/src/heap/incremental-marking-inl.h
+++ b/src/heap/incremental-marking-inl.h
@@ -6,6 +6,7 @@
 #define V8_HEAP_INCREMENTAL_MARKING_INL_H_
 
 #include "src/heap/incremental-marking.h"
+#include "src/isolate.h"
 
 namespace v8 {
 namespace internal {
@@ -33,6 +34,15 @@
   }
 }
 
+void IncrementalMarking::RestartIfNotMarking() {
+  if (state_ == COMPLETE) {
+    state_ = MARKING;
+    if (FLAG_trace_incremental_marking) {
+      heap()->isolate()->PrintWithTimestamp(
+          "[IncrementalMarking] Restarting (new grey objects)\n");
+    }
+  }
+}
 
 }  // namespace internal
 }  // namespace v8
diff --git a/src/heap/incremental-marking-job.cc b/src/heap/incremental-marking-job.cc
index fe14dd0..393b9cc 100644
--- a/src/heap/incremental-marking-job.cc
+++ b/src/heap/incremental-marking-job.cc
@@ -14,131 +14,49 @@
 namespace v8 {
 namespace internal {
 
-const double IncrementalMarkingJob::kLongDelayInSeconds = 5;
-const double IncrementalMarkingJob::kShortDelayInSeconds = 0.5;
-
 void IncrementalMarkingJob::Start(Heap* heap) {
   DCHECK(!heap->incremental_marking()->IsStopped());
-  // We don't need to reset the flags because tasks from the previous job
-  // can still be pending. We just want to ensure that tasks are posted
-  // if they are not pending.
-  // If delayed task is pending and made_progress_since_last_delayed_task_ is
-  // true, then the delayed task will clear that flag when it is rescheduled.
-  ScheduleIdleTask(heap);
-  ScheduleDelayedTask(heap);
+  ScheduleTask(heap);
 }
 
+void IncrementalMarkingJob::NotifyTask() { task_pending_ = false; }
 
-void IncrementalMarkingJob::NotifyIdleTask() { idle_task_pending_ = false; }
-
-
-void IncrementalMarkingJob::NotifyDelayedTask() {
-  delayed_task_pending_ = false;
-}
-
-
-void IncrementalMarkingJob::NotifyIdleTaskProgress() {
-  made_progress_since_last_delayed_task_ = true;
-}
-
-
-void IncrementalMarkingJob::ScheduleIdleTask(Heap* heap) {
-  if (!idle_task_pending_) {
+void IncrementalMarkingJob::ScheduleTask(Heap* heap) {
+  if (!task_pending_) {
     v8::Isolate* isolate = reinterpret_cast<v8::Isolate*>(heap->isolate());
-    if (V8::GetCurrentPlatform()->IdleTasksEnabled(isolate)) {
-      idle_task_pending_ = true;
-      auto task = new IdleTask(heap->isolate(), this);
-      V8::GetCurrentPlatform()->CallIdleOnForegroundThread(isolate, task);
-    }
+    task_pending_ = true;
+    auto task = new Task(heap->isolate(), this);
+    V8::GetCurrentPlatform()->CallOnForegroundThread(isolate, task);
   }
 }
 
-
-void IncrementalMarkingJob::ScheduleDelayedTask(Heap* heap) {
-  if (!delayed_task_pending_ && FLAG_memory_reducer) {
-    v8::Isolate* isolate = reinterpret_cast<v8::Isolate*>(heap->isolate());
-    delayed_task_pending_ = true;
-    made_progress_since_last_delayed_task_ = false;
-    auto task = new DelayedTask(heap->isolate(), this);
-    double delay =
-        heap->HighMemoryPressure() ? kShortDelayInSeconds : kLongDelayInSeconds;
-    V8::GetCurrentPlatform()->CallDelayedOnForegroundThread(isolate, task,
-                                                            delay);
-  }
-}
-
-
-IncrementalMarkingJob::IdleTask::Progress IncrementalMarkingJob::IdleTask::Step(
-    Heap* heap, double deadline_in_ms) {
-  IncrementalMarking* incremental_marking = heap->incremental_marking();
-  if (incremental_marking->IsStopped()) {
-    return kDone;
-  }
-  if (incremental_marking->IsSweeping()) {
-    incremental_marking->FinalizeSweeping();
-    // TODO(hpayer): We can continue here if enough idle time is left.
-    return kMoreWork;
-  }
-  const double remaining_idle_time_in_ms =
-      incremental_marking->AdvanceIncrementalMarking(
-          deadline_in_ms, IncrementalMarking::IdleStepActions());
-  if (remaining_idle_time_in_ms > 0.0) {
-    heap->TryFinalizeIdleIncrementalMarking(remaining_idle_time_in_ms);
-  }
-  return incremental_marking->IsStopped() ? kDone : kMoreWork;
-}
-
-
-void IncrementalMarkingJob::IdleTask::RunInternal(double deadline_in_seconds) {
-  double deadline_in_ms =
-      deadline_in_seconds *
-      static_cast<double>(base::Time::kMillisecondsPerSecond);
-  Heap* heap = isolate()->heap();
-  double start_ms = heap->MonotonicallyIncreasingTimeInMs();
-  job_->NotifyIdleTask();
-  job_->NotifyIdleTaskProgress();
-  if (Step(heap, deadline_in_ms) == kMoreWork) {
-    job_->ScheduleIdleTask(heap);
-  }
-  if (FLAG_trace_idle_notification) {
-    double current_time_ms = heap->MonotonicallyIncreasingTimeInMs();
-    double idle_time_in_ms = deadline_in_ms - start_ms;
-    double deadline_difference = deadline_in_ms - current_time_ms;
-    PrintIsolate(isolate(), "%8.0f ms: ", isolate()->time_millis_since_init());
-    PrintF(
-        "Idle task: requested idle time %.2f ms, used idle time %.2f "
-        "ms, deadline usage %.2f ms\n",
-        idle_time_in_ms, idle_time_in_ms - deadline_difference,
-        deadline_difference);
-  }
-}
-
-
-void IncrementalMarkingJob::DelayedTask::Step(Heap* heap) {
-  const int kIncrementalMarkingDelayMs = 50;
+void IncrementalMarkingJob::Task::Step(Heap* heap) {
+  const int kIncrementalMarkingDelayMs = 1;
   double deadline =
       heap->MonotonicallyIncreasingTimeInMs() + kIncrementalMarkingDelayMs;
   heap->incremental_marking()->AdvanceIncrementalMarking(
-      deadline, i::IncrementalMarking::StepActions(
-                    i::IncrementalMarking::NO_GC_VIA_STACK_GUARD,
-                    i::IncrementalMarking::FORCE_MARKING,
-                    i::IncrementalMarking::FORCE_COMPLETION));
+      deadline, i::IncrementalMarking::NO_GC_VIA_STACK_GUARD,
+      i::IncrementalMarking::FORCE_COMPLETION, i::StepOrigin::kTask);
   heap->FinalizeIncrementalMarkingIfComplete(
-      "Incremental marking task: finalize incremental marking");
+      GarbageCollectionReason::kFinalizeMarkingViaTask);
 }
 
-
-void IncrementalMarkingJob::DelayedTask::RunInternal() {
+void IncrementalMarkingJob::Task::RunInternal() {
   Heap* heap = isolate()->heap();
-  job_->NotifyDelayedTask();
+  job_->NotifyTask();
   IncrementalMarking* incremental_marking = heap->incremental_marking();
-  if (!incremental_marking->IsStopped()) {
-    if (job_->ShouldForceMarkingStep()) {
-      Step(heap);
+  if (incremental_marking->IsStopped()) {
+    if (heap->IncrementalMarkingLimitReached() !=
+        Heap::IncrementalMarkingLimit::kNoLimit) {
+      heap->StartIncrementalMarking(Heap::kNoGCFlags,
+                                    GarbageCollectionReason::kIdleTask,
+                                    kNoGCCallbackFlags);
     }
-    // The Step() above could have finished incremental marking.
+  }
+  if (!incremental_marking->IsStopped()) {
+    Step(heap);
     if (!incremental_marking->IsStopped()) {
-      job_->ScheduleDelayedTask(heap);
+      job_->ScheduleTask(heap);
     }
   }
 }
diff --git a/src/heap/incremental-marking-job.h b/src/heap/incremental-marking-job.h
index 9c78182..ccc60c5 100644
--- a/src/heap/incremental-marking-job.h
+++ b/src/heap/incremental-marking-job.h
@@ -14,31 +14,13 @@
 class Isolate;
 
 // The incremental marking job uses platform tasks to perform incremental
-// marking steps. The job posts an idle and a delayed task with a large delay.
-// The delayed task performs steps only if the idle task is not making progress.
-// We expect this to be a rare event since incremental marking should finish
-// quickly with the help of the mutator and the idle task.
-// The delayed task guarantees that we eventually finish incremental marking
-// even if the mutator becomes idle and the platform stops running idle tasks,
-// which can happen for background tabs in Chrome.
+// marking steps. The job posts a foreground task that makes a small (~1ms)
+// step and posts another task until the marking is completed.
 class IncrementalMarkingJob {
  public:
-  class IdleTask : public CancelableIdleTask {
+  class Task : public CancelableTask {
    public:
-    explicit IdleTask(Isolate* isolate, IncrementalMarkingJob* job)
-        : CancelableIdleTask(isolate), job_(job) {}
-    enum Progress { kDone, kMoreWork };
-    static Progress Step(Heap* heap, double deadline_in_ms);
-    // CancelableIdleTask overrides.
-    void RunInternal(double deadline_in_seconds) override;
-
-   private:
-    IncrementalMarkingJob* job_;
-  };
-
-  class DelayedTask : public CancelableTask {
-   public:
-    explicit DelayedTask(Isolate* isolate, IncrementalMarkingJob* job)
+    explicit Task(Isolate* isolate, IncrementalMarkingJob* job)
         : CancelableTask(isolate), job_(job) {}
     static void Step(Heap* heap);
     // CancelableTask overrides.
@@ -48,33 +30,18 @@
     IncrementalMarkingJob* job_;
   };
 
-  // Delay of the delayed task.
-  static const double kLongDelayInSeconds;
-  static const double kShortDelayInSeconds;
+  IncrementalMarkingJob() : task_pending_(false) {}
 
-  IncrementalMarkingJob()
-      : idle_task_pending_(false),
-        delayed_task_pending_(false),
-        made_progress_since_last_delayed_task_(false) {}
-
-  bool ShouldForceMarkingStep() {
-    return !made_progress_since_last_delayed_task_;
-  }
-
-  bool IdleTaskPending() { return idle_task_pending_; }
+  bool TaskPending() { return task_pending_; }
 
   void Start(Heap* heap);
 
-  void NotifyIdleTask();
-  void NotifyDelayedTask();
-  void NotifyIdleTaskProgress();
-  void ScheduleIdleTask(Heap* heap);
-  void ScheduleDelayedTask(Heap* heap);
+  void NotifyTask();
+
+  void ScheduleTask(Heap* heap);
 
  private:
-  bool idle_task_pending_;
-  bool delayed_task_pending_;
-  bool made_progress_since_last_delayed_task_;
+  bool task_pending_;
 };
 }  // namespace internal
 }  // namespace v8
diff --git a/src/heap/incremental-marking.cc b/src/heap/incremental-marking.cc
index b9e7c61..579228c 100644
--- a/src/heap/incremental-marking.cc
+++ b/src/heap/incremental-marking.cc
@@ -19,33 +19,22 @@
 namespace v8 {
 namespace internal {
 
-IncrementalMarking::StepActions IncrementalMarking::IdleStepActions() {
-  return StepActions(IncrementalMarking::NO_GC_VIA_STACK_GUARD,
-                     IncrementalMarking::FORCE_MARKING,
-                     IncrementalMarking::DO_NOT_FORCE_COMPLETION);
-}
-
 IncrementalMarking::IncrementalMarking(Heap* heap)
     : heap_(heap),
-      observer_(*this, kAllocatedThreshold),
       state_(STOPPED),
-      is_compacting_(false),
-      steps_count_(0),
-      old_generation_space_available_at_start_of_incremental_(0),
-      old_generation_space_used_at_start_of_incremental_(0),
-      bytes_rescanned_(0),
-      should_hurry_(false),
-      marking_speed_(0),
-      bytes_scanned_(0),
-      allocated_(0),
-      write_barriers_invoked_since_last_step_(0),
-      idle_marking_delay_counter_(0),
+      initial_old_generation_size_(0),
+      bytes_marked_ahead_of_schedule_(0),
       unscanned_bytes_of_large_object_(0),
+      idle_marking_delay_counter_(0),
+      incremental_marking_finalization_rounds_(0),
+      is_compacting_(false),
+      should_hurry_(false),
       was_activated_(false),
       black_allocation_(false),
       finalize_marking_completed_(false),
-      incremental_marking_finalization_rounds_(0),
-      request_type_(NONE) {}
+      request_type_(NONE),
+      new_generation_observer_(*this, kAllocatedThreshold),
+      old_generation_observer_(*this, kAllocatedThreshold) {}
 
 bool IncrementalMarking::BaseRecordWrite(HeapObject* obj, Object* value) {
   HeapObject* value_heap_obj = HeapObject::cast(value);
@@ -76,19 +65,7 @@
 void IncrementalMarking::RecordWriteFromCode(HeapObject* obj, Object** slot,
                                              Isolate* isolate) {
   DCHECK(obj->IsHeapObject());
-  IncrementalMarking* marking = isolate->heap()->incremental_marking();
-
-  MemoryChunk* chunk = MemoryChunk::FromAddress(obj->address());
-  int counter = chunk->write_barrier_counter();
-  if (counter < (MemoryChunk::kWriteBarrierCounterGranularity / 2)) {
-    marking->write_barriers_invoked_since_last_step_ +=
-        MemoryChunk::kWriteBarrierCounterGranularity -
-        chunk->write_barrier_counter();
-    chunk->set_write_barrier_counter(
-        MemoryChunk::kWriteBarrierCounterGranularity);
-  }
-
-  marking->RecordWrite(obj, slot, *slot);
+  isolate->heap()->incremental_marking()->RecordWrite(obj, slot, *slot);
 }
 
 // static
@@ -202,20 +179,15 @@
     StaticMarkingVisitor<IncrementalMarkingMarkingVisitor>::Initialize();
     table_.Register(kVisitFixedArray, &VisitFixedArrayIncremental);
     table_.Register(kVisitNativeContext, &VisitNativeContextIncremental);
-    table_.Register(kVisitJSRegExp, &VisitJSRegExp);
   }
 
   static const int kProgressBarScanningChunk = 32 * 1024;
 
   static void VisitFixedArrayIncremental(Map* map, HeapObject* object) {
     MemoryChunk* chunk = MemoryChunk::FromAddress(object->address());
-    // TODO(mstarzinger): Move setting of the flag to the allocation site of
-    // the array. The visitor should just check the flag.
-    if (FLAG_use_marking_progress_bar &&
-        chunk->owner()->identity() == LO_SPACE) {
-      chunk->SetFlag(MemoryChunk::HAS_PROGRESS_BAR);
-    }
     if (chunk->IsFlagSet(MemoryChunk::HAS_PROGRESS_BAR)) {
+      DCHECK(!FLAG_use_marking_progress_bar ||
+             chunk->owner()->identity() == LO_SPACE);
       Heap* heap = map->GetHeap();
       // When using a progress bar for large fixed arrays, scan only a chunk of
       // the array and try to push it onto the marking deque again until it is
@@ -423,22 +395,6 @@
 }
 
 
-bool IncrementalMarking::ShouldActivateEvenWithoutIdleNotification() {
-#ifndef DEBUG
-  static const intptr_t kActivationThreshold = 8 * MB;
-#else
-  // TODO(gc) consider setting this to some low level so that some
-  // debug tests run with incremental marking and some without.
-  static const intptr_t kActivationThreshold = 0;
-#endif
-  // Don't switch on for very small heaps.
-  return CanBeActivated() &&
-         heap_->PromotedSpaceSizeOfObjects() > kActivationThreshold &&
-         heap_->HeapIsFullEnoughToStartIncrementalMarking(
-             heap_->old_generation_allocation_limit());
-}
-
-
 bool IncrementalMarking::WasActivated() { return was_activated_; }
 
 
@@ -467,21 +423,6 @@
 }
 
 
-void IncrementalMarking::NotifyOfHighPromotionRate() {
-  if (IsMarking()) {
-    if (marking_speed_ < kFastMarking) {
-      if (FLAG_trace_gc) {
-        PrintIsolate(heap()->isolate(),
-                     "Increasing marking speed to %d "
-                     "due to high promotion rate\n",
-                     static_cast<int>(kFastMarking));
-      }
-      marking_speed_ = kFastMarking;
-    }
-  }
-}
-
-
 static void PatchIncrementalMarkingRecordWriteStubs(
     Heap* heap, RecordWriteStub::Mode mode) {
   UnseededNumberDictionary* stubs = heap->code_stubs();
@@ -503,34 +444,60 @@
   }
 }
 
-
-void IncrementalMarking::Start(const char* reason) {
+void IncrementalMarking::Start(GarbageCollectionReason gc_reason) {
   if (FLAG_trace_incremental_marking) {
-    PrintF("[IncrementalMarking] Start (%s)\n",
-           (reason == nullptr) ? "unknown reason" : reason);
+    int old_generation_size_mb =
+        static_cast<int>(heap()->PromotedSpaceSizeOfObjects() / MB);
+    int old_generation_limit_mb =
+        static_cast<int>(heap()->old_generation_allocation_limit() / MB);
+    heap()->isolate()->PrintWithTimestamp(
+        "[IncrementalMarking] Start (%s): old generation %dMB, limit %dMB, "
+        "slack %dMB\n",
+        Heap::GarbageCollectionReasonToString(gc_reason),
+        old_generation_size_mb, old_generation_limit_mb,
+        Max(0, old_generation_limit_mb - old_generation_size_mb));
   }
   DCHECK(FLAG_incremental_marking);
   DCHECK(state_ == STOPPED);
   DCHECK(heap_->gc_state() == Heap::NOT_IN_GC);
   DCHECK(!heap_->isolate()->serializer_enabled());
 
-  HistogramTimerScope incremental_marking_scope(
-      heap_->isolate()->counters()->gc_incremental_marking_start());
-  TRACE_EVENT0("v8", "V8.GCIncrementalMarkingStart");
-  ResetStepCounters();
+  Counters* counters = heap_->isolate()->counters();
 
+  counters->incremental_marking_reason()->AddSample(
+      static_cast<int>(gc_reason));
+  HistogramTimerScope incremental_marking_scope(
+      counters->gc_incremental_marking_start());
+  TRACE_EVENT0("v8", "V8.GCIncrementalMarkingStart");
+  heap_->tracer()->NotifyIncrementalMarkingStart();
+
+  start_time_ms_ = heap()->MonotonicallyIncreasingTimeInMs();
+  initial_old_generation_size_ = heap_->PromotedSpaceSizeOfObjects();
+  old_generation_allocation_counter_ = heap_->OldGenerationAllocationCounter();
+  bytes_allocated_ = 0;
+  bytes_marked_ahead_of_schedule_ = 0;
+  should_hurry_ = false;
   was_activated_ = true;
 
   if (!heap_->mark_compact_collector()->sweeping_in_progress()) {
     StartMarking();
   } else {
     if (FLAG_trace_incremental_marking) {
-      PrintF("[IncrementalMarking] Start sweeping.\n");
+      heap()->isolate()->PrintWithTimestamp(
+          "[IncrementalMarking] Start sweeping.\n");
     }
     state_ = SWEEPING;
   }
 
-  heap_->new_space()->AddAllocationObserver(&observer_);
+  SpaceIterator it(heap_);
+  while (it.has_next()) {
+    Space* space = it.next();
+    if (space == heap_->new_space()) {
+      space->AddAllocationObserver(&new_generation_observer_);
+    } else {
+      space->AddAllocationObserver(&old_generation_observer_);
+    }
+  }
 
   incremental_marking_job()->Start(heap_);
 }
@@ -542,12 +509,14 @@
     // but we cannot enable black allocation while deserializing. Hence, we
     // have to delay the start of incremental marking in that case.
     if (FLAG_trace_incremental_marking) {
-      PrintF("[IncrementalMarking] Start delayed - serializer\n");
+      heap()->isolate()->PrintWithTimestamp(
+          "[IncrementalMarking] Start delayed - serializer\n");
     }
     return;
   }
   if (FLAG_trace_incremental_marking) {
-    PrintF("[IncrementalMarking] Start marking\n");
+    heap()->isolate()->PrintWithTimestamp(
+        "[IncrementalMarking] Start marking\n");
   }
 
   is_compacting_ = !FLAG_never_compact &&
@@ -559,7 +528,8 @@
   if (heap_->UsingEmbedderHeapTracer()) {
     TRACE_GC(heap()->tracer(),
              GCTracer::Scope::MC_INCREMENTAL_WRAPPER_PROLOGUE);
-    heap_->mark_compact_collector()->embedder_heap_tracer()->TracePrologue();
+    heap_->embedder_heap_tracer()->TracePrologue(
+        heap_->embedder_reachable_reference_reporter());
   }
 
   RecordWriteStub::Mode mode = is_compacting_
@@ -589,7 +559,7 @@
 
   // Ready to start incremental marking.
   if (FLAG_trace_incremental_marking) {
-    PrintF("[IncrementalMarking] Running\n");
+    heap()->isolate()->PrintWithTimestamp("[IncrementalMarking] Running\n");
   }
 }
 
@@ -601,7 +571,8 @@
   heap()->map_space()->MarkAllocationInfoBlack();
   heap()->code_space()->MarkAllocationInfoBlack();
   if (FLAG_trace_incremental_marking) {
-    PrintF("[IncrementalMarking] Black allocation started\n");
+    heap()->isolate()->PrintWithTimestamp(
+        "[IncrementalMarking] Black allocation started\n");
   }
 }
 
@@ -609,11 +580,22 @@
   if (black_allocation_) {
     black_allocation_ = false;
     if (FLAG_trace_incremental_marking) {
-      PrintF("[IncrementalMarking] Black allocation finished\n");
+      heap()->isolate()->PrintWithTimestamp(
+          "[IncrementalMarking] Black allocation finished\n");
     }
   }
 }
 
+void IncrementalMarking::AbortBlackAllocation() {
+  for (Page* page : *heap()->old_space()) {
+    page->ReleaseBlackAreaEndMarkerMap();
+  }
+  if (FLAG_trace_incremental_marking) {
+    heap()->isolate()->PrintWithTimestamp(
+        "[IncrementalMarking] Black allocation aborted\n");
+  }
+}
+
 void IncrementalMarking::MarkRoots() {
   DCHECK(!finalize_marking_completed_);
   DCHECK(IsMarking());
@@ -742,7 +724,6 @@
   }
 }
 
-
 void IncrementalMarking::FinalizeIncrementally() {
   TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_INCREMENTAL_FINALIZE_BODY);
   DCHECK(!finalize_marking_completed_);
@@ -775,11 +756,12 @@
       abs(old_marking_deque_top -
           heap_->mark_compact_collector()->marking_deque()->top());
 
+  marking_progress += static_cast<int>(heap_->wrappers_to_trace());
+
   double end = heap_->MonotonicallyIncreasingTimeInMs();
   double delta = end - start;
-  heap_->tracer()->AddMarkingTime(delta);
   if (FLAG_trace_incremental_marking) {
-    PrintF(
+    heap()->isolate()->PrintWithTimestamp(
         "[IncrementalMarking] Finalize incrementally round %d, "
         "spent %d ms, marking progress %d.\n",
         static_cast<int>(delta), incremental_marking_finalization_rounds_,
@@ -926,23 +908,23 @@
   // because should_hurry_ will force a full GC.
   if (!heap_->mark_compact_collector()->marking_deque()->IsEmpty()) {
     double start = 0.0;
-    if (FLAG_trace_incremental_marking || FLAG_print_cumulative_gc_stat) {
+    if (FLAG_trace_incremental_marking) {
       start = heap_->MonotonicallyIncreasingTimeInMs();
       if (FLAG_trace_incremental_marking) {
-        PrintF("[IncrementalMarking] Hurry\n");
+        heap()->isolate()->PrintWithTimestamp("[IncrementalMarking] Hurry\n");
       }
     }
     // TODO(gc) hurry can mark objects it encounters black as mutator
     // was stopped.
     ProcessMarkingDeque(0, FORCE_COMPLETION);
     state_ = COMPLETE;
-    if (FLAG_trace_incremental_marking || FLAG_print_cumulative_gc_stat) {
+    if (FLAG_trace_incremental_marking) {
       double end = heap_->MonotonicallyIncreasingTimeInMs();
       double delta = end - start;
-      heap_->tracer()->AddMarkingTime(delta);
       if (FLAG_trace_incremental_marking) {
-        PrintF("[IncrementalMarking] Complete (hurry), spent %d ms.\n",
-               static_cast<int>(delta));
+        heap()->isolate()->PrintWithTimestamp(
+            "[IncrementalMarking] Complete (hurry), spent %d ms.\n",
+            static_cast<int>(delta));
       }
     }
   }
@@ -968,12 +950,28 @@
 void IncrementalMarking::Stop() {
   if (IsStopped()) return;
   if (FLAG_trace_incremental_marking) {
-    PrintF("[IncrementalMarking] Stopping.\n");
+    int old_generation_size_mb =
+        static_cast<int>(heap()->PromotedSpaceSizeOfObjects() / MB);
+    int old_generation_limit_mb =
+        static_cast<int>(heap()->old_generation_allocation_limit() / MB);
+    heap()->isolate()->PrintWithTimestamp(
+        "[IncrementalMarking] Stopping: old generation %dMB, limit %dMB, "
+        "overshoot %dMB\n",
+        old_generation_size_mb, old_generation_limit_mb,
+        Max(0, old_generation_size_mb - old_generation_limit_mb));
   }
 
-  heap_->new_space()->RemoveAllocationObserver(&observer_);
+  SpaceIterator it(heap_);
+  while (it.has_next()) {
+    Space* space = it.next();
+    if (space == heap_->new_space()) {
+      space->RemoveAllocationObserver(&new_generation_observer_);
+    } else {
+      space->RemoveAllocationObserver(&old_generation_observer_);
+    }
+  }
+
   IncrementalMarking::set_should_hurry(false);
-  ResetStepCounters();
   if (IsMarking()) {
     PatchIncrementalMarkingRecordWriteStubs(heap_,
                                             RecordWriteStub::STORE_BUFFER_ONLY);
@@ -995,7 +993,7 @@
 void IncrementalMarking::FinalizeMarking(CompletionAction action) {
   DCHECK(!finalize_marking_completed_);
   if (FLAG_trace_incremental_marking) {
-    PrintF(
+    heap()->isolate()->PrintWithTimestamp(
         "[IncrementalMarking] requesting finalization of incremental "
         "marking.\n");
   }
@@ -1015,7 +1013,8 @@
   // the should-hurry flag to indicate that there can't be much work left to do.
   set_should_hurry(true);
   if (FLAG_trace_incremental_marking) {
-    PrintF("[IncrementalMarking] Complete (normal).\n");
+    heap()->isolate()->PrintWithTimestamp(
+        "[IncrementalMarking] Complete (normal).\n");
   }
   request_type_ = COMPLETE_MARKING;
   if (action == GC_VIA_STACK_GUARD) {
@@ -1031,246 +1030,163 @@
 }
 
 double IncrementalMarking::AdvanceIncrementalMarking(
-    double deadline_in_ms, IncrementalMarking::StepActions step_actions) {
+    double deadline_in_ms, CompletionAction completion_action,
+    ForceCompletionAction force_completion, StepOrigin step_origin) {
   DCHECK(!IsStopped());
 
-  intptr_t step_size_in_bytes = GCIdleTimeHandler::EstimateMarkingStepSize(
-      GCIdleTimeHandler::kIncrementalMarkingStepTimeInMs,
-      heap()
-          ->tracer()
-          ->FinalIncrementalMarkCompactSpeedInBytesPerMillisecond());
   double remaining_time_in_ms = 0.0;
-  intptr_t bytes_processed = 0;
+  intptr_t step_size_in_bytes = GCIdleTimeHandler::EstimateMarkingStepSize(
+      kStepSizeInMs,
+      heap()->tracer()->IncrementalMarkingSpeedInBytesPerMillisecond());
 
   do {
-    bytes_processed =
-        Step(step_size_in_bytes, step_actions.completion_action,
-             step_actions.force_marking, step_actions.force_completion);
+    Step(step_size_in_bytes, completion_action, force_completion, step_origin);
     remaining_time_in_ms =
         deadline_in_ms - heap()->MonotonicallyIncreasingTimeInMs();
-  } while (bytes_processed > 0 &&
-           remaining_time_in_ms >=
-               2.0 * GCIdleTimeHandler::kIncrementalMarkingStepTimeInMs &&
-           !IsComplete() &&
+  } while (remaining_time_in_ms >= kStepSizeInMs && !IsComplete() &&
            !heap()->mark_compact_collector()->marking_deque()->IsEmpty());
   return remaining_time_in_ms;
 }
 
 
-void IncrementalMarking::OldSpaceStep(intptr_t allocated) {
-  if (IsStopped() && ShouldActivateEvenWithoutIdleNotification()) {
-    heap()->StartIncrementalMarking(Heap::kNoGCFlags, kNoGCCallbackFlags,
-                                    "old space step");
-  } else {
-    Step(allocated * kFastMarking / kInitialMarkingSpeed, GC_VIA_STACK_GUARD);
-  }
-}
-
-
-void IncrementalMarking::SpeedUp() {
-  bool speed_up = false;
-
-  if ((steps_count_ % kMarkingSpeedAccellerationInterval) == 0) {
-    if (FLAG_trace_incremental_marking) {
-      PrintIsolate(heap()->isolate(), "Speed up marking after %d steps\n",
-                   static_cast<int>(kMarkingSpeedAccellerationInterval));
-    }
-    speed_up = true;
-  }
-
-  bool space_left_is_very_small =
-      (old_generation_space_available_at_start_of_incremental_ < 10 * MB);
-
-  bool only_1_nth_of_space_that_was_available_still_left =
-      (SpaceLeftInOldSpace() * (marking_speed_ + 1) <
-       old_generation_space_available_at_start_of_incremental_);
-
-  if (space_left_is_very_small ||
-      only_1_nth_of_space_that_was_available_still_left) {
-    if (FLAG_trace_incremental_marking)
-      PrintIsolate(heap()->isolate(),
-                   "Speed up marking because of low space left\n");
-    speed_up = true;
-  }
-
-  bool size_of_old_space_multiplied_by_n_during_marking =
-      (heap_->PromotedTotalSize() >
-       (marking_speed_ + 1) *
-           old_generation_space_used_at_start_of_incremental_);
-  if (size_of_old_space_multiplied_by_n_during_marking) {
-    speed_up = true;
-    if (FLAG_trace_incremental_marking) {
-      PrintIsolate(heap()->isolate(),
-                   "Speed up marking because of heap size increase\n");
-    }
-  }
-
-  int64_t promoted_during_marking =
-      heap_->PromotedTotalSize() -
-      old_generation_space_used_at_start_of_incremental_;
-  intptr_t delay = marking_speed_ * MB;
-  intptr_t scavenge_slack = heap_->MaxSemiSpaceSize();
-
-  // We try to scan at at least twice the speed that we are allocating.
-  if (promoted_during_marking > bytes_scanned_ / 2 + scavenge_slack + delay) {
-    if (FLAG_trace_incremental_marking) {
-      PrintIsolate(heap()->isolate(),
-                   "Speed up marking because marker was not keeping up\n");
-    }
-    speed_up = true;
-  }
-
-  if (speed_up) {
-    if (state_ != MARKING) {
-      if (FLAG_trace_incremental_marking) {
-        PrintIsolate(heap()->isolate(),
-                     "Postponing speeding up marking until marking starts\n");
-      }
-    } else {
-      marking_speed_ += kMarkingSpeedAccelleration;
-      marking_speed_ = static_cast<int>(
-          Min(kMaxMarkingSpeed, static_cast<intptr_t>(marking_speed_ * 1.3)));
-      if (FLAG_trace_incremental_marking) {
-        PrintIsolate(heap()->isolate(), "Marking speed increased to %d\n",
-                     marking_speed_);
-      }
-    }
-  }
-}
-
 void IncrementalMarking::FinalizeSweeping() {
   DCHECK(state_ == SWEEPING);
   if (heap_->mark_compact_collector()->sweeping_in_progress() &&
-      (heap_->mark_compact_collector()->sweeper().IsSweepingCompleted() ||
-       !FLAG_concurrent_sweeping)) {
+      (!FLAG_concurrent_sweeping ||
+       heap_->mark_compact_collector()->sweeper().IsSweepingCompleted())) {
     heap_->mark_compact_collector()->EnsureSweepingCompleted();
   }
   if (!heap_->mark_compact_collector()->sweeping_in_progress()) {
-    bytes_scanned_ = 0;
     StartMarking();
   }
 }
 
-intptr_t IncrementalMarking::Step(intptr_t allocated_bytes,
-                                  CompletionAction action,
-                                  ForceMarkingAction marking,
-                                  ForceCompletionAction completion) {
-  DCHECK(allocated_bytes >= 0);
+size_t IncrementalMarking::StepSizeToKeepUpWithAllocations() {
+  // Update bytes_allocated_ based on the allocation counter.
+  size_t current_counter = heap_->OldGenerationAllocationCounter();
+  bytes_allocated_ += current_counter - old_generation_allocation_counter_;
+  old_generation_allocation_counter_ = current_counter;
+  return bytes_allocated_;
+}
 
+size_t IncrementalMarking::StepSizeToMakeProgress() {
+  // We increase step size gradually based on the time passed in order to
+  // leave marking work to standalone tasks. The ramp up duration and the
+  // target step count are chosen based on benchmarks.
+  const int kRampUpIntervalMs = 300;
+  const size_t kTargetStepCount = 128;
+  size_t step_size = Max(initial_old_generation_size_ / kTargetStepCount,
+                         IncrementalMarking::kAllocatedThreshold);
+  double time_passed_ms =
+      heap_->MonotonicallyIncreasingTimeInMs() - start_time_ms_;
+  double factor = Min(time_passed_ms / kRampUpIntervalMs, 1.0);
+  return static_cast<size_t>(factor * step_size);
+}
+
+void IncrementalMarking::AdvanceIncrementalMarkingOnAllocation() {
   if (heap_->gc_state() != Heap::NOT_IN_GC || !FLAG_incremental_marking ||
       (state_ != SWEEPING && state_ != MARKING)) {
-    return 0;
+    return;
   }
 
-  allocated_ += allocated_bytes;
+  size_t bytes_to_process =
+      StepSizeToKeepUpWithAllocations() + StepSizeToMakeProgress();
 
-  if (marking == DO_NOT_FORCE_MARKING && allocated_ < kAllocatedThreshold &&
-      write_barriers_invoked_since_last_step_ <
-          kWriteBarriersInvokedThreshold) {
-    return 0;
-  }
+  if (bytes_to_process >= IncrementalMarking::kAllocatedThreshold) {
+    // The first step after Scavenge will see many allocated bytes.
+    // Cap the step size to distribute the marking work more uniformly.
+    size_t max_step_size = GCIdleTimeHandler::EstimateMarkingStepSize(
+        kMaxStepSizeInMs,
+        heap()->tracer()->IncrementalMarkingSpeedInBytesPerMillisecond());
+    bytes_to_process = Min(bytes_to_process, max_step_size);
 
-  // If an idle notification happened recently, we delay marking steps.
-  if (marking == DO_NOT_FORCE_MARKING &&
-      heap_->RecentIdleNotificationHappened()) {
-    return 0;
-  }
-
-  intptr_t bytes_processed = 0;
-  {
-    HistogramTimerScope incremental_marking_scope(
-        heap_->isolate()->counters()->gc_incremental_marking());
-    TRACE_EVENT0("v8", "V8.GCIncrementalMarking");
-    TRACE_GC(heap_->tracer(), GCTracer::Scope::MC_INCREMENTAL);
-    double start = heap_->MonotonicallyIncreasingTimeInMs();
-
-    // The marking speed is driven either by the allocation rate or by the rate
-    // at which we are having to check the color of objects in the write
-    // barrier.
-    // It is possible for a tight non-allocating loop to run a lot of write
-    // barriers before we get here and check them (marking can only take place
-    // on
-    // allocation), so to reduce the lumpiness we don't use the write barriers
-    // invoked since last step directly to determine the amount of work to do.
-    intptr_t bytes_to_process =
-        marking_speed_ *
-        Max(allocated_, write_barriers_invoked_since_last_step_);
-    allocated_ = 0;
-    write_barriers_invoked_since_last_step_ = 0;
-
-    bytes_scanned_ += bytes_to_process;
-
-    // TODO(hpayer): Do not account for sweeping finalization while marking.
-    if (state_ == SWEEPING) {
-      FinalizeSweeping();
+    size_t bytes_processed = 0;
+    if (bytes_marked_ahead_of_schedule_ >= bytes_to_process) {
+      // Steps performed in tasks have put us ahead of schedule.
+      // We skip processing of marking dequeue here and thus
+      // shift marking time from inside V8 to standalone tasks.
+      bytes_marked_ahead_of_schedule_ -= bytes_to_process;
+      bytes_processed = bytes_to_process;
+    } else {
+      bytes_processed = Step(bytes_to_process, GC_VIA_STACK_GUARD,
+                             FORCE_COMPLETION, StepOrigin::kV8);
     }
+    bytes_allocated_ -= Min(bytes_allocated_, bytes_processed);
+  }
+}
 
-    if (state_ == MARKING) {
+size_t IncrementalMarking::Step(size_t bytes_to_process,
+                                CompletionAction action,
+                                ForceCompletionAction completion,
+                                StepOrigin step_origin) {
+  HistogramTimerScope incremental_marking_scope(
+      heap_->isolate()->counters()->gc_incremental_marking());
+  TRACE_EVENT0("v8", "V8.GCIncrementalMarking");
+  TRACE_GC(heap_->tracer(), GCTracer::Scope::MC_INCREMENTAL);
+  double start = heap_->MonotonicallyIncreasingTimeInMs();
+
+  if (state_ == SWEEPING) {
+    TRACE_GC(heap_->tracer(), GCTracer::Scope::MC_INCREMENTAL_SWEEPING);
+    FinalizeSweeping();
+  }
+
+  size_t bytes_processed = 0;
+  if (state_ == MARKING) {
+    const bool incremental_wrapper_tracing =
+        FLAG_incremental_marking_wrappers && heap_->UsingEmbedderHeapTracer();
+    const bool process_wrappers =
+        incremental_wrapper_tracing &&
+        (heap_->RequiresImmediateWrapperProcessing() ||
+         heap_->mark_compact_collector()->marking_deque()->IsEmpty());
+    bool wrapper_work_left = incremental_wrapper_tracing;
+    if (!process_wrappers) {
       bytes_processed = ProcessMarkingDeque(bytes_to_process);
-      if (FLAG_incremental_marking_wrappers &&
-          heap_->UsingEmbedderHeapTracer()) {
-        TRACE_GC(heap()->tracer(),
-                 GCTracer::Scope::MC_INCREMENTAL_WRAPPER_TRACING);
-        // This currently marks through all registered wrappers and does not
-        // respect bytes_to_process.
-        // TODO(hpayer): Integrate incremental marking of wrappers into
-        // bytes_to_process logic.
-        heap_->mark_compact_collector()
-            ->RegisterWrappersWithEmbedderHeapTracer();
-        heap_->mark_compact_collector()->embedder_heap_tracer()->AdvanceTracing(
-            0,
-            EmbedderHeapTracer::AdvanceTracingActions(
-                EmbedderHeapTracer::ForceCompletionAction::FORCE_COMPLETION));
+      if (step_origin == StepOrigin::kTask) {
+        bytes_marked_ahead_of_schedule_ += bytes_processed;
       }
-      if (heap_->mark_compact_collector()->marking_deque()->IsEmpty()) {
-        if (completion == FORCE_COMPLETION ||
-            IsIdleMarkingDelayCounterLimitReached()) {
-          if (!finalize_marking_completed_) {
-            FinalizeMarking(action);
-          } else {
-            MarkingComplete(action);
-          }
-        } else {
-          IncrementIdleMarkingDelayCounter();
-        }
-      }
+    } else {
+      const double wrapper_deadline =
+          heap_->MonotonicallyIncreasingTimeInMs() + kStepSizeInMs;
+      TRACE_GC(heap()->tracer(),
+               GCTracer::Scope::MC_INCREMENTAL_WRAPPER_TRACING);
+      heap_->RegisterWrappersWithEmbedderHeapTracer();
+      wrapper_work_left = heap_->embedder_heap_tracer()->AdvanceTracing(
+          wrapper_deadline, EmbedderHeapTracer::AdvanceTracingActions(
+                                EmbedderHeapTracer::ForceCompletionAction::
+                                    DO_NOT_FORCE_COMPLETION));
     }
 
-    steps_count_++;
+    if (heap_->mark_compact_collector()->marking_deque()->IsEmpty() &&
+        !wrapper_work_left) {
+      if (completion == FORCE_COMPLETION ||
+          IsIdleMarkingDelayCounterLimitReached()) {
+        if (!finalize_marking_completed_) {
+          FinalizeMarking(action);
+        } else {
+          MarkingComplete(action);
+        }
+      } else {
+        IncrementIdleMarkingDelayCounter();
+      }
+    }
+  }
 
-    // Speed up marking if we are marking too slow or if we are almost done
-    // with marking.
-    SpeedUp();
-
-    double end = heap_->MonotonicallyIncreasingTimeInMs();
-    double duration = (end - start);
-    // Note that we report zero bytes here when sweeping was in progress or
-    // when we just started incremental marking. In these cases we did not
-    // process the marking deque.
-    heap_->tracer()->AddIncrementalMarkingStep(duration, bytes_processed);
+  double end = heap_->MonotonicallyIncreasingTimeInMs();
+  double duration = (end - start);
+  // Note that we report zero bytes here when sweeping was in progress or
+  // when we just started incremental marking. In these cases we did not
+  // process the marking deque.
+  heap_->tracer()->AddIncrementalMarkingStep(duration, bytes_processed);
+  if (FLAG_trace_incremental_marking) {
+    heap_->isolate()->PrintWithTimestamp(
+        "[IncrementalMarking] Step %s %zu bytes (%zu) in %.1f\n",
+        step_origin == StepOrigin::kV8 ? "in v8" : "in task", bytes_processed,
+        bytes_to_process, duration);
   }
   return bytes_processed;
 }
 
 
-void IncrementalMarking::ResetStepCounters() {
-  steps_count_ = 0;
-  old_generation_space_available_at_start_of_incremental_ =
-      SpaceLeftInOldSpace();
-  old_generation_space_used_at_start_of_incremental_ =
-      heap_->PromotedTotalSize();
-  bytes_rescanned_ = 0;
-  marking_speed_ = kInitialMarkingSpeed;
-  bytes_scanned_ = 0;
-  write_barriers_invoked_since_last_step_ = 0;
-}
-
-
-int64_t IncrementalMarking::SpaceLeftInOldSpace() {
-  return heap_->MaxOldGenerationSize() - heap_->PromotedSpaceSizeOfObjects();
-}
-
-
 bool IncrementalMarking::IsIdleMarkingDelayCounterLimitReached() {
   return idle_marking_delay_counter_ > kMaxIdleMarkingDelayCounter;
 }
@@ -1284,5 +1200,6 @@
 void IncrementalMarking::ClearIdleMarkingDelayCounter() {
   idle_marking_delay_counter_ = 0;
 }
+
 }  // namespace internal
 }  // namespace v8
diff --git a/src/heap/incremental-marking.h b/src/heap/incremental-marking.h
index 877f05e..c2290c4 100644
--- a/src/heap/incremental-marking.h
+++ b/src/heap/incremental-marking.h
@@ -20,33 +20,18 @@
 class MarkBit;
 class PagedSpace;
 
+enum class StepOrigin { kV8, kTask };
+
 class IncrementalMarking {
  public:
   enum State { STOPPED, SWEEPING, MARKING, COMPLETE };
 
   enum CompletionAction { GC_VIA_STACK_GUARD, NO_GC_VIA_STACK_GUARD };
 
-  enum ForceMarkingAction { FORCE_MARKING, DO_NOT_FORCE_MARKING };
-
   enum ForceCompletionAction { FORCE_COMPLETION, DO_NOT_FORCE_COMPLETION };
 
   enum GCRequestType { NONE, COMPLETE_MARKING, FINALIZATION };
 
-  struct StepActions {
-    StepActions(CompletionAction complete_action_,
-                ForceMarkingAction force_marking_,
-                ForceCompletionAction force_completion_)
-        : completion_action(complete_action_),
-          force_marking(force_marking_),
-          force_completion(force_completion_) {}
-
-    CompletionAction completion_action;
-    ForceMarkingAction force_marking;
-    ForceCompletionAction force_completion;
-  };
-
-  static StepActions IdleStepActions();
-
   explicit IncrementalMarking(Heap* heap);
 
   static void Initialize();
@@ -87,11 +72,9 @@
 
   bool CanBeActivated();
 
-  bool ShouldActivateEvenWithoutIdleNotification();
-
   bool WasActivated();
 
-  void Start(const char* reason = nullptr);
+  void Start(GarbageCollectionReason gc_reason);
 
   void FinalizeIncrementally();
 
@@ -113,7 +96,9 @@
   // returns the remaining time that cannot be used for incremental marking
   // anymore because a single step would exceed the deadline.
   double AdvanceIncrementalMarking(double deadline_in_ms,
-                                   StepActions step_actions);
+                                   CompletionAction completion_action,
+                                   ForceCompletionAction force_completion,
+                                   StepOrigin step_origin);
 
   // It's hard to know how much work the incremental marker should do to make
   // progress in the face of the mutator creating new work for it.  We start
@@ -121,39 +106,27 @@
   // incremental marker until it completes.
   // Do some marking every time this much memory has been allocated or that many
   // heavy (color-checking) write barriers have been invoked.
-  static const intptr_t kAllocatedThreshold = 65536;
-  static const intptr_t kWriteBarriersInvokedThreshold = 32768;
-  // Start off by marking this many times more memory than has been allocated.
-  static const intptr_t kInitialMarkingSpeed = 1;
-  // But if we are promoting a lot of data we need to mark faster to keep up
-  // with the data that is entering the old space through promotion.
-  static const intptr_t kFastMarking = 3;
-  // After this many steps we increase the marking/allocating factor.
-  static const intptr_t kMarkingSpeedAccellerationInterval = 1024;
-  // This is how much we increase the marking/allocating factor by.
-  static const intptr_t kMarkingSpeedAccelleration = 2;
-  static const intptr_t kMaxMarkingSpeed = 1000;
+  static const size_t kAllocatedThreshold = 64 * KB;
+
+  static const int kStepSizeInMs = 1;
+  static const int kMaxStepSizeInMs = 5;
 
   // This is the upper bound for how many times we allow finalization of
   // incremental marking to be postponed.
-  static const size_t kMaxIdleMarkingDelayCounter = 3;
+  static const int kMaxIdleMarkingDelayCounter = 3;
+
+#ifndef DEBUG
+  static const intptr_t kActivationThreshold = 8 * MB;
+#else
+  static const intptr_t kActivationThreshold = 0;
+#endif
 
   void FinalizeSweeping();
 
-  void OldSpaceStep(intptr_t allocated);
+  size_t Step(size_t bytes_to_process, CompletionAction action,
+              ForceCompletionAction completion, StepOrigin step_origin);
 
-  intptr_t Step(intptr_t allocated, CompletionAction action,
-                ForceMarkingAction marking = DO_NOT_FORCE_MARKING,
-                ForceCompletionAction completion = FORCE_COMPLETION);
-
-  inline void RestartIfNotMarking() {
-    if (state_ == COMPLETE) {
-      state_ = MARKING;
-      if (FLAG_trace_incremental_marking) {
-        PrintF("[IncrementalMarking] Restarting (new grey objects)\n");
-      }
-    }
-  }
+  inline void RestartIfNotMarking();
 
   static void RecordWriteFromCode(HeapObject* obj, Object** slot,
                                   Isolate* isolate);
@@ -173,8 +146,8 @@
   INLINE(void RecordWriteOfCodeEntry(JSFunction* host, Object** slot,
                                      Code* value));
 
-
-  void RecordWriteSlow(HeapObject* obj, Object** slot, Object* value);
+  V8_EXPORT_PRIVATE void RecordWriteSlow(HeapObject* obj, Object** slot,
+                                         Object* value);
   void RecordWriteIntoCodeSlow(Code* host, RelocInfo* rinfo, Object* value);
   void RecordWriteOfCodeEntrySlow(JSFunction* host, Object** slot, Code* value);
   void RecordCodeTargetPatch(Code* host, Address pc, HeapObject* value);
@@ -194,8 +167,6 @@
 
   void ActivateGeneratedStub(Code* stub);
 
-  void NotifyOfHighPromotionRate();
-
   void NotifyIncompleteScanOfObject(int unscanned_bytes) {
     unscanned_bytes_of_large_object_ = unscanned_bytes;
   }
@@ -244,6 +215,8 @@
 
   void StartBlackAllocationForTesting() { StartBlackAllocation(); }
 
+  void AbortBlackAllocation();
+
  private:
   class Observer : public AllocationObserver {
    public:
@@ -252,8 +225,7 @@
           incremental_marking_(incremental_marking) {}
 
     void Step(int bytes_allocated, Address, size_t) override {
-      incremental_marking_.Step(bytes_allocated,
-                                IncrementalMarking::GC_VIA_STACK_GUARD);
+      incremental_marking_.AdvanceIncrementalMarkingOnAllocation();
     }
 
    private:
@@ -262,10 +234,6 @@
 
   int64_t SpaceLeftInOldSpace();
 
-  void SpeedUp();
-
-  void ResetStepCounters();
-
   void StartMarking();
 
   void StartBlackAllocation();
@@ -301,37 +269,36 @@
 
   void IncrementIdleMarkingDelayCounter();
 
+  void AdvanceIncrementalMarkingOnAllocation();
+
+  size_t StepSizeToKeepUpWithAllocations();
+  size_t StepSizeToMakeProgress();
+
   Heap* heap_;
 
-  Observer observer_;
-
   State state_;
-  bool is_compacting_;
 
-  int steps_count_;
-  int64_t old_generation_space_available_at_start_of_incremental_;
-  int64_t old_generation_space_used_at_start_of_incremental_;
-  int64_t bytes_rescanned_;
-  bool should_hurry_;
-  int marking_speed_;
-  intptr_t bytes_scanned_;
-  intptr_t allocated_;
-  intptr_t write_barriers_invoked_since_last_step_;
-  size_t idle_marking_delay_counter_;
+  double start_time_ms_;
+  size_t initial_old_generation_size_;
+  size_t old_generation_allocation_counter_;
+  size_t bytes_allocated_;
+  size_t bytes_marked_ahead_of_schedule_;
+  size_t unscanned_bytes_of_large_object_;
 
-  int unscanned_bytes_of_large_object_;
-
-  bool was_activated_;
-
-  bool black_allocation_;
-
-  bool finalize_marking_completed_;
-
+  int idle_marking_delay_counter_;
   int incremental_marking_finalization_rounds_;
 
+  bool is_compacting_;
+  bool should_hurry_;
+  bool was_activated_;
+  bool black_allocation_;
+  bool finalize_marking_completed_;
+
   GCRequestType request_type_;
 
   IncrementalMarkingJob incremental_marking_job_;
+  Observer new_generation_observer_;
+  Observer old_generation_observer_;
 
   DISALLOW_IMPLICIT_CONSTRUCTORS(IncrementalMarking);
 };
diff --git a/src/heap/mark-compact-inl.h b/src/heap/mark-compact-inl.h
index 7ead421..fe71fb1 100644
--- a/src/heap/mark-compact-inl.h
+++ b/src/heap/mark-compact-inl.h
@@ -14,7 +14,7 @@
 
 void MarkCompactCollector::PushBlack(HeapObject* obj) {
   DCHECK(Marking::IsBlack(ObjectMarking::MarkBitFrom(obj)));
-  if (marking_deque_.Push(obj)) {
+  if (marking_deque()->Push(obj)) {
     MemoryChunk::IncrementLiveBytesFromGC(obj, obj->Size());
   } else {
     MarkBit mark_bit = ObjectMarking::MarkBitFrom(obj);
@@ -25,7 +25,7 @@
 
 void MarkCompactCollector::UnshiftBlack(HeapObject* obj) {
   DCHECK(Marking::IsBlack(ObjectMarking::MarkBitFrom(obj)));
-  if (!marking_deque_.Unshift(obj)) {
+  if (!marking_deque()->Unshift(obj)) {
     MemoryChunk::IncrementLiveBytesFromGC(obj, -obj->Size());
     MarkBit mark_bit = ObjectMarking::MarkBitFrom(obj);
     Marking::BlackToGrey(mark_bit);
diff --git a/src/heap/mark-compact.cc b/src/heap/mark-compact.cc
index ae7b467..7e5ef96 100644
--- a/src/heap/mark-compact.cc
+++ b/src/heap/mark-compact.cc
@@ -61,7 +61,6 @@
       marking_deque_memory_(NULL),
       marking_deque_memory_committed_(0),
       code_flusher_(nullptr),
-      embedder_heap_tracer_(nullptr),
       sweeper_(heap) {
 }
 
@@ -567,6 +566,7 @@
 }
 
 bool MarkCompactCollector::Sweeper::IsSweepingCompleted() {
+  DCHECK(FLAG_concurrent_sweeping);
   while (pending_sweeper_tasks_semaphore_.WaitFor(
       base::TimeDelta::FromSeconds(0))) {
     num_sweeping_tasks_.Increment(-1);
@@ -600,7 +600,7 @@
   // For memory reducing and optimize for memory mode we directly define both
   // constants.
   const int kTargetFragmentationPercentForReduceMemory = 20;
-  const int kMaxEvacuatedBytesForReduceMemory = 12 * Page::kPageSize;
+  const int kMaxEvacuatedBytesForReduceMemory = 12 * MB;
   const int kTargetFragmentationPercentForOptimizeMemory = 20;
   const int kMaxEvacuatedBytesForOptimizeMemory = 6 * MB;
 
@@ -608,10 +608,10 @@
   // defaults to start and switch to a trace-based (using compaction speed)
   // approach as soon as we have enough samples.
   const int kTargetFragmentationPercent = 70;
-  const int kMaxEvacuatedBytes = 4 * Page::kPageSize;
+  const int kMaxEvacuatedBytes = 4 * MB;
   // Time to take for a single area (=payload of page). Used as soon as there
   // exist enough compaction speed samples.
-  const int kTargetMsPerArea = 1;
+  const float kTargetMsPerArea = .5;
 
   if (heap()->ShouldReduceMemory()) {
     *target_fragmentation_percent = kTargetFragmentationPercentForReduceMemory;
@@ -801,13 +801,14 @@
   // Clear marking bits if incremental marking is aborted.
   if (was_marked_incrementally_ && heap_->ShouldAbortIncrementalMarking()) {
     heap()->incremental_marking()->Stop();
+    heap()->incremental_marking()->AbortBlackAllocation();
     ClearMarkbits();
     AbortWeakCollections();
     AbortWeakCells();
     AbortTransitionArrays();
     AbortCompaction();
     if (heap_->UsingEmbedderHeapTracer()) {
-      heap_->mark_compact_collector()->embedder_heap_tracer()->AbortTracing();
+      heap_->embedder_heap_tracer()->AbortTracing();
     }
     was_marked_incrementally_ = false;
   }
@@ -815,12 +816,13 @@
   if (!was_marked_incrementally_) {
     if (heap_->UsingEmbedderHeapTracer()) {
       TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_MARK_WRAPPER_PROLOGUE);
-      heap_->mark_compact_collector()->embedder_heap_tracer()->TracePrologue();
+      heap_->embedder_heap_tracer()->TracePrologue(
+          heap_->embedder_reachable_reference_reporter());
     }
   }
 
-  if (UsingEmbedderHeapTracer()) {
-    embedder_heap_tracer()->EnterFinalPause();
+  if (heap_->UsingEmbedderHeapTracer()) {
+    heap_->embedder_heap_tracer()->EnterFinalPause();
   }
 
   // Don't start compaction if we are in the middle of incremental
@@ -1244,7 +1246,7 @@
     Heap* heap = map->GetHeap();
     MarkCompactCollector* collector = heap->mark_compact_collector();
     if (!collector->is_code_flushing_enabled()) {
-      VisitJSRegExp(map, object);
+      JSObjectVisitor::Visit(map, object);
       return;
     }
     JSRegExp* re = reinterpret_cast<JSRegExp*>(object);
@@ -1252,7 +1254,7 @@
     UpdateRegExpCodeAgeAndFlush(heap, re, true);
     UpdateRegExpCodeAgeAndFlush(heap, re, false);
     // Visit the fields of the RegExp, including the updated FixedArray.
-    VisitJSRegExp(map, object);
+    JSObjectVisitor::Visit(map, object);
   }
 };
 
@@ -1975,7 +1977,7 @@
   MarkStringTable(visitor);
 
   // There may be overflowed objects in the heap.  Visit them now.
-  while (marking_deque_.overflowed()) {
+  while (marking_deque()->overflowed()) {
     RefillMarkingDeque();
     EmptyMarkingDeque();
   }
@@ -2018,8 +2020,8 @@
 // After: the marking stack is empty, and all objects reachable from the
 // marking stack have been marked, or are overflowed in the heap.
 void MarkCompactCollector::EmptyMarkingDeque() {
-  while (!marking_deque_.IsEmpty()) {
-    HeapObject* object = marking_deque_.Pop();
+  while (!marking_deque()->IsEmpty()) {
+    HeapObject* object = marking_deque()->Pop();
 
     DCHECK(!object->IsFiller());
     DCHECK(object->IsHeapObject());
@@ -2042,25 +2044,25 @@
 // is cleared.
 void MarkCompactCollector::RefillMarkingDeque() {
   isolate()->CountUsage(v8::Isolate::UseCounterFeature::kMarkDequeOverflow);
-  DCHECK(marking_deque_.overflowed());
+  DCHECK(marking_deque()->overflowed());
 
   DiscoverGreyObjectsInNewSpace();
-  if (marking_deque_.IsFull()) return;
+  if (marking_deque()->IsFull()) return;
 
   DiscoverGreyObjectsInSpace(heap()->old_space());
-  if (marking_deque_.IsFull()) return;
+  if (marking_deque()->IsFull()) return;
 
   DiscoverGreyObjectsInSpace(heap()->code_space());
-  if (marking_deque_.IsFull()) return;
+  if (marking_deque()->IsFull()) return;
 
   DiscoverGreyObjectsInSpace(heap()->map_space());
-  if (marking_deque_.IsFull()) return;
+  if (marking_deque()->IsFull()) return;
 
   LargeObjectIterator lo_it(heap()->lo_space());
   DiscoverGreyObjectsWithIterator(&lo_it);
-  if (marking_deque_.IsFull()) return;
+  if (marking_deque()->IsFull()) return;
 
-  marking_deque_.ClearOverflowed();
+  marking_deque()->ClearOverflowed();
 }
 
 
@@ -2070,7 +2072,7 @@
 // objects in the heap.
 void MarkCompactCollector::ProcessMarkingDeque() {
   EmptyMarkingDeque();
-  while (marking_deque_.overflowed()) {
+  while (marking_deque()->overflowed()) {
     RefillMarkingDeque();
     EmptyMarkingDeque();
   }
@@ -2080,13 +2082,13 @@
 // stack including references only considered in the atomic marking pause.
 void MarkCompactCollector::ProcessEphemeralMarking(
     ObjectVisitor* visitor, bool only_process_harmony_weak_collections) {
-  DCHECK(marking_deque_.IsEmpty() && !marking_deque_.overflowed());
+  DCHECK(marking_deque()->IsEmpty() && !marking_deque()->overflowed());
   bool work_to_do = true;
   while (work_to_do) {
-    if (UsingEmbedderHeapTracer()) {
+    if (heap_->UsingEmbedderHeapTracer()) {
       TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_MARK_WRAPPER_TRACING);
-      RegisterWrappersWithEmbedderHeapTracer();
-      embedder_heap_tracer()->AdvanceTracing(
+      heap_->RegisterWrappersWithEmbedderHeapTracer();
+      heap_->embedder_heap_tracer()->AdvanceTracing(
           0, EmbedderHeapTracer::AdvanceTracingActions(
                  EmbedderHeapTracer::ForceCompletionAction::FORCE_COMPLETION));
     }
@@ -2097,7 +2099,7 @@
       MarkImplicitRefGroups(&MarkCompactMarkingVisitor::MarkObject);
     }
     ProcessWeakCollections();
-    work_to_do = !marking_deque_.IsEmpty();
+    work_to_do = !marking_deque()->IsEmpty();
     ProcessMarkingDeque();
   }
 }
@@ -2121,7 +2123,7 @@
 
 
 void MarkCompactCollector::EnsureMarkingDequeIsReserved() {
-  DCHECK(!marking_deque_.in_use());
+  DCHECK(!marking_deque()->in_use());
   if (marking_deque_memory_ == NULL) {
     marking_deque_memory_ = new base::VirtualMemory(kMaxMarkingDequeSize);
     marking_deque_memory_committed_ = 0;
@@ -2135,7 +2137,7 @@
 void MarkCompactCollector::EnsureMarkingDequeIsCommitted(size_t max_size) {
   // If the marking deque is too small, we try to allocate a bigger one.
   // If that fails, make do with a smaller one.
-  CHECK(!marking_deque_.in_use());
+  CHECK(!marking_deque()->in_use());
   for (size_t size = max_size; size >= kMinMarkingDequeSize; size >>= 1) {
     base::VirtualMemory* memory = marking_deque_memory_;
     size_t currently_committed = marking_deque_memory_committed_;
@@ -2167,12 +2169,12 @@
 
 
 void MarkCompactCollector::InitializeMarkingDeque() {
-  DCHECK(!marking_deque_.in_use());
+  DCHECK(!marking_deque()->in_use());
   DCHECK(marking_deque_memory_committed_ > 0);
   Address addr = static_cast<Address>(marking_deque_memory_->address());
   size_t size = marking_deque_memory_committed_;
   if (FLAG_force_marking_deque_overflows) size = 64 * kPointerSize;
-  marking_deque_.Initialize(addr, addr + size);
+  marking_deque()->Initialize(addr, addr + size);
 }
 
 
@@ -2200,34 +2202,6 @@
   in_use_ = false;
 }
 
-void MarkCompactCollector::SetEmbedderHeapTracer(EmbedderHeapTracer* tracer) {
-  DCHECK_NOT_NULL(tracer);
-  CHECK_NULL(embedder_heap_tracer_);
-  embedder_heap_tracer_ = tracer;
-}
-
-void MarkCompactCollector::RegisterWrappersWithEmbedderHeapTracer() {
-  DCHECK(UsingEmbedderHeapTracer());
-  if (wrappers_to_trace_.empty()) {
-    return;
-  }
-  embedder_heap_tracer()->RegisterV8References(wrappers_to_trace_);
-  wrappers_to_trace_.clear();
-}
-
-void MarkCompactCollector::TracePossibleWrapper(JSObject* js_object) {
-  DCHECK(js_object->WasConstructedFromApiFunction());
-  if (js_object->GetInternalFieldCount() >= 2 &&
-      js_object->GetInternalField(0) &&
-      js_object->GetInternalField(0) != heap_->undefined_value() &&
-      js_object->GetInternalField(1) != heap_->undefined_value()) {
-    DCHECK(reinterpret_cast<intptr_t>(js_object->GetInternalField(0)) % 2 == 0);
-    wrappers_to_trace_.push_back(std::pair<void*, void*>(
-        reinterpret_cast<void*>(js_object->GetInternalField(0)),
-        reinterpret_cast<void*>(js_object->GetInternalField(1))));
-  }
-}
-
 class MarkCompactCollector::ObjectStatsVisitor
     : public MarkCompactCollector::HeapObjectVisitor {
  public:
@@ -2259,8 +2233,9 @@
   SpaceIterator space_it(heap());
   HeapObject* obj = nullptr;
   while (space_it.has_next()) {
-    ObjectIterator* it = space_it.next();
-    while ((obj = it->Next()) != nullptr) {
+    std::unique_ptr<ObjectIterator> it(space_it.next()->GetObjectIterator());
+    ObjectIterator* obj_it = it.get();
+    while ((obj = obj_it->Next()) != nullptr) {
       visitor->Visit(obj);
     }
   }
@@ -2271,6 +2246,13 @@
     ObjectStatsVisitor visitor(heap(), heap()->live_object_stats_,
                                heap()->dead_object_stats_);
     VisitAllObjects(&visitor);
+    std::stringstream live, dead;
+    heap()->live_object_stats_->Dump(live);
+    heap()->dead_object_stats_->Dump(dead);
+    TRACE_EVENT_INSTANT2(TRACE_DISABLED_BY_DEFAULT("v8.gc_stats"),
+                         "V8.GC_Objects_Stats", TRACE_EVENT_SCOPE_THREAD,
+                         "live", TRACE_STR_COPY(live.str().c_str()), "dead",
+                         TRACE_STR_COPY(dead.str().c_str()));
     if (FLAG_trace_gc_object_stats) {
       heap()->live_object_stats_->PrintJSON("live");
       heap()->dead_object_stats_->PrintJSON("dead");
@@ -2282,10 +2264,6 @@
 
 void MarkCompactCollector::MarkLiveObjects() {
   TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_MARK);
-  double start_time = 0.0;
-  if (FLAG_print_cumulative_gc_stat) {
-    start_time = heap_->MonotonicallyIncreasingTimeInMs();
-  }
   // The recursive GC marker detects when it is nearing stack overflow,
   // and switches to a different marking system.  JS interrupts interfere
   // with the C stack limit check.
@@ -2299,8 +2277,8 @@
     } else {
       // Abort any pending incremental activities e.g. incremental sweeping.
       incremental_marking->Stop();
-      if (marking_deque_.in_use()) {
-        marking_deque_.Uninitialize(true);
+      if (marking_deque()->in_use()) {
+        marking_deque()->Uninitialize(true);
       }
     }
   }
@@ -2369,17 +2347,12 @@
     {
       TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_MARK_WEAK_CLOSURE_HARMONY);
       ProcessEphemeralMarking(&root_visitor, true);
-      if (UsingEmbedderHeapTracer()) {
+      if (heap_->UsingEmbedderHeapTracer()) {
         TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_MARK_WRAPPER_EPILOGUE);
-        embedder_heap_tracer()->TraceEpilogue();
+        heap()->embedder_heap_tracer()->TraceEpilogue();
       }
     }
   }
-
-  if (FLAG_print_cumulative_gc_stat) {
-    heap_->tracer()->AddMarkingTime(heap_->MonotonicallyIncreasingTimeInMs() -
-                                    start_time);
-  }
 }
 
 
@@ -3079,8 +3052,7 @@
   explicit Evacuator(MarkCompactCollector* collector)
       : collector_(collector),
         compaction_spaces_(collector->heap()),
-        local_pretenuring_feedback_(base::HashMap::PointersMatch,
-                                    kInitialLocalPretenuringFeedbackCapacity),
+        local_pretenuring_feedback_(kInitialLocalPretenuringFeedbackCapacity),
         new_space_visitor_(collector->heap(), &compaction_spaces_,
                            &local_pretenuring_feedback_),
         new_space_page_visitor(collector->heap()),
@@ -3221,7 +3193,7 @@
   // The number of parallel compaction tasks is limited by:
   // - #evacuation pages
   // - (#cores - 1)
-  const double kTargetCompactionTimeInMs = 1;
+  const double kTargetCompactionTimeInMs = .5;
   const int kNumSweepingTasks = 3;
 
   double compaction_speed =
@@ -3299,10 +3271,11 @@
     job.AddPage(page, &abandoned_pages);
   }
 
+  const bool reduce_memory = heap()->ShouldReduceMemory();
   const Address age_mark = heap()->new_space()->age_mark();
   for (Page* page : newspace_evacuation_candidates_) {
     live_bytes += page->LiveBytes();
-    if (!page->NeverEvacuate() &&
+    if (!reduce_memory && !page->NeverEvacuate() &&
         (page->LiveBytes() > Evacuator::PageEvacuationThreshold()) &&
         !page->Contains(age_mark)) {
       if (page->IsFlagSet(MemoryChunk::NEW_SPACE_BELOW_AGE_MARK)) {
@@ -3858,6 +3831,15 @@
     } else {
       max_freed = RawSweep(page, REBUILD_FREE_LIST, free_space_mode);
     }
+
+    // After finishing sweeping of a page we clean up its remembered set.
+    if (page->typed_old_to_new_slots()) {
+      page->typed_old_to_new_slots()->FreeToBeFreedChunks();
+    }
+    if (page->old_to_new_slots()) {
+      page->old_to_new_slots()->FreeToBeFreedBuckets();
+    }
+
     {
       base::LockGuard<base::Mutex> guard(&mutex_);
       swept_list_[identity].Add(page);
@@ -3964,11 +3946,6 @@
 
 void MarkCompactCollector::SweepSpaces() {
   TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_SWEEP);
-  double start_time = 0.0;
-  if (FLAG_print_cumulative_gc_stat) {
-    start_time = heap_->MonotonicallyIncreasingTimeInMs();
-  }
-
 #ifdef DEBUG
   state_ = SWEEP_SPACES;
 #endif
@@ -3994,11 +3971,6 @@
 
   // Deallocate unmarked large objects.
   heap_->lo_space()->FreeUnmarkedObjects();
-
-  if (FLAG_print_cumulative_gc_stat) {
-    heap_->tracer()->AddSweepingTime(heap_->MonotonicallyIncreasingTimeInMs() -
-                                     start_time);
-  }
 }
 
 Isolate* MarkCompactCollector::isolate() const { return heap_->isolate(); }
diff --git a/src/heap/mark-compact.h b/src/heap/mark-compact.h
index b2c637b..2cbb369 100644
--- a/src/heap/mark-compact.h
+++ b/src/heap/mark-compact.h
@@ -467,7 +467,7 @@
   static const size_t kMinMarkingDequeSize = 256 * KB;
 
   void EnsureMarkingDequeIsCommittedAndInitialize(size_t max_size) {
-    if (!marking_deque_.in_use()) {
+    if (!marking_deque()->in_use()) {
       EnsureMarkingDequeIsCommitted(max_size);
       InitializeMarkingDeque();
     }
@@ -490,16 +490,6 @@
 
   Sweeper& sweeper() { return sweeper_; }
 
-  void RegisterWrappersWithEmbedderHeapTracer();
-
-  void SetEmbedderHeapTracer(EmbedderHeapTracer* tracer);
-
-  EmbedderHeapTracer* embedder_heap_tracer() { return embedder_heap_tracer_; }
-
-  bool UsingEmbedderHeapTracer() { return embedder_heap_tracer(); }
-
-  void TracePossibleWrapper(JSObject* js_object);
-
  private:
   class EvacuateNewSpacePageVisitor;
   class EvacuateNewSpaceVisitor;
@@ -739,12 +729,9 @@
   base::VirtualMemory* marking_deque_memory_;
   size_t marking_deque_memory_committed_;
   MarkingDeque marking_deque_;
-  std::vector<std::pair<void*, void*>> wrappers_to_trace_;
 
   CodeFlusher* code_flusher_;
 
-  EmbedderHeapTracer* embedder_heap_tracer_;
-
   List<Page*> evacuation_candidates_;
   List<Page*> newspace_evacuation_candidates_;
 
@@ -768,8 +755,7 @@
   MarkCompactCollector* collector_;
 };
 
-
-const char* AllocationSpaceName(AllocationSpace space);
+V8_EXPORT_PRIVATE const char* AllocationSpaceName(AllocationSpace space);
 }  // namespace internal
 }  // namespace v8
 
diff --git a/src/heap/memory-reducer.cc b/src/heap/memory-reducer.cc
index 699e10e..ba9010e 100644
--- a/src/heap/memory-reducer.cc
+++ b/src/heap/memory-reducer.cc
@@ -73,7 +73,8 @@
       PrintIsolate(heap()->isolate(), "Memory reducer: started GC #%d\n",
                    state_.started_gcs);
     }
-    heap()->StartIdleIncrementalMarking();
+    heap()->StartIdleIncrementalMarking(
+        GarbageCollectionReason::kMemoryReducer);
   } else if (state_.action == kWait) {
     if (!heap()->incremental_marking()->IsStopped() &&
         heap()->ShouldOptimizeForMemoryUsage()) {
@@ -84,12 +85,10 @@
       double deadline = heap()->MonotonicallyIncreasingTimeInMs() +
                         kIncrementalMarkingDelayMs;
       heap()->incremental_marking()->AdvanceIncrementalMarking(
-          deadline, i::IncrementalMarking::StepActions(
-                        i::IncrementalMarking::NO_GC_VIA_STACK_GUARD,
-                        i::IncrementalMarking::FORCE_MARKING,
-                        i::IncrementalMarking::FORCE_COMPLETION));
+          deadline, IncrementalMarking::NO_GC_VIA_STACK_GUARD,
+          IncrementalMarking::FORCE_COMPLETION, StepOrigin::kTask);
       heap()->FinalizeIncrementalMarkingIfComplete(
-          "Memory reducer: finalize incremental marking");
+          GarbageCollectionReason::kFinalizeMarkingViaTask);
     }
     // Re-schedule the timer.
     ScheduleTimer(event.time_ms, state_.next_gc_start_ms - event.time_ms);
diff --git a/src/heap/object-stats.cc b/src/heap/object-stats.cc
index 3f43212..6e4b50e 100644
--- a/src/heap/object-stats.cc
+++ b/src/heap/object-stats.cc
@@ -42,6 +42,16 @@
   PrintF(" ]");
 }
 
+V8_NOINLINE static void DumpJSONArray(std::stringstream& stream, size_t* array,
+                                      const int len) {
+  stream << "[";
+  for (int i = 0; i < len; i++) {
+    stream << array[i];
+    if (i != (len - 1)) stream << ",";
+  }
+  stream << "]";
+}
+
 void ObjectStats::PrintJSON(const char* key) {
   double time = isolate()->time_millis_since_init();
   int gc_count = heap()->gc_count();
@@ -102,6 +112,60 @@
 #undef FIXED_ARRAY_SUB_INSTANCE_TYPE_WRAPPER
 #undef CODE_AGE_WRAPPER
 #undef PRINT_INSTANCE_TYPE_DATA
+#undef PRINT_KEY_AND_ID
+}
+
+void ObjectStats::Dump(std::stringstream& stream) {
+  double time = isolate()->time_millis_since_init();
+  int gc_count = heap()->gc_count();
+
+  stream << "{";
+  stream << "\"isolate\":\"" << reinterpret_cast<void*>(isolate()) << "\",";
+  stream << "\"id\":" << gc_count << ",";
+  stream << "\"time\":" << time << ",";
+  stream << "\"bucket_sizes\":[";
+  for (int i = 0; i < kNumberOfBuckets; i++) {
+    stream << (1 << (kFirstBucketShift + i));
+    if (i != (kNumberOfBuckets - 1)) stream << ",";
+  }
+  stream << "],";
+  stream << "\"type_data\":{";
+
+#define PRINT_INSTANCE_TYPE_DATA(name, index)                                \
+  stream << "\"" << name << "\":{";                                          \
+  stream << "\"type\":" << static_cast<int>(index) << ",";                   \
+  stream << "\"overall\":" << object_sizes_[index] << ",";                   \
+  stream << "\"count\":" << object_counts_[index] << ",";                    \
+  stream << "\"over_allocated\":" << over_allocated_[index] << ",";          \
+  stream << "\"histogram\":";                                                \
+  DumpJSONArray(stream, size_histogram_[index], kNumberOfBuckets);           \
+  stream << ",\"over_allocated_histogram\":";                                \
+  DumpJSONArray(stream, over_allocated_histogram_[index], kNumberOfBuckets); \
+  stream << "},";
+
+#define INSTANCE_TYPE_WRAPPER(name) PRINT_INSTANCE_TYPE_DATA(#name, name)
+#define CODE_KIND_WRAPPER(name)            \
+  PRINT_INSTANCE_TYPE_DATA("*CODE_" #name, \
+                           FIRST_CODE_KIND_SUB_TYPE + Code::name)
+#define FIXED_ARRAY_SUB_INSTANCE_TYPE_WRAPPER(name) \
+  PRINT_INSTANCE_TYPE_DATA("*FIXED_ARRAY_" #name,   \
+                           FIRST_FIXED_ARRAY_SUB_TYPE + name)
+#define CODE_AGE_WRAPPER(name) \
+  PRINT_INSTANCE_TYPE_DATA(    \
+      "*CODE_AGE_" #name,      \
+      FIRST_CODE_AGE_SUB_TYPE + Code::k##name##CodeAge - Code::kFirstCodeAge)
+
+  INSTANCE_TYPE_LIST(INSTANCE_TYPE_WRAPPER);
+  CODE_KIND_LIST(CODE_KIND_WRAPPER);
+  FIXED_ARRAY_SUB_INSTANCE_TYPE_LIST(FIXED_ARRAY_SUB_INSTANCE_TYPE_WRAPPER);
+  CODE_AGE_LIST_COMPLETE(CODE_AGE_WRAPPER);
+  stream << "\"END\":{}}}";
+
+#undef INSTANCE_TYPE_WRAPPER
+#undef CODE_KIND_WRAPPER
+#undef FIXED_ARRAY_SUB_INSTANCE_TYPE_WRAPPER
+#undef CODE_AGE_WRAPPER
+#undef PRINT_INSTANCE_TYPE_DATA
 }
 
 void ObjectStats::CheckpointObjectStats() {
@@ -246,8 +310,6 @@
                         OBJECT_TO_CODE_SUB_TYPE);
   RecordHashTableHelper(nullptr, heap_->code_stubs(),
                         CODE_STUBS_TABLE_SUB_TYPE);
-  RecordHashTableHelper(nullptr, heap_->intrinsic_function_names(),
-                        INTRINSIC_FUNCTION_NAMES_SUB_TYPE);
   RecordHashTableHelper(nullptr, heap_->empty_properties_dictionary(),
                         EMPTY_PROPERTIES_DICTIONARY_SUB_TYPE);
   CompilationCache* compilation_cache = heap_->isolate()->compilation_cache();
@@ -447,9 +509,11 @@
   if (code->kind() == Code::Kind::OPTIMIZED_FUNCTION) {
     DeoptimizationInputData* input_data =
         DeoptimizationInputData::cast(code->deoptimization_data());
-    RecordFixedArrayHelper(code->deoptimization_data(),
-                           input_data->LiteralArray(),
-                           OPTIMIZED_CODE_LITERALS_SUB_TYPE, 0);
+    if (input_data->length() > 0) {
+      RecordFixedArrayHelper(code->deoptimization_data(),
+                             input_data->LiteralArray(),
+                             OPTIMIZED_CODE_LITERALS_SUB_TYPE, 0);
+    }
   }
   RecordFixedArrayHelper(code, code->handler_table(), HANDLER_TABLE_SUB_TYPE,
                          0);
diff --git a/src/heap/object-stats.h b/src/heap/object-stats.h
index 4780696..add5a12 100644
--- a/src/heap/object-stats.h
+++ b/src/heap/object-stats.h
@@ -35,6 +35,7 @@
 
   void CheckpointObjectStats();
   void PrintJSON(const char* key);
+  void Dump(std::stringstream& stream);
 
   void RecordObjectStats(InstanceType type, size_t size) {
     DCHECK(type <= LAST_TYPE);
diff --git a/src/heap/objects-visiting-inl.h b/src/heap/objects-visiting-inl.h
index 148975f..252b2fe 100644
--- a/src/heap/objects-visiting-inl.h
+++ b/src/heap/objects-visiting-inl.h
@@ -147,11 +147,17 @@
 
   table_.Register(kVisitNativeContext, &VisitNativeContext);
 
-  table_.Register(kVisitAllocationSite, &VisitAllocationSite);
+  table_.Register(
+      kVisitAllocationSite,
+      &FixedBodyVisitor<StaticVisitor, AllocationSite::MarkingBodyDescriptor,
+                        void>::Visit);
 
   table_.Register(kVisitByteArray, &DataObjectVisitor::Visit);
 
-  table_.Register(kVisitBytecodeArray, &VisitBytecodeArray);
+  table_.Register(
+      kVisitBytecodeArray,
+      &FixedBodyVisitor<StaticVisitor, BytecodeArray::MarkingBodyDescriptor,
+                        void>::Visit);
 
   table_.Register(kVisitFreeSpace, &DataObjectVisitor::Visit);
 
@@ -178,13 +184,15 @@
       &FlexibleBodyVisitor<StaticVisitor, JSArrayBuffer::BodyDescriptor,
                            void>::Visit);
 
-  // Registration for kVisitJSRegExp is done by StaticVisitor.
+  table_.Register(kVisitJSRegExp, &JSObjectVisitor::Visit);
 
   table_.Register(
       kVisitCell,
       &FixedBodyVisitor<StaticVisitor, Cell::BodyDescriptor, void>::Visit);
 
-  table_.Register(kVisitPropertyCell, &VisitPropertyCell);
+  table_.Register(kVisitPropertyCell,
+                  &FixedBodyVisitor<StaticVisitor, PropertyCell::BodyDescriptor,
+                                    void>::Visit);
 
   table_.Register(kVisitWeakCell, &VisitWeakCell);
 
@@ -319,19 +327,6 @@
   }
 }
 
-
-template <typename StaticVisitor>
-void StaticMarkingVisitor<StaticVisitor>::VisitPropertyCell(
-    Map* map, HeapObject* object) {
-  Heap* heap = map->GetHeap();
-
-  StaticVisitor::VisitPointers(
-      heap, object,
-      HeapObject::RawField(object, PropertyCell::kPointerFieldsBeginOffset),
-      HeapObject::RawField(object, PropertyCell::kPointerFieldsEndOffset));
-}
-
-
 template <typename StaticVisitor>
 void StaticMarkingVisitor<StaticVisitor>::VisitWeakCell(Map* map,
                                                         HeapObject* object) {
@@ -384,19 +379,6 @@
   }
 }
 
-
-template <typename StaticVisitor>
-void StaticMarkingVisitor<StaticVisitor>::VisitAllocationSite(
-    Map* map, HeapObject* object) {
-  Heap* heap = map->GetHeap();
-
-  StaticVisitor::VisitPointers(
-      heap, object,
-      HeapObject::RawField(object, AllocationSite::kPointerFieldsBeginOffset),
-      HeapObject::RawField(object, AllocationSite::kPointerFieldsEndOffset));
-}
-
-
 template <typename StaticVisitor>
 void StaticMarkingVisitor<StaticVisitor>::VisitWeakCollection(
     Map* map, HeapObject* object) {
@@ -467,11 +449,11 @@
       // optimized code.
       collector->code_flusher()->AddCandidate(shared);
       // Treat the reference to the code object weakly.
-      VisitSharedFunctionInfoWeakCode(heap, object);
+      VisitSharedFunctionInfoWeakCode(map, object);
       return;
     }
   }
-  VisitSharedFunctionInfoStrongCode(heap, object);
+  VisitSharedFunctionInfoStrongCode(map, object);
 }
 
 
@@ -504,23 +486,6 @@
   VisitJSFunctionStrongCode(map, object);
 }
 
-
-template <typename StaticVisitor>
-void StaticMarkingVisitor<StaticVisitor>::VisitJSRegExp(Map* map,
-                                                        HeapObject* object) {
-  JSObjectVisitor::Visit(map, object);
-}
-
-template <typename StaticVisitor>
-void StaticMarkingVisitor<StaticVisitor>::VisitBytecodeArray(
-    Map* map, HeapObject* object) {
-  StaticVisitor::VisitPointers(
-      map->GetHeap(), object,
-      HeapObject::RawField(object, BytecodeArray::kConstantPoolOffset),
-      HeapObject::RawField(object, BytecodeArray::kFrameSizeOffset));
-}
-
-
 template <typename StaticVisitor>
 void StaticMarkingVisitor<StaticVisitor>::MarkMapContents(Heap* heap,
                                                           Map* map) {
@@ -623,7 +588,7 @@
   // We do not (yet?) flush code for generator functions, or async functions,
   // because we don't know if there are still live activations
   // (generator objects) on the heap.
-  if (shared_info->is_resumable()) {
+  if (IsResumableFunction(shared_info->kind())) {
     return false;
   }
 
@@ -656,39 +621,23 @@
   return true;
 }
 
-
 template <typename StaticVisitor>
 void StaticMarkingVisitor<StaticVisitor>::VisitSharedFunctionInfoStrongCode(
-    Heap* heap, HeapObject* object) {
-  Object** start_slot = HeapObject::RawField(
-      object, SharedFunctionInfo::BodyDescriptor::kStartOffset);
-  Object** end_slot = HeapObject::RawField(
-      object, SharedFunctionInfo::BodyDescriptor::kEndOffset);
-  StaticVisitor::VisitPointers(heap, object, start_slot, end_slot);
+    Map* map, HeapObject* object) {
+  FixedBodyVisitor<StaticVisitor, SharedFunctionInfo::BodyDescriptor,
+                   void>::Visit(map, object);
 }
 
-
 template <typename StaticVisitor>
 void StaticMarkingVisitor<StaticVisitor>::VisitSharedFunctionInfoWeakCode(
-    Heap* heap, HeapObject* object) {
-  Object** name_slot =
-      HeapObject::RawField(object, SharedFunctionInfo::kNameOffset);
-  StaticVisitor::VisitPointer(heap, object, name_slot);
-
+    Map* map, HeapObject* object) {
   // Skip visiting kCodeOffset as it is treated weakly here.
-  STATIC_ASSERT(SharedFunctionInfo::kNameOffset + kPointerSize ==
-                SharedFunctionInfo::kCodeOffset);
-  STATIC_ASSERT(SharedFunctionInfo::kCodeOffset + kPointerSize ==
-                SharedFunctionInfo::kOptimizedCodeMapOffset);
-
-  Object** start_slot =
-      HeapObject::RawField(object, SharedFunctionInfo::kOptimizedCodeMapOffset);
-  Object** end_slot = HeapObject::RawField(
-      object, SharedFunctionInfo::BodyDescriptor::kEndOffset);
-  StaticVisitor::VisitPointers(heap, object, start_slot, end_slot);
+  STATIC_ASSERT(SharedFunctionInfo::kCodeOffset <
+                SharedFunctionInfo::BodyDescriptorWeakCode::kStartOffset);
+  FixedBodyVisitor<StaticVisitor, SharedFunctionInfo::BodyDescriptorWeakCode,
+                   void>::Visit(map, object);
 }
 
-
 template <typename StaticVisitor>
 void StaticMarkingVisitor<StaticVisitor>::VisitJSFunctionStrongCode(
     Map* map, HeapObject* object) {
diff --git a/src/heap/objects-visiting.cc b/src/heap/objects-visiting.cc
index 83e2e1c..9393fcc 100644
--- a/src/heap/objects-visiting.cc
+++ b/src/heap/objects-visiting.cc
@@ -107,7 +107,6 @@
     case JS_ARGUMENTS_TYPE:
     case JS_CONTEXT_EXTENSION_OBJECT_TYPE:
     case JS_GENERATOR_OBJECT_TYPE:
-    case JS_MODULE_TYPE:
     case JS_VALUE_TYPE:
     case JS_DATE_TYPE:
     case JS_ARRAY_TYPE:
@@ -120,6 +119,7 @@
     case JS_MAP_TYPE:
     case JS_SET_ITERATOR_TYPE:
     case JS_MAP_ITERATOR_TYPE:
+    case JS_STRING_ITERATOR_TYPE:
     case JS_PROMISE_TYPE:
     case JS_BOUND_FUNCTION_TYPE:
       return GetVisitorIdForSize(kVisitJSObject, kVisitJSObjectGeneric,
diff --git a/src/heap/objects-visiting.h b/src/heap/objects-visiting.h
index 303db0e..633c277 100644
--- a/src/heap/objects-visiting.h
+++ b/src/heap/objects-visiting.h
@@ -132,7 +132,7 @@
            (base == kVisitJSObject) || (base == kVisitJSApiObject));
     DCHECK(IsAligned(object_size, kPointerSize));
     DCHECK(Heap::kMinObjectSizeInWords * kPointerSize <= object_size);
-    DCHECK(object_size <= Page::kMaxRegularHeapObjectSize);
+    DCHECK(object_size <= kMaxRegularHeapObjectSize);
     DCHECK(!has_unboxed_fields || (base == kVisitJSObject) ||
            (base == kVisitJSApiObject));
 
@@ -354,7 +354,6 @@
     table_.GetVisitor(map)(map, obj);
   }
 
-  INLINE(static void VisitPropertyCell(Map* map, HeapObject* object));
   INLINE(static void VisitWeakCell(Map* map, HeapObject* object));
   INLINE(static void VisitTransitionArray(Map* map, HeapObject* object));
   INLINE(static void VisitCodeEntry(Heap* heap, HeapObject* object,
@@ -374,12 +373,9 @@
   INLINE(static void VisitMap(Map* map, HeapObject* object));
   INLINE(static void VisitCode(Map* map, HeapObject* object));
   INLINE(static void VisitSharedFunctionInfo(Map* map, HeapObject* object));
-  INLINE(static void VisitAllocationSite(Map* map, HeapObject* object));
   INLINE(static void VisitWeakCollection(Map* map, HeapObject* object));
   INLINE(static void VisitJSFunction(Map* map, HeapObject* object));
-  INLINE(static void VisitJSRegExp(Map* map, HeapObject* object));
   INLINE(static void VisitNativeContext(Map* map, HeapObject* object));
-  INLINE(static void VisitBytecodeArray(Map* map, HeapObject* object));
 
   // Mark pointers in a Map treating some elements of the descriptor array weak.
   static void MarkMapContents(Heap* heap, Map* map);
@@ -390,8 +386,8 @@
 
   // Helpers used by code flushing support that visit pointer fields and treat
   // references to code objects either strongly or weakly.
-  static void VisitSharedFunctionInfoStrongCode(Heap* heap, HeapObject* object);
-  static void VisitSharedFunctionInfoWeakCode(Heap* heap, HeapObject* object);
+  static void VisitSharedFunctionInfoStrongCode(Map* map, HeapObject* object);
+  static void VisitSharedFunctionInfoWeakCode(Map* map, HeapObject* object);
   static void VisitJSFunctionStrongCode(Map* map, HeapObject* object);
   static void VisitJSFunctionWeakCode(Map* map, HeapObject* object);
 
diff --git a/src/heap/remembered-set.cc b/src/heap/remembered-set.cc
index 6575d55..c5dab90 100644
--- a/src/heap/remembered-set.cc
+++ b/src/heap/remembered-set.cc
@@ -20,10 +20,12 @@
   for (MemoryChunk* chunk : *heap->old_space()) {
     SlotSet* slots = GetSlotSet(chunk);
     if (slots != nullptr) {
-      slots->Iterate([heap, chunk](Address addr) {
-        Object** slot = reinterpret_cast<Object**>(addr);
-        return IsValidSlot(heap, chunk, slot) ? KEEP_SLOT : REMOVE_SLOT;
-      });
+      slots->Iterate(
+          [heap, chunk](Address addr) {
+            Object** slot = reinterpret_cast<Object**>(addr);
+            return IsValidSlot(heap, chunk, slot) ? KEEP_SLOT : REMOVE_SLOT;
+          },
+          SlotSet::PREFREE_EMPTY_BUCKETS);
     }
   }
   for (MemoryChunk* chunk : *heap->code_space()) {
@@ -36,20 +38,24 @@
             } else {
               return REMOVE_SLOT;
             }
-          });
+          },
+          TypedSlotSet::PREFREE_EMPTY_CHUNKS);
     }
   }
   for (MemoryChunk* chunk : *heap->map_space()) {
     SlotSet* slots = GetSlotSet(chunk);
     if (slots != nullptr) {
-      slots->Iterate([heap, chunk](Address addr) {
-        Object** slot = reinterpret_cast<Object**>(addr);
-        // TODO(mlippautz): In map space all allocations would ideally be map
-        // aligned. After establishing this invariant IsValidSlot could just
-        // refer to the containing object using alignment and check the mark
-        // bits.
-        return IsValidSlot(heap, chunk, slot) ? KEEP_SLOT : REMOVE_SLOT;
-      });
+      slots->Iterate(
+          [heap, chunk](Address addr) {
+            Object** slot = reinterpret_cast<Object**>(addr);
+            // TODO(mlippautz): In map space all allocations would
+            // ideally be map aligned. After establishing this
+            // invariant IsValidSlot could just refer to the
+            // containing object using alignment and check the
+            // mark bits.
+            return IsValidSlot(heap, chunk, slot) ? KEEP_SLOT : REMOVE_SLOT;
+          },
+          SlotSet::PREFREE_EMPTY_BUCKETS);
     }
   }
 }
diff --git a/src/heap/remembered-set.h b/src/heap/remembered-set.h
index 8022d52..74791b9 100644
--- a/src/heap/remembered-set.h
+++ b/src/heap/remembered-set.h
@@ -116,10 +116,13 @@
       size_t pages = (chunk->size() + Page::kPageSize - 1) / Page::kPageSize;
       int new_count = 0;
       for (size_t page = 0; page < pages; page++) {
-        new_count += slots[page].Iterate(callback);
+        new_count +=
+            slots[page].Iterate(callback, SlotSet::PREFREE_EMPTY_BUCKETS);
       }
-      if (new_count == 0) {
-        ReleaseSlotSet(chunk);
+      // Only old-to-old slot sets are released eagerly. Old-to-new slot
+      // sets are released by the sweeper threads.
+      if (direction == OLD_TO_OLD && new_count == 0) {
+        chunk->ReleaseOldToOldSlots();
       }
     }
   }
@@ -149,10 +152,13 @@
   static void RemoveRangeTyped(MemoryChunk* page, Address start, Address end) {
     TypedSlotSet* slots = GetTypedSlotSet(page);
     if (slots != nullptr) {
-      slots->Iterate([start, end](SlotType slot_type, Address host_addr,
-                                  Address slot_addr) {
-        return start <= slot_addr && slot_addr < end ? REMOVE_SLOT : KEEP_SLOT;
-      });
+      slots->Iterate(
+          [start, end](SlotType slot_type, Address host_addr,
+                       Address slot_addr) {
+            return start <= slot_addr && slot_addr < end ? REMOVE_SLOT
+                                                         : KEEP_SLOT;
+          },
+          TypedSlotSet::PREFREE_EMPTY_CHUNKS);
     }
   }
 
@@ -173,7 +179,7 @@
   static void IterateTyped(MemoryChunk* chunk, Callback callback) {
     TypedSlotSet* slots = GetTypedSlotSet(chunk);
     if (slots != nullptr) {
-      int new_count = slots->Iterate(callback);
+      int new_count = slots->Iterate(callback, TypedSlotSet::KEEP_EMPTY_CHUNKS);
       if (new_count == 0) {
         ReleaseTypedSlotSet(chunk);
       }
@@ -216,19 +222,9 @@
     }
   }
 
-  static void ReleaseSlotSet(MemoryChunk* chunk) {
-    if (direction == OLD_TO_OLD) {
-      chunk->ReleaseOldToOldSlots();
-    } else {
-      chunk->ReleaseOldToNewSlots();
-    }
-  }
-
   static void ReleaseTypedSlotSet(MemoryChunk* chunk) {
     if (direction == OLD_TO_OLD) {
       chunk->ReleaseTypedOldToOldSlots();
-    } else {
-      chunk->ReleaseTypedOldToNewSlots();
     }
   }
 
@@ -363,7 +359,7 @@
       case OBJECT_SLOT: {
         return callback(reinterpret_cast<Object**>(addr));
       }
-      case NUMBER_OF_SLOT_TYPES:
+      case CLEARED_SLOT:
         break;
     }
     UNREACHABLE();
@@ -382,7 +378,7 @@
     return DEBUG_TARGET_SLOT;
   }
   UNREACHABLE();
-  return NUMBER_OF_SLOT_TYPES;
+  return CLEARED_SLOT;
 }
 
 }  // namespace internal
diff --git a/src/heap/scavenge-job.cc b/src/heap/scavenge-job.cc
index d89c945..66d4307 100644
--- a/src/heap/scavenge-job.cc
+++ b/src/heap/scavenge-job.cc
@@ -34,7 +34,7 @@
                                  new_space_capacity)) {
     if (EnoughIdleTimeForScavenge(
             idle_time_in_ms, scavenge_speed_in_bytes_per_ms, new_space_size)) {
-      heap->CollectGarbage(NEW_SPACE, "idle task: scavenge");
+      heap->CollectGarbage(NEW_SPACE, GarbageCollectionReason::kIdleTask);
     } else {
       // Immediately request another idle task that can get larger idle time.
       job_->RescheduleIdleTask(heap);
diff --git a/src/heap/slot-set.h b/src/heap/slot-set.h
index 651af88..017667b 100644
--- a/src/heap/slot-set.h
+++ b/src/heap/slot-set.h
@@ -5,7 +5,10 @@
 #ifndef V8_SLOT_SET_H
 #define V8_SLOT_SET_H
 
+#include <stack>
+
 #include "src/allocation.h"
+#include "src/base/atomic-utils.h"
 #include "src/base/bits.h"
 #include "src/utils.h"
 
@@ -22,9 +25,11 @@
 // Each bucket is a bitmap with a bit corresponding to a single slot offset.
 class SlotSet : public Malloced {
  public:
+  enum IterationMode { PREFREE_EMPTY_BUCKETS, KEEP_EMPTY_BUCKETS };
+
   SlotSet() {
     for (int i = 0; i < kBuckets; i++) {
-      bucket[i] = nullptr;
+      bucket[i].SetValue(nullptr);
     }
   }
 
@@ -32,30 +37,38 @@
     for (int i = 0; i < kBuckets; i++) {
       ReleaseBucket(i);
     }
+    FreeToBeFreedBuckets();
   }
 
   void SetPageStart(Address page_start) { page_start_ = page_start; }
 
   // The slot offset specifies a slot at address page_start_ + slot_offset.
+  // This method should only be called on the main thread because concurrent
+  // allocation of the bucket is not thread-safe.
   void Insert(int slot_offset) {
     int bucket_index, cell_index, bit_index;
     SlotToIndices(slot_offset, &bucket_index, &cell_index, &bit_index);
-    if (bucket[bucket_index] == nullptr) {
-      bucket[bucket_index] = AllocateBucket();
+    base::AtomicValue<uint32_t>* current_bucket = bucket[bucket_index].Value();
+    if (current_bucket == nullptr) {
+      current_bucket = AllocateBucket();
+      bucket[bucket_index].SetValue(current_bucket);
     }
-    bucket[bucket_index][cell_index] |= 1u << bit_index;
+    if (!(current_bucket[cell_index].Value() & (1u << bit_index))) {
+      current_bucket[cell_index].SetBit(bit_index);
+    }
   }
 
   // The slot offset specifies a slot at address page_start_ + slot_offset.
   void Remove(int slot_offset) {
     int bucket_index, cell_index, bit_index;
     SlotToIndices(slot_offset, &bucket_index, &cell_index, &bit_index);
-    if (bucket[bucket_index] != nullptr) {
-      uint32_t cell = bucket[bucket_index][cell_index];
+    base::AtomicValue<uint32_t>* current_bucket = bucket[bucket_index].Value();
+    if (current_bucket != nullptr) {
+      uint32_t cell = current_bucket[cell_index].Value();
       if (cell) {
         uint32_t bit_mask = 1u << bit_index;
         if (cell & bit_mask) {
-          bucket[bucket_index][cell_index] ^= bit_mask;
+          current_bucket[cell_index].ClearBit(bit_index);
         }
       }
     }
@@ -73,17 +86,17 @@
     uint32_t start_mask = (1u << start_bit) - 1;
     uint32_t end_mask = ~((1u << end_bit) - 1);
     if (start_bucket == end_bucket && start_cell == end_cell) {
-      MaskCell(start_bucket, start_cell, start_mask | end_mask);
+      ClearCell(start_bucket, start_cell, ~(start_mask | end_mask));
       return;
     }
     int current_bucket = start_bucket;
     int current_cell = start_cell;
-    MaskCell(current_bucket, current_cell, start_mask);
+    ClearCell(current_bucket, current_cell, ~start_mask);
     current_cell++;
     if (current_bucket < end_bucket) {
-      if (bucket[current_bucket] != nullptr) {
+      if (bucket[current_bucket].Value() != nullptr) {
         while (current_cell < kCellsPerBucket) {
-          bucket[current_bucket][current_cell] = 0;
+          bucket[current_bucket].Value()[current_cell].SetValue(0);
           current_cell++;
         }
       }
@@ -100,24 +113,25 @@
     }
     // All buckets between start_bucket and end_bucket are cleared.
     DCHECK(current_bucket == end_bucket && current_cell <= end_cell);
-    if (current_bucket == kBuckets || bucket[current_bucket] == nullptr) {
+    if (current_bucket == kBuckets ||
+        bucket[current_bucket].Value() == nullptr) {
       return;
     }
     while (current_cell < end_cell) {
-      bucket[current_bucket][current_cell] = 0;
+      bucket[current_bucket].Value()[current_cell].SetValue(0);
       current_cell++;
     }
     // All cells between start_cell and end_cell are cleared.
     DCHECK(current_bucket == end_bucket && current_cell == end_cell);
-    MaskCell(end_bucket, end_cell, end_mask);
+    ClearCell(end_bucket, end_cell, ~end_mask);
   }
 
   // The slot offset specifies a slot at address page_start_ + slot_offset.
   bool Lookup(int slot_offset) {
     int bucket_index, cell_index, bit_index;
     SlotToIndices(slot_offset, &bucket_index, &cell_index, &bit_index);
-    if (bucket[bucket_index] != nullptr) {
-      uint32_t cell = bucket[bucket_index][cell_index];
+    if (bucket[bucket_index].Value() != nullptr) {
+      uint32_t cell = bucket[bucket_index].Value()[cell_index].Value();
       return (cell & (1u << bit_index)) != 0;
     }
     return false;
@@ -126,6 +140,7 @@
   // Iterate over all slots in the set and for each slot invoke the callback.
   // If the callback returns REMOVE_SLOT then the slot is removed from the set.
   // Returns the new number of slots.
+  // This method should only be called on the main thread.
   //
   // Sample usage:
   // Iterate([](Address slot_address) {
@@ -133,16 +148,17 @@
   //    else return REMOVE_SLOT;
   // });
   template <typename Callback>
-  int Iterate(Callback callback) {
+  int Iterate(Callback callback, IterationMode mode) {
     int new_count = 0;
     for (int bucket_index = 0; bucket_index < kBuckets; bucket_index++) {
-      if (bucket[bucket_index] != nullptr) {
+      if (bucket[bucket_index].Value() != nullptr) {
         int in_bucket_count = 0;
-        uint32_t* current_bucket = bucket[bucket_index];
+        base::AtomicValue<uint32_t>* current_bucket =
+            bucket[bucket_index].Value();
         int cell_offset = bucket_index * kBitsPerBucket;
         for (int i = 0; i < kCellsPerBucket; i++, cell_offset += kBitsPerCell) {
-          if (current_bucket[i]) {
-            uint32_t cell = current_bucket[i];
+          if (current_bucket[i].Value()) {
+            uint32_t cell = current_bucket[i].Value();
             uint32_t old_cell = cell;
             uint32_t new_cell = cell;
             while (cell) {
@@ -157,12 +173,24 @@
               cell ^= bit_mask;
             }
             if (old_cell != new_cell) {
-              current_bucket[i] = new_cell;
+              while (!current_bucket[i].TrySetValue(old_cell, new_cell)) {
+                // If TrySetValue fails, the cell must have changed. We just
+                // have to read the current value of the cell, & it with the
+                // computed value, and retry. We can do this, because this
+                // method will only be called on the main thread and filtering
+                // threads will only remove slots.
+                old_cell = current_bucket[i].Value();
+                new_cell &= old_cell;
+              }
             }
           }
         }
-        if (in_bucket_count == 0) {
-          ReleaseBucket(bucket_index);
+        if (mode == PREFREE_EMPTY_BUCKETS && in_bucket_count == 0) {
+          base::LockGuard<base::Mutex> guard(&to_be_freed_buckets_mutex_);
+          base::AtomicValue<uint32_t>* bucket_ptr =
+              bucket[bucket_index].Value();
+          to_be_freed_buckets_.push(bucket_ptr);
+          bucket[bucket_index].SetValue(nullptr);
         }
         new_count += in_bucket_count;
       }
@@ -170,6 +198,15 @@
     return new_count;
   }
 
+  void FreeToBeFreedBuckets() {
+    base::LockGuard<base::Mutex> guard(&to_be_freed_buckets_mutex_);
+    while (!to_be_freed_buckets_.empty()) {
+      base::AtomicValue<uint32_t>* top = to_be_freed_buckets_.top();
+      to_be_freed_buckets_.pop();
+      DeleteArray<base::AtomicValue<uint32_t>>(top);
+    }
+  }
+
  private:
   static const int kMaxSlots = (1 << kPageSizeBits) / kPointerSize;
   static const int kCellsPerBucket = 32;
@@ -180,24 +217,26 @@
   static const int kBitsPerBucketLog2 = kCellsPerBucketLog2 + kBitsPerCellLog2;
   static const int kBuckets = kMaxSlots / kCellsPerBucket / kBitsPerCell;
 
-  uint32_t* AllocateBucket() {
-    uint32_t* result = NewArray<uint32_t>(kCellsPerBucket);
+  base::AtomicValue<uint32_t>* AllocateBucket() {
+    base::AtomicValue<uint32_t>* result =
+        NewArray<base::AtomicValue<uint32_t>>(kCellsPerBucket);
     for (int i = 0; i < kCellsPerBucket; i++) {
-      result[i] = 0;
+      result[i].SetValue(0);
     }
     return result;
   }
 
   void ReleaseBucket(int bucket_index) {
-    DeleteArray<uint32_t>(bucket[bucket_index]);
-    bucket[bucket_index] = nullptr;
+    DeleteArray<base::AtomicValue<uint32_t>>(bucket[bucket_index].Value());
+    bucket[bucket_index].SetValue(nullptr);
   }
 
-  void MaskCell(int bucket_index, int cell_index, uint32_t mask) {
+  void ClearCell(int bucket_index, int cell_index, uint32_t mask) {
     if (bucket_index < kBuckets) {
-      uint32_t* cells = bucket[bucket_index];
-      if (cells != nullptr && cells[cell_index] != 0) {
-        cells[cell_index] &= mask;
+      base::AtomicValue<uint32_t>* cells = bucket[bucket_index].Value();
+      if (cells != nullptr) {
+        uint32_t cell = cells[cell_index].Value();
+        if (cell) cells[cell_index].SetBits(0, mask);
       }
     } else {
       // GCC bug 59124: Emits wrong warnings
@@ -217,8 +256,10 @@
     *bit_index = slot & (kBitsPerCell - 1);
   }
 
-  uint32_t* bucket[kBuckets];
+  base::AtomicValue<base::AtomicValue<uint32_t>*> bucket[kBuckets];
   Address page_start_;
+  base::Mutex to_be_freed_buckets_mutex_;
+  std::stack<base::AtomicValue<uint32_t>*> to_be_freed_buckets_;
 };
 
 enum SlotType {
@@ -228,7 +269,7 @@
   CODE_TARGET_SLOT,
   CODE_ENTRY_SLOT,
   DEBUG_TARGET_SLOT,
-  NUMBER_OF_SLOT_TYPES
+  CLEARED_SLOT
 };
 
 // Data structure for maintaining a multiset of typed slots in a page.
@@ -240,51 +281,85 @@
 // typed slots contain V8 internal pointers that are not directly exposed to JS.
 class TypedSlotSet {
  public:
-  struct TypedSlot {
-    TypedSlot() : type_and_offset_(0), host_offset_(0) {}
+  enum IterationMode { PREFREE_EMPTY_CHUNKS, KEEP_EMPTY_CHUNKS };
 
-    TypedSlot(SlotType type, uint32_t host_offset, uint32_t offset)
-        : type_and_offset_(TypeField::encode(type) |
-                           OffsetField::encode(offset)),
-          host_offset_(host_offset) {}
+  typedef std::pair<SlotType, uint32_t> TypeAndOffset;
+
+  struct TypedSlot {
+    TypedSlot() {
+      type_and_offset_.SetValue(0);
+      host_offset_.SetValue(0);
+    }
+
+    TypedSlot(SlotType type, uint32_t host_offset, uint32_t offset) {
+      type_and_offset_.SetValue(TypeField::encode(type) |
+                                OffsetField::encode(offset));
+      host_offset_.SetValue(host_offset);
+    }
 
     bool operator==(const TypedSlot other) {
-      return type_and_offset_ == other.type_and_offset_ &&
-             host_offset_ == other.host_offset_;
+      return type_and_offset_.Value() == other.type_and_offset_.Value() &&
+             host_offset_.Value() == other.host_offset_.Value();
     }
 
     bool operator!=(const TypedSlot other) { return !(*this == other); }
 
-    SlotType type() { return TypeField::decode(type_and_offset_); }
+    SlotType type() { return TypeField::decode(type_and_offset_.Value()); }
 
-    uint32_t offset() { return OffsetField::decode(type_and_offset_); }
+    uint32_t offset() { return OffsetField::decode(type_and_offset_.Value()); }
 
-    uint32_t host_offset() { return host_offset_; }
+    TypeAndOffset GetTypeAndOffset() {
+      uint32_t type_and_offset = type_and_offset_.Value();
+      return std::make_pair(TypeField::decode(type_and_offset),
+                            OffsetField::decode(type_and_offset));
+    }
 
-    uint32_t type_and_offset_;
-    uint32_t host_offset_;
+    uint32_t host_offset() { return host_offset_.Value(); }
+
+    void Set(TypedSlot slot) {
+      type_and_offset_.SetValue(slot.type_and_offset_.Value());
+      host_offset_.SetValue(slot.host_offset_.Value());
+    }
+
+    void Clear() {
+      type_and_offset_.SetValue(TypeField::encode(CLEARED_SLOT) |
+                                OffsetField::encode(0));
+      host_offset_.SetValue(0);
+    }
+
+    base::AtomicValue<uint32_t> type_and_offset_;
+    base::AtomicValue<uint32_t> host_offset_;
   };
   static const int kMaxOffset = 1 << 29;
 
   explicit TypedSlotSet(Address page_start) : page_start_(page_start) {
-    chunk_ = new Chunk(nullptr, kInitialBufferSize);
+    chunk_.SetValue(new Chunk(nullptr, kInitialBufferSize));
   }
 
   ~TypedSlotSet() {
-    Chunk* chunk = chunk_;
+    Chunk* chunk = chunk_.Value();
     while (chunk != nullptr) {
-      Chunk* next = chunk->next;
+      Chunk* next = chunk->next.Value();
       delete chunk;
       chunk = next;
     }
+    FreeToBeFreedChunks();
   }
 
   // The slot offset specifies a slot at address page_start_ + offset.
+  // This method can only be called on the main thread.
   void Insert(SlotType type, uint32_t host_offset, uint32_t offset) {
     TypedSlot slot(type, host_offset, offset);
-    if (!chunk_->AddSlot(slot)) {
-      chunk_ = new Chunk(chunk_, NextCapacity(chunk_->capacity));
-      bool added = chunk_->AddSlot(slot);
+    Chunk* top_chunk = chunk_.Value();
+    if (!top_chunk) {
+      top_chunk = new Chunk(nullptr, kInitialBufferSize);
+      chunk_.SetValue(top_chunk);
+    }
+    if (!top_chunk->AddSlot(slot)) {
+      Chunk* new_top_chunk =
+          new Chunk(top_chunk, NextCapacity(top_chunk->capacity.Value()));
+      bool added = new_top_chunk->AddSlot(slot);
+      chunk_.SetValue(new_top_chunk);
       DCHECK(added);
       USE(added);
     }
@@ -300,32 +375,60 @@
   //    else return REMOVE_SLOT;
   // });
   template <typename Callback>
-  int Iterate(Callback callback) {
-    STATIC_ASSERT(NUMBER_OF_SLOT_TYPES < 8);
-    const TypedSlot kRemovedSlot(NUMBER_OF_SLOT_TYPES, 0, 0);
-    Chunk* chunk = chunk_;
+  int Iterate(Callback callback, IterationMode mode) {
+    STATIC_ASSERT(CLEARED_SLOT < 8);
+    Chunk* chunk = chunk_.Value();
+    Chunk* previous = nullptr;
     int new_count = 0;
     while (chunk != nullptr) {
-      TypedSlot* buffer = chunk->buffer;
-      int count = chunk->count;
+      TypedSlot* buffer = chunk->buffer.Value();
+      int count = chunk->count.Value();
+      bool empty = true;
       for (int i = 0; i < count; i++) {
-        TypedSlot slot = buffer[i];
-        if (slot != kRemovedSlot) {
-          SlotType type = slot.type();
-          Address addr = page_start_ + slot.offset();
-          Address host_addr = page_start_ + slot.host_offset();
+        // Order is important here. We have to read out the slot type last to
+        // observe the concurrent removal case consistently.
+        Address host_addr = page_start_ + buffer[i].host_offset();
+        TypeAndOffset type_and_offset = buffer[i].GetTypeAndOffset();
+        SlotType type = type_and_offset.first;
+        if (type != CLEARED_SLOT) {
+          Address addr = page_start_ + type_and_offset.second;
           if (callback(type, host_addr, addr) == KEEP_SLOT) {
             new_count++;
+            empty = false;
           } else {
-            buffer[i] = kRemovedSlot;
+            buffer[i].Clear();
           }
         }
       }
-      chunk = chunk->next;
+
+      Chunk* next = chunk->next.Value();
+      if (mode == PREFREE_EMPTY_CHUNKS && empty) {
+        // We remove the chunk from the list but let it still point to its
+        // next chunk to allow concurrent iteration.
+        if (previous) {
+          previous->next.SetValue(next);
+        } else {
+          chunk_.SetValue(next);
+        }
+        base::LockGuard<base::Mutex> guard(&to_be_freed_chunks_mutex_);
+        to_be_freed_chunks_.push(chunk);
+      } else {
+        previous = chunk;
+      }
+      chunk = next;
     }
     return new_count;
   }
 
+  void FreeToBeFreedChunks() {
+    base::LockGuard<base::Mutex> guard(&to_be_freed_chunks_mutex_);
+    while (!to_be_freed_chunks_.empty()) {
+      Chunk* top = to_be_freed_chunks_.top();
+      to_be_freed_chunks_.pop();
+      delete top;
+    }
+  }
+
  private:
   static const int kInitialBufferSize = 100;
   static const int kMaxBufferSize = 16 * KB;
@@ -338,24 +441,34 @@
   class TypeField : public BitField<SlotType, 29, 3> {};
 
   struct Chunk : Malloced {
-    explicit Chunk(Chunk* next_chunk, int capacity)
-        : next(next_chunk), count(0), capacity(capacity) {
-      buffer = NewArray<TypedSlot>(capacity);
+    explicit Chunk(Chunk* next_chunk, int chunk_capacity) {
+      count.SetValue(0);
+      capacity.SetValue(chunk_capacity);
+      buffer.SetValue(NewArray<TypedSlot>(chunk_capacity));
+      next.SetValue(next_chunk);
     }
     bool AddSlot(TypedSlot slot) {
-      if (count == capacity) return false;
-      buffer[count++] = slot;
+      int current_count = count.Value();
+      if (current_count == capacity.Value()) return false;
+      TypedSlot* current_buffer = buffer.Value();
+      // Order is important here. We have to write the slot first before
+      // increasing the counter to guarantee that a consistent state is
+      // observed by concurrent threads.
+      current_buffer[current_count].Set(slot);
+      count.SetValue(current_count + 1);
       return true;
     }
-    ~Chunk() { DeleteArray(buffer); }
-    Chunk* next;
-    int count;
-    int capacity;
-    TypedSlot* buffer;
+    ~Chunk() { DeleteArray(buffer.Value()); }
+    base::AtomicValue<Chunk*> next;
+    base::AtomicValue<int> count;
+    base::AtomicValue<int> capacity;
+    base::AtomicValue<TypedSlot*> buffer;
   };
 
   Address page_start_;
-  Chunk* chunk_;
+  base::AtomicValue<Chunk*> chunk_;
+  base::Mutex to_be_freed_chunks_mutex_;
+  std::stack<Chunk*> to_be_freed_chunks_;
 };
 
 }  // namespace internal
diff --git a/src/heap/spaces-inl.h b/src/heap/spaces-inl.h
index 0fd69da..314d22f 100644
--- a/src/heap/spaces-inl.h
+++ b/src/heap/spaces-inl.h
@@ -165,14 +165,6 @@
 bool NewSpace::ToSpaceContains(Object* o) { return to_space_.Contains(o); }
 bool NewSpace::FromSpaceContains(Object* o) { return from_space_.Contains(o); }
 
-// --------------------------------------------------------------------------
-// AllocationResult
-
-AllocationSpace AllocationResult::RetrySpace() {
-  DCHECK(IsRetry());
-  return static_cast<AllocationSpace>(Smi::cast(object_)->value());
-}
-
 Page* Page::Initialize(Heap* heap, MemoryChunk* chunk, Executability executable,
                        SemiSpace* owner) {
   DCHECK_EQ(executable, Executability::NOT_EXECUTABLE);
diff --git a/src/heap/spaces.cc b/src/heap/spaces.cc
index 95d5687..c2043ed 100644
--- a/src/heap/spaces.cc
+++ b/src/heap/spaces.cc
@@ -398,7 +398,7 @@
   // We cannot free memory chunks in new space while the sweeper is running
   // since a sweeper thread might be stuck right before trying to lock the
   // corresponding page.
-  return !chunk->InNewSpace() || (mc == nullptr) ||
+  return !chunk->InNewSpace() || (mc == nullptr) || !FLAG_concurrent_sweeping ||
          mc->sweeper().IsSweepingCompleted();
 }
 
@@ -446,7 +446,7 @@
   base::VirtualMemory reservation(size, alignment);
 
   if (!reservation.IsReserved()) return NULL;
-  size_.Increment(static_cast<intptr_t>(reservation.size()));
+  size_.Increment(reservation.size());
   Address base =
       RoundUp(static_cast<Address>(reservation.address()), alignment);
   controller->TakeControl(&reservation);
@@ -505,12 +505,12 @@
   chunk->size_ = size;
   chunk->area_start_ = area_start;
   chunk->area_end_ = area_end;
-  chunk->flags_ = 0;
+  chunk->flags_ = Flags(NO_FLAGS);
   chunk->set_owner(owner);
   chunk->InitializeReservedMemory();
-  chunk->old_to_new_slots_ = nullptr;
+  chunk->old_to_new_slots_.SetValue(nullptr);
   chunk->old_to_old_slots_ = nullptr;
-  chunk->typed_old_to_new_slots_ = nullptr;
+  chunk->typed_old_to_new_slots_.SetValue(nullptr);
   chunk->typed_old_to_old_slots_ = nullptr;
   chunk->skip_list_ = nullptr;
   chunk->write_barrier_counter_ = kWriteBarrierCounterGranularity;
@@ -528,7 +528,6 @@
   chunk->black_area_end_marker_map_ = nullptr;
 
   DCHECK(OFFSET_OF(MemoryChunk, flags_) == kFlagsOffset);
-  DCHECK(OFFSET_OF(MemoryChunk, live_byte_count_) == kLiveBytesOffset);
 
   if (executable == EXECUTABLE) {
     chunk->SetFlag(IS_EXECUTABLE);
@@ -617,6 +616,21 @@
   set_next_chunk(NULL);
 }
 
+void MemoryAllocator::ShrinkChunk(MemoryChunk* chunk, size_t bytes_to_shrink) {
+  DCHECK_GE(bytes_to_shrink, static_cast<size_t>(base::OS::CommitPageSize()));
+  DCHECK_EQ(0, bytes_to_shrink % base::OS::CommitPageSize());
+  Address free_start = chunk->area_end_ - bytes_to_shrink;
+  // Don't adjust the size of the page. The area is just uncommitted but not
+  // released.
+  chunk->area_end_ -= bytes_to_shrink;
+  UncommitBlock(free_start, bytes_to_shrink);
+  if (chunk->IsFlagSet(MemoryChunk::IS_EXECUTABLE)) {
+    if (chunk->reservation_.IsReserved())
+      chunk->reservation_.Guard(chunk->area_end_);
+    else
+      base::OS::Guard(chunk->area_end_, base::OS::CommitPageSize());
+  }
+}
 
 MemoryChunk* MemoryAllocator::AllocateChunk(intptr_t reserve_area_size,
                                             intptr_t commit_area_size,
@@ -667,8 +681,7 @@
                  CodePageGuardSize();
 
     // Check executable memory limit.
-    if ((size_executable_.Value() + static_cast<intptr_t>(chunk_size)) >
-        capacity_executable_) {
+    if ((size_executable_.Value() + chunk_size) > capacity_executable_) {
       LOG(isolate_, StringEvent("MemoryAllocator::AllocateRawMemory",
                                 "V8 Executable Allocation capacity exceeded"));
       return NULL;
@@ -691,16 +704,16 @@
       DCHECK(
           IsAligned(reinterpret_cast<intptr_t>(base), MemoryChunk::kAlignment));
       if (base == NULL) return NULL;
-      size_.Increment(static_cast<intptr_t>(chunk_size));
+      size_.Increment(chunk_size);
       // Update executable memory size.
-      size_executable_.Increment(static_cast<intptr_t>(chunk_size));
+      size_executable_.Increment(chunk_size);
     } else {
       base = AllocateAlignedMemory(chunk_size, commit_size,
                                    MemoryChunk::kAlignment, executable,
                                    &reservation);
       if (base == NULL) return NULL;
       // Update executable memory size.
-      size_executable_.Increment(static_cast<intptr_t>(reservation.size()));
+      size_executable_.Increment(reservation.size());
     }
 
     if (Heap::ShouldZapGarbage()) {
@@ -745,9 +758,9 @@
     last_chunk_.TakeControl(&reservation);
     UncommitBlock(reinterpret_cast<Address>(last_chunk_.address()),
                   last_chunk_.size());
-    size_.Increment(-static_cast<intptr_t>(chunk_size));
+    size_.Decrement(chunk_size);
     if (executable == EXECUTABLE) {
-      size_executable_.Increment(-static_cast<intptr_t>(chunk_size));
+      size_executable_.Decrement(chunk_size);
     }
     CHECK(last_chunk_.IsReserved());
     return AllocateChunk(reserve_area_size, commit_area_size, executable,
@@ -764,6 +777,53 @@
   available_in_free_list_ = 0;
 }
 
+size_t Page::ShrinkToHighWaterMark() {
+  // Shrink pages to high water mark. The water mark points either to a filler
+  // or the area_end.
+  HeapObject* filler = HeapObject::FromAddress(HighWaterMark());
+  if (filler->address() == area_end()) return 0;
+  CHECK(filler->IsFiller());
+  if (!filler->IsFreeSpace()) return 0;
+
+#ifdef DEBUG
+  // Check that the filler is indeed the last filler on the page.
+  HeapObjectIterator it(this);
+  HeapObject* filler2 = nullptr;
+  for (HeapObject* obj = it.Next(); obj != nullptr; obj = it.Next()) {
+    filler2 = HeapObject::FromAddress(obj->address() + obj->Size());
+  }
+  if (filler2 == nullptr || filler2->address() == area_end()) return 0;
+  DCHECK(filler2->IsFiller());
+  // The deserializer might leave behind fillers. In this case we need to
+  // iterate even further.
+  while ((filler2->address() + filler2->Size()) != area_end()) {
+    filler2 = HeapObject::FromAddress(filler2->address() + filler2->Size());
+    DCHECK(filler2->IsFiller());
+  }
+  DCHECK_EQ(filler->address(), filler2->address());
+#endif  // DEBUG
+
+  size_t unused = RoundDown(
+      static_cast<size_t>(area_end() - filler->address() - FreeSpace::kSize),
+      base::OS::CommitPageSize());
+  if (unused > 0) {
+    if (FLAG_trace_gc_verbose) {
+      PrintIsolate(heap()->isolate(), "Shrinking page %p: end %p -> %p\n",
+                   reinterpret_cast<void*>(this),
+                   reinterpret_cast<void*>(area_end()),
+                   reinterpret_cast<void*>(area_end() - unused));
+    }
+    heap()->CreateFillerObjectAt(
+        filler->address(),
+        static_cast<int>(area_end() - filler->address() - unused),
+        ClearRecordedSlots::kNo);
+    heap()->memory_allocator()->ShrinkChunk(this, unused);
+    CHECK(filler->IsFiller());
+    CHECK_EQ(filler->address() + filler->Size(), area_end());
+  }
+  return unused;
+}
+
 void MemoryAllocator::PartialFreeMemory(MemoryChunk* chunk,
                                         Address start_free) {
   // We do not allow partial shrink for code.
@@ -776,8 +836,8 @@
 
   size_t to_free_size = size - (start_free - chunk->address());
 
-  DCHECK(size_.Value() >= static_cast<intptr_t>(to_free_size));
-  size_.Increment(-static_cast<intptr_t>(to_free_size));
+  DCHECK(size_.Value() >= to_free_size);
+  size_.Decrement(to_free_size);
   isolate_->counters()->memory_allocated()->Decrement(
       static_cast<int>(to_free_size));
   chunk->set_size(size - to_free_size);
@@ -792,20 +852,15 @@
   isolate_->heap()->RememberUnmappedPage(reinterpret_cast<Address>(chunk),
                                          chunk->IsEvacuationCandidate());
 
-  intptr_t size;
   base::VirtualMemory* reservation = chunk->reserved_memory();
-  if (reservation->IsReserved()) {
-    size = static_cast<intptr_t>(reservation->size());
-  } else {
-    size = static_cast<intptr_t>(chunk->size());
-  }
-  DCHECK(size_.Value() >= size);
-  size_.Increment(-size);
+  const size_t size =
+      reservation->IsReserved() ? reservation->size() : chunk->size();
+  DCHECK_GE(size_.Value(), static_cast<size_t>(size));
+  size_.Decrement(size);
   isolate_->counters()->memory_allocated()->Decrement(static_cast<int>(size));
-
   if (chunk->executable() == EXECUTABLE) {
-    DCHECK(size_executable_.Value() >= size);
-    size_executable_.Increment(-size);
+    DCHECK_GE(size_executable_.Value(), size);
+    size_executable_.Decrement(size);
   }
 
   chunk->SetFlag(MemoryChunk::PRE_FREED);
@@ -938,10 +993,9 @@
 
 #ifdef DEBUG
 void MemoryAllocator::ReportStatistics() {
-  intptr_t size = Size();
+  size_t size = Size();
   float pct = static_cast<float>(capacity_ - size) / capacity_;
-  PrintF("  capacity: %" V8PRIdPTR ", used: %" V8PRIdPTR
-         ", available: %%%d\n\n",
+  PrintF("  capacity: %zu , used: %" V8PRIdPTR ", available: %%%d\n\n",
          capacity_, size, static_cast<int>(pct * 100));
 }
 #endif
@@ -1014,9 +1068,9 @@
     delete mutex_;
     mutex_ = nullptr;
   }
-  if (old_to_new_slots_ != nullptr) ReleaseOldToNewSlots();
+  if (old_to_new_slots_.Value() != nullptr) ReleaseOldToNewSlots();
   if (old_to_old_slots_ != nullptr) ReleaseOldToOldSlots();
-  if (typed_old_to_new_slots_ != nullptr) ReleaseTypedOldToNewSlots();
+  if (typed_old_to_new_slots_.Value() != nullptr) ReleaseTypedOldToNewSlots();
   if (typed_old_to_old_slots_ != nullptr) ReleaseTypedOldToOldSlots();
   if (local_tracker_ != nullptr) ReleaseLocalTracker();
 }
@@ -1032,13 +1086,14 @@
 }
 
 void MemoryChunk::AllocateOldToNewSlots() {
-  DCHECK(nullptr == old_to_new_slots_);
-  old_to_new_slots_ = AllocateSlotSet(size_, address());
+  DCHECK(nullptr == old_to_new_slots_.Value());
+  old_to_new_slots_.SetValue(AllocateSlotSet(size_, address()));
 }
 
 void MemoryChunk::ReleaseOldToNewSlots() {
-  delete[] old_to_new_slots_;
-  old_to_new_slots_ = nullptr;
+  SlotSet* old_to_new_slots = old_to_new_slots_.Value();
+  delete[] old_to_new_slots;
+  old_to_new_slots_.SetValue(nullptr);
 }
 
 void MemoryChunk::AllocateOldToOldSlots() {
@@ -1052,13 +1107,14 @@
 }
 
 void MemoryChunk::AllocateTypedOldToNewSlots() {
-  DCHECK(nullptr == typed_old_to_new_slots_);
-  typed_old_to_new_slots_ = new TypedSlotSet(address());
+  DCHECK(nullptr == typed_old_to_new_slots_.Value());
+  typed_old_to_new_slots_.SetValue(new TypedSlotSet(address()));
 }
 
 void MemoryChunk::ReleaseTypedOldToNewSlots() {
-  delete typed_old_to_new_slots_;
-  typed_old_to_new_slots_ = nullptr;
+  TypedSlotSet* typed_old_to_new_slots = typed_old_to_new_slots_.Value();
+  delete typed_old_to_new_slots;
+  typed_old_to_new_slots_.SetValue(nullptr);
 }
 
 void MemoryChunk::AllocateTypedOldToOldSlots() {
@@ -1235,18 +1291,29 @@
   return Smi::FromInt(0);
 }
 
-bool PagedSpace::Expand() {
-  int size = AreaSize();
-  if (snapshotable() && !HasPages()) {
-    size = Snapshot::SizeOfFirstPage(heap()->isolate(), identity());
+void PagedSpace::ShrinkImmortalImmovablePages() {
+  DCHECK(!heap()->deserialization_complete());
+  MemoryChunk::UpdateHighWaterMark(allocation_info_.top());
+  EmptyAllocationInfo();
+  ResetFreeList();
+
+  for (Page* page : *this) {
+    DCHECK(page->IsFlagSet(Page::NEVER_EVACUATE));
+    size_t unused = page->ShrinkToHighWaterMark();
+    accounting_stats_.DecreaseCapacity(static_cast<intptr_t>(unused));
+    AccountUncommitted(unused);
   }
+}
+
+bool PagedSpace::Expand() {
+  const int size = AreaSize();
 
   if (!heap()->CanExpandOldGeneration(size)) return false;
 
   Page* p = heap()->memory_allocator()->AllocatePage(size, this, executable());
   if (p == nullptr) return false;
 
-  AccountCommitted(static_cast<intptr_t>(p->size()));
+  AccountCommitted(p->size());
 
   // Pages created during bootstrapping may contain immortal immovable objects.
   if (!heap()->deserialization_complete()) p->MarkNeverEvacuate();
@@ -1336,7 +1403,6 @@
 
 void PagedSpace::ReleasePage(Page* page) {
   DCHECK_EQ(page->LiveBytes(), 0);
-  DCHECK_EQ(AreaSize(), page->area_size());
   DCHECK_EQ(page->owner(), this);
 
   free_list_.EvictFreeListItems(page);
@@ -1354,11 +1420,13 @@
     page->Unlink();
   }
 
-  AccountUncommitted(static_cast<intptr_t>(page->size()));
+  AccountUncommitted(page->size());
+  accounting_stats_.ShrinkSpace(page->area_size());
   heap()->memory_allocator()->Free<MemoryAllocator::kPreFreeAndQueue>(page);
+}
 
-  DCHECK(Capacity() > 0);
-  accounting_stats_.ShrinkSpace(AreaSize());
+std::unique_ptr<ObjectIterator> PagedSpace::GetObjectIterator() {
+  return std::unique_ptr<ObjectIterator>(new HeapObjectIterator(this));
 }
 
 #ifdef DEBUG
@@ -1481,7 +1549,7 @@
 
 
 void NewSpace::Shrink() {
-  int new_capacity = Max(InitialTotalCapacity(), 2 * SizeAsInt());
+  int new_capacity = Max(InitialTotalCapacity(), 2 * static_cast<int>(Size()));
   int rounded_new_capacity = RoundUp(new_capacity, Page::kPageSize);
   if (rounded_new_capacity < TotalCapacity() &&
       to_space_.ShrinkTo(rounded_new_capacity)) {
@@ -1747,6 +1815,10 @@
   }
 }
 
+std::unique_ptr<ObjectIterator> NewSpace::GetObjectIterator() {
+  return std::unique_ptr<ObjectIterator>(new SemiSpaceIterator(this));
+}
+
 #ifdef VERIFY_HEAP
 // We do not use the SemiSpaceIterator because verification doesn't assume
 // that it works (it depends on the invariants we are checking).
@@ -1903,7 +1975,7 @@
     new_page->SetFlags(last_page->GetFlags(), Page::kCopyOnFlipFlagsMask);
     last_page = new_page;
   }
-  AccountCommitted(static_cast<intptr_t>(delta));
+  AccountCommitted(delta);
   current_capacity_ = new_capacity;
   return true;
 }
@@ -1940,7 +2012,7 @@
           last_page);
       delta_pages--;
     }
-    AccountUncommitted(static_cast<intptr_t>(delta));
+    AccountUncommitted(delta);
     heap()->memory_allocator()->unmapper()->FreeQueuedChunks();
   }
   current_capacity_ = new_capacity;
@@ -2010,7 +2082,6 @@
   from->FixPagesFlags(0, 0);
 }
 
-
 void SemiSpace::set_age_mark(Address mark) {
   DCHECK_EQ(Page::FromAllocationAreaAddress(mark)->owner(), this);
   age_mark_ = mark;
@@ -2020,6 +2091,11 @@
   }
 }
 
+std::unique_ptr<ObjectIterator> SemiSpace::GetObjectIterator() {
+  // Use the NewSpace::NewObjectIterator to iterate the ToSpace.
+  UNREACHABLE();
+  return std::unique_ptr<ObjectIterator>();
+}
 
 #ifdef DEBUG
 void SemiSpace::Print() {}
@@ -2490,14 +2566,13 @@
   // Don't free list allocate if there is linear space available.
   DCHECK(owner_->limit() - owner_->top() < size_in_bytes);
 
-  int old_linear_size = static_cast<int>(owner_->limit() - owner_->top());
   // Mark the old linear allocation area with a free space map so it can be
   // skipped when scanning the heap.  This also puts it back in the free list
   // if it is big enough.
   owner_->EmptyAllocationInfo();
 
-  owner_->heap()->incremental_marking()->OldSpaceStep(size_in_bytes -
-                                                      old_linear_size);
+  owner_->heap()->StartIncrementalMarkingIfAllocationLimitIsReached(
+      Heap::kNoGCFlags, kNoGCCallbackFlags);
 
   int new_node_size = 0;
   FreeSpace* new_node = FindNodeFor(size_in_bytes, &new_node_size);
@@ -2778,19 +2853,7 @@
     }
   }
 
-  // Free list allocation failed and there is no next page.  Fail if we have
-  // hit the old generation size limit that should cause a garbage
-  // collection.
-  if (!heap()->always_allocate() &&
-      heap()->OldGenerationAllocationLimitReached()) {
-    // If sweeper threads are active, wait for them at that point and steal
-    // elements form their free-lists.
-    HeapObject* object = SweepAndRetryAllocation(size_in_bytes);
-    return object;
-  }
-
-  // Try to expand the space and allocate in the new next page.
-  if (Expand()) {
+  if (heap()->ShouldExpandOldGenerationOnAllocationFailure() && Expand()) {
     DCHECK((CountTotalPages() > 1) ||
            (size_in_bytes <= free_list_.Available()));
     return free_list_.Allocate(size_in_bytes);
@@ -2874,7 +2937,7 @@
       size_(0),
       page_count_(0),
       objects_size_(0),
-      chunk_map_(base::HashMap::PointersMatch, 1024) {}
+      chunk_map_(1024) {}
 
 LargeObjectSpace::~LargeObjectSpace() {}
 
@@ -2914,7 +2977,7 @@
   DCHECK(page->area_size() >= object_size);
 
   size_ += static_cast<int>(page->size());
-  AccountCommitted(static_cast<intptr_t>(page->size()));
+  AccountCommitted(page->size());
   objects_size_ += object_size;
   page_count_++;
   page->set_next_page(first_page_);
@@ -2933,7 +2996,8 @@
     reinterpret_cast<Object**>(object->address())[1] = Smi::FromInt(0);
   }
 
-  heap()->incremental_marking()->OldSpaceStep(object_size);
+  heap()->StartIncrementalMarkingIfAllocationLimitIsReached(Heap::kNoGCFlags,
+                                                            kNoGCCallbackFlags);
   AllocationStep(object->address(), object_size);
 
   if (heap()->incremental_marking()->black_allocation()) {
@@ -3050,7 +3114,7 @@
 
       // Free the chunk.
       size_ -= static_cast<int>(page->size());
-      AccountUncommitted(static_cast<intptr_t>(page->size()));
+      AccountUncommitted(page->size());
       objects_size_ -= object->Size();
       page_count_--;
 
@@ -3072,6 +3136,9 @@
   return owned;
 }
 
+std::unique_ptr<ObjectIterator> LargeObjectSpace::GetObjectIterator() {
+  return std::unique_ptr<ObjectIterator>(new LargeObjectIterator(this));
+}
 
 #ifdef VERIFY_HEAP
 // We do not assume that the large object iterator works, because it depends
diff --git a/src/heap/spaces.h b/src/heap/spaces.h
index de5ea1b..732ba7e 100644
--- a/src/heap/spaces.h
+++ b/src/heap/spaces.h
@@ -16,6 +16,8 @@
 #include "src/base/hashmap.h"
 #include "src/base/platform/mutex.h"
 #include "src/flags.h"
+#include "src/globals.h"
+#include "src/heap/heap.h"
 #include "src/heap/marking.h"
 #include "src/list.h"
 #include "src/objects.h"
@@ -57,7 +59,7 @@
 // area.
 //
 // There is a separate large object space for objects larger than
-// Page::kMaxRegularHeapObjectSize, so that they do not have to move during
+// kMaxRegularHeapObjectSize, so that they do not have to move during
 // collection. The large object space is paged. Pages in large object space
 // may be larger than the page size.
 //
@@ -105,7 +107,7 @@
   DCHECK((OffsetFrom(address) & kObjectAlignmentMask) == 0)
 
 #define DCHECK_OBJECT_SIZE(size) \
-  DCHECK((0 < size) && (size <= Page::kMaxRegularHeapObjectSize))
+  DCHECK((0 < size) && (size <= kMaxRegularHeapObjectSize))
 
 #define DCHECK_CODEOBJECT_SIZE(size, code_space) \
   DCHECK((0 < size) && (size <= code_space->AreaSize()))
@@ -227,62 +229,75 @@
 // any heap object.
 class MemoryChunk {
  public:
-  enum MemoryChunkFlags {
-    IS_EXECUTABLE,
-    POINTERS_TO_HERE_ARE_INTERESTING,
-    POINTERS_FROM_HERE_ARE_INTERESTING,
-    IN_FROM_SPACE,  // Mutually exclusive with IN_TO_SPACE.
-    IN_TO_SPACE,    // All pages in new space has one of these two set.
-    NEW_SPACE_BELOW_AGE_MARK,
-    EVACUATION_CANDIDATE,
-    NEVER_EVACUATE,  // May contain immortal immutables.
+  enum Flag {
+    NO_FLAGS = 0u,
+    IS_EXECUTABLE = 1u << 0,
+    POINTERS_TO_HERE_ARE_INTERESTING = 1u << 1,
+    POINTERS_FROM_HERE_ARE_INTERESTING = 1u << 2,
+    // A page in new space has one of the next two flags set.
+    IN_FROM_SPACE = 1u << 3,
+    IN_TO_SPACE = 1u << 4,
+    NEW_SPACE_BELOW_AGE_MARK = 1u << 5,
+    EVACUATION_CANDIDATE = 1u << 6,
+    NEVER_EVACUATE = 1u << 7,
 
     // Large objects can have a progress bar in their page header. These object
     // are scanned in increments and will be kept black while being scanned.
     // Even if the mutator writes to them they will be kept black and a white
     // to grey transition is performed in the value.
-    HAS_PROGRESS_BAR,
+    HAS_PROGRESS_BAR = 1u << 8,
 
     // |PAGE_NEW_OLD_PROMOTION|: A page tagged with this flag has been promoted
     // from new to old space during evacuation.
-    PAGE_NEW_OLD_PROMOTION,
+    PAGE_NEW_OLD_PROMOTION = 1u << 9,
 
     // |PAGE_NEW_NEW_PROMOTION|: A page tagged with this flag has been moved
     // within the new space during evacuation.
-    PAGE_NEW_NEW_PROMOTION,
+    PAGE_NEW_NEW_PROMOTION = 1u << 10,
 
     // This flag is intended to be used for testing. Works only when both
     // FLAG_stress_compaction and FLAG_manual_evacuation_candidates_selection
     // are set. It forces the page to become an evacuation candidate at next
     // candidates selection cycle.
-    FORCE_EVACUATION_CANDIDATE_FOR_TESTING,
+    FORCE_EVACUATION_CANDIDATE_FOR_TESTING = 1u << 11,
 
     // This flag is intended to be used for testing.
-    NEVER_ALLOCATE_ON_PAGE,
+    NEVER_ALLOCATE_ON_PAGE = 1u << 12,
 
     // The memory chunk is already logically freed, however the actual freeing
     // still has to be performed.
-    PRE_FREED,
+    PRE_FREED = 1u << 13,
 
     // |POOLED|: When actually freeing this chunk, only uncommit and do not
     // give up the reservation as we still reuse the chunk at some point.
-    POOLED,
+    POOLED = 1u << 14,
 
     // |COMPACTION_WAS_ABORTED|: Indicates that the compaction in this page
     //   has been aborted and needs special handling by the sweeper.
-    COMPACTION_WAS_ABORTED,
+    COMPACTION_WAS_ABORTED = 1u << 15,
 
     // |COMPACTION_WAS_ABORTED_FOR_TESTING|: During stress testing evacuation
     // on pages is sometimes aborted. The flag is used to avoid repeatedly
     // triggering on the same page.
-    COMPACTION_WAS_ABORTED_FOR_TESTING,
+    COMPACTION_WAS_ABORTED_FOR_TESTING = 1u << 16,
 
     // |ANCHOR|: Flag is set if page is an anchor.
-    ANCHOR,
-
-    // Last flag, keep at bottom.
-    NUM_MEMORY_CHUNK_FLAGS
+    ANCHOR = 1u << 17,
   };
+  typedef base::Flags<Flag, uintptr_t> Flags;
+
+  static const int kPointersToHereAreInterestingMask =
+      POINTERS_TO_HERE_ARE_INTERESTING;
+
+  static const int kPointersFromHereAreInterestingMask =
+      POINTERS_FROM_HERE_ARE_INTERESTING;
+
+  static const int kEvacuationCandidateMask = EVACUATION_CANDIDATE;
+
+  static const int kIsInNewSpaceMask = IN_FROM_SPACE | IN_TO_SPACE;
+
+  static const int kSkipEvacuationSlotsRecordingMask =
+      kEvacuationCandidateMask | kIsInNewSpaceMask;
 
   // |kSweepingDone|: The page state when sweeping is complete or sweeping must
   //   not be performed on that page. Sweeper threads that are done with their
@@ -300,17 +315,6 @@
   // whether we have hit the limit and should do some more marking.
   static const int kWriteBarrierCounterGranularity = 500;
 
-  static const int kPointersToHereAreInterestingMask =
-      1 << POINTERS_TO_HERE_ARE_INTERESTING;
-
-  static const int kPointersFromHereAreInterestingMask =
-      1 << POINTERS_FROM_HERE_ARE_INTERESTING;
-
-  static const int kEvacuationCandidateMask = 1 << EVACUATION_CANDIDATE;
-
-  static const int kSkipEvacuationSlotsRecordingMask =
-      (1 << EVACUATION_CANDIDATE) | (1 << IN_FROM_SPACE) | (1 << IN_TO_SPACE);
-
   static const intptr_t kAlignment =
       (static_cast<uintptr_t>(1) << kPageSizeBits);
 
@@ -320,25 +324,21 @@
 
   static const intptr_t kFlagsOffset = kSizeOffset + kPointerSize;
 
-  static const intptr_t kLiveBytesOffset =
+  static const size_t kWriteBarrierCounterOffset =
       kSizeOffset + kPointerSize  // size_t size
-      + kIntptrSize               // intptr_t flags_
+      + kIntptrSize               // Flags flags_
       + kPointerSize              // Address area_start_
       + kPointerSize              // Address area_end_
       + 2 * kPointerSize          // base::VirtualMemory reservation_
       + kPointerSize              // Address owner_
       + kPointerSize              // Heap* heap_
-      + kIntSize;                 // int progress_bar_
-
-  static const size_t kOldToNewSlotsOffset =
-      kLiveBytesOffset + kIntSize;  // int live_byte_count_
-
-  static const size_t kWriteBarrierCounterOffset =
-      kOldToNewSlotsOffset + kPointerSize  // SlotSet* old_to_new_slots_;
-      + kPointerSize                       // SlotSet* old_to_old_slots_;
-      + kPointerSize   // TypedSlotSet* typed_old_to_new_slots_;
-      + kPointerSize   // TypedSlotSet* typed_old_to_old_slots_;
-      + kPointerSize;  // SkipList* skip_list_;
+      + kIntSize                  // int progress_bar_
+      + kIntSize                  // int live_byte_count_
+      + kPointerSize              // SlotSet* old_to_new_slots_;
+      + kPointerSize              // SlotSet* old_to_old_slots_;
+      + kPointerSize              // TypedSlotSet* typed_old_to_new_slots_;
+      + kPointerSize              // TypedSlotSet* typed_old_to_old_slots_;
+      + kPointerSize;             // SkipList* skip_list_;
 
   static const size_t kMinHeaderSize =
       kWriteBarrierCounterOffset +
@@ -351,7 +351,7 @@
       + kPointerSize      // AtomicValue prev_chunk_
       // FreeListCategory categories_[kNumberOfCategories]
       + FreeListCategory::kSize * kNumberOfCategories +
-      kPointerSize  // LocalArrayBufferTracker* local_tracker_;
+      kPointerSize  // LocalArrayBufferTracker* local_tracker_
       // std::unordered_set<Address>* black_area_end_marker_map_
       + kPointerSize;
 
@@ -453,17 +453,17 @@
 
   inline void set_skip_list(SkipList* skip_list) { skip_list_ = skip_list; }
 
-  inline SlotSet* old_to_new_slots() { return old_to_new_slots_; }
+  inline SlotSet* old_to_new_slots() { return old_to_new_slots_.Value(); }
   inline SlotSet* old_to_old_slots() { return old_to_old_slots_; }
   inline TypedSlotSet* typed_old_to_new_slots() {
-    return typed_old_to_new_slots_;
+    return typed_old_to_new_slots_.Value();
   }
   inline TypedSlotSet* typed_old_to_old_slots() {
     return typed_old_to_old_slots_;
   }
   inline LocalArrayBufferTracker* local_tracker() { return local_tracker_; }
 
-  void AllocateOldToNewSlots();
+  V8_EXPORT_PRIVATE void AllocateOldToNewSlots();
   void ReleaseOldToNewSlots();
   void AllocateOldToOldSlots();
   void ReleaseOldToOldSlots();
@@ -498,7 +498,6 @@
   void ResetProgressBar() {
     if (IsFlagSet(MemoryChunk::HAS_PROGRESS_BAR)) {
       set_progress_bar(0);
-      ClearFlag(MemoryChunk::HAS_PROGRESS_BAR);
     }
   }
 
@@ -518,22 +517,18 @@
 
   void PrintMarkbits() { markbits()->Print(); }
 
-  void SetFlag(int flag) { flags_ |= static_cast<uintptr_t>(1) << flag; }
-
-  void ClearFlag(int flag) { flags_ &= ~(static_cast<uintptr_t>(1) << flag); }
-
-  bool IsFlagSet(int flag) {
-    return (flags_ & (static_cast<uintptr_t>(1) << flag)) != 0;
-  }
+  void SetFlag(Flag flag) { flags_ |= flag; }
+  void ClearFlag(Flag flag) { flags_ &= ~Flags(flag); }
+  bool IsFlagSet(Flag flag) { return flags_ & flag; }
 
   // Set or clear multiple flags at a time. The flags in the mask are set to
   // the value in "flags", the rest retain the current value in |flags_|.
-  void SetFlags(intptr_t flags, intptr_t mask) {
-    flags_ = (flags_ & ~mask) | (flags & mask);
+  void SetFlags(uintptr_t flags, uintptr_t mask) {
+    flags_ = (flags_ & ~Flags(mask)) | (Flags(flags) & Flags(mask));
   }
 
   // Return all current flags.
-  intptr_t GetFlags() { return flags_; }
+  uintptr_t GetFlags() { return flags_; }
 
   bool NeverEvacuate() { return IsFlagSet(NEVER_EVACUATE); }
 
@@ -557,9 +552,7 @@
     return IsFlagSet(IS_EXECUTABLE) ? EXECUTABLE : NOT_EXECUTABLE;
   }
 
-  bool InNewSpace() {
-    return (flags_ & ((1 << IN_FROM_SPACE) | (1 << IN_TO_SPACE))) != 0;
-  }
+  bool InNewSpace() { return (flags_ & kIsInNewSpaceMask) != 0; }
 
   bool InToSpace() { return IsFlagSet(IN_TO_SPACE); }
 
@@ -634,7 +627,7 @@
   base::VirtualMemory* reserved_memory() { return &reservation_; }
 
   size_t size_;
-  intptr_t flags_;
+  Flags flags_;
 
   // Start and end of allocatable memory on this chunk.
   Address area_start_;
@@ -660,9 +653,9 @@
   // A single slot set for small pages (of size kPageSize) or an array of slot
   // set for large pages. In the latter case the number of entries in the array
   // is ceil(size() / kPageSize).
-  SlotSet* old_to_new_slots_;
+  base::AtomicValue<SlotSet*> old_to_new_slots_;
   SlotSet* old_to_old_slots_;
-  TypedSlotSet* typed_old_to_new_slots_;
+  base::AtomicValue<TypedSlotSet*> typed_old_to_new_slots_;
   TypedSlotSet* typed_old_to_old_slots_;
 
   SkipList* skip_list_;
@@ -700,6 +693,11 @@
   friend class MemoryChunkValidator;
 };
 
+DEFINE_OPERATORS_FOR_FLAGS(MemoryChunk::Flags)
+
+static_assert(kMaxRegularHeapObjectSize <= MemoryChunk::kAllocatableMemory,
+              "kMaxRegularHeapObjectSize <= MemoryChunk::kAllocatableMemory");
+
 // -----------------------------------------------------------------------------
 // A page is a memory chunk of a size 1MB. Large object pages may be larger.
 //
@@ -712,17 +710,8 @@
 
   // Page flags copied from from-space to to-space when flipping semispaces.
   static const intptr_t kCopyOnFlipFlagsMask =
-      (1 << MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING) |
-      (1 << MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING);
-
-  // Maximum object size that gets allocated into regular pages. Objects larger
-  // than that size are allocated in large object space and are never moved in
-  // memory. This also applies to new space allocation, since objects are never
-  // migrated from new space to large object space. Takes double alignment into
-  // account.
-  // TODO(hpayer): This limit should be way smaller but we currently have
-  // short living objects >256K.
-  static const int kMaxRegularHeapObjectSize = 600 * KB;
+      static_cast<intptr_t>(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING) |
+      static_cast<intptr_t>(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING);
 
   static inline Page* ConvertNewToOld(Page* old_page, PagedSpace* new_owner);
 
@@ -823,6 +812,8 @@
     available_in_free_list_.Increment(available);
   }
 
+  size_t ShrinkToHighWaterMark();
+
 #ifdef DEBUG
   void Print();
 #endif  // DEBUG
@@ -918,9 +909,9 @@
 
   // Return the total amount committed memory for this space, i.e., allocatable
   // memory and page headers.
-  virtual intptr_t CommittedMemory() { return committed_; }
+  virtual size_t CommittedMemory() { return committed_; }
 
-  virtual intptr_t MaximumCommittedMemory() { return max_committed_; }
+  virtual size_t MaximumCommittedMemory() { return max_committed_; }
 
   // Returns allocated size.
   virtual intptr_t Size() = 0;
@@ -943,18 +934,19 @@
     }
   }
 
-  void AccountCommitted(intptr_t bytes) {
-    DCHECK_GE(bytes, 0);
+  virtual std::unique_ptr<ObjectIterator> GetObjectIterator() = 0;
+
+  void AccountCommitted(size_t bytes) {
+    DCHECK_GE(committed_ + bytes, committed_);
     committed_ += bytes;
     if (committed_ > max_committed_) {
       max_committed_ = committed_;
     }
   }
 
-  void AccountUncommitted(intptr_t bytes) {
-    DCHECK_GE(bytes, 0);
+  void AccountUncommitted(size_t bytes) {
+    DCHECK_GE(committed_, committed_ - bytes);
     committed_ -= bytes;
-    DCHECK_GE(committed_, 0);
   }
 
 #ifdef DEBUG
@@ -971,8 +963,8 @@
   Executability executable_;
 
   // Keeps track of committed memory in a space.
-  intptr_t committed_;
-  intptr_t max_committed_;
+  size_t committed_;
+  size_t max_committed_;
 
   DISALLOW_COPY_AND_ASSIGN(Space);
 };
@@ -981,10 +973,6 @@
 class MemoryChunkValidator {
   // Computed offsets should match the compiler generated ones.
   STATIC_ASSERT(MemoryChunk::kSizeOffset == offsetof(MemoryChunk, size_));
-  STATIC_ASSERT(MemoryChunk::kLiveBytesOffset ==
-                offsetof(MemoryChunk, live_byte_count_));
-  STATIC_ASSERT(MemoryChunk::kOldToNewSlotsOffset ==
-                offsetof(MemoryChunk, old_to_new_slots_));
   STATIC_ASSERT(MemoryChunk::kWriteBarrierCounterOffset ==
                 offsetof(MemoryChunk, write_barrier_counter_));
 
@@ -1242,12 +1230,31 @@
     kRegular,
     kPooled,
   };
+
   enum FreeMode {
     kFull,
     kPreFreeAndQueue,
     kPooledAndQueue,
   };
 
+  static int CodePageGuardStartOffset();
+
+  static int CodePageGuardSize();
+
+  static int CodePageAreaStartOffset();
+
+  static int CodePageAreaEndOffset();
+
+  static int CodePageAreaSize() {
+    return CodePageAreaEndOffset() - CodePageAreaStartOffset();
+  }
+
+  static int PageAreaSize(AllocationSpace space) {
+    DCHECK_NE(LO_SPACE, space);
+    return (space == CODE_SPACE) ? CodePageAreaSize()
+                                 : Page::kAllocatableMemory;
+  }
+
   explicit MemoryAllocator(Isolate* isolate);
 
   // Initializes its internal bookkeeping structures.
@@ -1273,26 +1280,26 @@
   bool CanFreeMemoryChunk(MemoryChunk* chunk);
 
   // Returns allocated spaces in bytes.
-  intptr_t Size() { return size_.Value(); }
+  size_t Size() { return size_.Value(); }
 
   // Returns allocated executable spaces in bytes.
-  intptr_t SizeExecutable() { return size_executable_.Value(); }
+  size_t SizeExecutable() { return size_executable_.Value(); }
 
   // Returns the maximum available bytes of heaps.
-  intptr_t Available() {
-    intptr_t size = Size();
+  size_t Available() {
+    const size_t size = Size();
     return capacity_ < size ? 0 : capacity_ - size;
   }
 
   // Returns the maximum available executable bytes of heaps.
-  intptr_t AvailableExecutable() {
-    intptr_t executable_size = SizeExecutable();
+  size_t AvailableExecutable() {
+    const size_t executable_size = SizeExecutable();
     if (capacity_executable_ < executable_size) return 0;
     return capacity_executable_ - executable_size;
   }
 
   // Returns maximum available bytes that the old space can have.
-  intptr_t MaxAvailable() {
+  size_t MaxAvailable() {
     return (Available() / Page::kPageSize) * Page::kAllocatableMemory;
   }
 
@@ -1303,11 +1310,6 @@
            address >= highest_ever_allocated_.Value();
   }
 
-#ifdef DEBUG
-  // Reports statistic info of the space.
-  void ReportStatistics();
-#endif
-
   // Returns a MemoryChunk in which the memory region from commit_area_size to
   // reserve_area_size of the chunk area is reserved but not committed, it
   // could be committed later by calling MemoryChunk::CommitArea.
@@ -1315,6 +1317,8 @@
                              intptr_t commit_area_size,
                              Executability executable, Space* space);
 
+  void ShrinkChunk(MemoryChunk* chunk, size_t bytes_to_shrink);
+
   Address ReserveAlignedMemory(size_t requested, size_t alignment,
                                base::VirtualMemory* controller);
   Address AllocateAlignedMemory(size_t reserve_size, size_t commit_size,
@@ -1343,24 +1347,6 @@
   // filling it up with a recognizable non-NULL bit pattern.
   void ZapBlock(Address start, size_t size);
 
-  static int CodePageGuardStartOffset();
-
-  static int CodePageGuardSize();
-
-  static int CodePageAreaStartOffset();
-
-  static int CodePageAreaEndOffset();
-
-  static int CodePageAreaSize() {
-    return CodePageAreaEndOffset() - CodePageAreaStartOffset();
-  }
-
-  static int PageAreaSize(AllocationSpace space) {
-    DCHECK_NE(LO_SPACE, space);
-    return (space == CODE_SPACE) ? CodePageAreaSize()
-                                 : Page::kAllocatableMemory;
-  }
-
   MUST_USE_RESULT bool CommitExecutableMemory(base::VirtualMemory* vm,
                                               Address start, size_t commit_size,
                                               size_t reserved_size);
@@ -1368,6 +1354,11 @@
   CodeRange* code_range() { return code_range_; }
   Unmapper* unmapper() { return &unmapper_; }
 
+#ifdef DEBUG
+  // Reports statistic info of the space.
+  void ReportStatistics();
+#endif
+
  private:
   // PreFree logically frees the object, i.e., it takes care of the size
   // bookkeeping and calls the allocation callback.
@@ -1381,28 +1372,6 @@
   template <typename SpaceType>
   MemoryChunk* AllocatePagePooled(SpaceType* owner);
 
-  Isolate* isolate_;
-
-  CodeRange* code_range_;
-
-  // Maximum space size in bytes.
-  intptr_t capacity_;
-  // Maximum subset of capacity_ that can be executable
-  intptr_t capacity_executable_;
-
-  // Allocated space size in bytes.
-  base::AtomicNumber<intptr_t> size_;
-  // Allocated executable space size in bytes.
-  base::AtomicNumber<intptr_t> size_executable_;
-
-  // We keep the lowest and highest addresses allocated as a quick way
-  // of determining that pointers are outside the heap. The estimate is
-  // conservative, i.e. not all addrsses in 'allocated' space are allocated
-  // to our heap. The range is [lowest, highest[, inclusive on the low end
-  // and exclusive on the high end.
-  base::AtomicValue<void*> lowest_ever_allocated_;
-  base::AtomicValue<void*> highest_ever_allocated_;
-
   // Initializes pages in a chunk. Returns the first page address.
   // This function and GetChunkId() are provided for the mark-compact
   // collector to rebuild page headers in the from space, which is
@@ -1423,6 +1392,27 @@
     } while ((high > ptr) && !highest_ever_allocated_.TrySetValue(ptr, high));
   }
 
+  Isolate* isolate_;
+  CodeRange* code_range_;
+
+  // Maximum space size in bytes.
+  size_t capacity_;
+  // Maximum subset of capacity_ that can be executable
+  size_t capacity_executable_;
+
+  // Allocated space size in bytes.
+  base::AtomicNumber<size_t> size_;
+  // Allocated executable space size in bytes.
+  base::AtomicNumber<size_t> size_executable_;
+
+  // We keep the lowest and highest addresses allocated as a quick way
+  // of determining that pointers are outside the heap. The estimate is
+  // conservative, i.e. not all addresses in 'allocated' space are allocated
+  // to our heap. The range is [lowest, highest[, inclusive on the low end
+  // and exclusive on the high end.
+  base::AtomicValue<void*> lowest_ever_allocated_;
+  base::AtomicValue<void*> highest_ever_allocated_;
+
   base::VirtualMemory last_chunk_;
   Unmapper unmapper_;
 
@@ -1440,7 +1430,7 @@
 //       method which is used to avoid using virtual functions
 //       iterating a specific space.
 
-class ObjectIterator : public Malloced {
+class V8_EXPORT_PRIVATE ObjectIterator : public Malloced {
  public:
   virtual ~ObjectIterator() {}
   virtual HeapObject* Next() = 0;
@@ -1491,7 +1481,7 @@
 // If objects are allocated in the page during iteration the iterator may
 // or may not iterate over those objects.  The caller must create a new
 // iterator in order to be sure to visit these new objects.
-class HeapObjectIterator : public ObjectIterator {
+class V8_EXPORT_PRIVATE HeapObjectIterator : public ObjectIterator {
  public:
   // Creates a new object iterator in a given space.
   explicit HeapObjectIterator(PagedSpace* space);
@@ -1880,50 +1870,6 @@
   DISALLOW_IMPLICIT_CONSTRUCTORS(FreeList);
 };
 
-
-class AllocationResult {
- public:
-  // Implicit constructor from Object*.
-  AllocationResult(Object* object)  // NOLINT
-      : object_(object) {
-    // AllocationResults can't return Smis, which are used to represent
-    // failure and the space to retry in.
-    CHECK(!object->IsSmi());
-  }
-
-  AllocationResult() : object_(Smi::FromInt(NEW_SPACE)) {}
-
-  static inline AllocationResult Retry(AllocationSpace space = NEW_SPACE) {
-    return AllocationResult(space);
-  }
-
-  inline bool IsRetry() { return object_->IsSmi(); }
-
-  template <typename T>
-  bool To(T** obj) {
-    if (IsRetry()) return false;
-    *obj = T::cast(object_);
-    return true;
-  }
-
-  Object* ToObjectChecked() {
-    CHECK(!IsRetry());
-    return object_;
-  }
-
-  inline AllocationSpace RetrySpace();
-
- private:
-  explicit AllocationResult(AllocationSpace space)
-      : object_(Smi::FromInt(static_cast<int>(space))) {}
-
-  Object* object_;
-};
-
-
-STATIC_ASSERT(sizeof(AllocationResult) == kPointerSize);
-
-
 // LocalAllocationBuffer represents a linear allocation area that is created
 // from a given {AllocationResult} and can be used to allocate memory without
 // synchronization.
@@ -2196,6 +2142,12 @@
   iterator begin() { return iterator(anchor_.next_page()); }
   iterator end() { return iterator(&anchor_); }
 
+  // Shrink immortal immovable pages of the space to be exactly the size needed
+  // using the high water mark.
+  void ShrinkImmortalImmovablePages();
+
+  std::unique_ptr<ObjectIterator> GetObjectIterator() override;
+
  protected:
   // PagedSpaces that should be included in snapshots have different, i.e.,
   // smaller, initial pages.
@@ -2255,41 +2207,6 @@
   friend class HeapTester;
 };
 
-
-class NumberAndSizeInfo BASE_EMBEDDED {
- public:
-  NumberAndSizeInfo() : number_(0), bytes_(0) {}
-
-  int number() const { return number_; }
-  void increment_number(int num) { number_ += num; }
-
-  int bytes() const { return bytes_; }
-  void increment_bytes(int size) { bytes_ += size; }
-
-  void clear() {
-    number_ = 0;
-    bytes_ = 0;
-  }
-
- private:
-  int number_;
-  int bytes_;
-};
-
-
-// HistogramInfo class for recording a single "bar" of a histogram.  This
-// class is used for collecting statistics to print to the log file.
-class HistogramInfo : public NumberAndSizeInfo {
- public:
-  HistogramInfo() : NumberAndSizeInfo() {}
-
-  const char* name() { return name_; }
-  void set_name(const char* name) { name_ = name; }
-
- private:
-  const char* name_;
-};
-
 enum SemiSpaceId { kFromSpace = 0, kToSpace = 1 };
 
 // -----------------------------------------------------------------------------
@@ -2411,6 +2328,11 @@
     return 0;
   }
 
+  iterator begin() { return iterator(anchor_.next_page()); }
+  iterator end() { return iterator(anchor()); }
+
+  std::unique_ptr<ObjectIterator> GetObjectIterator() override;
+
 #ifdef DEBUG
   void Print() override;
   // Validate a range of of addresses in a SemiSpace.
@@ -2426,9 +2348,6 @@
   virtual void Verify();
 #endif
 
-  iterator begin() { return iterator(anchor_.next_page()); }
-  iterator end() { return iterator(anchor()); }
-
  private:
   void RewindPages(Page* start, int num_pages);
 
@@ -2534,10 +2453,7 @@
            static_cast<int>(top() - to_space_.page_low());
   }
 
-  // The same, but returning an int.  We have to have the one that returns
-  // intptr_t because it is inherited, but if we know we are dealing with the
-  // new space, which can't get as big as the other spaces then this is useful:
-  int SizeAsInt() { return static_cast<int>(Size()); }
+  intptr_t SizeOfObjects() override { return Size(); }
 
   // Return the allocatable capacity of a semispace.
   intptr_t Capacity() {
@@ -2555,11 +2471,11 @@
 
   // Committed memory for NewSpace is the committed memory of both semi-spaces
   // combined.
-  intptr_t CommittedMemory() override {
+  size_t CommittedMemory() override {
     return from_space_.CommittedMemory() + to_space_.CommittedMemory();
   }
 
-  intptr_t MaximumCommittedMemory() override {
+  size_t MaximumCommittedMemory() override {
     return from_space_.MaximumCommittedMemory() +
            to_space_.MaximumCommittedMemory();
   }
@@ -2760,6 +2676,8 @@
   iterator begin() { return to_space_.begin(); }
   iterator end() { return to_space_.end(); }
 
+  std::unique_ptr<ObjectIterator> GetObjectIterator() override;
+
  private:
   // Update allocation info to match the current to-space page.
   void UpdateAllocationInfo();
@@ -2895,7 +2813,7 @@
 
 
 // -----------------------------------------------------------------------------
-// Large objects ( > Page::kMaxRegularHeapObjectSize ) are allocated and
+// Large objects ( > kMaxRegularHeapObjectSize ) are allocated and
 // managed by the large object space. A large object is allocated from OS
 // heap with extra padding bytes (Page::kPageSize + Page::kObjectStartOffset).
 // A large object always starts at Page::kObjectStartOffset to a page.
@@ -2973,6 +2891,8 @@
   iterator begin() { return iterator(first_page_); }
   iterator end() { return iterator(nullptr); }
 
+  std::unique_ptr<ObjectIterator> GetObjectIterator() override;
+
 #ifdef VERIFY_HEAP
   virtual void Verify();
 #endif
@@ -3030,20 +2950,6 @@
   LargePageIterator lo_iterator_;
 };
 
-#ifdef DEBUG
-struct CommentStatistic {
-  const char* comment;
-  int size;
-  int count;
-  void Clear() {
-    comment = NULL;
-    size = 0;
-    count = 0;
-  }
-  // Must be small, since an iteration is used for lookup.
-  static const int kMaxComments = 64;
-};
-#endif
 }  // namespace internal
 }  // namespace v8
 
diff --git a/src/i18n.cc b/src/i18n.cc
index 3418ae7..58b8a8d 100644
--- a/src/i18n.cc
+++ b/src/i18n.cc
@@ -5,6 +5,8 @@
 
 #include "src/i18n.h"
 
+#include <memory>
+
 #include "src/api.h"
 #include "src/factory.h"
 #include "src/isolate.h"
@@ -115,13 +117,11 @@
   icu::SimpleDateFormat* date_format = NULL;
   icu::UnicodeString skeleton;
   if (ExtractStringSetting(isolate, options, "skeleton", &skeleton)) {
-    icu::DateTimePatternGenerator* generator =
-        icu::DateTimePatternGenerator::createInstance(icu_locale, status);
+    std::unique_ptr<icu::DateTimePatternGenerator> generator(
+        icu::DateTimePatternGenerator::createInstance(icu_locale, status));
     icu::UnicodeString pattern;
-    if (U_SUCCESS(status)) {
+    if (U_SUCCESS(status))
       pattern = generator->getBestPattern(skeleton, status);
-      delete generator;
-    }
 
     date_format = new icu::SimpleDateFormat(pattern, icu_locale, status);
     if (U_SUCCESS(status)) {
@@ -132,7 +132,7 @@
   if (U_FAILURE(status)) {
     delete calendar;
     delete date_format;
-    date_format = NULL;
+    date_format = nullptr;
   }
 
   return date_format;
diff --git a/src/ia32/code-stubs-ia32.cc b/src/ia32/code-stubs-ia32.cc
index 6f2fb97..edab277 100644
--- a/src/ia32/code-stubs-ia32.cc
+++ b/src/ia32/code-stubs-ia32.cc
@@ -1301,7 +1301,6 @@
   // edi : the function to call
   Isolate* isolate = masm->isolate();
   Label initialize, done, miss, megamorphic, not_array_function;
-  Label done_increment_count, done_initialize_count;
 
   // Load the cache state into ecx.
   __ mov(ecx, FieldOperand(ebx, edx, times_half_pointer_size,
@@ -1314,7 +1313,7 @@
   // type-feedback-vector.h).
   Label check_allocation_site;
   __ cmp(edi, FieldOperand(ecx, WeakCell::kValueOffset));
-  __ j(equal, &done_increment_count, Label::kFar);
+  __ j(equal, &done, Label::kFar);
   __ CompareRoot(ecx, Heap::kmegamorphic_symbolRootIndex);
   __ j(equal, &done, Label::kFar);
   __ CompareRoot(FieldOperand(ecx, HeapObject::kMapOffset),
@@ -1337,7 +1336,7 @@
   __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, ecx);
   __ cmp(edi, ecx);
   __ j(not_equal, &megamorphic);
-  __ jmp(&done_increment_count, Label::kFar);
+  __ jmp(&done, Label::kFar);
 
   __ bind(&miss);
 
@@ -1366,26 +1365,17 @@
   // slot.
   CreateAllocationSiteStub create_stub(isolate);
   CallStubInRecordCallTarget(masm, &create_stub);
-  __ jmp(&done_initialize_count);
+  __ jmp(&done);
 
   __ bind(&not_array_function);
   CreateWeakCellStub weak_cell_stub(isolate);
   CallStubInRecordCallTarget(masm, &weak_cell_stub);
-  __ bind(&done_initialize_count);
 
-  // Initialize the call counter.
-  __ mov(FieldOperand(ebx, edx, times_half_pointer_size,
-                      FixedArray::kHeaderSize + kPointerSize),
-         Immediate(Smi::FromInt(1)));
-  __ jmp(&done);
-
-  __ bind(&done_increment_count);
-  // Increment the call count for monomorphic function calls.
+  __ bind(&done);
+  // Increment the call count for all function calls.
   __ add(FieldOperand(ebx, edx, times_half_pointer_size,
                       FixedArray::kHeaderSize + kPointerSize),
          Immediate(Smi::FromInt(1)));
-
-  __ bind(&done);
 }
 
 
@@ -1431,6 +1421,12 @@
   __ Jump(isolate()->builtins()->Construct(), RelocInfo::CODE_TARGET);
 }
 
+static void IncrementCallCount(MacroAssembler* masm, Register feedback_vector,
+                               Register slot) {
+  __ add(FieldOperand(feedback_vector, slot, times_half_pointer_size,
+                      FixedArray::kHeaderSize + kPointerSize),
+         Immediate(Smi::FromInt(1)));
+}
 
 void CallICStub::HandleArrayCase(MacroAssembler* masm, Label* miss) {
   // edi - function
@@ -1446,9 +1442,7 @@
                            FixedArray::kHeaderSize));
 
   // Increment the call count for monomorphic function calls.
-  __ add(FieldOperand(ebx, edx, times_half_pointer_size,
-                      FixedArray::kHeaderSize + kPointerSize),
-         Immediate(Smi::FromInt(1)));
+  IncrementCallCount(masm, ebx, edx);
 
   __ mov(ebx, ecx);
   __ mov(edx, edi);
@@ -1464,7 +1458,7 @@
   // edx - slot id
   // ebx - vector
   Isolate* isolate = masm->isolate();
-  Label extra_checks_or_miss, call, call_function;
+  Label extra_checks_or_miss, call, call_function, call_count_incremented;
   int argc = arg_count();
   ParameterCount actual(argc);
 
@@ -1493,12 +1487,11 @@
   // convincing us that we have a monomorphic JSFunction.
   __ JumpIfSmi(edi, &extra_checks_or_miss);
 
-  // Increment the call count for monomorphic function calls.
-  __ add(FieldOperand(ebx, edx, times_half_pointer_size,
-                      FixedArray::kHeaderSize + kPointerSize),
-         Immediate(Smi::FromInt(1)));
-
   __ bind(&call_function);
+
+  // Increment the call count for monomorphic function calls.
+  IncrementCallCount(masm, ebx, edx);
+
   __ Set(eax, argc);
   __ Jump(masm->isolate()->builtins()->CallFunction(convert_mode(),
                                                     tail_call_mode()),
@@ -1539,6 +1532,12 @@
       Immediate(TypeFeedbackVector::MegamorphicSentinel(isolate)));
 
   __ bind(&call);
+
+  // Increment the call count for megamorphic function calls.
+  IncrementCallCount(masm, ebx, edx);
+
+  __ bind(&call_count_incremented);
+
   __ Set(eax, argc);
   __ Jump(masm->isolate()->builtins()->Call(convert_mode(), tail_call_mode()),
           RelocInfo::CODE_TARGET);
@@ -1564,11 +1563,6 @@
   __ cmp(ecx, NativeContextOperand());
   __ j(not_equal, &miss);
 
-  // Initialize the call counter.
-  __ mov(FieldOperand(ebx, edx, times_half_pointer_size,
-                      FixedArray::kHeaderSize + kPointerSize),
-         Immediate(Smi::FromInt(1)));
-
   // Store the function. Use a stub since we need a frame for allocation.
   // ebx - vector
   // edx - slot
@@ -1576,11 +1570,15 @@
   {
     FrameScope scope(masm, StackFrame::INTERNAL);
     CreateWeakCellStub create_stub(isolate);
+    __ push(ebx);
+    __ push(edx);
     __ push(edi);
     __ push(esi);
     __ CallStub(&create_stub);
     __ pop(esi);
     __ pop(edi);
+    __ pop(edx);
+    __ pop(ebx);
   }
 
   __ jmp(&call_function);
@@ -1590,7 +1588,7 @@
   __ bind(&miss);
   GenerateMiss(masm);
 
-  __ jmp(&call);
+  __ jmp(&call_count_incremented);
 
   // Unreachable
   __ int3();
@@ -2068,297 +2066,6 @@
 }
 
 
-void SubStringStub::Generate(MacroAssembler* masm) {
-  Label runtime;
-
-  // Stack frame on entry.
-  //  esp[0]: return address
-  //  esp[4]: to
-  //  esp[8]: from
-  //  esp[12]: string
-
-  // Make sure first argument is a string.
-  __ mov(eax, Operand(esp, 3 * kPointerSize));
-  STATIC_ASSERT(kSmiTag == 0);
-  __ JumpIfSmi(eax, &runtime);
-  Condition is_string = masm->IsObjectStringType(eax, ebx, ebx);
-  __ j(NegateCondition(is_string), &runtime);
-
-  // eax: string
-  // ebx: instance type
-
-  // Calculate length of sub string using the smi values.
-  __ mov(ecx, Operand(esp, 1 * kPointerSize));  // To index.
-  __ JumpIfNotSmi(ecx, &runtime);
-  __ mov(edx, Operand(esp, 2 * kPointerSize));  // From index.
-  __ JumpIfNotSmi(edx, &runtime);
-  __ sub(ecx, edx);
-  __ cmp(ecx, FieldOperand(eax, String::kLengthOffset));
-  Label not_original_string;
-  // Shorter than original string's length: an actual substring.
-  __ j(below, &not_original_string, Label::kNear);
-  // Longer than original string's length or negative: unsafe arguments.
-  __ j(above, &runtime);
-  // Return original string.
-  Counters* counters = isolate()->counters();
-  __ IncrementCounter(counters->sub_string_native(), 1);
-  __ ret(3 * kPointerSize);
-  __ bind(&not_original_string);
-
-  Label single_char;
-  __ cmp(ecx, Immediate(Smi::FromInt(1)));
-  __ j(equal, &single_char);
-
-  // eax: string
-  // ebx: instance type
-  // ecx: sub string length (smi)
-  // edx: from index (smi)
-  // Deal with different string types: update the index if necessary
-  // and put the underlying string into edi.
-  Label underlying_unpacked, sliced_string, seq_or_external_string;
-  // If the string is not indirect, it can only be sequential or external.
-  STATIC_ASSERT(kIsIndirectStringMask == (kSlicedStringTag & kConsStringTag));
-  STATIC_ASSERT(kIsIndirectStringMask != 0);
-  __ test(ebx, Immediate(kIsIndirectStringMask));
-  __ j(zero, &seq_or_external_string, Label::kNear);
-
-  Factory* factory = isolate()->factory();
-  __ test(ebx, Immediate(kSlicedNotConsMask));
-  __ j(not_zero, &sliced_string, Label::kNear);
-  // Cons string.  Check whether it is flat, then fetch first part.
-  // Flat cons strings have an empty second part.
-  __ cmp(FieldOperand(eax, ConsString::kSecondOffset),
-         factory->empty_string());
-  __ j(not_equal, &runtime);
-  __ mov(edi, FieldOperand(eax, ConsString::kFirstOffset));
-  // Update instance type.
-  __ mov(ebx, FieldOperand(edi, HeapObject::kMapOffset));
-  __ movzx_b(ebx, FieldOperand(ebx, Map::kInstanceTypeOffset));
-  __ jmp(&underlying_unpacked, Label::kNear);
-
-  __ bind(&sliced_string);
-  // Sliced string.  Fetch parent and adjust start index by offset.
-  __ add(edx, FieldOperand(eax, SlicedString::kOffsetOffset));
-  __ mov(edi, FieldOperand(eax, SlicedString::kParentOffset));
-  // Update instance type.
-  __ mov(ebx, FieldOperand(edi, HeapObject::kMapOffset));
-  __ movzx_b(ebx, FieldOperand(ebx, Map::kInstanceTypeOffset));
-  __ jmp(&underlying_unpacked, Label::kNear);
-
-  __ bind(&seq_or_external_string);
-  // Sequential or external string.  Just move string to the expected register.
-  __ mov(edi, eax);
-
-  __ bind(&underlying_unpacked);
-
-  if (FLAG_string_slices) {
-    Label copy_routine;
-    // edi: underlying subject string
-    // ebx: instance type of underlying subject string
-    // edx: adjusted start index (smi)
-    // ecx: length (smi)
-    __ cmp(ecx, Immediate(Smi::FromInt(SlicedString::kMinLength)));
-    // Short slice.  Copy instead of slicing.
-    __ j(less, &copy_routine);
-    // Allocate new sliced string.  At this point we do not reload the instance
-    // type including the string encoding because we simply rely on the info
-    // provided by the original string.  It does not matter if the original
-    // string's encoding is wrong because we always have to recheck encoding of
-    // the newly created string's parent anyways due to externalized strings.
-    Label two_byte_slice, set_slice_header;
-    STATIC_ASSERT((kStringEncodingMask & kOneByteStringTag) != 0);
-    STATIC_ASSERT((kStringEncodingMask & kTwoByteStringTag) == 0);
-    __ test(ebx, Immediate(kStringEncodingMask));
-    __ j(zero, &two_byte_slice, Label::kNear);
-    __ AllocateOneByteSlicedString(eax, ebx, no_reg, &runtime);
-    __ jmp(&set_slice_header, Label::kNear);
-    __ bind(&two_byte_slice);
-    __ AllocateTwoByteSlicedString(eax, ebx, no_reg, &runtime);
-    __ bind(&set_slice_header);
-    __ mov(FieldOperand(eax, SlicedString::kLengthOffset), ecx);
-    __ mov(FieldOperand(eax, SlicedString::kHashFieldOffset),
-           Immediate(String::kEmptyHashField));
-    __ mov(FieldOperand(eax, SlicedString::kParentOffset), edi);
-    __ mov(FieldOperand(eax, SlicedString::kOffsetOffset), edx);
-    __ IncrementCounter(counters->sub_string_native(), 1);
-    __ ret(3 * kPointerSize);
-
-    __ bind(&copy_routine);
-  }
-
-  // edi: underlying subject string
-  // ebx: instance type of underlying subject string
-  // edx: adjusted start index (smi)
-  // ecx: length (smi)
-  // The subject string can only be external or sequential string of either
-  // encoding at this point.
-  Label two_byte_sequential, runtime_drop_two, sequential_string;
-  STATIC_ASSERT(kExternalStringTag != 0);
-  STATIC_ASSERT(kSeqStringTag == 0);
-  __ test_b(ebx, Immediate(kExternalStringTag));
-  __ j(zero, &sequential_string);
-
-  // Handle external string.
-  // Rule out short external strings.
-  STATIC_ASSERT(kShortExternalStringTag != 0);
-  __ test_b(ebx, Immediate(kShortExternalStringMask));
-  __ j(not_zero, &runtime);
-  __ mov(edi, FieldOperand(edi, ExternalString::kResourceDataOffset));
-  // Move the pointer so that offset-wise, it looks like a sequential string.
-  STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqOneByteString::kHeaderSize);
-  __ sub(edi, Immediate(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
-
-  __ bind(&sequential_string);
-  // Stash away (adjusted) index and (underlying) string.
-  __ push(edx);
-  __ push(edi);
-  __ SmiUntag(ecx);
-  STATIC_ASSERT((kOneByteStringTag & kStringEncodingMask) != 0);
-  __ test_b(ebx, Immediate(kStringEncodingMask));
-  __ j(zero, &two_byte_sequential);
-
-  // Sequential one byte string.  Allocate the result.
-  __ AllocateOneByteString(eax, ecx, ebx, edx, edi, &runtime_drop_two);
-
-  // eax: result string
-  // ecx: result string length
-  // Locate first character of result.
-  __ mov(edi, eax);
-  __ add(edi, Immediate(SeqOneByteString::kHeaderSize - kHeapObjectTag));
-  // Load string argument and locate character of sub string start.
-  __ pop(edx);
-  __ pop(ebx);
-  __ SmiUntag(ebx);
-  __ lea(edx, FieldOperand(edx, ebx, times_1, SeqOneByteString::kHeaderSize));
-
-  // eax: result string
-  // ecx: result length
-  // edi: first character of result
-  // edx: character of sub string start
-  StringHelper::GenerateCopyCharacters(
-      masm, edi, edx, ecx, ebx, String::ONE_BYTE_ENCODING);
-  __ IncrementCounter(counters->sub_string_native(), 1);
-  __ ret(3 * kPointerSize);
-
-  __ bind(&two_byte_sequential);
-  // Sequential two-byte string.  Allocate the result.
-  __ AllocateTwoByteString(eax, ecx, ebx, edx, edi, &runtime_drop_two);
-
-  // eax: result string
-  // ecx: result string length
-  // Locate first character of result.
-  __ mov(edi, eax);
-  __ add(edi,
-         Immediate(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
-  // Load string argument and locate character of sub string start.
-  __ pop(edx);
-  __ pop(ebx);
-  // As from is a smi it is 2 times the value which matches the size of a two
-  // byte character.
-  STATIC_ASSERT(kSmiTag == 0);
-  STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1);
-  __ lea(edx, FieldOperand(edx, ebx, times_1, SeqTwoByteString::kHeaderSize));
-
-  // eax: result string
-  // ecx: result length
-  // edi: first character of result
-  // edx: character of sub string start
-  StringHelper::GenerateCopyCharacters(
-      masm, edi, edx, ecx, ebx, String::TWO_BYTE_ENCODING);
-  __ IncrementCounter(counters->sub_string_native(), 1);
-  __ ret(3 * kPointerSize);
-
-  // Drop pushed values on the stack before tail call.
-  __ bind(&runtime_drop_two);
-  __ Drop(2);
-
-  // Just jump to runtime to create the sub string.
-  __ bind(&runtime);
-  __ TailCallRuntime(Runtime::kSubString);
-
-  __ bind(&single_char);
-  // eax: string
-  // ebx: instance type
-  // ecx: sub string length (smi)
-  // edx: from index (smi)
-  StringCharAtGenerator generator(eax, edx, ecx, eax, &runtime, &runtime,
-                                  &runtime, RECEIVER_IS_STRING);
-  generator.GenerateFast(masm);
-  __ ret(3 * kPointerSize);
-  generator.SkipSlow(masm, &runtime);
-}
-
-void ToStringStub::Generate(MacroAssembler* masm) {
-  // The ToString stub takes one argument in eax.
-  Label is_number;
-  __ JumpIfSmi(eax, &is_number, Label::kNear);
-
-  Label not_string;
-  __ CmpObjectType(eax, FIRST_NONSTRING_TYPE, edi);
-  // eax: receiver
-  // edi: receiver map
-  __ j(above_equal, &not_string, Label::kNear);
-  __ Ret();
-  __ bind(&not_string);
-
-  Label not_heap_number;
-  __ CompareMap(eax, masm->isolate()->factory()->heap_number_map());
-  __ j(not_equal, &not_heap_number, Label::kNear);
-  __ bind(&is_number);
-  NumberToStringStub stub(isolate());
-  __ TailCallStub(&stub);
-  __ bind(&not_heap_number);
-
-  Label not_oddball;
-  __ CmpInstanceType(edi, ODDBALL_TYPE);
-  __ j(not_equal, &not_oddball, Label::kNear);
-  __ mov(eax, FieldOperand(eax, Oddball::kToStringOffset));
-  __ Ret();
-  __ bind(&not_oddball);
-
-  __ pop(ecx);   // Pop return address.
-  __ push(eax);  // Push argument.
-  __ push(ecx);  // Push return address.
-  __ TailCallRuntime(Runtime::kToString);
-}
-
-
-void ToNameStub::Generate(MacroAssembler* masm) {
-  // The ToName stub takes one argument in eax.
-  Label is_number;
-  __ JumpIfSmi(eax, &is_number, Label::kNear);
-
-  Label not_name;
-  STATIC_ASSERT(FIRST_NAME_TYPE == FIRST_TYPE);
-  __ CmpObjectType(eax, LAST_NAME_TYPE, edi);
-  // eax: receiver
-  // edi: receiver map
-  __ j(above, &not_name, Label::kNear);
-  __ Ret();
-  __ bind(&not_name);
-
-  Label not_heap_number;
-  __ CompareMap(eax, masm->isolate()->factory()->heap_number_map());
-  __ j(not_equal, &not_heap_number, Label::kNear);
-  __ bind(&is_number);
-  NumberToStringStub stub(isolate());
-  __ TailCallStub(&stub);
-  __ bind(&not_heap_number);
-
-  Label not_oddball;
-  __ CmpInstanceType(edi, ODDBALL_TYPE);
-  __ j(not_equal, &not_oddball, Label::kNear);
-  __ mov(eax, FieldOperand(eax, Oddball::kToStringOffset));
-  __ Ret();
-  __ bind(&not_oddball);
-
-  __ pop(ecx);   // Pop return address.
-  __ push(eax);  // Push argument.
-  __ push(ecx);  // Push return address.
-  __ TailCallRuntime(Runtime::kToName);
-}
-
-
 void StringHelper::GenerateFlatOneByteStringEquals(MacroAssembler* masm,
                                                    Register left,
                                                    Register right,
@@ -3228,17 +2935,6 @@
     Mode mode) {
   Label object_is_black, need_incremental, need_incremental_pop_object;
 
-  __ mov(regs_.scratch0(), Immediate(~Page::kPageAlignmentMask));
-  __ and_(regs_.scratch0(), regs_.object());
-  __ mov(regs_.scratch1(),
-         Operand(regs_.scratch0(),
-                 MemoryChunk::kWriteBarrierCounterOffset));
-  __ sub(regs_.scratch1(), Immediate(1));
-  __ mov(Operand(regs_.scratch0(),
-                 MemoryChunk::kWriteBarrierCounterOffset),
-         regs_.scratch1());
-  __ j(negative, &need_incremental);
-
   // Let's look at the color of the object:  If it is not black we don't have
   // to inform the incremental marker.
   __ JumpIfBlack(regs_.object(),
@@ -3580,11 +3276,10 @@
   Label load_smi_map, compare_map;
   Label start_polymorphic;
   Label pop_and_miss;
-  ExternalReference virtual_register =
-      ExternalReference::virtual_handler_register(masm->isolate());
 
   __ push(receiver);
-  __ push(vector);
+  // Value, vector and slot are passed on the stack, so no need to save/restore
+  // them.
 
   Register receiver_map = receiver;
   Register cached_map = vector;
@@ -3605,12 +3300,9 @@
   Register handler = feedback;
   DCHECK(handler.is(StoreWithVectorDescriptor::ValueRegister()));
   __ mov(handler, FieldOperand(feedback, FixedArray::OffsetOfElementAt(1)));
-  __ pop(vector);
   __ pop(receiver);
   __ lea(handler, FieldOperand(handler, Code::kHeaderSize));
-  __ mov(Operand::StaticVariable(virtual_register), handler);
-  __ pop(handler);  // Pop "value".
-  __ jmp(Operand::StaticVariable(virtual_register));
+  __ jmp(handler);
 
   // Polymorphic, we have to loop from 2 to N
   __ bind(&start_polymorphic);
@@ -3634,11 +3326,8 @@
                                FixedArray::kHeaderSize + kPointerSize));
   __ lea(handler, FieldOperand(handler, Code::kHeaderSize));
   __ pop(key);
-  __ pop(vector);
   __ pop(receiver);
-  __ mov(Operand::StaticVariable(virtual_register), handler);
-  __ pop(handler);  // Pop "value".
-  __ jmp(Operand::StaticVariable(virtual_register));
+  __ jmp(handler);
 
   __ bind(&prepare_next);
   __ add(counter, Immediate(Smi::FromInt(2)));
@@ -3648,7 +3337,6 @@
   // We exhausted our array of map handler pairs.
   __ bind(&pop_and_miss);
   __ pop(key);
-  __ pop(vector);
   __ pop(receiver);
   __ jmp(miss);
 
@@ -3664,8 +3352,6 @@
                                        Label* miss) {
   // The store ic value is on the stack.
   DCHECK(weak_cell.is(StoreWithVectorDescriptor::ValueRegister()));
-  ExternalReference virtual_register =
-      ExternalReference::virtual_handler_register(masm->isolate());
 
   // feedback initially contains the feedback array
   Label compare_smi_map;
@@ -3681,11 +3367,8 @@
   __ mov(weak_cell, FieldOperand(vector, slot, times_half_pointer_size,
                                  FixedArray::kHeaderSize + kPointerSize));
   __ lea(weak_cell, FieldOperand(weak_cell, Code::kHeaderSize));
-  // Put the store ic value back in it's register.
-  __ mov(Operand::StaticVariable(virtual_register), weak_cell);
-  __ pop(weak_cell);  // Pop "value".
   // jump to the handler.
-  __ jmp(Operand::StaticVariable(virtual_register));
+  __ jmp(weak_cell);
 
   // In microbenchmarks, it made sense to unroll this code so that the call to
   // the handler is duplicated for a HeapObject receiver and a Smi receiver.
@@ -3695,10 +3378,8 @@
   __ mov(weak_cell, FieldOperand(vector, slot, times_half_pointer_size,
                                  FixedArray::kHeaderSize + kPointerSize));
   __ lea(weak_cell, FieldOperand(weak_cell, Code::kHeaderSize));
-  __ mov(Operand::StaticVariable(virtual_register), weak_cell);
-  __ pop(weak_cell);  // Pop "value".
   // jump to the handler.
-  __ jmp(Operand::StaticVariable(virtual_register));
+  __ jmp(weak_cell);
 }
 
 void StoreICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
@@ -3709,7 +3390,26 @@
   Register slot = StoreWithVectorDescriptor::SlotRegister();          // edi
   Label miss;
 
-  __ push(value);
+  if (StoreWithVectorDescriptor::kPassLastArgsOnStack) {
+    // Current stack layout:
+    // - esp[8]    -- value
+    // - esp[4]    -- slot
+    // - esp[0]    -- return address
+    STATIC_ASSERT(StoreDescriptor::kStackArgumentsCount == 2);
+    STATIC_ASSERT(StoreWithVectorDescriptor::kStackArgumentsCount == 3);
+    if (in_frame) {
+      __ RecordComment("[ StoreDescriptor -> StoreWithVectorDescriptor");
+      // If the vector is not on the stack, then insert the vector beneath
+      // return address in order to prepare for calling handler with
+      // StoreWithVector calling convention.
+      __ push(Operand(esp, 0));
+      __ mov(Operand(esp, 4), StoreWithVectorDescriptor::VectorRegister());
+      __ RecordComment("]");
+    } else {
+      __ mov(vector, Operand(esp, 1 * kPointerSize));
+    }
+    __ mov(slot, Operand(esp, 2 * kPointerSize));
+  }
 
   Register scratch = value;
   __ mov(scratch, FieldOperand(vector, slot, times_half_pointer_size,
@@ -3733,19 +3433,9 @@
   __ CompareRoot(scratch, Heap::kmegamorphic_symbolRootIndex);
   __ j(not_equal, &miss);
 
-  __ pop(value);
-  __ push(slot);
-  __ push(vector);
   masm->isolate()->store_stub_cache()->GenerateProbe(masm, receiver, key, slot,
                                                      no_reg);
-  __ pop(vector);
-  __ pop(slot);
-  Label no_pop_miss;
-  __ jmp(&no_pop_miss);
-
   __ bind(&miss);
-  __ pop(value);
-  __ bind(&no_pop_miss);
   StoreIC::GenerateMiss(masm);
 }
 
@@ -3767,17 +3457,13 @@
   Label load_smi_map, compare_map;
   Label transition_call;
   Label pop_and_miss;
-  ExternalReference virtual_register =
-      ExternalReference::virtual_handler_register(masm->isolate());
-  ExternalReference virtual_slot =
-      ExternalReference::virtual_slot_register(masm->isolate());
 
   __ push(receiver);
-  __ push(vector);
+  // Value, vector and slot are passed on the stack, so no need to save/restore
+  // them.
 
   Register receiver_map = receiver;
   Register cached_map = vector;
-  Register value = StoreDescriptor::ValueRegister();
 
   // Receiver might not be a heap object.
   __ JumpIfSmi(receiver, &load_smi_map);
@@ -3788,15 +3474,18 @@
   __ push(key);
   // Current stack layout:
   // - esp[0]    -- key
-  // - esp[4]    -- vector
-  // - esp[8]    -- receiver
-  // - esp[12]   -- value
-  // - esp[16]   -- return address
+  // - esp[4]    -- receiver
+  // - esp[8]    -- return address
+  // - esp[12]   -- vector
+  // - esp[16]   -- slot
+  // - esp[20]   -- value
   //
-  // Required stack layout for handler call:
+  // Required stack layout for handler call (see StoreWithVectorDescriptor):
   // - esp[0]    -- return address
-  // - receiver, key, value, vector, slot in registers.
-  // - handler in virtual register.
+  // - esp[4]    -- vector
+  // - esp[8]    -- slot
+  // - esp[12]   -- value
+  // - receiver, key, handler in registers.
   Register counter = key;
   __ mov(counter, Immediate(Smi::FromInt(0)));
   __ bind(&next_loop);
@@ -3811,43 +3500,57 @@
   __ mov(feedback, FieldOperand(feedback, counter, times_half_pointer_size,
                                 FixedArray::kHeaderSize + 2 * kPointerSize));
   __ pop(key);
-  __ pop(vector);
   __ pop(receiver);
   __ lea(feedback, FieldOperand(feedback, Code::kHeaderSize));
-  __ mov(Operand::StaticVariable(virtual_register), feedback);
-  __ pop(value);
-  __ jmp(Operand::StaticVariable(virtual_register));
+  __ jmp(feedback);
 
   __ bind(&transition_call);
   // Current stack layout:
   // - esp[0]    -- key
-  // - esp[4]    -- vector
-  // - esp[8]    -- receiver
-  // - esp[12]   -- value
-  // - esp[16]   -- return address
+  // - esp[4]    -- receiver
+  // - esp[8]    -- return address
+  // - esp[12]   -- vector
+  // - esp[16]   -- slot
+  // - esp[20]   -- value
   //
-  // Required stack layout for handler call:
+  // Required stack layout for handler call (see StoreTransitionDescriptor):
   // - esp[0]    -- return address
-  // - receiver, key, value, map, vector in registers.
-  // - handler and slot in virtual registers.
-  __ mov(Operand::StaticVariable(virtual_slot), slot);
+  // - esp[4]    -- vector
+  // - esp[8]    -- slot
+  // - esp[12]   -- value
+  // - receiver, key, map, handler in registers.
   __ mov(feedback, FieldOperand(feedback, counter, times_half_pointer_size,
                                 FixedArray::kHeaderSize + 2 * kPointerSize));
   __ lea(feedback, FieldOperand(feedback, Code::kHeaderSize));
-  __ mov(Operand::StaticVariable(virtual_register), feedback);
 
   __ mov(cached_map, FieldOperand(cached_map, WeakCell::kValueOffset));
   // The weak cell may have been cleared.
   __ JumpIfSmi(cached_map, &pop_and_miss);
-  DCHECK(!cached_map.is(VectorStoreTransitionDescriptor::MapRegister()));
-  __ mov(VectorStoreTransitionDescriptor::MapRegister(), cached_map);
+  DCHECK(!cached_map.is(StoreTransitionDescriptor::MapRegister()));
+  __ mov(StoreTransitionDescriptor::MapRegister(), cached_map);
 
-  // Pop key into place.
+  // Call store transition handler using StoreTransitionDescriptor calling
+  // convention.
   __ pop(key);
-  __ pop(vector);
   __ pop(receiver);
-  __ pop(value);
-  __ jmp(Operand::StaticVariable(virtual_register));
+  // Ensure that the transition handler we are going to call has the same
+  // number of stack arguments which means that we don't have to adapt them
+  // before the call.
+  STATIC_ASSERT(StoreWithVectorDescriptor::kStackArgumentsCount == 3);
+  STATIC_ASSERT(StoreTransitionDescriptor::kStackArgumentsCount == 3);
+  STATIC_ASSERT(StoreWithVectorDescriptor::kParameterCount -
+                    StoreWithVectorDescriptor::kValue ==
+                StoreTransitionDescriptor::kParameterCount -
+                    StoreTransitionDescriptor::kValue);
+  STATIC_ASSERT(StoreWithVectorDescriptor::kParameterCount -
+                    StoreWithVectorDescriptor::kSlot ==
+                StoreTransitionDescriptor::kParameterCount -
+                    StoreTransitionDescriptor::kSlot);
+  STATIC_ASSERT(StoreWithVectorDescriptor::kParameterCount -
+                    StoreWithVectorDescriptor::kVector ==
+                StoreTransitionDescriptor::kParameterCount -
+                    StoreTransitionDescriptor::kVector);
+  __ jmp(feedback);
 
   __ bind(&prepare_next);
   __ add(counter, Immediate(Smi::FromInt(3)));
@@ -3857,7 +3560,6 @@
   // We exhausted our array of map handler pairs.
   __ bind(&pop_and_miss);
   __ pop(key);
-  __ pop(vector);
   __ pop(receiver);
   __ jmp(miss);
 
@@ -3874,7 +3576,26 @@
   Register slot = StoreWithVectorDescriptor::SlotRegister();          // edi
   Label miss;
 
-  __ push(value);
+  if (StoreWithVectorDescriptor::kPassLastArgsOnStack) {
+    // Current stack layout:
+    // - esp[8]    -- value
+    // - esp[4]    -- slot
+    // - esp[0]    -- return address
+    STATIC_ASSERT(StoreDescriptor::kStackArgumentsCount == 2);
+    STATIC_ASSERT(StoreWithVectorDescriptor::kStackArgumentsCount == 3);
+    if (in_frame) {
+      __ RecordComment("[ StoreDescriptor -> StoreWithVectorDescriptor");
+      // If the vector is not on the stack, then insert the vector beneath
+      // return address in order to prepare for calling handler with
+      // StoreWithVector calling convention.
+      __ push(Operand(esp, 0));
+      __ mov(Operand(esp, 4), StoreWithVectorDescriptor::VectorRegister());
+      __ RecordComment("]");
+    } else {
+      __ mov(vector, Operand(esp, 1 * kPointerSize));
+    }
+    __ mov(slot, Operand(esp, 2 * kPointerSize));
+  }
 
   Register scratch = value;
   __ mov(scratch, FieldOperand(vector, slot, times_half_pointer_size,
@@ -3899,8 +3620,6 @@
   __ CompareRoot(scratch, Heap::kmegamorphic_symbolRootIndex);
   __ j(not_equal, &try_poly_name);
 
-  __ pop(value);
-
   Handle<Code> megamorphic_stub =
       KeyedStoreIC::ChooseMegamorphicStub(masm->isolate(), GetExtraICState());
   __ jmp(megamorphic_stub, RelocInfo::CODE_TARGET);
@@ -3917,7 +3636,6 @@
                              &miss);
 
   __ bind(&miss);
-  __ pop(value);
   KeyedStoreIC::GenerateMiss(masm);
 }
 
@@ -4564,7 +4282,7 @@
     // Fall back to %AllocateInNewSpace (if not too big).
     Label too_big_for_new_space;
     __ bind(&allocate);
-    __ cmp(ecx, Immediate(Page::kMaxRegularHeapObjectSize));
+    __ cmp(ecx, Immediate(kMaxRegularHeapObjectSize));
     __ j(greater, &too_big_for_new_space);
     {
       FrameScope scope(masm, StackFrame::INTERNAL);
@@ -4953,7 +4671,7 @@
   // Fall back to %AllocateInNewSpace (if not too big).
   Label too_big_for_new_space;
   __ bind(&allocate);
-  __ cmp(ecx, Immediate(Page::kMaxRegularHeapObjectSize));
+  __ cmp(ecx, Immediate(kMaxRegularHeapObjectSize));
   __ j(greater, &too_big_for_new_space);
   {
     FrameScope scope(masm, StackFrame::INTERNAL);
diff --git a/src/ia32/interface-descriptors-ia32.cc b/src/ia32/interface-descriptors-ia32.cc
index f1972b9..220484c 100644
--- a/src/ia32/interface-descriptors-ia32.cc
+++ b/src/ia32/interface-descriptors-ia32.cc
@@ -39,19 +39,11 @@
 
 const Register StoreWithVectorDescriptor::VectorRegister() { return ebx; }
 
-const Register VectorStoreTransitionDescriptor::SlotRegister() {
-  return no_reg;
-}
+const Register StoreTransitionDescriptor::SlotRegister() { return no_reg; }
 
+const Register StoreTransitionDescriptor::VectorRegister() { return ebx; }
 
-const Register VectorStoreTransitionDescriptor::VectorRegister() { return ebx; }
-
-
-const Register VectorStoreTransitionDescriptor::MapRegister() { return edi; }
-
-
-const Register StoreTransitionDescriptor::MapRegister() { return ebx; }
-
+const Register StoreTransitionDescriptor::MapRegister() { return edi; }
 
 const Register StoreGlobalViaContextDescriptor::SlotRegister() { return ebx; }
 const Register StoreGlobalViaContextDescriptor::ValueRegister() { return eax; }
@@ -365,7 +357,7 @@
   data->InitializePlatformSpecific(arraysize(registers), registers);
 }
 
-void ApiCallbackDescriptorBase::InitializePlatformSpecific(
+void ApiCallbackDescriptor::InitializePlatformSpecific(
     CallInterfaceDescriptorData* data) {
   Register registers[] = {
       edi,  // callee
@@ -400,7 +392,19 @@
       eax,  // argument count (not including receiver)
       edx,  // new target
       edi,  // constructor
-      ebx,  // address of first argument
+      ebx,  // allocation site feedback
+      ecx,  // address of first argument
+  };
+  data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
+void InterpreterPushArgsAndConstructArrayDescriptor::InitializePlatformSpecific(
+    CallInterfaceDescriptorData* data) {
+  Register registers[] = {
+      eax,  // argument count (not including receiver)
+      edx,  // target to the call. It is checked to be Array function.
+      ebx,  // allocation site feedback
+      ecx,  // address of first argument
   };
   data->InitializePlatformSpecific(arraysize(registers), registers);
 }
diff --git a/src/ia32/macro-assembler-ia32.cc b/src/ia32/macro-assembler-ia32.cc
index 83c7ce8..2bd8760 100644
--- a/src/ia32/macro-assembler-ia32.cc
+++ b/src/ia32/macro-assembler-ia32.cc
@@ -173,9 +173,8 @@
 void MacroAssembler::InNewSpace(Register object, Register scratch, Condition cc,
                                 Label* condition_met,
                                 Label::Distance distance) {
-  const int mask =
-      (1 << MemoryChunk::IN_FROM_SPACE) | (1 << MemoryChunk::IN_TO_SPACE);
-  CheckPageFlag(object, scratch, mask, cc, condition_met, distance);
+  CheckPageFlag(object, scratch, MemoryChunk::kIsInNewSpaceMask, cc,
+                condition_met, distance);
 }
 
 
@@ -1545,7 +1544,7 @@
                               Label* gc_required,
                               AllocationFlags flags) {
   DCHECK((flags & (RESULT_CONTAINS_TOP | SIZE_IN_WORDS)) == 0);
-  DCHECK(object_size <= Page::kMaxRegularHeapObjectSize);
+  DCHECK(object_size <= kMaxRegularHeapObjectSize);
   DCHECK((flags & ALLOCATION_FOLDED) == 0);
   if (!FLAG_inline_new) {
     if (emit_debug_code()) {
diff --git a/src/ia32/macro-assembler-ia32.h b/src/ia32/macro-assembler-ia32.h
index 08cc7ce..2220ca7 100644
--- a/src/ia32/macro-assembler-ia32.h
+++ b/src/ia32/macro-assembler-ia32.h
@@ -798,6 +798,24 @@
   // may be bigger than 2^16 - 1.  Requires a scratch register.
   void Ret(int bytes_dropped, Register scratch);
 
+  // Emit code that loads |parameter_index|'th parameter from the stack to
+  // the register according to the CallInterfaceDescriptor definition.
+  // |sp_to_caller_sp_offset_in_words| specifies the number of words pushed
+  // below the caller's sp (on ia32 it's at least return address).
+  template <class Descriptor>
+  void LoadParameterFromStack(
+      Register reg, typename Descriptor::ParameterIndices parameter_index,
+      int sp_to_ra_offset_in_words = 1) {
+    DCHECK(Descriptor::kPassLastArgsOnStack);
+    DCHECK_LT(parameter_index, Descriptor::kParameterCount);
+    DCHECK_LE(Descriptor::kParameterCount - Descriptor::kStackArgumentsCount,
+              parameter_index);
+    int offset = (Descriptor::kParameterCount - parameter_index - 1 +
+                  sp_to_ra_offset_in_words) *
+                 kPointerSize;
+    mov(reg, Operand(esp, offset));
+  }
+
   // Emit code to discard a non-negative number of pointer-sized elements
   // from the stack, clobbering only the esp register.
   void Drop(int element_count);
diff --git a/src/ic/arm/handler-compiler-arm.cc b/src/ic/arm/handler-compiler-arm.cc
index 4ed765e..691fe3d 100644
--- a/src/ic/arm/handler-compiler-arm.cc
+++ b/src/ic/arm/handler-compiler-arm.cc
@@ -111,15 +111,21 @@
 void PropertyHandlerCompiler::PushVectorAndSlot(Register vector,
                                                 Register slot) {
   MacroAssembler* masm = this->masm();
-  __ push(vector);
+  STATIC_ASSERT(LoadWithVectorDescriptor::kSlot <
+                LoadWithVectorDescriptor::kVector);
+  STATIC_ASSERT(StoreWithVectorDescriptor::kSlot <
+                StoreWithVectorDescriptor::kVector);
+  STATIC_ASSERT(StoreTransitionDescriptor::kSlot <
+                StoreTransitionDescriptor::kVector);
   __ push(slot);
+  __ push(vector);
 }
 
 
 void PropertyHandlerCompiler::PopVectorAndSlot(Register vector, Register slot) {
   MacroAssembler* masm = this->masm();
-  __ pop(slot);
   __ pop(vector);
+  __ pop(slot);
 }
 
 
@@ -129,6 +135,13 @@
   __ add(sp, sp, Operand(2 * kPointerSize));
 }
 
+void PropertyHandlerCompiler::PushReturnAddress(Register tmp) {
+  // No-op. Return address is in lr register.
+}
+
+void PropertyHandlerCompiler::PopReturnAddress(Register tmp) {
+  // No-op. Return address is in lr register.
+}
 
 void PropertyHandlerCompiler::GenerateDictionaryNegativeLookup(
     MacroAssembler* masm, Label* miss_label, Register receiver,
@@ -330,24 +343,6 @@
   __ TailCallStub(&stub);
 }
 
-
-static void StoreIC_PushArgs(MacroAssembler* masm) {
-  __ Push(StoreDescriptor::ReceiverRegister(), StoreDescriptor::NameRegister(),
-          StoreDescriptor::ValueRegister(),
-          StoreWithVectorDescriptor::SlotRegister(),
-          StoreWithVectorDescriptor::VectorRegister());
-}
-
-
-void ElementHandlerCompiler::GenerateStoreSlow(MacroAssembler* masm) {
-  StoreIC_PushArgs(masm);
-
-  // The slow case calls into the runtime to complete the store without causing
-  // an IC miss that would otherwise cause a transition to the generic stub.
-  __ TailCallRuntime(Runtime::kKeyedStoreIC_Slow);
-}
-
-
 #undef __
 #define __ ACCESS_MASM(masm())
 
@@ -366,12 +361,6 @@
 }
 
 
-void NamedStoreHandlerCompiler::RearrangeVectorAndSlot(
-    Register current_map, Register destination_map) {
-  DCHECK(false);  // Not implemented.
-}
-
-
 void NamedStoreHandlerCompiler::GenerateRestoreMap(Handle<Map> transition,
                                                    Register map_reg,
                                                    Register scratch,
@@ -629,6 +618,9 @@
   __ TailCallRuntime(Runtime::kLoadPropertyWithInterceptor);
 }
 
+void NamedStoreHandlerCompiler::ZapStackArgumentsRegisterAliases() {
+  STATIC_ASSERT(!StoreWithVectorDescriptor::kPassLastArgsOnStack);
+}
 
 Handle<Code> NamedStoreHandlerCompiler::CompileStoreCallback(
     Handle<JSObject> object, Handle<Name> name, Handle<AccessorInfo> callback,
diff --git a/src/ic/arm/ic-arm.cc b/src/ic/arm/ic-arm.cc
index fee6ebf..10ec578 100644
--- a/src/ic/arm/ic-arm.cc
+++ b/src/ic/arm/ic-arm.cc
@@ -441,10 +441,11 @@
 
 
 static void StoreIC_PushArgs(MacroAssembler* masm) {
-  __ Push(StoreDescriptor::ReceiverRegister(), StoreDescriptor::NameRegister(),
-          StoreDescriptor::ValueRegister(),
+  __ Push(StoreWithVectorDescriptor::ValueRegister(),
           StoreWithVectorDescriptor::SlotRegister(),
-          StoreWithVectorDescriptor::VectorRegister());
+          StoreWithVectorDescriptor::VectorRegister(),
+          StoreWithVectorDescriptor::ReceiverRegister(),
+          StoreWithVectorDescriptor::NameRegister());
 }
 
 
@@ -454,6 +455,13 @@
   __ TailCallRuntime(Runtime::kKeyedStoreIC_Miss);
 }
 
+void KeyedStoreIC::GenerateSlow(MacroAssembler* masm) {
+  StoreIC_PushArgs(masm);
+
+  // The slow case calls into the runtime to complete the store without causing
+  // an IC miss that would otherwise cause a transition to the generic stub.
+  __ TailCallRuntime(Runtime::kKeyedStoreIC_Slow);
+}
 
 static void KeyedStoreGenerateMegamorphicHelper(
     MacroAssembler* masm, Label* fast_object, Label* fast_double, Label* slow,
diff --git a/src/ic/arm64/handler-compiler-arm64.cc b/src/ic/arm64/handler-compiler-arm64.cc
index 277b4e7..3f97fdd 100644
--- a/src/ic/arm64/handler-compiler-arm64.cc
+++ b/src/ic/arm64/handler-compiler-arm64.cc
@@ -20,15 +20,21 @@
 void PropertyHandlerCompiler::PushVectorAndSlot(Register vector,
                                                 Register slot) {
   MacroAssembler* masm = this->masm();
-  __ Push(vector);
+  STATIC_ASSERT(LoadWithVectorDescriptor::kSlot <
+                LoadWithVectorDescriptor::kVector);
+  STATIC_ASSERT(StoreWithVectorDescriptor::kSlot <
+                StoreWithVectorDescriptor::kVector);
+  STATIC_ASSERT(StoreTransitionDescriptor::kSlot <
+                StoreTransitionDescriptor::kVector);
   __ Push(slot);
+  __ Push(vector);
 }
 
 
 void PropertyHandlerCompiler::PopVectorAndSlot(Register vector, Register slot) {
   MacroAssembler* masm = this->masm();
-  __ Pop(slot);
   __ Pop(vector);
+  __ Pop(slot);
 }
 
 
@@ -38,6 +44,13 @@
   __ Drop(2);
 }
 
+void PropertyHandlerCompiler::PushReturnAddress(Register tmp) {
+  // No-op. Return address is in lr register.
+}
+
+void PropertyHandlerCompiler::PopReturnAddress(Register tmp) {
+  // No-op. Return address is in lr register.
+}
 
 void PropertyHandlerCompiler::GenerateDictionaryNegativeLookup(
     MacroAssembler* masm, Label* miss_label, Register receiver,
@@ -323,25 +336,6 @@
   __ Ret();
 }
 
-
-static void StoreIC_PushArgs(MacroAssembler* masm) {
-  __ Push(StoreDescriptor::ReceiverRegister(), StoreDescriptor::NameRegister(),
-          StoreDescriptor::ValueRegister(),
-          StoreWithVectorDescriptor::SlotRegister(),
-          StoreWithVectorDescriptor::VectorRegister());
-}
-
-
-void ElementHandlerCompiler::GenerateStoreSlow(MacroAssembler* masm) {
-  ASM_LOCATION("ElementHandlerCompiler::GenerateStoreSlow");
-  StoreIC_PushArgs(masm);
-
-  // The slow case calls into the runtime to complete the store without causing
-  // an IC miss that would otherwise cause a transition to the generic stub.
-  __ TailCallRuntime(Runtime::kKeyedStoreIC_Slow);
-}
-
-
 #undef __
 #define __ ACCESS_MASM(masm())
 
@@ -398,12 +392,6 @@
 }
 
 
-void NamedStoreHandlerCompiler::RearrangeVectorAndSlot(
-    Register current_map, Register destination_map) {
-  DCHECK(false);  // Not implemented.
-}
-
-
 void NamedStoreHandlerCompiler::GenerateRestoreMap(Handle<Map> transition,
                                                    Register map_reg,
                                                    Register scratch,
@@ -664,6 +652,9 @@
   __ TailCallRuntime(Runtime::kLoadPropertyWithInterceptor);
 }
 
+void NamedStoreHandlerCompiler::ZapStackArgumentsRegisterAliases() {
+  STATIC_ASSERT(!StoreWithVectorDescriptor::kPassLastArgsOnStack);
+}
 
 Handle<Code> NamedStoreHandlerCompiler::CompileStoreCallback(
     Handle<JSObject> object, Handle<Name> name, Handle<AccessorInfo> callback,
diff --git a/src/ic/arm64/ic-arm64.cc b/src/ic/arm64/ic-arm64.cc
index 9d66eb2..fa9d7c1 100644
--- a/src/ic/arm64/ic-arm64.cc
+++ b/src/ic/arm64/ic-arm64.cc
@@ -445,10 +445,11 @@
 
 
 static void StoreIC_PushArgs(MacroAssembler* masm) {
-  __ Push(StoreDescriptor::ReceiverRegister(), StoreDescriptor::NameRegister(),
-          StoreDescriptor::ValueRegister(),
+  __ Push(StoreWithVectorDescriptor::ValueRegister(),
           StoreWithVectorDescriptor::SlotRegister(),
-          StoreWithVectorDescriptor::VectorRegister());
+          StoreWithVectorDescriptor::VectorRegister(),
+          StoreWithVectorDescriptor::ReceiverRegister(),
+          StoreWithVectorDescriptor::NameRegister());
 }
 
 
@@ -458,6 +459,14 @@
   __ TailCallRuntime(Runtime::kKeyedStoreIC_Miss);
 }
 
+void KeyedStoreIC::GenerateSlow(MacroAssembler* masm) {
+  ASM_LOCATION("KeyedStoreIC::GenerateSlow");
+  StoreIC_PushArgs(masm);
+
+  // The slow case calls into the runtime to complete the store without causing
+  // an IC miss that would otherwise cause a transition to the generic stub.
+  __ TailCallRuntime(Runtime::kKeyedStoreIC_Slow);
+}
 
 static void KeyedStoreGenerateMegamorphicHelper(
     MacroAssembler* masm, Label* fast_object, Label* fast_double, Label* slow,
diff --git a/src/ic/handler-compiler.cc b/src/ic/handler-compiler.cc
index b6b81de..3b2e115 100644
--- a/src/ic/handler-compiler.cc
+++ b/src/ic/handler-compiler.cc
@@ -129,13 +129,13 @@
 
 Register PropertyHandlerCompiler::Frontend(Handle<Name> name) {
   Label miss;
-  if (IC::ICUseVector(kind())) {
+  if (IC::ShouldPushPopSlotAndVector(kind())) {
     PushVectorAndSlot();
   }
   Register reg = FrontendHeader(receiver(), name, &miss, RETURN_HOLDER);
   FrontendFooter(name, &miss);
   // The footer consumes the vector and slot from the stack if miss occurs.
-  if (IC::ICUseVector(kind())) {
+  if (IC::ShouldPushPopSlotAndVector(kind())) {
     DiscardVectorAndSlot();
   }
   return reg;
@@ -209,12 +209,12 @@
 Handle<Code> NamedLoadHandlerCompiler::CompileLoadNonexistent(
     Handle<Name> name) {
   Label miss;
-  if (IC::ICUseVector(kind())) {
+  if (IC::ShouldPushPopSlotAndVector(kind())) {
     DCHECK(kind() == Code::LOAD_IC);
     PushVectorAndSlot();
   }
   NonexistentFrontendHeader(name, &miss, scratch2(), scratch3());
-  if (IC::ICUseVector(kind())) {
+  if (IC::ShouldPushPopSlotAndVector(kind())) {
     DiscardVectorAndSlot();
   }
   GenerateLoadConstant(isolate()->factory()->undefined_value());
@@ -247,7 +247,7 @@
 
 
 void NamedLoadHandlerCompiler::InterceptorVectorSlotPush(Register holder_reg) {
-  if (IC::ICUseVector(kind())) {
+  if (IC::ShouldPushPopSlotAndVector(kind())) {
     if (holder_reg.is(receiver())) {
       PushVectorAndSlot();
     } else {
@@ -260,7 +260,7 @@
 
 void NamedLoadHandlerCompiler::InterceptorVectorSlotPop(Register holder_reg,
                                                         PopMode mode) {
-  if (IC::ICUseVector(kind())) {
+  if (IC::ShouldPushPopSlotAndVector(kind())) {
     if (mode == DISCARD) {
       DiscardVectorAndSlot();
     } else {
@@ -438,7 +438,31 @@
     Handle<Map> transition, Handle<Name> name) {
   Label miss;
 
-  PushVectorAndSlot();
+  // Ensure that the StoreTransitionStub we are going to call has the same
+  // number of stack arguments. This means that we don't have to adapt them
+  // if we decide to call the transition or miss stub.
+  STATIC_ASSERT(Descriptor::kStackArgumentsCount ==
+                StoreTransitionDescriptor::kStackArgumentsCount);
+  STATIC_ASSERT(Descriptor::kStackArgumentsCount == 0 ||
+                Descriptor::kStackArgumentsCount == 3);
+  STATIC_ASSERT(Descriptor::kParameterCount - Descriptor::kValue ==
+                StoreTransitionDescriptor::kParameterCount -
+                    StoreTransitionDescriptor::kValue);
+  STATIC_ASSERT(Descriptor::kParameterCount - Descriptor::kSlot ==
+                StoreTransitionDescriptor::kParameterCount -
+                    StoreTransitionDescriptor::kSlot);
+  STATIC_ASSERT(Descriptor::kParameterCount - Descriptor::kVector ==
+                StoreTransitionDescriptor::kParameterCount -
+                    StoreTransitionDescriptor::kVector);
+
+  if (Descriptor::kPassLastArgsOnStack) {
+    __ LoadParameterFromStack<Descriptor>(value(), Descriptor::kValue);
+  }
+
+  bool need_save_restore = IC::ShouldPushPopSlotAndVector(kind());
+  if (need_save_restore) {
+    PushVectorAndSlot();
+  }
 
   // Check that we are allowed to write this.
   bool is_nonexistent = holder()->map() == transition->GetBackPointer();
@@ -470,23 +494,17 @@
   DCHECK(!transition->is_access_check_needed());
 
   // Call to respective StoreTransitionStub.
-  bool virtual_args = StoreTransitionHelper::HasVirtualSlotArg();
-  Register map_reg = StoreTransitionHelper::MapRegister();
+  Register map_reg = StoreTransitionDescriptor::MapRegister();
 
   if (details.type() == DATA_CONSTANT) {
     DCHECK(descriptors->GetValue(descriptor)->IsJSFunction());
-    Register tmp =
-        virtual_args ? StoreWithVectorDescriptor::VectorRegister() : map_reg;
-    GenerateRestoreMap(transition, tmp, scratch2(), &miss);
-    GenerateConstantCheck(tmp, descriptor, value(), scratch2(), &miss);
-    if (virtual_args) {
-      // This will move the map from tmp into map_reg.
-      RearrangeVectorAndSlot(tmp, map_reg);
-    } else {
+    GenerateRestoreMap(transition, map_reg, scratch1(), &miss);
+    GenerateConstantCheck(map_reg, descriptor, value(), scratch1(), &miss);
+    if (need_save_restore) {
       PopVectorAndSlot();
     }
     GenerateRestoreName(name);
-    StoreTransitionStub stub(isolate());
+    StoreMapStub stub(isolate());
     GenerateTailCall(masm(), stub.GetCode());
 
   } else {
@@ -498,24 +516,29 @@
         Map::cast(transition->GetBackPointer())->unused_property_fields() == 0
             ? StoreTransitionStub::ExtendStorageAndStoreMapAndValue
             : StoreTransitionStub::StoreMapAndValue;
-
-    Register tmp =
-        virtual_args ? StoreWithVectorDescriptor::VectorRegister() : map_reg;
-    GenerateRestoreMap(transition, tmp, scratch2(), &miss);
-    if (virtual_args) {
-      RearrangeVectorAndSlot(tmp, map_reg);
-    } else {
+    GenerateRestoreMap(transition, map_reg, scratch1(), &miss);
+    if (need_save_restore) {
       PopVectorAndSlot();
     }
-    GenerateRestoreName(name);
-    StoreTransitionStub stub(isolate(),
-                             FieldIndex::ForDescriptor(*transition, descriptor),
-                             representation, store_mode);
+    // We need to pass name on the stack.
+    PopReturnAddress(this->name());
+    __ Push(name);
+    PushReturnAddress(this->name());
+
+    FieldIndex index = FieldIndex::ForDescriptor(*transition, descriptor);
+    __ Move(StoreNamedTransitionDescriptor::FieldOffsetRegister(),
+            Smi::FromInt(index.index() << kPointerSizeLog2));
+
+    StoreTransitionStub stub(isolate(), index.is_inobject(), representation,
+                             store_mode);
     GenerateTailCall(masm(), stub.GetCode());
   }
 
-  GenerateRestoreName(&miss, name);
-  PopVectorAndSlot();
+  __ bind(&miss);
+  if (need_save_restore) {
+    PopVectorAndSlot();
+  }
+  GenerateRestoreName(name);
   TailCallBuiltin(masm(), MissBuiltin(kind()));
 
   return GetCode(kind(), name);
@@ -534,7 +557,10 @@
   FieldType* field_type = *it->GetFieldType();
   bool need_save_restore = false;
   if (RequiresFieldTypeChecks(field_type)) {
-    need_save_restore = IC::ICUseVector(kind());
+    need_save_restore = IC::ShouldPushPopSlotAndVector(kind());
+    if (Descriptor::kPassLastArgsOnStack) {
+      __ LoadParameterFromStack<Descriptor>(value(), Descriptor::kValue);
+    }
     if (need_save_restore) PushVectorAndSlot();
     GenerateFieldTypeChecks(field_type, value(), &miss);
     if (need_save_restore) PopVectorAndSlot();
@@ -568,6 +594,9 @@
     GenerateTailCall(masm(), slow_stub);
   }
   Register holder = Frontend(name);
+  if (Descriptor::kPassLastArgsOnStack) {
+    __ LoadParameterFromStack<Descriptor>(value(), Descriptor::kValue);
+  }
   GenerateApiAccessorCall(masm(), call_optimization, handle(object->map()),
                           receiver(), scratch2(), true, value(), holder,
                           accessor_index);
@@ -601,13 +630,21 @@
     TRACE_HANDLER_STATS(isolate, KeyedLoadIC_KeyedLoadSloppyArgumentsStub);
     return KeyedLoadSloppyArgumentsStub(isolate).GetCode();
   }
+  bool is_js_array = instance_type == JS_ARRAY_TYPE;
   if (elements_kind == DICTIONARY_ELEMENTS) {
+    if (FLAG_tf_load_ic_stub) {
+      int config = KeyedLoadElementsKind::encode(elements_kind) |
+                   KeyedLoadConvertHole::encode(false) |
+                   KeyedLoadIsJsArray::encode(is_js_array) |
+                   LoadHandlerTypeBit::encode(kLoadICHandlerForElements);
+      return handle(Smi::FromInt(config), isolate);
+    }
     TRACE_HANDLER_STATS(isolate, KeyedLoadIC_LoadDictionaryElementStub);
     return LoadDictionaryElementStub(isolate).GetCode();
   }
   DCHECK(IsFastElementsKind(elements_kind) ||
          IsFixedTypedArrayElementsKind(elements_kind));
-  bool is_js_array = instance_type == JS_ARRAY_TYPE;
+  // TODO(jkummerow): Use IsHoleyElementsKind(elements_kind).
   bool convert_hole_to_undefined =
       is_js_array && elements_kind == FAST_HOLEY_ELEMENTS &&
       *receiver_map == isolate->get_initial_js_array_map(elements_kind);
diff --git a/src/ic/handler-compiler.h b/src/ic/handler-compiler.h
index 525889b..63ca050 100644
--- a/src/ic/handler-compiler.h
+++ b/src/ic/handler-compiler.h
@@ -53,6 +53,9 @@
 
   void DiscardVectorAndSlot();
 
+  void PushReturnAddress(Register tmp);
+  void PopReturnAddress(Register tmp);
+
   // TODO(verwaest): Make non-static.
   static void GenerateApiAccessorCall(MacroAssembler* masm,
                                       const CallOptimization& optimization,
@@ -212,13 +215,24 @@
 
 class NamedStoreHandlerCompiler : public PropertyHandlerCompiler {
  public:
+  // All store handlers use StoreWithVectorDescriptor calling convention.
+  typedef StoreWithVectorDescriptor Descriptor;
+
   explicit NamedStoreHandlerCompiler(Isolate* isolate, Handle<Map> map,
                                      Handle<JSObject> holder)
       : PropertyHandlerCompiler(isolate, Code::STORE_IC, map, holder,
-                                kCacheOnReceiver) {}
+                                kCacheOnReceiver) {
+#ifdef DEBUG
+    if (Descriptor::kPassLastArgsOnStack) {
+      ZapStackArgumentsRegisterAliases();
+    }
+#endif
+  }
 
   virtual ~NamedStoreHandlerCompiler() {}
 
+  void ZapStackArgumentsRegisterAliases();
+
   Handle<Code> CompileStoreTransition(Handle<Map> transition,
                                       Handle<Name> name);
   Handle<Code> CompileStoreField(LookupIterator* it);
@@ -249,10 +263,6 @@
   virtual void FrontendFooter(Handle<Name> name, Label* miss);
   void GenerateRestoreName(Label* label, Handle<Name> name);
 
-  // Pop the vector and slot into appropriate registers, moving the map in
-  // the process. (This is an accomodation for register pressure on ia32).
-  void RearrangeVectorAndSlot(Register current_map, Register destination_map);
-
  private:
   void GenerateRestoreName(Handle<Name> name);
   void GenerateRestoreMap(Handle<Map> transition, Register map_reg,
@@ -283,8 +293,6 @@
                                             Isolate* isolate);
   void CompileElementHandlers(MapHandleList* receiver_maps,
                               List<Handle<Object>>* handlers);
-
-  static void GenerateStoreSlow(MacroAssembler* masm);
 };
 }  // namespace internal
 }  // namespace v8
diff --git a/src/ic/ia32/handler-compiler-ia32.cc b/src/ic/ia32/handler-compiler-ia32.cc
index b332f11..06c58b8 100644
--- a/src/ic/ia32/handler-compiler-ia32.cc
+++ b/src/ic/ia32/handler-compiler-ia32.cc
@@ -59,15 +59,21 @@
 void PropertyHandlerCompiler::PushVectorAndSlot(Register vector,
                                                 Register slot) {
   MacroAssembler* masm = this->masm();
-  __ push(vector);
+  STATIC_ASSERT(LoadWithVectorDescriptor::kSlot <
+                LoadWithVectorDescriptor::kVector);
+  STATIC_ASSERT(StoreWithVectorDescriptor::kSlot <
+                StoreWithVectorDescriptor::kVector);
+  STATIC_ASSERT(StoreTransitionDescriptor::kSlot <
+                StoreTransitionDescriptor::kVector);
   __ push(slot);
+  __ push(vector);
 }
 
 
 void PropertyHandlerCompiler::PopVectorAndSlot(Register vector, Register slot) {
   MacroAssembler* masm = this->masm();
-  __ pop(slot);
   __ pop(vector);
+  __ pop(slot);
 }
 
 
@@ -77,6 +83,15 @@
   __ add(esp, Immediate(2 * kPointerSize));
 }
 
+void PropertyHandlerCompiler::PushReturnAddress(Register tmp) {
+  MacroAssembler* masm = this->masm();
+  __ push(tmp);
+}
+
+void PropertyHandlerCompiler::PopReturnAddress(Register tmp) {
+  MacroAssembler* masm = this->masm();
+  __ pop(tmp);
+}
 
 void PropertyHandlerCompiler::GenerateDictionaryNegativeLookup(
     MacroAssembler* masm, Label* miss_label, Register receiver,
@@ -150,12 +165,16 @@
   DCHECK(!accessor_holder.is(scratch));
   // Copy return value.
   __ pop(scratch);
-  // receiver
-  __ push(receiver);
-  // Write the arguments to stack frame.
+
   if (is_store) {
-    DCHECK(!receiver.is(store_parameter));
-    DCHECK(!scratch.is(store_parameter));
+    // Discard stack arguments.
+    __ add(esp, Immediate(StoreWithVectorDescriptor::kStackArgumentsCount *
+                          kPointerSize));
+  }
+  // Write the receiver and arguments to stack frame.
+  __ push(receiver);
+  if (is_store) {
+    DCHECK(!AreAliased(receiver, scratch, store_parameter));
     __ push(store_parameter);
   }
   __ push(scratch);
@@ -252,8 +271,13 @@
     MacroAssembler* masm, Handle<Map> map, Register receiver, Register holder,
     int accessor_index, int expected_arguments, Register scratch) {
   // ----------- S t a t e -------------
-  //  -- esp[0] : return address
+  //  -- esp[12] : value
+  //  -- esp[8]  : slot
+  //  -- esp[4]  : vector
+  //  -- esp[0]  : return address
   // -----------------------------------
+  __ LoadParameterFromStack<Descriptor>(value(), Descriptor::kValue);
+
   {
     FrameScope scope(masm, StackFrame::INTERNAL);
 
@@ -290,7 +314,14 @@
     // Restore context register.
     __ pop(esi);
   }
-  __ ret(0);
+  if (accessor_index >= 0) {
+    __ ret(StoreWithVectorDescriptor::kStackArgumentsCount * kPointerSize);
+  } else {
+    // If we generate a global code snippet for deoptimization only, don't try
+    // to drop stack arguments for the StoreIC because they are not a part of
+    // expression stack and deoptimizer does not reconstruct them.
+    __ ret(0);
+  }
 }
 
 
@@ -316,32 +347,6 @@
   __ CallRuntime(id);
 }
 
-
-static void StoreIC_PushArgs(MacroAssembler* masm) {
-  Register receiver = StoreDescriptor::ReceiverRegister();
-  Register name = StoreDescriptor::NameRegister();
-  Register value = StoreDescriptor::ValueRegister();
-  Register slot = StoreWithVectorDescriptor::SlotRegister();
-  Register vector = StoreWithVectorDescriptor::VectorRegister();
-
-  __ xchg(receiver, Operand(esp, 0));
-  __ push(name);
-  __ push(value);
-  __ push(slot);
-  __ push(vector);
-  __ push(receiver);  // which contains the return address.
-}
-
-
-void ElementHandlerCompiler::GenerateStoreSlow(MacroAssembler* masm) {
-  // Return address is on the stack.
-  StoreIC_PushArgs(masm);
-
-  // Do tail-call to runtime routine.
-  __ TailCallRuntime(Runtime::kKeyedStoreIC_Slow);
-}
-
-
 #undef __
 #define __ ACCESS_MASM(masm())
 
@@ -360,19 +365,6 @@
 }
 
 
-void NamedStoreHandlerCompiler::RearrangeVectorAndSlot(
-    Register current_map, Register destination_map) {
-  DCHECK(destination_map.is(StoreTransitionHelper::MapRegister()));
-  DCHECK(current_map.is(StoreTransitionHelper::VectorRegister()));
-  ExternalReference virtual_slot =
-      ExternalReference::virtual_slot_register(isolate());
-  __ mov(destination_map, current_map);
-  __ pop(current_map);
-  __ mov(Operand::StaticVariable(virtual_slot), current_map);
-  __ pop(current_map);  // put vector in place.
-}
-
-
 void NamedStoreHandlerCompiler::GenerateRestoreMap(Handle<Map> transition,
                                                    Register map_reg,
                                                    Register scratch,
@@ -532,7 +524,7 @@
     Label success;
     __ jmp(&success);
     __ bind(miss);
-    if (IC::ICUseVector(kind())) {
+    if (IC::ShouldPushPopSlotAndVector(kind())) {
       DCHECK(kind() == Code::LOAD_IC);
       PopVectorAndSlot();
     }
@@ -547,7 +539,7 @@
     Label success;
     __ jmp(&success);
     GenerateRestoreName(miss, name);
-    if (IC::ICUseVector(kind())) PopVectorAndSlot();
+    DCHECK(!IC::ShouldPushPopSlotAndVector(kind()));
     TailCallBuiltin(masm(), MissBuiltin(kind()));
     __ bind(&success);
   }
@@ -641,13 +633,26 @@
   __ TailCallRuntime(Runtime::kLoadPropertyWithInterceptor);
 }
 
+void NamedStoreHandlerCompiler::ZapStackArgumentsRegisterAliases() {
+  // Zap register aliases of the arguments passed on the stack to ensure they
+  // are properly loaded by the handler (debug-only).
+  STATIC_ASSERT(Descriptor::kPassLastArgsOnStack);
+  STATIC_ASSERT(Descriptor::kStackArgumentsCount == 3);
+  __ mov(Descriptor::ValueRegister(), Immediate(kDebugZapValue));
+  __ mov(Descriptor::SlotRegister(), Immediate(kDebugZapValue));
+  __ mov(Descriptor::VectorRegister(), Immediate(kDebugZapValue));
+}
 
 Handle<Code> NamedStoreHandlerCompiler::CompileStoreCallback(
     Handle<JSObject> object, Handle<Name> name, Handle<AccessorInfo> callback,
     LanguageMode language_mode) {
   Register holder_reg = Frontend(name);
+  __ LoadParameterFromStack<Descriptor>(value(), Descriptor::kValue);
 
   __ pop(scratch1());  // remove the return address
+  // Discard stack arguments.
+  __ add(esp, Immediate(StoreWithVectorDescriptor::kStackArgumentsCount *
+                        kPointerSize));
   __ push(receiver());
   __ push(holder_reg);
   // If the callback cannot leak, then push the callback directly,
@@ -679,7 +684,7 @@
 Handle<Code> NamedLoadHandlerCompiler::CompileLoadGlobal(
     Handle<PropertyCell> cell, Handle<Name> name, bool is_configurable) {
   Label miss;
-  if (IC::ICUseVector(kind())) {
+  if (IC::ShouldPushPopSlotAndVector(kind())) {
     PushVectorAndSlot();
   }
   FrontendHeader(receiver(), name, &miss, DONT_RETURN_ANYTHING);
@@ -701,7 +706,7 @@
   Counters* counters = isolate()->counters();
   __ IncrementCounter(counters->ic_named_load_global_stub(), 1);
   // The code above already loads the result into the return register.
-  if (IC::ICUseVector(kind())) {
+  if (IC::ShouldPushPopSlotAndVector(kind())) {
     DiscardVectorAndSlot();
   }
   __ ret(0);
diff --git a/src/ic/ia32/ic-compiler-ia32.cc b/src/ic/ia32/ic-compiler-ia32.cc
index d93b67b..a52f046 100644
--- a/src/ic/ia32/ic-compiler-ia32.cc
+++ b/src/ic/ia32/ic-compiler-ia32.cc
@@ -15,14 +15,21 @@
 
 void PropertyICCompiler::GenerateRuntimeSetProperty(
     MacroAssembler* masm, LanguageMode language_mode) {
-  // Return address is on the stack.
-  DCHECK(!ebx.is(StoreDescriptor::ReceiverRegister()) &&
-         !ebx.is(StoreDescriptor::NameRegister()) &&
-         !ebx.is(StoreDescriptor::ValueRegister()));
+  typedef StoreWithVectorDescriptor Descriptor;
+  STATIC_ASSERT(Descriptor::kStackArgumentsCount == 3);
+  // ----------- S t a t e -------------
+  //  -- esp[12] : value
+  //  -- esp[8]  : slot
+  //  -- esp[4]  : vector
+  //  -- esp[0]  : return address
+  // -----------------------------------
+  __ LoadParameterFromStack<Descriptor>(Descriptor::ValueRegister(),
+                                        Descriptor::kValue);
+
+  __ mov(Operand(esp, 12), Descriptor::ReceiverRegister());
+  __ mov(Operand(esp, 8), Descriptor::NameRegister());
+  __ mov(Operand(esp, 4), Descriptor::ValueRegister());
   __ pop(ebx);
-  __ push(StoreDescriptor::ReceiverRegister());
-  __ push(StoreDescriptor::NameRegister());
-  __ push(StoreDescriptor::ValueRegister());
   __ push(Immediate(Smi::FromInt(language_mode)));
   __ push(ebx);  // return address
 
diff --git a/src/ic/ia32/ic-ia32.cc b/src/ic/ia32/ic-ia32.cc
index 0550d92..b7496d4 100644
--- a/src/ic/ia32/ic-ia32.cc
+++ b/src/ic/ia32/ic-ia32.cc
@@ -409,7 +409,7 @@
   }
   // It's irrelevant whether array is smi-only or not when writing a smi.
   __ mov(FixedArrayElementOperand(ebx, key), value);
-  __ ret(0);
+  __ ret(StoreWithVectorDescriptor::kStackArgumentsCount * kPointerSize);
 
   __ bind(&non_smi_value);
   // Escape to elements kind transition case.
@@ -428,7 +428,7 @@
   __ mov(edx, value);  // Preserve the value which is returned.
   __ RecordWriteArray(ebx, edx, key, kDontSaveFPRegs, EMIT_REMEMBERED_SET,
                       OMIT_SMI_CHECK);
-  __ ret(0);
+  __ ret(StoreWithVectorDescriptor::kStackArgumentsCount * kPointerSize);
 
   __ bind(fast_double);
   if (check_map == kCheckMap) {
@@ -457,7 +457,7 @@
     __ add(FieldOperand(receiver, JSArray::kLengthOffset),
            Immediate(Smi::FromInt(1)));
   }
-  __ ret(0);
+  __ ret(StoreWithVectorDescriptor::kStackArgumentsCount * kPointerSize);
 
   __ bind(&transition_smi_elements);
   __ mov(ebx, FieldOperand(receiver, HeapObject::kMapOffset));
@@ -504,12 +504,13 @@
 
 void KeyedStoreIC::GenerateMegamorphic(MacroAssembler* masm,
                                        LanguageMode language_mode) {
+  typedef StoreWithVectorDescriptor Descriptor;
   // Return address is on the stack.
   Label slow, fast_object, fast_object_grow;
   Label fast_double, fast_double_grow;
   Label array, extra, check_if_double_array, maybe_name_key, miss;
-  Register receiver = StoreDescriptor::ReceiverRegister();
-  Register key = StoreDescriptor::NameRegister();
+  Register receiver = Descriptor::ReceiverRegister();
+  Register key = Descriptor::NameRegister();
   DCHECK(receiver.is(edx));
   DCHECK(key.is(ecx));
 
@@ -522,6 +523,10 @@
   __ test_b(FieldOperand(edi, Map::kBitFieldOffset),
             Immediate(1 << Map::kIsAccessCheckNeeded));
   __ j(not_zero, &slow);
+
+  __ LoadParameterFromStack<Descriptor>(Descriptor::ValueRegister(),
+                                        Descriptor::kValue);
+
   // Check that the key is a smi.
   __ JumpIfNotSmi(key, &maybe_name_key);
   __ CmpInstanceType(edi, JS_ARRAY_TYPE);
@@ -551,22 +556,9 @@
   __ movzx_b(ebx, FieldOperand(ebx, Map::kInstanceTypeOffset));
   __ JumpIfNotUniqueNameInstanceType(ebx, &slow);
 
-
-  // The handlers in the stub cache expect a vector and slot. Since we won't
-  // change the IC from any downstream misses, a dummy vector can be used.
-  Handle<TypeFeedbackVector> dummy_vector =
-      TypeFeedbackVector::DummyVector(masm->isolate());
-  int slot = dummy_vector->GetIndex(
-      FeedbackVectorSlot(TypeFeedbackVector::kDummyKeyedStoreICSlot));
-  __ push(Immediate(Smi::FromInt(slot)));
-  __ push(Immediate(dummy_vector));
-
   masm->isolate()->store_stub_cache()->GenerateProbe(masm, receiver, key, edi,
                                                      no_reg);
 
-  __ pop(StoreWithVectorDescriptor::VectorRegister());
-  __ pop(StoreWithVectorDescriptor::SlotRegister());
-
   // Cache miss.
   __ jmp(&miss);
 
@@ -705,18 +697,21 @@
 }
 
 static void StoreIC_PushArgs(MacroAssembler* masm) {
-  Register receiver = StoreDescriptor::ReceiverRegister();
-  Register name = StoreDescriptor::NameRegister();
-  Register value = StoreDescriptor::ValueRegister();
-  Register slot = StoreWithVectorDescriptor::SlotRegister();
-  Register vector = StoreWithVectorDescriptor::VectorRegister();
+  Register receiver = StoreWithVectorDescriptor::ReceiverRegister();
+  Register name = StoreWithVectorDescriptor::NameRegister();
 
-  __ xchg(receiver, Operand(esp, 0));
+  STATIC_ASSERT(StoreWithVectorDescriptor::kStackArgumentsCount == 3);
+  // Current stack layout:
+  // - esp[12]   -- value
+  // - esp[8]    -- slot
+  // - esp[4]    -- vector
+  // - esp[0]    -- return address
+
+  Register return_address = StoreWithVectorDescriptor::SlotRegister();
+  __ pop(return_address);
+  __ push(receiver);
   __ push(name);
-  __ push(value);
-  __ push(slot);
-  __ push(vector);
-  __ push(receiver);  // Contains the return address.
+  __ push(return_address);
 }
 
 
@@ -730,32 +725,33 @@
 
 
 void StoreIC::GenerateNormal(MacroAssembler* masm) {
+  typedef StoreWithVectorDescriptor Descriptor;
   Label restore_miss;
-  Register receiver = StoreDescriptor::ReceiverRegister();
-  Register name = StoreDescriptor::NameRegister();
-  Register value = StoreDescriptor::ValueRegister();
-  Register vector = StoreWithVectorDescriptor::VectorRegister();
-  Register slot = StoreWithVectorDescriptor::SlotRegister();
+  Register receiver = Descriptor::ReceiverRegister();
+  Register name = Descriptor::NameRegister();
+  Register value = Descriptor::ValueRegister();
+  // Since the slot and vector values are passed on the stack we can use
+  // respective registers as scratch registers.
+  Register scratch1 = Descriptor::VectorRegister();
+  Register scratch2 = Descriptor::SlotRegister();
 
-  // A lot of registers are needed for storing to slow case
-  // objects. Push and restore receiver but rely on
-  // GenerateDictionaryStore preserving the value and name.
+  __ LoadParameterFromStack<Descriptor>(value, Descriptor::kValue);
+
+  // A lot of registers are needed for storing to slow case objects.
+  // Push and restore receiver but rely on GenerateDictionaryStore preserving
+  // the value and name.
   __ push(receiver);
-  __ push(vector);
-  __ push(slot);
 
-  Register dictionary = ebx;
+  Register dictionary = receiver;
   __ mov(dictionary, FieldOperand(receiver, JSObject::kPropertiesOffset));
   GenerateDictionaryStore(masm, &restore_miss, dictionary, name, value,
-                          receiver, edi);
-  __ Drop(3);
+                          scratch1, scratch2);
+  __ Drop(1);
   Counters* counters = masm->isolate()->counters();
   __ IncrementCounter(counters->ic_store_normal_hit(), 1);
-  __ ret(0);
+  __ ret(Descriptor::kStackArgumentsCount * kPointerSize);
 
   __ bind(&restore_miss);
-  __ pop(slot);
-  __ pop(vector);
   __ pop(receiver);
   __ IncrementCounter(counters->ic_store_normal_miss(), 1);
   GenerateMiss(masm);
@@ -770,6 +766,13 @@
   __ TailCallRuntime(Runtime::kKeyedStoreIC_Miss);
 }
 
+void KeyedStoreIC::GenerateSlow(MacroAssembler* masm) {
+  // Return address is on the stack.
+  StoreIC_PushArgs(masm);
+
+  // Do tail-call to runtime routine.
+  __ TailCallRuntime(Runtime::kKeyedStoreIC_Slow);
+}
 
 #undef __
 
diff --git a/src/ic/ia32/stub-cache-ia32.cc b/src/ic/ia32/stub-cache-ia32.cc
index 939e7fc..82700d3 100644
--- a/src/ic/ia32/stub-cache-ia32.cc
+++ b/src/ic/ia32/stub-cache-ia32.cc
@@ -22,8 +22,6 @@
   ExternalReference key_offset(stub_cache->key_reference(table));
   ExternalReference value_offset(stub_cache->value_reference(table));
   ExternalReference map_offset(stub_cache->map_reference(table));
-  ExternalReference virtual_register =
-      ExternalReference::virtual_handler_register(masm->isolate());
 
   Label miss;
   Code::Kind ic_kind = stub_cache->ic_kind();
@@ -55,19 +53,15 @@
     }
 #endif
 
-    // The vector and slot were pushed onto the stack before starting the
-    // probe, and need to be dropped before calling the handler.
     if (is_vector_store) {
-      // The overlap here is rather embarrassing. One does what one must.
-      Register vector = StoreWithVectorDescriptor::VectorRegister();
+      // The value, vector and slot were passed to the IC on the stack and
+      // they are still there. So we can just jump to the handler.
       DCHECK(extra.is(StoreWithVectorDescriptor::SlotRegister()));
       __ add(extra, Immediate(Code::kHeaderSize - kHeapObjectTag));
-      __ pop(vector);
-      __ mov(Operand::StaticVariable(virtual_register), extra);
-      __ pop(extra);  // Pop "slot".
-      // Jump to the first instruction in the code stub.
-      __ jmp(Operand::StaticVariable(virtual_register));
+      __ jmp(extra);
     } else {
+      // The vector and slot were pushed onto the stack before starting the
+      // probe, and need to be dropped before calling the handler.
       __ pop(LoadWithVectorDescriptor::VectorRegister());
       __ pop(LoadDescriptor::SlotRegister());
       __ add(extra, Immediate(Code::kHeaderSize - kHeapObjectTag));
@@ -110,19 +104,10 @@
 
     // Jump to the first instruction in the code stub.
     if (is_vector_store) {
-      // The vector and slot were pushed onto the stack before starting the
-      // probe, and need to be dropped before calling the handler.
-      Register vector = StoreWithVectorDescriptor::VectorRegister();
       DCHECK(offset.is(StoreWithVectorDescriptor::SlotRegister()));
-      __ add(offset, Immediate(Code::kHeaderSize - kHeapObjectTag));
-      __ mov(Operand::StaticVariable(virtual_register), offset);
-      __ pop(vector);
-      __ pop(offset);  // Pop "slot".
-      __ jmp(Operand::StaticVariable(virtual_register));
-    } else {
-      __ add(offset, Immediate(Code::kHeaderSize - kHeapObjectTag));
-      __ jmp(offset);
     }
+    __ add(offset, Immediate(Code::kHeaderSize - kHeapObjectTag));
+    __ jmp(offset);
 
     // Pop at miss.
     __ bind(&miss);
diff --git a/src/ic/ic-inl.h b/src/ic/ic-inl.h
index f77c40a..4fc8ada 100644
--- a/src/ic/ic-inl.h
+++ b/src/ic/ic-inl.h
@@ -7,7 +7,6 @@
 
 #include "src/ic/ic.h"
 
-#include "src/compiler.h"
 #include "src/debug/debug.h"
 #include "src/macro-assembler.h"
 #include "src/prototype.h"
diff --git a/src/ic/ic-state.cc b/src/ic/ic-state.cc
index d157c92..ea1f16c 100644
--- a/src/ic/ic-state.cc
+++ b/src/ic/ic-state.cc
@@ -189,15 +189,14 @@
 #undef GENERATE
 }
 
-
-Type* BinaryOpICState::GetResultType() const {
+AstType* BinaryOpICState::GetResultType() const {
   Kind result_kind = result_kind_;
   if (HasSideEffects()) {
     result_kind = NONE;
   } else if (result_kind == GENERIC && op_ == Token::ADD) {
-    return Type::NumberOrString();
+    return AstType::NumberOrString();
   } else if (result_kind == NUMBER && op_ == Token::SHR) {
-    return Type::Unsigned32();
+    return AstType::Unsigned32();
   }
   DCHECK_NE(GENERIC, result_kind);
   return KindToType(result_kind);
@@ -318,20 +317,20 @@
 
 
 // static
-Type* BinaryOpICState::KindToType(Kind kind) {
+AstType* BinaryOpICState::KindToType(Kind kind) {
   switch (kind) {
     case NONE:
-      return Type::None();
+      return AstType::None();
     case SMI:
-      return Type::SignedSmall();
+      return AstType::SignedSmall();
     case INT32:
-      return Type::Signed32();
+      return AstType::Signed32();
     case NUMBER:
-      return Type::Number();
+      return AstType::Number();
     case STRING:
-      return Type::String();
+      return AstType::String();
     case GENERIC:
-      return Type::Any();
+      return AstType::Any();
   }
   UNREACHABLE();
   return NULL;
@@ -365,29 +364,28 @@
   return NULL;
 }
 
-
-Type* CompareICState::StateToType(Zone* zone, State state, Handle<Map> map) {
+AstType* CompareICState::StateToType(Zone* zone, State state, Handle<Map> map) {
   switch (state) {
     case UNINITIALIZED:
-      return Type::None();
+      return AstType::None();
     case BOOLEAN:
-      return Type::Boolean();
+      return AstType::Boolean();
     case SMI:
-      return Type::SignedSmall();
+      return AstType::SignedSmall();
     case NUMBER:
-      return Type::Number();
+      return AstType::Number();
     case STRING:
-      return Type::String();
+      return AstType::String();
     case INTERNALIZED_STRING:
-      return Type::InternalizedString();
+      return AstType::InternalizedString();
     case UNIQUE_NAME:
-      return Type::UniqueName();
+      return AstType::UniqueName();
     case RECEIVER:
-      return Type::Receiver();
+      return AstType::Receiver();
     case KNOWN_RECEIVER:
-      return map.is_null() ? Type::Receiver() : Type::Class(map, zone);
+      return map.is_null() ? AstType::Receiver() : AstType::Class(map, zone);
     case GENERIC:
-      return Type::Any();
+      return AstType::Any();
   }
   UNREACHABLE();
   return NULL;
diff --git a/src/ic/ic-state.h b/src/ic/ic-state.h
index 6888a7a..38be57a 100644
--- a/src/ic/ic-state.h
+++ b/src/ic/ic-state.h
@@ -6,6 +6,7 @@
 #define V8_IC_STATE_H_
 
 #include "src/macro-assembler.h"
+#include "src/parsing/token.h"
 
 namespace v8 {
 namespace internal {
@@ -120,9 +121,9 @@
   Token::Value op() const { return op_; }
   Maybe<int> fixed_right_arg() const { return fixed_right_arg_; }
 
-  Type* GetLeftType() const { return KindToType(left_kind_); }
-  Type* GetRightType() const { return KindToType(right_kind_); }
-  Type* GetResultType() const;
+  AstType* GetLeftType() const { return KindToType(left_kind_); }
+  AstType* GetRightType() const { return KindToType(right_kind_); }
+  AstType* GetResultType() const;
 
   void Update(Handle<Object> left, Handle<Object> right, Handle<Object> result);
 
@@ -140,7 +141,7 @@
   Kind UpdateKind(Handle<Object> object, Kind kind) const;
 
   static const char* KindToString(Kind kind);
-  static Type* KindToType(Kind kind);
+  static AstType* KindToType(Kind kind);
   static bool KindMaybeSmi(Kind kind) {
     return (kind >= SMI && kind <= NUMBER) || kind == GENERIC;
   }
@@ -202,8 +203,8 @@
     GENERIC
   };
 
-  static Type* StateToType(Zone* zone, State state,
-                           Handle<Map> map = Handle<Map>());
+  static AstType* StateToType(Zone* zone, State state,
+                              Handle<Map> map = Handle<Map>());
 
   static State NewInputState(State old_state, Handle<Object> value);
 
diff --git a/src/ic/ic.cc b/src/ic/ic.cc
index b72791a..0e751bd 100644
--- a/src/ic/ic.cc
+++ b/src/ic/ic.cc
@@ -183,6 +183,19 @@
   extra_ic_state_ = target->extra_ic_state();
 }
 
+// The ICs that don't pass slot and vector through the stack have to
+// save/restore them in the dispatcher.
+bool IC::ShouldPushPopSlotAndVector(Code::Kind kind) {
+  if (kind == Code::LOAD_IC || kind == Code::LOAD_GLOBAL_IC ||
+      kind == Code::KEYED_LOAD_IC || kind == Code::CALL_IC) {
+    return true;
+  }
+  if (kind == Code::STORE_IC || kind == Code::KEYED_STORE_IC) {
+    return !StoreWithVectorDescriptor::kPassLastArgsOnStack;
+  }
+  return false;
+}
+
 InlineCacheState IC::StateFromCode(Code* code) {
   Isolate* isolate = code->GetIsolate();
   switch (code->kind()) {
@@ -231,13 +244,6 @@
   return code;
 }
 
-
-bool IC::AddressIsOptimizedCode() const {
-  Code* host =
-      isolate()->inner_pointer_to_code_cache()->GetCacheEntry(address())->code;
-  return host->kind() == Code::OPTIMIZED_FUNCTION;
-}
-
 static void LookupForRead(LookupIterator* it) {
   for (; it->IsFound(); it->Next()) {
     switch (it->state()) {
@@ -270,7 +276,7 @@
   }
 }
 
-bool IC::ShouldRecomputeHandler(Handle<Object> receiver, Handle<String> name) {
+bool IC::ShouldRecomputeHandler(Handle<String> name) {
   if (!RecomputeHandlerForName(name)) return false;
 
   DCHECK(UseVector());
@@ -320,7 +326,7 @@
   // Remove the target from the code cache if it became invalid
   // because of changes in the prototype chain to avoid hitting it
   // again.
-  if (ShouldRecomputeHandler(receiver, Handle<String>::cast(name))) {
+  if (ShouldRecomputeHandler(Handle<String>::cast(name))) {
     MarkRecomputeHandler(name);
   }
 }
@@ -728,7 +734,6 @@
 
   number_of_valid_maps++;
   if (number_of_valid_maps > 1 && is_keyed()) return false;
-  Handle<Code> ic;
   if (number_of_valid_maps == 1) {
     ConfigureVectorState(name, receiver_map(), code);
   } else {
@@ -1413,17 +1418,18 @@
                                Object);
   } else if (FLAG_use_ic && !object->IsAccessCheckNeeded() &&
              !object->IsJSValue()) {
-    if (object->IsJSObject() || (object->IsString() && key->IsNumber())) {
-      Handle<HeapObject> receiver = Handle<HeapObject>::cast(object);
-      if (object->IsString() || key->IsSmi()) UpdateLoadElement(receiver);
+    if ((object->IsJSObject() && key->IsSmi()) ||
+        (object->IsString() && key->IsNumber())) {
+      UpdateLoadElement(Handle<HeapObject>::cast(object));
+      TRACE_IC("LoadIC", key);
     }
   }
 
   if (!is_vector_set()) {
     ConfigureVectorState(MEGAMORPHIC, key);
     TRACE_GENERIC_IC(isolate(), "KeyedLoadIC", "set generic");
+    TRACE_IC("LoadIC", key);
   }
-  TRACE_IC("LoadIC", key);
 
   if (!load_handle.is_null()) return load_handle;
 
@@ -2237,7 +2243,8 @@
 RUNTIME_FUNCTION(Runtime_CallIC_Miss) {
   TimerEventScope<TimerEventIcMiss> timer(isolate);
   HandleScope scope(isolate);
-  DCHECK(args.length() == 3);
+  DCHECK_EQ(3, args.length());
+  // Runtime functions don't follow the IC's calling convention.
   Handle<Object> function = args.at<Object>(0);
   Handle<TypeFeedbackVector> vector = args.at<TypeFeedbackVector>(1);
   Handle<Smi> slot = args.at<Smi>(2);
@@ -2253,9 +2260,9 @@
 RUNTIME_FUNCTION(Runtime_LoadIC_Miss) {
   TimerEventScope<TimerEventIcMiss> timer(isolate);
   HandleScope scope(isolate);
-  Handle<Object> receiver = args.at<Object>(0);
-
   DCHECK_EQ(4, args.length());
+  // Runtime functions don't follow the IC's calling convention.
+  Handle<Object> receiver = args.at<Object>(0);
   Handle<Smi> slot = args.at<Smi>(2);
   Handle<TypeFeedbackVector> vector = args.at<TypeFeedbackVector>(3);
   FeedbackVectorSlot vector_slot = vector->ToSlot(slot->value());
@@ -2294,6 +2301,7 @@
   TimerEventScope<TimerEventIcMiss> timer(isolate);
   HandleScope scope(isolate);
   DCHECK_EQ(2, args.length());
+  // Runtime functions don't follow the IC's calling convention.
   Handle<JSGlobalObject> global = isolate->global_object();
   Handle<Smi> slot = args.at<Smi>(0);
   Handle<TypeFeedbackVector> vector = args.at<TypeFeedbackVector>(1);
@@ -2364,10 +2372,10 @@
 RUNTIME_FUNCTION(Runtime_KeyedLoadIC_Miss) {
   TimerEventScope<TimerEventIcMiss> timer(isolate);
   HandleScope scope(isolate);
+  DCHECK_EQ(4, args.length());
+  // Runtime functions don't follow the IC's calling convention.
   Handle<Object> receiver = args.at<Object>(0);
   Handle<Object> key = args.at<Object>(1);
-
-  DCHECK(args.length() == 4);
   Handle<Smi> slot = args.at<Smi>(2);
   Handle<TypeFeedbackVector> vector = args.at<TypeFeedbackVector>(3);
   FeedbackVectorSlot vector_slot = vector->ToSlot(slot->value());
@@ -2381,8 +2389,8 @@
 RUNTIME_FUNCTION(Runtime_KeyedLoadIC_MissFromStubFailure) {
   TimerEventScope<TimerEventIcMiss> timer(isolate);
   HandleScope scope(isolate);
-  DCHECK_EQ(4, args.length());
   typedef LoadWithVectorDescriptor Descriptor;
+  DCHECK_EQ(Descriptor::kParameterCount, args.length());
   Handle<Object> receiver = args.at<Object>(Descriptor::kReceiver);
   Handle<Object> key = args.at<Object>(Descriptor::kName);
   Handle<Smi> slot = args.at<Smi>(Descriptor::kSlot);
@@ -2400,13 +2408,13 @@
 RUNTIME_FUNCTION(Runtime_StoreIC_Miss) {
   TimerEventScope<TimerEventIcMiss> timer(isolate);
   HandleScope scope(isolate);
-  Handle<Object> receiver = args.at<Object>(0);
-  Handle<Name> key = args.at<Name>(1);
-  Handle<Object> value = args.at<Object>(2);
-
-  DCHECK(args.length() == 5 || args.length() == 6);
-  Handle<Smi> slot = args.at<Smi>(3);
-  Handle<TypeFeedbackVector> vector = args.at<TypeFeedbackVector>(4);
+  DCHECK_EQ(5, args.length());
+  // Runtime functions don't follow the IC's calling convention.
+  Handle<Object> value = args.at<Object>(0);
+  Handle<Smi> slot = args.at<Smi>(1);
+  Handle<TypeFeedbackVector> vector = args.at<TypeFeedbackVector>(2);
+  Handle<Object> receiver = args.at<Object>(3);
+  Handle<Name> key = args.at<Name>(4);
   FeedbackVectorSlot vector_slot = vector->ToSlot(slot->value());
   if (vector->GetKind(vector_slot) == FeedbackVectorSlotKind::STORE_IC) {
     StoreICNexus nexus(vector, vector_slot);
@@ -2424,88 +2432,17 @@
 }
 
 
-RUNTIME_FUNCTION(Runtime_StoreIC_MissFromStubFailure) {
-  TimerEventScope<TimerEventIcMiss> timer(isolate);
-  HandleScope scope(isolate);
-  DCHECK_EQ(5, args.length());
-  typedef StoreWithVectorDescriptor Descriptor;
-  Handle<Object> receiver = args.at<Object>(Descriptor::kReceiver);
-  Handle<Name> key = args.at<Name>(Descriptor::kName);
-  Handle<Object> value = args.at<Object>(Descriptor::kValue);
-  Handle<Smi> slot = args.at<Smi>(Descriptor::kSlot);
-  Handle<TypeFeedbackVector> vector =
-      args.at<TypeFeedbackVector>(Descriptor::kVector);
-
-  FeedbackVectorSlot vector_slot = vector->ToSlot(slot->value());
-  if (vector->GetKind(vector_slot) == FeedbackVectorSlotKind::STORE_IC) {
-    StoreICNexus nexus(vector, vector_slot);
-    StoreIC ic(IC::EXTRA_CALL_FRAME, isolate, &nexus);
-    ic.UpdateState(receiver, key);
-    RETURN_RESULT_OR_FAILURE(isolate, ic.Store(receiver, key, value));
-  } else {
-    DCHECK_EQ(FeedbackVectorSlotKind::KEYED_STORE_IC,
-              vector->GetKind(vector_slot));
-    KeyedStoreICNexus nexus(vector, vector_slot);
-    KeyedStoreIC ic(IC::EXTRA_CALL_FRAME, isolate, &nexus);
-    ic.UpdateState(receiver, key);
-    RETURN_RESULT_OR_FAILURE(isolate, ic.Store(receiver, key, value));
-  }
-}
-
-RUNTIME_FUNCTION(Runtime_TransitionStoreIC_MissFromStubFailure) {
-  TimerEventScope<TimerEventIcMiss> timer(isolate);
-  HandleScope scope(isolate);
-  Handle<Object> receiver = args.at<Object>(0);
-  Handle<Name> key = args.at<Name>(1);
-  Handle<Object> value = args.at<Object>(2);
-
-  int length = args.length();
-  DCHECK(length == 5 || length == 6);
-  // TODO(ishell): use VectorStoreTransitionDescriptor indices here and update
-  // this comment:
-  //
-  // We might have slot and vector, for a normal miss (slot(3), vector(4)).
-  // Or, map and vector for a transitioning store miss (map(3), vector(4)).
-  // In this case, we need to recover the slot from a virtual register.
-  // If length == 6, then a map is included (map(3), slot(4), vector(5)).
-  Handle<Smi> slot;
-  Handle<TypeFeedbackVector> vector;
-  if (length == 5) {
-    vector = args.at<TypeFeedbackVector>(4);
-    slot = handle(
-        *reinterpret_cast<Smi**>(isolate->virtual_slot_register_address()),
-        isolate);
-  } else {
-    vector = args.at<TypeFeedbackVector>(5);
-    slot = args.at<Smi>(4);
-  }
-
-  FeedbackVectorSlot vector_slot = vector->ToSlot(slot->value());
-  if (vector->GetKind(vector_slot) == FeedbackVectorSlotKind::STORE_IC) {
-    StoreICNexus nexus(vector, vector_slot);
-    StoreIC ic(IC::EXTRA_CALL_FRAME, isolate, &nexus);
-    ic.UpdateState(receiver, key);
-    RETURN_RESULT_OR_FAILURE(isolate, ic.Store(receiver, key, value));
-  } else {
-    DCHECK_EQ(FeedbackVectorSlotKind::KEYED_STORE_IC,
-              vector->GetKind(vector_slot));
-    KeyedStoreICNexus nexus(vector, vector_slot);
-    KeyedStoreIC ic(IC::EXTRA_CALL_FRAME, isolate, &nexus);
-    ic.UpdateState(receiver, key);
-    RETURN_RESULT_OR_FAILURE(isolate, ic.Store(receiver, key, value));
-  }
-}
-
 // Used from ic-<arch>.cc.
 RUNTIME_FUNCTION(Runtime_KeyedStoreIC_Miss) {
   TimerEventScope<TimerEventIcMiss> timer(isolate);
   HandleScope scope(isolate);
   DCHECK_EQ(5, args.length());
-  Handle<Object> receiver = args.at<Object>(0);
-  Handle<Object> key = args.at<Object>(1);
-  Handle<Object> value = args.at<Object>(2);
-  Handle<Smi> slot = args.at<Smi>(3);
-  Handle<TypeFeedbackVector> vector = args.at<TypeFeedbackVector>(4);
+  // Runtime functions don't follow the IC's calling convention.
+  Handle<Object> value = args.at<Object>(0);
+  Handle<Smi> slot = args.at<Smi>(1);
+  Handle<TypeFeedbackVector> vector = args.at<TypeFeedbackVector>(2);
+  Handle<Object> receiver = args.at<Object>(3);
+  Handle<Object> key = args.at<Object>(4);
   FeedbackVectorSlot vector_slot = vector->ToSlot(slot->value());
   KeyedStoreICNexus nexus(vector, vector_slot);
   KeyedStoreIC ic(IC::NO_EXTRA_FRAME, isolate, &nexus);
@@ -2514,31 +2451,14 @@
 }
 
 
-RUNTIME_FUNCTION(Runtime_KeyedStoreIC_MissFromStubFailure) {
-  TimerEventScope<TimerEventIcMiss> timer(isolate);
-  HandleScope scope(isolate);
-  DCHECK_EQ(5, args.length());
-  typedef StoreWithVectorDescriptor Descriptor;
-  Handle<Object> receiver = args.at<Object>(Descriptor::kReceiver);
-  Handle<Object> key = args.at<Object>(Descriptor::kName);
-  Handle<Object> value = args.at<Object>(Descriptor::kValue);
-  Handle<Smi> slot = args.at<Smi>(Descriptor::kSlot);
-  Handle<TypeFeedbackVector> vector =
-      args.at<TypeFeedbackVector>(Descriptor::kVector);
-  FeedbackVectorSlot vector_slot = vector->ToSlot(slot->value());
-  KeyedStoreICNexus nexus(vector, vector_slot);
-  KeyedStoreIC ic(IC::EXTRA_CALL_FRAME, isolate, &nexus);
-  ic.UpdateState(receiver, key);
-  RETURN_RESULT_OR_FAILURE(isolate, ic.Store(receiver, key, value));
-}
-
-
 RUNTIME_FUNCTION(Runtime_KeyedStoreIC_Slow) {
   HandleScope scope(isolate);
   DCHECK_EQ(5, args.length());
-  Handle<Object> object = args.at<Object>(0);
-  Handle<Object> key = args.at<Object>(1);
-  Handle<Object> value = args.at<Object>(2);
+  // Runtime functions don't follow the IC's calling convention.
+  Handle<Object> value = args.at<Object>(0);
+  // slot and vector parameters are not used.
+  Handle<Object> object = args.at<Object>(3);
+  Handle<Object> key = args.at<Object>(4);
   LanguageMode language_mode;
   KeyedStoreICNexus nexus(isolate);
   KeyedStoreIC ic(IC::NO_EXTRA_FRAME, isolate, &nexus);
@@ -2552,16 +2472,14 @@
 RUNTIME_FUNCTION(Runtime_ElementsTransitionAndStoreIC_Miss) {
   TimerEventScope<TimerEventIcMiss> timer(isolate);
   HandleScope scope(isolate);
-  // Length == 5 or 6, depending on whether the vector slot
-  // is passed in a virtual register or not.
-  DCHECK(args.length() == 5 || args.length() == 6);
+  // Runtime functions don't follow the IC's calling convention.
   Handle<Object> object = args.at<Object>(0);
   Handle<Object> key = args.at<Object>(1);
   Handle<Object> value = args.at<Object>(2);
   Handle<Map> map = args.at<Map>(3);
   LanguageMode language_mode;
   KeyedStoreICNexus nexus(isolate);
-  KeyedStoreIC ic(IC::EXTRA_CALL_FRAME, isolate, &nexus);
+  KeyedStoreIC ic(IC::NO_EXTRA_FRAME, isolate, &nexus);
   language_mode = ic.language_mode();
   if (object->IsJSObject()) {
     JSObject::TransitionElementsKind(Handle<JSObject>::cast(object),
@@ -3000,35 +2918,5 @@
 
   return *result;
 }
-
-
-RUNTIME_FUNCTION(Runtime_LoadIC_MissFromStubFailure) {
-  TimerEventScope<TimerEventIcMiss> timer(isolate);
-  HandleScope scope(isolate);
-  DCHECK_EQ(4, args.length());
-  typedef LoadWithVectorDescriptor Descriptor;
-  Handle<Object> receiver = args.at<Object>(Descriptor::kReceiver);
-  Handle<Name> key = args.at<Name>(Descriptor::kName);
-  Handle<Smi> slot = args.at<Smi>(Descriptor::kSlot);
-  Handle<TypeFeedbackVector> vector =
-      args.at<TypeFeedbackVector>(Descriptor::kVector);
-  FeedbackVectorSlot vector_slot = vector->ToSlot(slot->value());
-  // A monomorphic or polymorphic KeyedLoadIC with a string key can call the
-  // LoadIC miss handler if the handler misses. Since the vector Nexus is
-  // set up outside the IC, handle that here.
-  if (vector->GetKind(vector_slot) == FeedbackVectorSlotKind::LOAD_IC) {
-    LoadICNexus nexus(vector, vector_slot);
-    LoadIC ic(IC::EXTRA_CALL_FRAME, isolate, &nexus);
-    ic.UpdateState(receiver, key);
-    RETURN_RESULT_OR_FAILURE(isolate, ic.Load(receiver, key));
-  } else {
-    DCHECK_EQ(FeedbackVectorSlotKind::KEYED_LOAD_IC,
-              vector->GetKind(vector_slot));
-    KeyedLoadICNexus nexus(vector, vector_slot);
-    KeyedLoadIC ic(IC::EXTRA_CALL_FRAME, isolate, &nexus);
-    ic.UpdateState(receiver, key);
-    RETURN_RESULT_OR_FAILURE(isolate, ic.Load(receiver, key));
-  }
-}
 }  // namespace internal
 }  // namespace v8
diff --git a/src/ic/ic.h b/src/ic/ic.h
index 35f3844..bf395f1 100644
--- a/src/ic/ic.h
+++ b/src/ic/ic.h
@@ -75,6 +75,10 @@
            kind == Code::STORE_IC || kind == Code::KEYED_STORE_IC;
   }
 
+  // The ICs that don't pass slot and vector through the stack have to
+  // save/restore them in the dispatcher.
+  static bool ShouldPushPopSlotAndVector(Code::Kind kind);
+
   static InlineCacheState StateFromCode(Code* code);
 
  protected:
@@ -87,7 +91,6 @@
   // Get the code object of the caller.
   Code* GetCode() const;
 
-  bool AddressIsOptimizedCode() const;
   inline bool AddressIsDeoptimizedCode() const;
   inline static bool AddressIsDeoptimizedCode(Isolate* isolate,
                                               Address address);
@@ -168,7 +171,7 @@
            kind_ == Code::KEYED_STORE_IC);
     return kind_;
   }
-  bool ShouldRecomputeHandler(Handle<Object> receiver, Handle<String> name);
+  bool ShouldRecomputeHandler(Handle<String> name);
 
   ExtraICState extra_ic_state() const { return extra_ic_state_; }
 
diff --git a/src/ic/mips/handler-compiler-mips.cc b/src/ic/mips/handler-compiler-mips.cc
index f4e0f0b..df7a0df 100644
--- a/src/ic/mips/handler-compiler-mips.cc
+++ b/src/ic/mips/handler-compiler-mips.cc
@@ -107,13 +107,19 @@
 void PropertyHandlerCompiler::PushVectorAndSlot(Register vector,
                                                 Register slot) {
   MacroAssembler* masm = this->masm();
-  __ Push(vector, slot);
+  STATIC_ASSERT(LoadWithVectorDescriptor::kSlot <
+                LoadWithVectorDescriptor::kVector);
+  STATIC_ASSERT(StoreWithVectorDescriptor::kSlot <
+                StoreWithVectorDescriptor::kVector);
+  STATIC_ASSERT(StoreTransitionDescriptor::kSlot <
+                StoreTransitionDescriptor::kVector);
+  __ Push(slot, vector);
 }
 
 
 void PropertyHandlerCompiler::PopVectorAndSlot(Register vector, Register slot) {
   MacroAssembler* masm = this->masm();
-  __ Pop(vector, slot);
+  __ Pop(slot, vector);
 }
 
 
@@ -123,6 +129,13 @@
   __ Addu(sp, sp, Operand(2 * kPointerSize));
 }
 
+void PropertyHandlerCompiler::PushReturnAddress(Register tmp) {
+  // No-op. Return address is in ra register.
+}
+
+void PropertyHandlerCompiler::PopReturnAddress(Register tmp) {
+  // No-op. Return address is in ra register.
+}
 
 void PropertyHandlerCompiler::GenerateDictionaryNegativeLookup(
     MacroAssembler* masm, Label* miss_label, Register receiver,
@@ -317,24 +330,6 @@
   __ TailCallStub(&stub);
 }
 
-
-static void StoreIC_PushArgs(MacroAssembler* masm) {
-  __ Push(StoreDescriptor::ReceiverRegister(), StoreDescriptor::NameRegister(),
-          StoreDescriptor::ValueRegister(),
-          StoreWithVectorDescriptor::SlotRegister(),
-          StoreWithVectorDescriptor::VectorRegister());
-}
-
-
-void ElementHandlerCompiler::GenerateStoreSlow(MacroAssembler* masm) {
-  StoreIC_PushArgs(masm);
-
-  // The slow case calls into the runtime to complete the store without causing
-  // an IC miss that would otherwise cause a transition to the generic stub.
-  __ TailCallRuntime(Runtime::kKeyedStoreIC_Slow);
-}
-
-
 #undef __
 #define __ ACCESS_MASM(masm())
 
@@ -353,12 +348,6 @@
 }
 
 
-void NamedStoreHandlerCompiler::RearrangeVectorAndSlot(
-    Register current_map, Register destination_map) {
-  DCHECK(false);  // Not implemented.
-}
-
-
 void NamedStoreHandlerCompiler::GenerateRestoreMap(Handle<Map> transition,
                                                    Register map_reg,
                                                    Register scratch,
@@ -615,6 +604,9 @@
   __ TailCallRuntime(Runtime::kLoadPropertyWithInterceptor);
 }
 
+void NamedStoreHandlerCompiler::ZapStackArgumentsRegisterAliases() {
+  STATIC_ASSERT(!StoreWithVectorDescriptor::kPassLastArgsOnStack);
+}
 
 Handle<Code> NamedStoreHandlerCompiler::CompileStoreCallback(
     Handle<JSObject> object, Handle<Name> name, Handle<AccessorInfo> callback,
@@ -675,7 +667,7 @@
     DiscardVectorAndSlot();
   }
   __ Ret(USE_DELAY_SLOT);
-  __ mov(v0, result);
+  __ Move(v0, result);  // Ensure the stub returns correct value.
 
   FrontendFooter(name, &miss);
 
diff --git a/src/ic/mips/ic-mips.cc b/src/ic/mips/ic-mips.cc
index 3a28b13..ce9e3d9 100644
--- a/src/ic/mips/ic-mips.cc
+++ b/src/ic/mips/ic-mips.cc
@@ -494,7 +494,8 @@
   __ Addu(address, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
   __ Lsa(address, address, key, kPointerSizeLog2 - kSmiTagSize);
   __ sw(value, MemOperand(address));
-  __ Ret();
+  __ Ret(USE_DELAY_SLOT);
+  __ Move(v0, value);  // Ensure the stub returns correct value.
 
   __ bind(&non_smi_value);
   // Escape to elements kind transition case.
@@ -514,7 +515,8 @@
   __ mov(scratch, value);  // Preserve the value which is returned.
   __ RecordWrite(elements, address, scratch, kRAHasNotBeenSaved,
                  kDontSaveFPRegs, EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
-  __ Ret();
+  __ Ret(USE_DELAY_SLOT);
+  __ Move(v0, value);  // Ensure the stub returns correct value.
 
   __ bind(fast_double);
   if (check_map == kCheckMap) {
@@ -543,7 +545,8 @@
     __ Addu(scratch, key, Operand(Smi::FromInt(1)));
     __ sw(scratch, FieldMemOperand(receiver, JSArray::kLengthOffset));
   }
-  __ Ret();
+  __ Ret(USE_DELAY_SLOT);
+  __ Move(v0, value);  // Ensure the stub returns correct value.
 
   __ bind(&transition_smi_elements);
   // Transition the array appropriately depending on the value type.
@@ -710,10 +713,11 @@
 
 
 static void StoreIC_PushArgs(MacroAssembler* masm) {
-  __ Push(StoreDescriptor::ReceiverRegister(), StoreDescriptor::NameRegister(),
-          StoreDescriptor::ValueRegister(),
+  __ Push(StoreWithVectorDescriptor::ValueRegister(),
           StoreWithVectorDescriptor::SlotRegister(),
-          StoreWithVectorDescriptor::VectorRegister());
+          StoreWithVectorDescriptor::VectorRegister(),
+          StoreWithVectorDescriptor::ReceiverRegister(),
+          StoreWithVectorDescriptor::NameRegister());
 }
 
 
@@ -723,6 +727,14 @@
   __ TailCallRuntime(Runtime::kKeyedStoreIC_Miss);
 }
 
+void KeyedStoreIC::GenerateSlow(MacroAssembler* masm) {
+  StoreIC_PushArgs(masm);
+
+  // The slow case calls into the runtime to complete the store without causing
+  // an IC miss that would otherwise cause a transition to the generic stub.
+  __ TailCallRuntime(Runtime::kKeyedStoreIC_Slow);
+}
+
 void StoreIC::GenerateMiss(MacroAssembler* masm) {
   StoreIC_PushArgs(masm);
 
@@ -748,7 +760,8 @@
   GenerateDictionaryStore(masm, &miss, dictionary, name, value, t2, t5);
   Counters* counters = masm->isolate()->counters();
   __ IncrementCounter(counters->ic_store_normal_hit(), 1, t2, t5);
-  __ Ret();
+  __ Ret(USE_DELAY_SLOT);
+  __ Move(v0, value);  // Ensure the stub returns correct value.
 
   __ bind(&miss);
   __ IncrementCounter(counters->ic_store_normal_miss(), 1, t2, t5);
diff --git a/src/ic/mips64/handler-compiler-mips64.cc b/src/ic/mips64/handler-compiler-mips64.cc
index 53b097f..2190f6d 100644
--- a/src/ic/mips64/handler-compiler-mips64.cc
+++ b/src/ic/mips64/handler-compiler-mips64.cc
@@ -107,13 +107,19 @@
 void PropertyHandlerCompiler::PushVectorAndSlot(Register vector,
                                                 Register slot) {
   MacroAssembler* masm = this->masm();
-  __ Push(vector, slot);
+  STATIC_ASSERT(LoadWithVectorDescriptor::kSlot <
+                LoadWithVectorDescriptor::kVector);
+  STATIC_ASSERT(StoreWithVectorDescriptor::kSlot <
+                StoreWithVectorDescriptor::kVector);
+  STATIC_ASSERT(StoreTransitionDescriptor::kSlot <
+                StoreTransitionDescriptor::kVector);
+  __ Push(slot, vector);
 }
 
 
 void PropertyHandlerCompiler::PopVectorAndSlot(Register vector, Register slot) {
   MacroAssembler* masm = this->masm();
-  __ Pop(vector, slot);
+  __ Pop(slot, vector);
 }
 
 
@@ -123,6 +129,13 @@
   __ Daddu(sp, sp, Operand(2 * kPointerSize));
 }
 
+void PropertyHandlerCompiler::PushReturnAddress(Register tmp) {
+  // No-op. Return address is in ra register.
+}
+
+void PropertyHandlerCompiler::PopReturnAddress(Register tmp) {
+  // No-op. Return address is in ra register.
+}
 
 void PropertyHandlerCompiler::GenerateDictionaryNegativeLookup(
     MacroAssembler* masm, Label* miss_label, Register receiver,
@@ -317,24 +330,6 @@
   __ TailCallStub(&stub);
 }
 
-
-static void StoreIC_PushArgs(MacroAssembler* masm) {
-  __ Push(StoreDescriptor::ReceiverRegister(), StoreDescriptor::NameRegister(),
-          StoreDescriptor::ValueRegister(),
-          StoreWithVectorDescriptor::SlotRegister(),
-          StoreWithVectorDescriptor::VectorRegister());
-}
-
-
-void ElementHandlerCompiler::GenerateStoreSlow(MacroAssembler* masm) {
-  StoreIC_PushArgs(masm);
-
-  // The slow case calls into the runtime to complete the store without causing
-  // an IC miss that would otherwise cause a transition to the generic stub.
-  __ TailCallRuntime(Runtime::kKeyedStoreIC_Slow);
-}
-
-
 #undef __
 #define __ ACCESS_MASM(masm())
 
@@ -353,12 +348,6 @@
 }
 
 
-void NamedStoreHandlerCompiler::RearrangeVectorAndSlot(
-    Register current_map, Register destination_map) {
-  DCHECK(false);  // Not implemented.
-}
-
-
 void NamedStoreHandlerCompiler::GenerateRestoreMap(Handle<Map> transition,
                                                    Register map_reg,
                                                    Register scratch,
@@ -615,6 +604,9 @@
   __ TailCallRuntime(Runtime::kLoadPropertyWithInterceptor);
 }
 
+void NamedStoreHandlerCompiler::ZapStackArgumentsRegisterAliases() {
+  STATIC_ASSERT(!StoreWithVectorDescriptor::kPassLastArgsOnStack);
+}
 
 Handle<Code> NamedStoreHandlerCompiler::CompileStoreCallback(
     Handle<JSObject> object, Handle<Name> name, Handle<AccessorInfo> callback,
@@ -675,7 +667,7 @@
     DiscardVectorAndSlot();
   }
   __ Ret(USE_DELAY_SLOT);
-  __ mov(v0, result);
+  __ Move(v0, result);  // Ensure the stub returns correct value.
 
   FrontendFooter(name, &miss);
 
diff --git a/src/ic/mips64/ic-mips64.cc b/src/ic/mips64/ic-mips64.cc
index b551bc7..c2f3cb6 100644
--- a/src/ic/mips64/ic-mips64.cc
+++ b/src/ic/mips64/ic-mips64.cc
@@ -496,7 +496,8 @@
   __ SmiScale(scratch, key, kPointerSizeLog2);
   __ Daddu(address, address, scratch);
   __ sd(value, MemOperand(address));
-  __ Ret();
+  __ Ret(USE_DELAY_SLOT);
+  __ Move(v0, value);  // Ensure the stub returns correct value.
 
   __ bind(&non_smi_value);
   // Escape to elements kind transition case.
@@ -518,7 +519,8 @@
   __ mov(scratch, value);  // Preserve the value which is returned.
   __ RecordWrite(elements, address, scratch, kRAHasNotBeenSaved,
                  kDontSaveFPRegs, EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
-  __ Ret();
+  __ Ret(USE_DELAY_SLOT);
+  __ Move(v0, value);  // Ensure the stub returns correct value.
 
   __ bind(fast_double);
   if (check_map == kCheckMap) {
@@ -549,7 +551,8 @@
     __ Daddu(scratch, key, Operand(Smi::FromInt(1)));
     __ sd(scratch, FieldMemOperand(receiver, JSArray::kLengthOffset));
   }
-  __ Ret();
+  __ Ret(USE_DELAY_SLOT);
+  __ Move(v0, value);  // Ensure the stub returns correct value.
 
   __ bind(&transition_smi_elements);
   // Transition the array appropriately depending on the value type.
@@ -714,10 +717,11 @@
 
 
 static void StoreIC_PushArgs(MacroAssembler* masm) {
-  __ Push(StoreDescriptor::ReceiverRegister(), StoreDescriptor::NameRegister(),
-          StoreDescriptor::ValueRegister(),
+  __ Push(StoreWithVectorDescriptor::ValueRegister(),
           StoreWithVectorDescriptor::SlotRegister(),
-          StoreWithVectorDescriptor::VectorRegister());
+          StoreWithVectorDescriptor::VectorRegister(),
+          StoreWithVectorDescriptor::ReceiverRegister(),
+          StoreWithVectorDescriptor::NameRegister());
 }
 
 
@@ -727,6 +731,14 @@
   __ TailCallRuntime(Runtime::kKeyedStoreIC_Miss);
 }
 
+void KeyedStoreIC::GenerateSlow(MacroAssembler* masm) {
+  StoreIC_PushArgs(masm);
+
+  // The slow case calls into the runtime to complete the store without causing
+  // an IC miss that would otherwise cause a transition to the generic stub.
+  __ TailCallRuntime(Runtime::kKeyedStoreIC_Slow);
+}
+
 void StoreIC::GenerateMiss(MacroAssembler* masm) {
   StoreIC_PushArgs(masm);
 
@@ -750,7 +762,8 @@
   GenerateDictionaryStore(masm, &miss, dictionary, name, value, a6, a7);
   Counters* counters = masm->isolate()->counters();
   __ IncrementCounter(counters->ic_store_normal_hit(), 1, a6, a7);
-  __ Ret();
+  __ Ret(USE_DELAY_SLOT);
+  __ Move(v0, value);  // Ensure the stub returns correct value.
 
   __ bind(&miss);
   __ IncrementCounter(counters->ic_store_normal_miss(), 1, a6, a7);
diff --git a/src/ic/ppc/handler-compiler-ppc.cc b/src/ic/ppc/handler-compiler-ppc.cc
index 22c0608..aafdc77 100644
--- a/src/ic/ppc/handler-compiler-ppc.cc
+++ b/src/ic/ppc/handler-compiler-ppc.cc
@@ -108,13 +108,19 @@
 void PropertyHandlerCompiler::PushVectorAndSlot(Register vector,
                                                 Register slot) {
   MacroAssembler* masm = this->masm();
-  __ Push(vector, slot);
+  STATIC_ASSERT(LoadWithVectorDescriptor::kSlot <
+                LoadWithVectorDescriptor::kVector);
+  STATIC_ASSERT(StoreWithVectorDescriptor::kSlot <
+                StoreWithVectorDescriptor::kVector);
+  STATIC_ASSERT(StoreTransitionDescriptor::kSlot <
+                StoreTransitionDescriptor::kVector);
+  __ Push(slot, vector);
 }
 
 
 void PropertyHandlerCompiler::PopVectorAndSlot(Register vector, Register slot) {
   MacroAssembler* masm = this->masm();
-  __ Pop(vector, slot);
+  __ Pop(slot, vector);
 }
 
 
@@ -124,6 +130,13 @@
   __ addi(sp, sp, Operand(2 * kPointerSize));
 }
 
+void PropertyHandlerCompiler::PushReturnAddress(Register tmp) {
+  // No-op. Return address is in lr register.
+}
+
+void PropertyHandlerCompiler::PopReturnAddress(Register tmp) {
+  // No-op. Return address is in lr register.
+}
 
 void PropertyHandlerCompiler::GenerateDictionaryNegativeLookup(
     MacroAssembler* masm, Label* miss_label, Register receiver,
@@ -325,24 +338,6 @@
   __ TailCallStub(&stub);
 }
 
-
-static void StoreIC_PushArgs(MacroAssembler* masm) {
-  __ Push(StoreDescriptor::ReceiverRegister(), StoreDescriptor::NameRegister(),
-          StoreDescriptor::ValueRegister(),
-          StoreWithVectorDescriptor::SlotRegister(),
-          StoreWithVectorDescriptor::VectorRegister());
-}
-
-
-void ElementHandlerCompiler::GenerateStoreSlow(MacroAssembler* masm) {
-  StoreIC_PushArgs(masm);
-
-  // The slow case calls into the runtime to complete the store without causing
-  // an IC miss that would otherwise cause a transition to the generic stub.
-  __ TailCallRuntime(Runtime::kKeyedStoreIC_Slow);
-}
-
-
 #undef __
 #define __ ACCESS_MASM(masm())
 
@@ -361,12 +356,6 @@
 }
 
 
-void NamedStoreHandlerCompiler::RearrangeVectorAndSlot(
-    Register current_map, Register destination_map) {
-  DCHECK(false);  // Not implemented.
-}
-
-
 void NamedStoreHandlerCompiler::GenerateRestoreMap(Handle<Map> transition,
                                                    Register map_reg,
                                                    Register scratch,
@@ -624,6 +613,9 @@
   __ TailCallRuntime(Runtime::kLoadPropertyWithInterceptor);
 }
 
+void NamedStoreHandlerCompiler::ZapStackArgumentsRegisterAliases() {
+  STATIC_ASSERT(!StoreWithVectorDescriptor::kPassLastArgsOnStack);
+}
 
 Handle<Code> NamedStoreHandlerCompiler::CompileStoreCallback(
     Handle<JSObject> object, Handle<Name> name, Handle<AccessorInfo> callback,
diff --git a/src/ic/ppc/ic-ppc.cc b/src/ic/ppc/ic-ppc.cc
index fd2962d..6dd7881 100644
--- a/src/ic/ppc/ic-ppc.cc
+++ b/src/ic/ppc/ic-ppc.cc
@@ -451,10 +451,11 @@
 
 
 static void StoreIC_PushArgs(MacroAssembler* masm) {
-  __ Push(StoreDescriptor::ReceiverRegister(), StoreDescriptor::NameRegister(),
-          StoreDescriptor::ValueRegister(),
+  __ Push(StoreWithVectorDescriptor::ValueRegister(),
           StoreWithVectorDescriptor::SlotRegister(),
-          StoreWithVectorDescriptor::VectorRegister());
+          StoreWithVectorDescriptor::VectorRegister(),
+          StoreWithVectorDescriptor::ReceiverRegister(),
+          StoreWithVectorDescriptor::NameRegister());
 }
 
 
@@ -464,6 +465,13 @@
   __ TailCallRuntime(Runtime::kKeyedStoreIC_Miss);
 }
 
+void KeyedStoreIC::GenerateSlow(MacroAssembler* masm) {
+  StoreIC_PushArgs(masm);
+
+  // The slow case calls into the runtime to complete the store without causing
+  // an IC miss that would otherwise cause a transition to the generic stub.
+  __ TailCallRuntime(Runtime::kKeyedStoreIC_Slow);
+}
 
 static void KeyedStoreGenerateMegamorphicHelper(
     MacroAssembler* masm, Label* fast_object, Label* fast_double, Label* slow,
diff --git a/src/ic/s390/handler-compiler-s390.cc b/src/ic/s390/handler-compiler-s390.cc
index b399c5a..504bace 100644
--- a/src/ic/s390/handler-compiler-s390.cc
+++ b/src/ic/s390/handler-compiler-s390.cc
@@ -105,12 +105,18 @@
 void PropertyHandlerCompiler::PushVectorAndSlot(Register vector,
                                                 Register slot) {
   MacroAssembler* masm = this->masm();
-  __ Push(vector, slot);
+  STATIC_ASSERT(LoadWithVectorDescriptor::kSlot <
+                LoadWithVectorDescriptor::kVector);
+  STATIC_ASSERT(StoreWithVectorDescriptor::kSlot <
+                StoreWithVectorDescriptor::kVector);
+  STATIC_ASSERT(StoreTransitionDescriptor::kSlot <
+                StoreTransitionDescriptor::kVector);
+  __ Push(slot, vector);
 }
 
 void PropertyHandlerCompiler::PopVectorAndSlot(Register vector, Register slot) {
   MacroAssembler* masm = this->masm();
-  __ Pop(vector, slot);
+  __ Pop(slot, vector);
 }
 
 void PropertyHandlerCompiler::DiscardVectorAndSlot() {
@@ -119,6 +125,14 @@
   __ la(sp, MemOperand(sp, 2 * kPointerSize));
 }
 
+void PropertyHandlerCompiler::PushReturnAddress(Register tmp) {
+  // No-op. Return address is in lr register.
+}
+
+void PropertyHandlerCompiler::PopReturnAddress(Register tmp) {
+  // No-op. Return address is in lr register.
+}
+
 void PropertyHandlerCompiler::GenerateDictionaryNegativeLookup(
     MacroAssembler* masm, Label* miss_label, Register receiver,
     Handle<Name> name, Register scratch0, Register scratch1) {
@@ -310,21 +324,6 @@
   __ TailCallStub(&stub);
 }
 
-static void StoreIC_PushArgs(MacroAssembler* masm) {
-  __ Push(StoreDescriptor::ReceiverRegister(), StoreDescriptor::NameRegister(),
-          StoreDescriptor::ValueRegister(),
-          StoreWithVectorDescriptor::SlotRegister(),
-          StoreWithVectorDescriptor::VectorRegister());
-}
-
-void ElementHandlerCompiler::GenerateStoreSlow(MacroAssembler* masm) {
-  StoreIC_PushArgs(masm);
-
-  // The slow case calls into the runtime to complete the store without causing
-  // an IC miss that would otherwise cause a transition to the generic stub.
-  __ TailCallRuntime(Runtime::kKeyedStoreIC_Slow);
-}
-
 #undef __
 #define __ ACCESS_MASM(masm())
 
@@ -340,11 +339,6 @@
   __ mov(this->name(), Operand(name));
 }
 
-void NamedStoreHandlerCompiler::RearrangeVectorAndSlot(
-    Register current_map, Register destination_map) {
-  DCHECK(false);  // Not implemented.
-}
-
 void NamedStoreHandlerCompiler::GenerateRestoreMap(Handle<Map> transition,
                                                    Register map_reg,
                                                    Register scratch,
@@ -593,6 +587,10 @@
   __ TailCallRuntime(Runtime::kLoadPropertyWithInterceptor);
 }
 
+void NamedStoreHandlerCompiler::ZapStackArgumentsRegisterAliases() {
+  STATIC_ASSERT(!StoreWithVectorDescriptor::kPassLastArgsOnStack);
+}
+
 Handle<Code> NamedStoreHandlerCompiler::CompileStoreCallback(
     Handle<JSObject> object, Handle<Name> name, Handle<AccessorInfo> callback,
     LanguageMode language_mode) {
diff --git a/src/ic/s390/ic-s390.cc b/src/ic/s390/ic-s390.cc
index 6bb484a..08eb3e4 100644
--- a/src/ic/s390/ic-s390.cc
+++ b/src/ic/s390/ic-s390.cc
@@ -437,10 +437,11 @@
 }
 
 static void StoreIC_PushArgs(MacroAssembler* masm) {
-  __ Push(StoreDescriptor::ReceiverRegister(), StoreDescriptor::NameRegister(),
-          StoreDescriptor::ValueRegister(),
+  __ Push(StoreWithVectorDescriptor::ValueRegister(),
           StoreWithVectorDescriptor::SlotRegister(),
-          StoreWithVectorDescriptor::VectorRegister());
+          StoreWithVectorDescriptor::VectorRegister(),
+          StoreWithVectorDescriptor::ReceiverRegister(),
+          StoreWithVectorDescriptor::NameRegister());
 }
 
 void KeyedStoreIC::GenerateMiss(MacroAssembler* masm) {
@@ -449,6 +450,14 @@
   __ TailCallRuntime(Runtime::kKeyedStoreIC_Miss);
 }
 
+void KeyedStoreIC::GenerateSlow(MacroAssembler* masm) {
+  StoreIC_PushArgs(masm);
+
+  // The slow case calls into the runtime to complete the store without causing
+  // an IC miss that would otherwise cause a transition to the generic stub.
+  __ TailCallRuntime(Runtime::kKeyedStoreIC_Slow);
+}
+
 static void KeyedStoreGenerateMegamorphicHelper(
     MacroAssembler* masm, Label* fast_object, Label* fast_double, Label* slow,
     KeyedStoreCheckMap check_map, KeyedStoreIncrementLength increment_length,
diff --git a/src/ic/stub-cache.cc b/src/ic/stub-cache.cc
index 31d7e2e..fe1adaa 100644
--- a/src/ic/stub-cache.cc
+++ b/src/ic/stub-cache.cc
@@ -4,6 +4,7 @@
 
 #include "src/ic/stub-cache.h"
 
+#include "src/ast/ast.h"
 #include "src/base/bits.h"
 #include "src/type-info.h"
 
diff --git a/src/ic/stub-cache.h b/src/ic/stub-cache.h
index a053555..ebcff44 100644
--- a/src/ic/stub-cache.h
+++ b/src/ic/stub-cache.h
@@ -10,6 +10,7 @@
 namespace v8 {
 namespace internal {
 
+class SmallMapList;
 
 // The stub cache is used for megamorphic property accesses.
 // It maps (map, name, type) to property access handlers. The cache does not
diff --git a/src/ic/x64/handler-compiler-x64.cc b/src/ic/x64/handler-compiler-x64.cc
index ba4daed..f386fc5 100644
--- a/src/ic/x64/handler-compiler-x64.cc
+++ b/src/ic/x64/handler-compiler-x64.cc
@@ -20,15 +20,21 @@
 void PropertyHandlerCompiler::PushVectorAndSlot(Register vector,
                                                 Register slot) {
   MacroAssembler* masm = this->masm();
-  __ Push(vector);
+  STATIC_ASSERT(LoadWithVectorDescriptor::kSlot <
+                LoadWithVectorDescriptor::kVector);
+  STATIC_ASSERT(StoreWithVectorDescriptor::kSlot <
+                StoreWithVectorDescriptor::kVector);
+  STATIC_ASSERT(StoreTransitionDescriptor::kSlot <
+                StoreTransitionDescriptor::kVector);
   __ Push(slot);
+  __ Push(vector);
 }
 
 
 void PropertyHandlerCompiler::PopVectorAndSlot(Register vector, Register slot) {
   MacroAssembler* masm = this->masm();
-  __ Pop(slot);
   __ Pop(vector);
+  __ Pop(slot);
 }
 
 
@@ -38,6 +44,15 @@
   __ addp(rsp, Immediate(2 * kPointerSize));
 }
 
+void PropertyHandlerCompiler::PushReturnAddress(Register tmp) {
+  MacroAssembler* masm = this->masm();
+  __ Push(tmp);
+}
+
+void PropertyHandlerCompiler::PopReturnAddress(Register tmp) {
+  MacroAssembler* masm = this->masm();
+  __ Pop(tmp);
+}
 
 void PropertyHandlerCompiler::GenerateDictionaryNegativeLookup(
     MacroAssembler* masm, Label* miss_label, Register receiver,
@@ -321,34 +336,6 @@
   __ ret(0);
 }
 
-
-static void StoreIC_PushArgs(MacroAssembler* masm) {
-  Register receiver = StoreDescriptor::ReceiverRegister();
-  Register name = StoreDescriptor::NameRegister();
-  Register value = StoreDescriptor::ValueRegister();
-
-  Register slot = StoreWithVectorDescriptor::SlotRegister();
-  Register vector = StoreWithVectorDescriptor::VectorRegister();
-
-  __ PopReturnAddressTo(r11);
-  __ Push(receiver);
-  __ Push(name);
-  __ Push(value);
-  __ Push(slot);
-  __ Push(vector);
-  __ PushReturnAddressFrom(r11);
-}
-
-
-void ElementHandlerCompiler::GenerateStoreSlow(MacroAssembler* masm) {
-  // Return address is on the stack.
-  StoreIC_PushArgs(masm);
-
-  // Do tail-call to runtime routine.
-  __ TailCallRuntime(Runtime::kKeyedStoreIC_Slow);
-}
-
-
 #undef __
 #define __ ACCESS_MASM((masm()))
 
@@ -367,12 +354,6 @@
 }
 
 
-void NamedStoreHandlerCompiler::RearrangeVectorAndSlot(
-    Register current_map, Register destination_map) {
-  DCHECK(false);  // Not implemented.
-}
-
-
 void NamedStoreHandlerCompiler::GenerateRestoreMap(Handle<Map> transition,
                                                    Register map_reg,
                                                    Register scratch,
@@ -638,6 +619,9 @@
   __ TailCallRuntime(Runtime::kLoadPropertyWithInterceptor);
 }
 
+void NamedStoreHandlerCompiler::ZapStackArgumentsRegisterAliases() {
+  STATIC_ASSERT(!StoreWithVectorDescriptor::kPassLastArgsOnStack);
+}
 
 Handle<Code> NamedStoreHandlerCompiler::CompileStoreCallback(
     Handle<JSObject> object, Handle<Name> name, Handle<AccessorInfo> callback,
diff --git a/src/ic/x64/ic-x64.cc b/src/ic/x64/ic-x64.cc
index 21a1148..d0445a2 100644
--- a/src/ic/x64/ic-x64.cc
+++ b/src/ic/x64/ic-x64.cc
@@ -706,21 +706,20 @@
 }
 
 static void StoreIC_PushArgs(MacroAssembler* masm) {
-  Register receiver = StoreDescriptor::ReceiverRegister();
-  Register name = StoreDescriptor::NameRegister();
-  Register value = StoreDescriptor::ValueRegister();
-  Register temp = r11;
-  DCHECK(!temp.is(receiver) && !temp.is(name) && !temp.is(value));
-
-  __ PopReturnAddressTo(temp);
-  __ Push(receiver);
-  __ Push(name);
-  __ Push(value);
+  Register receiver = StoreWithVectorDescriptor::ReceiverRegister();
+  Register name = StoreWithVectorDescriptor::NameRegister();
+  Register value = StoreWithVectorDescriptor::ValueRegister();
   Register slot = StoreWithVectorDescriptor::SlotRegister();
   Register vector = StoreWithVectorDescriptor::VectorRegister();
-  DCHECK(!temp.is(slot) && !temp.is(vector));
+  Register temp = r11;
+  DCHECK(!AreAliased(receiver, name, value, slot, vector, temp));
+
+  __ PopReturnAddressTo(temp);
+  __ Push(value);
   __ Push(slot);
   __ Push(vector);
+  __ Push(receiver);
+  __ Push(name);
   __ PushReturnAddressFrom(temp);
 }
 
@@ -764,6 +763,13 @@
   __ TailCallRuntime(Runtime::kKeyedStoreIC_Miss);
 }
 
+void KeyedStoreIC::GenerateSlow(MacroAssembler* masm) {
+  // Return address is on the stack.
+  StoreIC_PushArgs(masm);
+
+  // Do tail-call to runtime routine.
+  __ TailCallRuntime(Runtime::kKeyedStoreIC_Slow);
+}
 
 #undef __
 
diff --git a/src/ic/x87/handler-compiler-x87.cc b/src/ic/x87/handler-compiler-x87.cc
index 4bf0af2..5eca3dc 100644
--- a/src/ic/x87/handler-compiler-x87.cc
+++ b/src/ic/x87/handler-compiler-x87.cc
@@ -59,15 +59,21 @@
 void PropertyHandlerCompiler::PushVectorAndSlot(Register vector,
                                                 Register slot) {
   MacroAssembler* masm = this->masm();
-  __ push(vector);
+  STATIC_ASSERT(LoadWithVectorDescriptor::kSlot <
+                LoadWithVectorDescriptor::kVector);
+  STATIC_ASSERT(StoreWithVectorDescriptor::kSlot <
+                StoreWithVectorDescriptor::kVector);
+  STATIC_ASSERT(StoreTransitionDescriptor::kSlot <
+                StoreTransitionDescriptor::kVector);
   __ push(slot);
+  __ push(vector);
 }
 
 
 void PropertyHandlerCompiler::PopVectorAndSlot(Register vector, Register slot) {
   MacroAssembler* masm = this->masm();
-  __ pop(slot);
   __ pop(vector);
+  __ pop(slot);
 }
 
 
@@ -77,6 +83,15 @@
   __ add(esp, Immediate(2 * kPointerSize));
 }
 
+void PropertyHandlerCompiler::PushReturnAddress(Register tmp) {
+  MacroAssembler* masm = this->masm();
+  __ push(tmp);
+}
+
+void PropertyHandlerCompiler::PopReturnAddress(Register tmp) {
+  MacroAssembler* masm = this->masm();
+  __ pop(tmp);
+}
 
 void PropertyHandlerCompiler::GenerateDictionaryNegativeLookup(
     MacroAssembler* masm, Label* miss_label, Register receiver,
@@ -150,12 +165,16 @@
   DCHECK(!accessor_holder.is(scratch));
   // Copy return value.
   __ pop(scratch);
-  // receiver
-  __ push(receiver);
-  // Write the arguments to stack frame.
+
   if (is_store) {
-    DCHECK(!receiver.is(store_parameter));
-    DCHECK(!scratch.is(store_parameter));
+    // Discard stack arguments.
+    __ add(esp, Immediate(StoreWithVectorDescriptor::kStackArgumentsCount *
+                          kPointerSize));
+  }
+  // Write the receiver and arguments to stack frame.
+  __ push(receiver);
+  if (is_store) {
+    DCHECK(!AreAliased(receiver, scratch, store_parameter));
     __ push(store_parameter);
   }
   __ push(scratch);
@@ -252,8 +271,13 @@
     MacroAssembler* masm, Handle<Map> map, Register receiver, Register holder,
     int accessor_index, int expected_arguments, Register scratch) {
   // ----------- S t a t e -------------
-  //  -- esp[0] : return address
+  //  -- esp[12] : value
+  //  -- esp[8]  : slot
+  //  -- esp[4]  : vector
+  //  -- esp[0]  : return address
   // -----------------------------------
+  __ LoadParameterFromStack<Descriptor>(value(), Descriptor::kValue);
+
   {
     FrameScope scope(masm, StackFrame::INTERNAL);
 
@@ -290,7 +314,14 @@
     // Restore context register.
     __ pop(esi);
   }
-  __ ret(0);
+  if (accessor_index >= 0) {
+    __ ret(StoreWithVectorDescriptor::kStackArgumentsCount * kPointerSize);
+  } else {
+    // If we generate a global code snippet for deoptimization only, don't try
+    // to drop stack arguments for the StoreIC because they are not a part of
+    // expression stack and deoptimizer does not reconstruct them.
+    __ ret(0);
+  }
 }
 
 
@@ -316,32 +347,6 @@
   __ CallRuntime(id);
 }
 
-
-static void StoreIC_PushArgs(MacroAssembler* masm) {
-  Register receiver = StoreDescriptor::ReceiverRegister();
-  Register name = StoreDescriptor::NameRegister();
-  Register value = StoreDescriptor::ValueRegister();
-  Register slot = StoreWithVectorDescriptor::SlotRegister();
-  Register vector = StoreWithVectorDescriptor::VectorRegister();
-
-  __ xchg(receiver, Operand(esp, 0));
-  __ push(name);
-  __ push(value);
-  __ push(slot);
-  __ push(vector);
-  __ push(receiver);  // which contains the return address.
-}
-
-
-void ElementHandlerCompiler::GenerateStoreSlow(MacroAssembler* masm) {
-  // Return address is on the stack.
-  StoreIC_PushArgs(masm);
-
-  // Do tail-call to runtime routine.
-  __ TailCallRuntime(Runtime::kKeyedStoreIC_Slow);
-}
-
-
 #undef __
 #define __ ACCESS_MASM(masm())
 
@@ -360,19 +365,6 @@
 }
 
 
-void NamedStoreHandlerCompiler::RearrangeVectorAndSlot(
-    Register current_map, Register destination_map) {
-  DCHECK(destination_map.is(StoreTransitionHelper::MapRegister()));
-  DCHECK(current_map.is(StoreTransitionHelper::VectorRegister()));
-  ExternalReference virtual_slot =
-      ExternalReference::virtual_slot_register(isolate());
-  __ mov(destination_map, current_map);
-  __ pop(current_map);
-  __ mov(Operand::StaticVariable(virtual_slot), current_map);
-  __ pop(current_map);  // put vector in place.
-}
-
-
 void NamedStoreHandlerCompiler::GenerateRestoreMap(Handle<Map> transition,
                                                    Register map_reg,
                                                    Register scratch,
@@ -532,7 +524,7 @@
     Label success;
     __ jmp(&success);
     __ bind(miss);
-    if (IC::ICUseVector(kind())) {
+    if (IC::ShouldPushPopSlotAndVector(kind())) {
       DCHECK(kind() == Code::LOAD_IC);
       PopVectorAndSlot();
     }
@@ -547,7 +539,7 @@
     Label success;
     __ jmp(&success);
     GenerateRestoreName(miss, name);
-    if (IC::ICUseVector(kind())) PopVectorAndSlot();
+    DCHECK(!IC::ShouldPushPopSlotAndVector(kind()));
     TailCallBuiltin(masm(), MissBuiltin(kind()));
     __ bind(&success);
   }
@@ -641,13 +633,26 @@
   __ TailCallRuntime(Runtime::kLoadPropertyWithInterceptor);
 }
 
+void NamedStoreHandlerCompiler::ZapStackArgumentsRegisterAliases() {
+  // Zap register aliases of the arguments passed on the stack to ensure they
+  // are properly loaded by the handler (debug-only).
+  STATIC_ASSERT(Descriptor::kPassLastArgsOnStack);
+  STATIC_ASSERT(Descriptor::kStackArgumentsCount == 3);
+  __ mov(Descriptor::ValueRegister(), Immediate(kDebugZapValue));
+  __ mov(Descriptor::SlotRegister(), Immediate(kDebugZapValue));
+  __ mov(Descriptor::VectorRegister(), Immediate(kDebugZapValue));
+}
 
 Handle<Code> NamedStoreHandlerCompiler::CompileStoreCallback(
     Handle<JSObject> object, Handle<Name> name, Handle<AccessorInfo> callback,
     LanguageMode language_mode) {
   Register holder_reg = Frontend(name);
+  __ LoadParameterFromStack<Descriptor>(value(), Descriptor::kValue);
 
   __ pop(scratch1());  // remove the return address
+  // Discard stack arguments.
+  __ add(esp, Immediate(StoreWithVectorDescriptor::kStackArgumentsCount *
+                        kPointerSize));
   __ push(receiver());
   __ push(holder_reg);
   // If the callback cannot leak, then push the callback directly,
@@ -679,7 +684,7 @@
 Handle<Code> NamedLoadHandlerCompiler::CompileLoadGlobal(
     Handle<PropertyCell> cell, Handle<Name> name, bool is_configurable) {
   Label miss;
-  if (IC::ICUseVector(kind())) {
+  if (IC::ShouldPushPopSlotAndVector(kind())) {
     PushVectorAndSlot();
   }
   FrontendHeader(receiver(), name, &miss, DONT_RETURN_ANYTHING);
@@ -701,7 +706,7 @@
   Counters* counters = isolate()->counters();
   __ IncrementCounter(counters->ic_named_load_global_stub(), 1);
   // The code above already loads the result into the return register.
-  if (IC::ICUseVector(kind())) {
+  if (IC::ShouldPushPopSlotAndVector(kind())) {
     DiscardVectorAndSlot();
   }
   __ ret(0);
diff --git a/src/ic/x87/ic-compiler-x87.cc b/src/ic/x87/ic-compiler-x87.cc
index 9edf63b..11a8cdc 100644
--- a/src/ic/x87/ic-compiler-x87.cc
+++ b/src/ic/x87/ic-compiler-x87.cc
@@ -15,14 +15,21 @@
 
 void PropertyICCompiler::GenerateRuntimeSetProperty(
     MacroAssembler* masm, LanguageMode language_mode) {
-  // Return address is on the stack.
-  DCHECK(!ebx.is(StoreDescriptor::ReceiverRegister()) &&
-         !ebx.is(StoreDescriptor::NameRegister()) &&
-         !ebx.is(StoreDescriptor::ValueRegister()));
+  typedef StoreWithVectorDescriptor Descriptor;
+  STATIC_ASSERT(Descriptor::kStackArgumentsCount == 3);
+  // ----------- S t a t e -------------
+  //  -- esp[12] : value
+  //  -- esp[8]  : slot
+  //  -- esp[4]  : vector
+  //  -- esp[0]  : return address
+  // -----------------------------------
+  __ LoadParameterFromStack<Descriptor>(Descriptor::ValueRegister(),
+                                        Descriptor::kValue);
+
+  __ mov(Operand(esp, 12), Descriptor::ReceiverRegister());
+  __ mov(Operand(esp, 8), Descriptor::NameRegister());
+  __ mov(Operand(esp, 4), Descriptor::ValueRegister());
   __ pop(ebx);
-  __ push(StoreDescriptor::ReceiverRegister());
-  __ push(StoreDescriptor::NameRegister());
-  __ push(StoreDescriptor::ValueRegister());
   __ push(Immediate(Smi::FromInt(language_mode)));
   __ push(ebx);  // return address
 
diff --git a/src/ic/x87/ic-x87.cc b/src/ic/x87/ic-x87.cc
index 76933f0..baf435e 100644
--- a/src/ic/x87/ic-x87.cc
+++ b/src/ic/x87/ic-x87.cc
@@ -409,7 +409,7 @@
   }
   // It's irrelevant whether array is smi-only or not when writing a smi.
   __ mov(FixedArrayElementOperand(ebx, key), value);
-  __ ret(0);
+  __ ret(StoreWithVectorDescriptor::kStackArgumentsCount * kPointerSize);
 
   __ bind(&non_smi_value);
   // Escape to elements kind transition case.
@@ -428,7 +428,7 @@
   __ mov(edx, value);  // Preserve the value which is returned.
   __ RecordWriteArray(ebx, edx, key, kDontSaveFPRegs, EMIT_REMEMBERED_SET,
                       OMIT_SMI_CHECK);
-  __ ret(0);
+  __ ret(StoreWithVectorDescriptor::kStackArgumentsCount * kPointerSize);
 
   __ bind(fast_double);
   if (check_map == kCheckMap) {
@@ -457,7 +457,7 @@
     __ add(FieldOperand(receiver, JSArray::kLengthOffset),
            Immediate(Smi::FromInt(1)));
   }
-  __ ret(0);
+  __ ret(StoreWithVectorDescriptor::kStackArgumentsCount * kPointerSize);
 
   __ bind(&transition_smi_elements);
   __ mov(ebx, FieldOperand(receiver, HeapObject::kMapOffset));
@@ -504,12 +504,13 @@
 
 void KeyedStoreIC::GenerateMegamorphic(MacroAssembler* masm,
                                        LanguageMode language_mode) {
+  typedef StoreWithVectorDescriptor Descriptor;
   // Return address is on the stack.
   Label slow, fast_object, fast_object_grow;
   Label fast_double, fast_double_grow;
   Label array, extra, check_if_double_array, maybe_name_key, miss;
-  Register receiver = StoreDescriptor::ReceiverRegister();
-  Register key = StoreDescriptor::NameRegister();
+  Register receiver = Descriptor::ReceiverRegister();
+  Register key = Descriptor::NameRegister();
   DCHECK(receiver.is(edx));
   DCHECK(key.is(ecx));
 
@@ -522,6 +523,10 @@
   __ test_b(FieldOperand(edi, Map::kBitFieldOffset),
             Immediate(1 << Map::kIsAccessCheckNeeded));
   __ j(not_zero, &slow);
+
+  __ LoadParameterFromStack<Descriptor>(Descriptor::ValueRegister(),
+                                        Descriptor::kValue);
+
   // Check that the key is a smi.
   __ JumpIfNotSmi(key, &maybe_name_key);
   __ CmpInstanceType(edi, JS_ARRAY_TYPE);
@@ -551,22 +556,9 @@
   __ movzx_b(ebx, FieldOperand(ebx, Map::kInstanceTypeOffset));
   __ JumpIfNotUniqueNameInstanceType(ebx, &slow);
 
-
-  // The handlers in the stub cache expect a vector and slot. Since we won't
-  // change the IC from any downstream misses, a dummy vector can be used.
-  Handle<TypeFeedbackVector> dummy_vector =
-      TypeFeedbackVector::DummyVector(masm->isolate());
-  int slot = dummy_vector->GetIndex(
-      FeedbackVectorSlot(TypeFeedbackVector::kDummyKeyedStoreICSlot));
-  __ push(Immediate(Smi::FromInt(slot)));
-  __ push(Immediate(dummy_vector));
-
   masm->isolate()->store_stub_cache()->GenerateProbe(masm, receiver, key, edi,
                                                      no_reg);
 
-  __ pop(StoreWithVectorDescriptor::VectorRegister());
-  __ pop(StoreWithVectorDescriptor::SlotRegister());
-
   // Cache miss.
   __ jmp(&miss);
 
@@ -705,18 +697,21 @@
 }
 
 static void StoreIC_PushArgs(MacroAssembler* masm) {
-  Register receiver = StoreDescriptor::ReceiverRegister();
-  Register name = StoreDescriptor::NameRegister();
-  Register value = StoreDescriptor::ValueRegister();
-  Register slot = StoreWithVectorDescriptor::SlotRegister();
-  Register vector = StoreWithVectorDescriptor::VectorRegister();
+  Register receiver = StoreWithVectorDescriptor::ReceiverRegister();
+  Register name = StoreWithVectorDescriptor::NameRegister();
 
-  __ xchg(receiver, Operand(esp, 0));
+  STATIC_ASSERT(StoreWithVectorDescriptor::kStackArgumentsCount == 3);
+  // Current stack layout:
+  // - esp[12]   -- value
+  // - esp[8]    -- slot
+  // - esp[4]    -- vector
+  // - esp[0]    -- return address
+
+  Register return_address = StoreWithVectorDescriptor::SlotRegister();
+  __ pop(return_address);
+  __ push(receiver);
   __ push(name);
-  __ push(value);
-  __ push(slot);
-  __ push(vector);
-  __ push(receiver);  // Contains the return address.
+  __ push(return_address);
 }
 
 
@@ -730,32 +725,33 @@
 
 
 void StoreIC::GenerateNormal(MacroAssembler* masm) {
+  typedef StoreWithVectorDescriptor Descriptor;
   Label restore_miss;
-  Register receiver = StoreDescriptor::ReceiverRegister();
-  Register name = StoreDescriptor::NameRegister();
-  Register value = StoreDescriptor::ValueRegister();
-  Register vector = StoreWithVectorDescriptor::VectorRegister();
-  Register slot = StoreWithVectorDescriptor::SlotRegister();
+  Register receiver = Descriptor::ReceiverRegister();
+  Register name = Descriptor::NameRegister();
+  Register value = Descriptor::ValueRegister();
+  // Since the slot and vector values are passed on the stack we can use
+  // respective registers as scratch registers.
+  Register scratch1 = Descriptor::VectorRegister();
+  Register scratch2 = Descriptor::SlotRegister();
 
-  // A lot of registers are needed for storing to slow case
-  // objects. Push and restore receiver but rely on
-  // GenerateDictionaryStore preserving the value and name.
+  __ LoadParameterFromStack<Descriptor>(value, Descriptor::kValue);
+
+  // A lot of registers are needed for storing to slow case objects.
+  // Push and restore receiver but rely on GenerateDictionaryStore preserving
+  // the value and name.
   __ push(receiver);
-  __ push(vector);
-  __ push(slot);
 
-  Register dictionary = ebx;
+  Register dictionary = receiver;
   __ mov(dictionary, FieldOperand(receiver, JSObject::kPropertiesOffset));
   GenerateDictionaryStore(masm, &restore_miss, dictionary, name, value,
-                          receiver, edi);
-  __ Drop(3);
+                          scratch1, scratch2);
+  __ Drop(1);
   Counters* counters = masm->isolate()->counters();
   __ IncrementCounter(counters->ic_store_normal_hit(), 1);
-  __ ret(0);
+  __ ret(Descriptor::kStackArgumentsCount * kPointerSize);
 
   __ bind(&restore_miss);
-  __ pop(slot);
-  __ pop(vector);
   __ pop(receiver);
   __ IncrementCounter(counters->ic_store_normal_miss(), 1);
   GenerateMiss(masm);
@@ -770,6 +766,13 @@
   __ TailCallRuntime(Runtime::kKeyedStoreIC_Miss);
 }
 
+void KeyedStoreIC::GenerateSlow(MacroAssembler* masm) {
+  // Return address is on the stack.
+  StoreIC_PushArgs(masm);
+
+  // Do tail-call to runtime routine.
+  __ TailCallRuntime(Runtime::kKeyedStoreIC_Slow);
+}
 
 #undef __
 
diff --git a/src/ic/x87/stub-cache-x87.cc b/src/ic/x87/stub-cache-x87.cc
index e0656f7..68fa615 100644
--- a/src/ic/x87/stub-cache-x87.cc
+++ b/src/ic/x87/stub-cache-x87.cc
@@ -22,8 +22,6 @@
   ExternalReference key_offset(stub_cache->key_reference(table));
   ExternalReference value_offset(stub_cache->value_reference(table));
   ExternalReference map_offset(stub_cache->map_reference(table));
-  ExternalReference virtual_register =
-      ExternalReference::virtual_handler_register(masm->isolate());
 
   Label miss;
   Code::Kind ic_kind = stub_cache->ic_kind();
@@ -55,19 +53,15 @@
     }
 #endif
 
-    // The vector and slot were pushed onto the stack before starting the
-    // probe, and need to be dropped before calling the handler.
     if (is_vector_store) {
-      // The overlap here is rather embarrassing. One does what one must.
-      Register vector = StoreWithVectorDescriptor::VectorRegister();
+      // The value, vector and slot were passed to the IC on the stack and
+      // they are still there. So we can just jump to the handler.
       DCHECK(extra.is(StoreWithVectorDescriptor::SlotRegister()));
       __ add(extra, Immediate(Code::kHeaderSize - kHeapObjectTag));
-      __ pop(vector);
-      __ mov(Operand::StaticVariable(virtual_register), extra);
-      __ pop(extra);  // Pop "slot".
-      // Jump to the first instruction in the code stub.
-      __ jmp(Operand::StaticVariable(virtual_register));
+      __ jmp(extra);
     } else {
+      // The vector and slot were pushed onto the stack before starting the
+      // probe, and need to be dropped before calling the handler.
       __ pop(LoadWithVectorDescriptor::VectorRegister());
       __ pop(LoadDescriptor::SlotRegister());
       __ add(extra, Immediate(Code::kHeaderSize - kHeapObjectTag));
@@ -110,19 +104,10 @@
 
     // Jump to the first instruction in the code stub.
     if (is_vector_store) {
-      // The vector and slot were pushed onto the stack before starting the
-      // probe, and need to be dropped before calling the handler.
-      Register vector = StoreWithVectorDescriptor::VectorRegister();
       DCHECK(offset.is(StoreWithVectorDescriptor::SlotRegister()));
-      __ add(offset, Immediate(Code::kHeaderSize - kHeapObjectTag));
-      __ mov(Operand::StaticVariable(virtual_register), offset);
-      __ pop(vector);
-      __ pop(offset);  // Pop "slot".
-      __ jmp(Operand::StaticVariable(virtual_register));
-    } else {
-      __ add(offset, Immediate(Code::kHeaderSize - kHeapObjectTag));
-      __ jmp(offset);
     }
+    __ add(offset, Immediate(Code::kHeaderSize - kHeapObjectTag));
+    __ jmp(offset);
 
     // Pop at miss.
     __ bind(&miss);
diff --git a/src/identity-map.cc b/src/identity-map.cc
index 97b70ae..58dbf6b 100644
--- a/src/identity-map.cc
+++ b/src/identity-map.cc
@@ -6,7 +6,7 @@
 
 #include "src/base/functional.h"
 #include "src/heap/heap-inl.h"
-#include "src/zone-containers.h"
+#include "src/zone/zone-containers.h"
 
 namespace v8 {
 namespace internal {
diff --git a/src/inspector/BUILD.gn b/src/inspector/BUILD.gn
index 56b96e1..15c090f 100644
--- a/src/inspector/BUILD.gn
+++ b/src/inspector/BUILD.gn
@@ -2,57 +2,98 @@
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
 
-protocol_path = "//third_party/WebKit/Source/platform/inspector_protocol"
-protocol_sources = [
-  "$target_gen_dir/Console.cpp",
-  "$target_gen_dir/Console.h",
-  "$target_gen_dir/Debugger.cpp",
-  "$target_gen_dir/Debugger.h",
-  "$target_gen_dir/HeapProfiler.cpp",
-  "$target_gen_dir/HeapProfiler.h",
-  "$target_gen_dir/Profiler.cpp",
-  "$target_gen_dir/Profiler.h",
-  "$target_gen_dir/public/Debugger.h",
-  "$target_gen_dir/public/Runtime.h",
-  "$target_gen_dir/Runtime.cpp",
-  "$target_gen_dir/Runtime.h",
+import("../../gni/v8.gni")
+
+_inspector_protocol = "//third_party/WebKit/Source/platform/inspector_protocol"
+import("$_inspector_protocol/inspector_protocol.gni")
+
+_protocol_generated = [
+  "protocol/Forward.h",
+  "protocol/Protocol.cpp",
+  "protocol/Protocol.h",
+  "protocol/Console.cpp",
+  "protocol/Console.h",
+  "protocol/Debugger.cpp",
+  "protocol/Debugger.h",
+  "protocol/HeapProfiler.cpp",
+  "protocol/HeapProfiler.h",
+  "protocol/Profiler.cpp",
+  "protocol/Profiler.h",
+  "protocol/Runtime.cpp",
+  "protocol/Runtime.h",
+  "protocol/Schema.cpp",
+  "protocol/Schema.h",
+  "../../include/inspector/Debugger.h",
+  "../../include/inspector/Runtime.h",
+  "../../include/inspector/Schema.h",
 ]
 
-action("inspector_protocol_sources") {
+action("protocol_compatibility") {
   visibility = [ ":*" ]  # Only targets in this file can depend on this.
-  script = "$protocol_path/CodeGenerator.py"
-  sources = [
-    "$protocol_path/CodeGenerator.py",
-    "$protocol_path/Exported_h.template",
-    "$protocol_path/Imported_h.template",
-    "$protocol_path/TypeBuilder_cpp.template",
-    "$protocol_path/TypeBuilder_h.template",
-  ]
+  script = "$_inspector_protocol/CheckProtocolCompatibility.py"
   inputs = [
     "js_protocol.json",
   ]
-  outputs = protocol_sources
+  _stamp = "$target_gen_dir/js_protocol.stamp"
+  outputs = [
+    _stamp,
+  ]
   args = [
-    "--protocol",
+    "--stamp",
+    rebase_path(_stamp, root_build_dir),
     rebase_path("js_protocol.json", root_build_dir),
-    "--string_type",
-    "String16",
-    "--export_macro",
-    "PLATFORM_EXPORT",
-    "--output_dir",
-    rebase_path(target_gen_dir, root_build_dir),
-    "--output_package",
-    "inspector",
-    "--exported_dir",
-    rebase_path("$target_gen_dir/public", root_build_dir),
-    "--exported_package",
-    "inspector/public",
   ]
 }
 
-config("inspector_protocol_config") {
-  include_dirs = [ "$protocol_path/../.." ]
-  defines = [ "V8_INSPECTOR_USE_STL" ]
+inspector_protocol_generate("protocol_generated_sources") {
+  visibility = [ ":*" ]  # Only targets in this file can depend on this.
+  deps = [
+    ":protocol_compatibility",
+  ]
+
+  out_dir = target_gen_dir
+  config_file = "inspector_protocol_config.json"
+  inputs = [
+    "js_protocol.json",
+    "inspector_protocol_config.json",
+  ]
+  outputs = _protocol_generated
+}
+
+action("inspector_injected_script") {
+  visibility = [ ":*" ]  # Only targets in this file can depend on this.
+  script = "build/xxd.py"
+  inputs = [
+    "injected-script-source.js",
+  ]
+  outputs = [
+    "$target_gen_dir/injected-script-source.h",
+  ]
+  args = [
+    "InjectedScriptSource_js",
+    rebase_path("injected-script-source.js", root_build_dir),
+    rebase_path("$target_gen_dir/injected-script-source.h", root_build_dir),
+  ]
+}
+
+action("inspector_debugger_script") {
+  visibility = [ ":*" ]  # Only targets in this file can depend on this.
+  script = "build/xxd.py"
+  inputs = [
+    "debugger-script.js",
+  ]
+  outputs = [
+    "$target_gen_dir/debugger-script.h",
+  ]
+  args = [
+    "DebuggerScript_js",
+    rebase_path("debugger-script.js", root_build_dir),
+    rebase_path("$target_gen_dir/debugger-script.h", root_build_dir),
+  ]
+}
+
+config("inspector_config") {
+  visibility = [ ":*" ]  # Only targets in this file can depend on this.
   cflags = []
   if (is_win) {
     cflags += [
@@ -64,38 +105,85 @@
       "/wd4996",  # Deprecated function call.
     ]
   }
+  if (is_component_build) {
+    defines = [ "BUILDING_V8_SHARED" ]
+  }
 }
 
-source_set("inspector_protocol") {
+v8_source_set("inspector") {
   deps = [
-    ":inspector_protocol_sources",
+    ":inspector_debugger_script",
+    ":inspector_injected_script",
+    ":protocol_generated_sources",
   ]
-  configs += [ ":inspector_protocol_config" ]
-  include_dirs = [ "$target_gen_dir/.." ]
-  sources = protocol_sources + [
-              "$protocol_path/Allocator.h",
-              "$protocol_path/Array.h",
-              "$protocol_path/BackendCallback.h",
-              "$protocol_path/CodeGenerator.py",
-              "$protocol_path/Collections.h",
-              "$protocol_path/DispatcherBase.cpp",
-              "$protocol_path/DispatcherBase.h",
-              "$protocol_path/ErrorSupport.cpp",
-              "$protocol_path/ErrorSupport.h",
-              "$protocol_path/FrontendChannel.h",
-              "$protocol_path/Maybe.h",
-              "$protocol_path/Object.cpp",
-              "$protocol_path/Object.h",
-              "$protocol_path/Parser.cpp",
-              "$protocol_path/Parser.h",
-              "$protocol_path/Platform.h",
-              "$protocol_path/PlatformSTL.h",
-              "$protocol_path/String16.cpp",
-              "$protocol_path/String16.h",
-              "$protocol_path/String16STL.cpp",
-              "$protocol_path/String16STL.h",
-              "$protocol_path/ValueConversions.h",
-              "$protocol_path/Values.cpp",
-              "$protocol_path/Values.h",
-            ]
+  configs = [ ":inspector_config" ]
+  include_dirs = [
+    "../..",
+    "../../include",
+    "$target_gen_dir/../..",
+    "$target_gen_dir/../../include",
+  ]
+  sources = rebase_path(_protocol_generated, ".", target_gen_dir)
+  sources += [
+    "../../include/v8-inspector-protocol.h",
+    "../../include/v8-inspector.h",
+  ]
+  sources += get_target_outputs(":inspector_injected_script")
+  sources += get_target_outputs(":inspector_debugger_script")
+  sources += [
+    "injected-script-native.cc",
+    "injected-script-native.h",
+    "injected-script.cc",
+    "injected-script.h",
+    "inspected-context.cc",
+    "inspected-context.h",
+    "java-script-call-frame.cc",
+    "java-script-call-frame.h",
+    "protocol-platform.h",
+    "remote-object-id.cc",
+    "remote-object-id.h",
+    "script-breakpoint.h",
+    "search-util.cc",
+    "search-util.h",
+    "string-16.cc",
+    "string-16.h",
+    "string-util.cc",
+    "string-util.h",
+    "v8-console-agent-impl.cc",
+    "v8-console-agent-impl.h",
+    "v8-console-message.cc",
+    "v8-console-message.h",
+    "v8-console.cc",
+    "v8-console.h",
+    "v8-debugger-agent-impl.cc",
+    "v8-debugger-agent-impl.h",
+    "v8-debugger-script.cc",
+    "v8-debugger-script.h",
+    "v8-debugger.cc",
+    "v8-debugger.h",
+    "v8-function-call.cc",
+    "v8-function-call.h",
+    "v8-heap-profiler-agent-impl.cc",
+    "v8-heap-profiler-agent-impl.h",
+    "v8-injected-script-host.cc",
+    "v8-injected-script-host.h",
+    "v8-inspector-impl.cc",
+    "v8-inspector-impl.h",
+    "v8-inspector-session-impl.cc",
+    "v8-inspector-session-impl.h",
+    "v8-internal-value-type.cc",
+    "v8-internal-value-type.h",
+    "v8-profiler-agent-impl.cc",
+    "v8-profiler-agent-impl.h",
+    "v8-regex.cc",
+    "v8-regex.h",
+    "v8-runtime-agent-impl.cc",
+    "v8-runtime-agent-impl.h",
+    "v8-schema-agent-impl.cc",
+    "v8-schema-agent-impl.h",
+    "v8-stack-trace-impl.cc",
+    "v8-stack-trace-impl.h",
+    "v8-value-copier.cc",
+    "v8-value-copier.h",
+  ]
 }
diff --git a/src/inspector/DEPS b/src/inspector/DEPS
new file mode 100644
index 0000000..4486204
--- /dev/null
+++ b/src/inspector/DEPS
@@ -0,0 +1,8 @@
+include_rules = [
+  "-src",
+  "+src/inspector",
+  "+src/base/atomicops.h",
+  "+src/base/macros.h",
+  "+src/base/logging.h",
+  "+src/base/platform/platform.h",
+]
diff --git a/src/inspector/OWNERS b/src/inspector/OWNERS
new file mode 100644
index 0000000..2c4bd8d
--- /dev/null
+++ b/src/inspector/OWNERS
@@ -0,0 +1,15 @@
+set noparent
+
+alph@chromium.org
+caseq@chromium.org
+dgozman@chromium.org
+jochen@chromium.org
+kozyatinskiy@chromium.org
+pfeldman@chromium.org
+yangguo@chromium.org
+
+# Changes to remote debugging protocol require devtools review to
+# ensure backwards compatibility and commitment to maintain.
+per-file js_protocol.json=set noparent
+per-file js_protocol.json=dgozman@chromium.org
+per-file js_protocol.json=pfeldman@chromium.org
diff --git a/src/inspector/PRESUBMIT.py b/src/inspector/PRESUBMIT.py
new file mode 100644
index 0000000..491564b
--- /dev/null
+++ b/src/inspector/PRESUBMIT.py
@@ -0,0 +1,55 @@
+#!/usr/bin/env python
+#
+# Copyright 2016 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""v8_inspect presubmit script
+
+See http://dev.chromium.org/developers/how-tos/depottools/presubmit-scripts
+for more details about the presubmit API built into gcl.
+"""
+
+compile_note = "Be sure to run your patch by the compile-scripts.py script prior to committing!"
+
+
+def _CompileScripts(input_api, output_api):
+  local_paths = [f.LocalPath() for f in input_api.AffectedFiles()]
+
+  compilation_related_files = [
+    "js_protocol.json",
+    "compile-scripts.js",
+    "injected-script-source.js",
+    "debugger_script_externs.js",
+    "injected_script_externs.js",
+    "check_injected_script_source.js",
+    "debugger-script.js"
+  ]
+
+  for file in compilation_related_files:
+    if (any(file in path for path in local_paths)):
+      script_path = input_api.os_path.join(input_api.PresubmitLocalPath(),
+        "build", "compile-scripts.py")
+      proc = input_api.subprocess.Popen(
+        [input_api.python_executable, script_path],
+        stdout=input_api.subprocess.PIPE,
+        stderr=input_api.subprocess.STDOUT)
+      out, _ = proc.communicate()
+      if "ERROR" in out or "WARNING" in out or proc.returncode:
+        return [output_api.PresubmitError(out)]
+      if "NOTE" in out:
+        return [output_api.PresubmitPromptWarning(out + compile_note)]
+      return []
+  return []
+
+
+def CheckChangeOnUpload(input_api, output_api):
+  results = []
+  results.extend(_CompileScripts(input_api, output_api))
+  return results
+
+
+def CheckChangeOnCommit(input_api, output_api):
+  results = []
+  results.extend(_CompileScripts(input_api, output_api))
+  return results
diff --git a/src/inspector/build/check_injected_script_source.py b/src/inspector/build/check_injected_script_source.py
new file mode 100644
index 0000000..0f2509c
--- /dev/null
+++ b/src/inspector/build/check_injected_script_source.py
@@ -0,0 +1,88 @@
+#!/usr/bin/env python
+# Copyright (c) 2014 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+# Copied from blink:
+# WebKit/Source/devtools/scripts/check_injected_script_source.py
+#
+
+import re
+import sys
+import os
+
+
+def validate_injected_script(fileName):
+    f = open(fileName, "r")
+    lines = f.readlines()
+    f.close()
+
+    proto_functions = "|".join([
+        # Array.prototype.*
+        "concat", "every", "filter", "forEach", "indexOf", "join", "lastIndexOf", "map", "pop",
+        "push", "reduce", "reduceRight", "reverse", "shift", "slice", "some", "sort", "splice", "toLocaleString", "toString", "unshift",
+        # Function.prototype.*
+        "apply", "bind", "call", "isGenerator", "toSource",
+        # Object.prototype.*
+        "toString",
+    ])
+
+    global_functions = "|".join([
+        "eval", "uneval", "isFinite", "isNaN", "parseFloat", "parseInt", "decodeURI", "decodeURIComponent",
+        "encodeURI", "encodeURIComponent", "escape", "unescape", "Map", "Set"
+    ])
+
+    # Black list:
+    # - instanceof, since e.g. "obj instanceof Error" may throw if Error is overridden and is not a function
+    # - Object.prototype.toString()
+    # - Array.prototype.*
+    # - Function.prototype.*
+    # - Math.*
+    # - Global functions
+    black_list_call_regex = re.compile(r"\sinstanceof\s+\w*|\bMath\.\w+\(|(?<!InjectedScriptHost)\.(" + proto_functions + r")\(|[^\.]\b(" + global_functions + r")\(")
+
+    errors_found = False
+    for i, line in enumerate(lines):
+        if line.find("suppressBlacklist") != -1:
+            continue
+        for match in re.finditer(black_list_call_regex, line):
+            errors_found = True
+            print "ERROR: Black listed expression in %s at line %02d column %02d: %s" % (os.path.basename(fileName), i + 1, match.start(), match.group(0))
+
+    if not errors_found:
+        print "OK"
+
+
+def main(argv):
+    if len(argv) < 2:
+        print('ERROR: Usage: %s path/to/injected-script-source.js' % argv[0])
+        return 1
+
+    validate_injected_script(argv[1])
+
+if __name__ == '__main__':
+    sys.exit(main(sys.argv))
diff --git a/src/inspector/build/closure-compiler.tar.gz b/src/inspector/build/closure-compiler.tar.gz
new file mode 100644
index 0000000..92087f3
--- /dev/null
+++ b/src/inspector/build/closure-compiler.tar.gz
Binary files differ
diff --git a/src/inspector/build/closure-compiler.tar.gz.sha1 b/src/inspector/build/closure-compiler.tar.gz.sha1
new file mode 100644
index 0000000..5366f51
--- /dev/null
+++ b/src/inspector/build/closure-compiler.tar.gz.sha1
@@ -0,0 +1 @@
+69937d3c239ca63e4c9045718886ddd096ffc054
\ No newline at end of file
diff --git a/src/inspector/build/closure-compiler/COPYING b/src/inspector/build/closure-compiler/COPYING
new file mode 100644
index 0000000..d645695
--- /dev/null
+++ b/src/inspector/build/closure-compiler/COPYING
@@ -0,0 +1,202 @@
+
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "[]"
+      replaced with your own identifying information. (Don't include
+      the brackets!)  The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright [yyyy] [name of copyright owner]
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
diff --git a/src/inspector/build/closure-compiler/README.md b/src/inspector/build/closure-compiler/README.md
new file mode 100644
index 0000000..080319e
--- /dev/null
+++ b/src/inspector/build/closure-compiler/README.md
@@ -0,0 +1,519 @@
+# [Google Closure Compiler](https://developers.google.com/closure/compiler/)
+
+[![Build Status](https://travis-ci.org/google/closure-compiler.svg?branch=master)](https://travis-ci.org/google/closure-compiler)
+
+The [Closure Compiler](https://developers.google.com/closure/compiler/) is a tool for making JavaScript download and run faster. It is a true compiler for JavaScript. Instead of compiling from a source language to machine code, it compiles from JavaScript to better JavaScript. It parses your JavaScript, analyzes it, removes dead code and rewrites and minimizes what's left. It also checks syntax, variable references, and types, and warns about common JavaScript pitfalls.
+
+## Getting Started
+ * [Download the latest version](http://dl.google.com/closure-compiler/compiler-latest.zip) ([Release details here](https://github.com/google/closure-compiler/wiki/Releases))
+ * [Download a specific version](https://github.com/google/closure-compiler/wiki/Binary-Downloads). Also available via:
+   - [Maven](https://github.com/google/closure-compiler/wiki/Maven)
+   - [NPM](https://www.npmjs.com/package/google-closure-compiler)
+ * See the [Google Developers Site](https://developers.google.com/closure/compiler/docs/gettingstarted_app) for documentation including instructions for running the compiler from the command line.
+
+## Options for Getting Help
+1. Post in the [Closure Compiler Discuss Group](https://groups.google.com/forum/#!forum/closure-compiler-discuss)
+2. Ask a question on [Stack Overflow](http://stackoverflow.com/questions/tagged/google-closure-compiler)
+3. Consult the [FAQ](https://github.com/google/closure-compiler/wiki/FAQ)
+
+## Building it Yourself
+
+Note: The Closure Compiler requires [Java 7 or higher](http://www.java.com/).
+
+### Using [Maven](http://maven.apache.org/)
+
+1. Download [Maven](http://maven.apache.org/download.cgi).
+
+2. Add sonatype snapshots repository to `~/.m2/settings.xml`:
+   ```
+   <profile>
+     <id>allow-snapshots</id>
+        <activation><activeByDefault>true</activeByDefault></activation>
+     <repositories>
+       <repository>
+         <id>snapshots-repo</id>
+         <url>https://oss.sonatype.org/content/repositories/snapshots</url>
+         <releases><enabled>false</enabled></releases>
+         <snapshots><enabled>true</enabled></snapshots>
+       </repository>
+     </repositories>
+   </profile>
+   ```
+
+3. Run `mvn -DskipTests` (omit the `-DskipTests` if you want to run all the
+unit tests too).
+
+    This will produce a jar file called `target/closure-compiler-1.0-SNAPSHOT.jar`.
+
+### Using [Eclipse](http://www.eclipse.org/)
+
+1. Download and open the [Eclipse IDE](http://www.eclipse.org/).
+2. Navigate to `File > New > Project ...` and create a Java Project. Give
+   the project a name.
+3. Select `Create project from existing source` and choose the root of the
+   checked-out source tree as the existing directory.
+4. Navigate to the `build.xml` file. You will see all the build rules in
+   the Outline pane. Run the `jar` rule to build the compiler in
+   `build/compiler.jar`.
+
+## Running
+
+On the command line, at the root of this project, type
+
+```
+java -jar target/closure-compiler-1.0-SNAPSHOT.jar
+```
+
+This starts the compiler in interactive mode. Type
+
+```javascript
+var x = 17 + 25;
+```
+
+then hit "Enter", then hit "Ctrl-Z" (on Windows) or "Ctrl-D" (on Mac or Linux)
+and "Enter" again. The Compiler will respond:
+
+```javascript
+var x=42;
+```
+
+The Closure Compiler has many options for reading input from a file, writing
+output to a file, checking your code, and running optimizations. To learn more,
+type
+
+```
+java -jar compiler.jar --help
+```
+
+More detailed information about running the Closure Compiler is available in the
+[documentation](http://code.google.com/closure/compiler/docs/gettingstarted_app.html).
+
+## Compiling Multiple Scripts
+
+If you have multiple scripts, you should compile them all together with one
+compile command.
+
+```bash
+java -jar compiler.jar --js_output_file=out.js in1.js in2.js in3.js ...
+```
+
+You can also use minimatch-style globs.
+
+```bash
+# Recursively include all js files in subdirs
+java -jar compiler.jar --js_output_file=out.js 'src/**.js'
+
+# Recursively include all js files in subdirs, excluding test files.
+# Use single-quotes, so that bash doesn't try to expand the '!'
+java -jar compiler.jar --js_output_file=out.js 'src/**.js' '!**_test.js'
+```
+
+The Closure Compiler will concatenate the files in the order they're passed at
+the command line.
+
+If you're using globs or many files, you may start to run into
+problems with managing dependencies between scripts. In this case, you should
+use the [Closure Library](https://developers.google.com/closure/library/). It
+contains functions for enforcing dependencies between scripts, and Closure Compiler
+will re-order the inputs automatically.
+
+## How to Contribute
+### Reporting a bug
+1. First make sure that it is really a bug and not simply the way that Closure Compiler works (especially true for ADVANCED_OPTIMIZATIONS).
+ * Check the [official documentation](https://developers.google.com/closure/compiler/)
+ * Consult the [FAQ](https://github.com/google/closure-compiler/wiki/FAQ)
+ * Search on [Stack Overflow](http://stackoverflow.com/questions/tagged/google-closure-compiler) and in the [Closure Compiler Discuss Group](https://groups.google.com/forum/#!forum/closure-compiler-discuss)
+2. If you still think you have found a bug, make sure someone hasn't already reported it. See the list of [known issues](https://github.com/google/closure-compiler/issues).
+3. If it hasn't been reported yet, post a new issue. Make sure to add enough detail so that the bug can be recreated. The smaller the reproduction code, the better.
+
+### Suggesting a Feature
+1. Consult the [FAQ](https://github.com/google/closure-compiler/wiki/FAQ) to make sure that the behaviour you would like isn't specifically excluded (such as string inlining).
+2. Make sure someone hasn't requested the same thing. See the list of [known issues](https://github.com/google/closure-compiler/issues).
+3. Read up on [what type of feature requests are accepted](https://github.com/google/closure-compiler/wiki/FAQ#how-do-i-submit-a-feature-request-for-a-new-type-of-optimization).
+4. Submit your request as an issue.
+
+### Submitting patches
+1. All contributors must sign a contributor license agreement (CLA).
+   A CLA basically says that you own the rights to any code you contribute,
+   and that you give us permission to use that code in Closure Compiler.
+   You maintain the copyright on that code.
+   If you own all the rights to your code, you can fill out an
+   [individual CLA](http://code.google.com/legal/individual-cla-v1.0.html).
+   If your employer has any rights to your code, then they also need to fill out
+   a [corporate CLA](http://code.google.com/legal/corporate-cla-v1.0.html).
+   If you don't know if your employer has any rights to your code, you should
+   ask before signing anything.
+   By default, anyone with an @google.com email address already has a CLA
+   signed for them.
+2. To make sure your changes are of the type that will be accepted, ask about your patch on the [Closure Compiler Discuss Group](https://groups.google.com/forum/#!forum/closure-compiler-discuss)
+3. Fork the repository.
+4. Make your changes.
+5. Submit a pull request for your changes. A project developer will review your work and then merge your request into the project.
+
+## Closure Compiler License
+
+Copyright 2009 The Closure Compiler Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0.
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+## Dependency Licenses
+
+### Rhino
+
+<table>
+  <tr>
+    <td>Code Path</td>
+    <td>
+      <code>src/com/google/javascript/rhino</code>, <code>test/com/google/javascript/rhino</code>
+    </td>
+  </tr>
+
+  <tr>
+    <td>URL</td>
+    <td>http://www.mozilla.org/rhino</td>
+  </tr>
+
+  <tr>
+    <td>Version</td>
+    <td>1.5R3, with heavy modifications</td>
+  </tr>
+
+  <tr>
+    <td>License</td>
+    <td>Netscape Public License and MPL / GPL dual license</td>
+  </tr>
+
+  <tr>
+    <td>Description</td>
+    <td>A partial copy of Mozilla Rhino. Mozilla Rhino is an
+implementation of JavaScript for the JVM.  The JavaScript
+parse tree data structures were extracted and modified
+significantly for use by Google's JavaScript compiler.</td>
+  </tr>
+
+  <tr>
+    <td>Local Modifications</td>
+    <td>The packages have been renamespaced. All code not
+relevant to the parse tree has been removed. A JsDoc parser and static typing
+system have been added.</td>
+  </tr>
+</table>
+
+### Args4j
+
+<table>
+  <tr>
+    <td>Code Path</td>
+    <td><code>lib/args4j.jar</code></td>
+  </tr>
+
+  <tr>
+    <td>URL</td>
+    <td>https://args4j.dev.java.net/</td>
+  </tr>
+
+  <tr>
+    <td>Version</td>
+    <td>2.0.26</td>
+  </tr>
+
+  <tr>
+    <td>License</td>
+    <td>MIT</td>
+  </tr>
+
+  <tr>
+    <td>Description</td>
+    <td>args4j is a small Java class library that makes it easy to parse command line
+options/arguments in your CUI application.</td>
+  </tr>
+
+  <tr>
+    <td>Local Modifications</td>
+    <td>None</td>
+  </tr>
+</table>
+
+### Guava Libraries
+
+<table>
+  <tr>
+    <td>Code Path</td>
+    <td><code>lib/guava.jar</code></td>
+  </tr>
+
+  <tr>
+    <td>URL</td>
+    <td>https://github.com/google/guava</td>
+  </tr>
+
+  <tr>
+    <td>Version</td>
+    <td>20.0</td>
+  </tr>
+
+  <tr>
+    <td>License</td>
+    <td>Apache License 2.0</td>
+  </tr>
+
+  <tr>
+    <td>Description</td>
+    <td>Google's core Java libraries.</td>
+  </tr>
+
+  <tr>
+    <td>Local Modifications</td>
+    <td>None</td>
+  </tr>
+</table>
+
+### JSR 305
+
+<table>
+  <tr>
+    <td>Code Path</td>
+    <td><code>lib/jsr305.jar</code></td>
+  </tr>
+
+  <tr>
+    <td>URL</td>
+    <td>http://code.google.com/p/jsr-305/</td>
+  </tr>
+
+  <tr>
+    <td>Version</td>
+    <td>svn revision 47</td>
+  </tr>
+
+  <tr>
+    <td>License</td>
+    <td>BSD License</td>
+  </tr>
+
+  <tr>
+    <td>Description</td>
+    <td>Annotations for software defect detection.</td>
+  </tr>
+
+  <tr>
+    <td>Local Modifications</td>
+    <td>None</td>
+  </tr>
+</table>
+
+### JUnit
+
+<table>
+  <tr>
+    <td>Code Path</td>
+    <td><code>lib/junit.jar</code></td>
+  </tr>
+
+  <tr>
+    <td>URL</td>
+    <td>http://sourceforge.net/projects/junit/</td>
+  </tr>
+
+  <tr>
+    <td>Version</td>
+    <td>4.11</td>
+  </tr>
+
+  <tr>
+    <td>License</td>
+    <td>Common Public License 1.0</td>
+  </tr>
+
+  <tr>
+    <td>Description</td>
+    <td>A framework for writing and running automated tests in Java.</td>
+  </tr>
+
+  <tr>
+    <td>Local Modifications</td>
+    <td>None</td>
+  </tr>
+</table>
+
+### Protocol Buffers
+
+<table>
+  <tr>
+    <td>Code Path</td>
+    <td><code>lib/protobuf-java.jar</code></td>
+  </tr>
+
+  <tr>
+    <td>URL</td>
+    <td>https://github.com/google/protobuf</td>
+  </tr>
+
+  <tr>
+    <td>Version</td>
+    <td>2.5.0</td>
+  </tr>
+
+  <tr>
+    <td>License</td>
+    <td>New BSD License</td>
+  </tr>
+
+  <tr>
+    <td>Description</td>
+    <td>Supporting libraries for protocol buffers,
+an encoding of structured data.</td>
+  </tr>
+
+  <tr>
+    <td>Local Modifications</td>
+    <td>None</td>
+  </tr>
+</table>
+
+### Truth
+
+<table>
+  <tr>
+    <td>Code Path</td>
+    <td><code>lib/truth.jar</code></td>
+  </tr>
+
+  <tr>
+    <td>URL</td>
+    <td>https://github.com/google/truth</td>
+  </tr>
+
+  <tr>
+    <td>Version</td>
+    <td>0.24</td>
+  </tr>
+
+  <tr>
+    <td>License</td>
+    <td>Apache License 2.0</td>
+  </tr>
+
+  <tr>
+    <td>Description</td>
+    <td>Assertion/Proposition framework for Java unit tests</td>
+  </tr>
+
+  <tr>
+    <td>Local Modifications</td>
+    <td>None</td>
+  </tr>
+</table>
+
+### Ant
+
+<table>
+  <tr>
+    <td>Code Path</td>
+    <td>
+      <code>lib/ant.jar</code>, <code>lib/ant-launcher.jar</code>
+    </td>
+  </tr>
+
+  <tr>
+    <td>URL</td>
+    <td>http://ant.apache.org/bindownload.cgi</td>
+  </tr>
+
+  <tr>
+    <td>Version</td>
+    <td>1.8.1</td>
+  </tr>
+
+  <tr>
+    <td>License</td>
+    <td>Apache License 2.0</td>
+  </tr>
+
+  <tr>
+    <td>Description</td>
+    <td>Ant is a Java based build tool. In theory it is kind of like "make"
+without make's wrinkles and with the full portability of pure java code.</td>
+  </tr>
+
+  <tr>
+    <td>Local Modifications</td>
+    <td>None</td>
+  </tr>
+</table>
+
+### GSON
+
+<table>
+  <tr>
+    <td>Code Path</td>
+    <td><code>lib/gson.jar</code></td>
+  </tr>
+
+  <tr>
+    <td>URL</td>
+    <td>https://github.com/google/gson</td>
+  </tr>
+
+  <tr>
+    <td>Version</td>
+    <td>2.2.4</td>
+  </tr>
+
+  <tr>
+    <td>License</td>
+    <td>Apache license 2.0</td>
+  </tr>
+
+  <tr>
+    <td>Description</td>
+    <td>A Java library to convert JSON to Java objects and vice-versa</td>
+  </tr>
+
+  <tr>
+    <td>Local Modifications</td>
+    <td>None</td>
+  </tr>
+</table>
+
+### Node.js Closure Compiler Externs
+
+<table>
+  <tr>
+    <td>Code Path</td>
+    <td><code>contrib/nodejs</code></td>
+  </tr>
+
+  <tr>
+    <td>URL</td>
+    <td>https://github.com/dcodeIO/node.js-closure-compiler-externs</td>
+  </tr>
+
+  <tr>
+    <td>Version</td>
+    <td>e891b4fbcf5f466cc4307b0fa842a7d8163a073a</td>
+  </tr>
+
+  <tr>
+    <td>License</td>
+    <td>Apache 2.0 license</td>
+  </tr>
+
+  <tr>
+    <td>Description</td>
+    <td>Type contracts for NodeJS APIs</td>
+  </tr>
+
+  <tr>
+    <td>Local Modifications</td>
+    <td>Substantial changes to make them compatible with NpmCommandLineRunner.</td>
+  </tr>
+</table>
diff --git a/src/inspector/build/closure-compiler/closure-compiler.jar b/src/inspector/build/closure-compiler/closure-compiler.jar
new file mode 100644
index 0000000..0d42389
--- /dev/null
+++ b/src/inspector/build/closure-compiler/closure-compiler.jar
Binary files differ
diff --git a/src/inspector/build/compile-scripts.py b/src/inspector/build/compile-scripts.py
new file mode 100755
index 0000000..abe167a
--- /dev/null
+++ b/src/inspector/build/compile-scripts.py
@@ -0,0 +1,169 @@
+#!/usr/bin/env python
+#
+# Copyright 2016 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import os
+import os.path as path
+import generate_protocol_externs
+import re
+import subprocess
+import sys
+
+if len(sys.argv) == 2 and sys.argv[1] == '--help':
+  print("Usage: %s" % path.basename(sys.argv[0]))
+  sys.exit(0)
+
+java_required_major = 1
+java_required_minor = 7
+
+v8_inspector_path = path.dirname(path.dirname(path.abspath(__file__)))
+
+protocol_externs_file = path.join(v8_inspector_path, 'protocol_externs.js')
+injected_script_source_name = path.join(v8_inspector_path,
+  'injected-script-source.js')
+injected_script_externs_file = path.join(v8_inspector_path,
+  'injected_script_externs.js')
+debugger_script_source_name = path.join(v8_inspector_path,
+  'debugger-script.js')
+debugger_script_externs_file = path.join(v8_inspector_path,
+  'debugger_script_externs.js')
+
+generate_protocol_externs.generate_protocol_externs(protocol_externs_file,
+  path.join(v8_inspector_path, 'js_protocol.json'))
+
+error_warning_regex = re.compile(r'WARNING|ERROR')
+
+closure_compiler_jar = path.join(v8_inspector_path, 'build',
+  'closure-compiler', 'closure-compiler.jar')
+
+common_closure_args = [
+  '--checks_only',
+  '--warning_level', 'VERBOSE'
+]
+
+# Error reporting and checking.
+errors_found = False
+
+def popen(arguments):
+  return subprocess.Popen(arguments, stdout=subprocess.PIPE,
+    stderr=subprocess.STDOUT)
+
+def error_excepthook(exctype, value, traceback):
+  print 'ERROR:'
+  sys.__excepthook__(exctype, value, traceback)
+sys.excepthook = error_excepthook
+
+def has_errors(output):
+  return re.search(error_warning_regex, output) != None
+
+# Find java. Based on
+# http://stackoverflow.com/questions/377017/test-if-executable-exists-in-python.
+def which(program):
+  def is_exe(fpath):
+    return path.isfile(fpath) and os.access(fpath, os.X_OK)
+
+  fpath, fname = path.split(program)
+  if fpath:
+    if is_exe(program):
+      return program
+  else:
+    for part in os.environ['PATH'].split(os.pathsep):
+      part = part.strip('"')
+      exe_file = path.join(part, program)
+      if is_exe(exe_file):
+        return exe_file
+  return None
+
+def find_java():
+  exec_command = None
+  has_server_jvm = True
+  java_path = which('java')
+  if not java_path:
+    java_path = which('java.exe')
+
+  if not java_path:
+    print 'NOTE: No Java executable found in $PATH.'
+    sys.exit(0)
+
+  is_ok = False
+  java_version_out, _ = popen([java_path, '-version']).communicate()
+  java_build_regex = re.compile(r'^\w+ version "(\d+)\.(\d+)')
+  # pylint: disable=E1103
+  match = re.search(java_build_regex, java_version_out)
+  if match:
+    major = int(match.group(1))
+    minor = int(match.group(2))
+  is_ok = (major, minor) >= (java_required_major, java_required_minor)
+  if is_ok:
+    exec_command = [java_path, '-Xms1024m', '-server',
+      '-XX:+TieredCompilation']
+    check_server_proc = popen(exec_command + ['-version'])
+    check_server_proc.communicate()
+    if check_server_proc.returncode != 0:
+      # Not all Java installs have server JVMs.
+      exec_command.remove('-server')
+      has_server_jvm = False
+
+  if not is_ok:
+    print 'NOTE: Java executable version %d.%d or above not found in $PATH.' % (java_required_major, java_required_minor)
+    sys.exit(0)
+  print 'Java executable: %s%s' % (java_path, '' if has_server_jvm else ' (no server JVM)')
+  return exec_command
+
+java_exec = find_java()
+
+spawned_compiler_command = java_exec + [
+  '-jar',
+  closure_compiler_jar
+] + common_closure_args
+
+print 'Compiling injected-script-source.js...'
+
+command = spawned_compiler_command + [
+  '--externs', injected_script_externs_file,
+  '--externs', protocol_externs_file,
+  '--js', injected_script_source_name
+]
+
+injected_script_compile_proc = popen(command)
+
+print 'Compiling debugger-script.js...'
+
+command = spawned_compiler_command + [
+  '--externs', debugger_script_externs_file,
+  '--js', debugger_script_source_name,
+  '--new_type_inf'
+]
+
+debugger_script_compile_proc = popen(command)
+
+print 'Validating injected-script-source.js...'
+injectedscript_check_script_path = path.join(v8_inspector_path, 'build',
+  'check_injected_script_source.py')
+validate_injected_script_proc = popen([sys.executable,
+  injectedscript_check_script_path, injected_script_source_name])
+
+print
+
+(injected_script_compile_out, _) = injected_script_compile_proc.communicate()
+print 'injected-script-source.js compilation output:%s' % os.linesep
+print injected_script_compile_out
+errors_found |= has_errors(injected_script_compile_out)
+
+(debugger_script_compiler_out, _) = debugger_script_compile_proc.communicate()
+print 'debugger-script.js compilation output:%s' % os.linesep
+print debugger_script_compiler_out
+errors_found |= has_errors(debugger_script_compiler_out)
+
+(validate_injected_script_out, _) = validate_injected_script_proc.communicate()
+print 'Validate injected-script-source.js output:%s' % os.linesep
+print validate_injected_script_out if validate_injected_script_out else '<empty>'
+errors_found |= has_errors(validate_injected_script_out)
+
+os.remove(protocol_externs_file)
+
+if errors_found:
+  print 'ERRORS DETECTED'
+  sys.exit(1)
diff --git a/src/inspector/build/generate_protocol_externs.py b/src/inspector/build/generate_protocol_externs.py
new file mode 100755
index 0000000..c2ba2c5
--- /dev/null
+++ b/src/inspector/build/generate_protocol_externs.py
@@ -0,0 +1,246 @@
+#!/usr/bin/env python
+# Copyright (c) 2011 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import os
+import re
+import json
+
+# Maps protocol primitive type names to Closure Compiler type annotations.
+type_traits = {
+    "any": "*",
+    "string": "string",
+    "integer": "number",
+    "number": "number",
+    "boolean": "boolean",
+    "array": "!Array.<*>",
+    "object": "!Object",
+}
+
+# Domains whose agent commands are exposed promise-style (the generated
+# externs give them a templated callback and a !Promise.<T> return).
+promisified_domains = {
+    "Accessibility",
+    "Animation",
+    "CSS",
+    "Emulation",
+    "Profiler"
+}
+
+# Populated by generate_protocol_externs(): fully qualified protocol type id
+# (e.g. "DOM.NodeId") -> generated Closure type name (e.g. "DOMAgent.NodeId").
+ref_types = {}
+
+def full_qualified_type_id(domain_name, type_id):
+    # Qualify a bare type id with its domain ("NodeId" -> "DOM.NodeId");
+    # ids that already contain a dot are assumed fully qualified.
+    if type_id.find(".") == -1:
+        return "%s.%s" % (domain_name, type_id)
+    return type_id
+
+
+def fix_camel_case(name):
+    # Turn a dash-separated protocol enum value into a TitleCamelCase JS
+    # identifier. A leading "-" would be illegal in an identifier, so it
+    # becomes the "Negative" prefix; well-known acronyms stay upper-cased.
+    prefix = ""
+    if name[0] == "-":
+        prefix = "Negative"
+        name = name[1:]
+    refined = re.sub(r'-(\w)', lambda pat: pat.group(1).upper(), name)
+    refined = to_title_case(refined)
+    return prefix + re.sub(r'(?i)HTML|XML|WML|API', lambda pat: pat.group(0).upper(), refined)
+
+
+def to_title_case(name):
+    # Upper-case only the first character; the rest is left untouched.
+    return name[:1].upper() + name[1:]
+
+
+def generate_enum(name, json):
+    # Emit a Closure "@enum {string}" declaration named `name`.
+    # `json` is the protocol type/property dict holding an "enum" value list;
+    # member keys are camel-cased identifiers, values the raw protocol strings.
+    enum_members = []
+    for member in json["enum"]:
+        enum_members.append("    %s: \"%s\"" % (fix_camel_case(member), member))
+    return "\n/** @enum {string} */\n%s = {\n%s\n};\n" % (name, (",\n".join(enum_members)))
+
+
+def param_type(domain_name, param):
+    # Resolve a protocol parameter/property dict to a Closure type string.
+    # Inline "type" entries map through type_traits (arrays recurse on
+    # "items"); "$ref" entries are looked up in the pre-built ref_types table.
+    if "type" in param:
+        if param["type"] == "array":
+            items = param["items"]
+            return "!Array.<%s>" % param_type(domain_name, items)
+        else:
+            return type_traits[param["type"]]
+    if "$ref" in param:
+        type_id = full_qualified_type_id(domain_name, param["$ref"])
+        if type_id in ref_types:
+            return ref_types[type_id]
+        else:
+            # Deliberately emits an invalid type into the externs so the
+            # closure compile fails loudly rather than passing silently.
+            print "Type not found: " + type_id
+            return "!! Type not found: " + type_id
+
+
+def load_schema(file, domains):
+    # Parse a protocol JSON file and append its "domains" entries to the
+    # caller-supplied list.
+    # NOTE(review): input_file is never closed; harmless for this short-lived
+    # build tool, but worth fixing upstream.
+    input_file = open(file, "r")
+    json_string = input_file.read()
+    parsed_json = json.loads(json_string)
+    domains.extend(parsed_json["domains"])
+
+
+def generate_protocol_externs(output_path, file1):
+    # Generate Closure Compiler externs for the inspector protocol schema in
+    # `file1`, writing them to `output_path`. Also fills the module-level
+    # ref_types table as a side effect.
+    domains = []
+    load_schema(file1, domains)
+    output_file = open(output_path, "w")
+
+    output_file.write(
+"""
+var InspectorBackend = {}
+
+var Protocol = {};
+/** @typedef {string}*/
+Protocol.Error;
+""")
+
+    # Pass 1: register every type id so cross-domain $ref lookups resolve
+    # regardless of declaration order.
+    for domain in domains:
+        domain_name = domain["domain"]
+        if "types" in domain:
+            for type in domain["types"]:
+                type_id = full_qualified_type_id(domain_name, type["id"])
+                ref_types[type_id] = "%sAgent.%s" % (domain_name, type["id"])
+
+    # Pass 2: emit agent constructors, command prototypes, typedefs/enums and
+    # dispatcher interfaces per domain.
+    for domain in domains:
+        domain_name = domain["domain"]
+        promisified = domain_name in promisified_domains
+
+        output_file.write("\n\n/**\n * @constructor\n*/\n")
+        output_file.write("Protocol.%sAgent = function(){};\n" % domain_name)
+
+        if "commands" in domain:
+            for command in domain["commands"]:
+                output_file.write("\n/**\n")
+                params = []
+                has_return_value = "returns" in command
+                explicit_parameters = promisified and has_return_value
+                if ("parameters" in command):
+                    for in_param in command["parameters"]:
+                        # All parameters are not optional in case of promisified domain with return value.
+                        if (not explicit_parameters and "optional" in in_param):
+                            params.append("opt_%s" % in_param["name"])
+                            output_file.write(" * @param {%s=} opt_%s\n" % (param_type(domain_name, in_param), in_param["name"]))
+                        else:
+                            params.append(in_param["name"])
+                            output_file.write(" * @param {%s} %s\n" % (param_type(domain_name, in_param), in_param["name"]))
+                # Callback signature: (?Protocol.Error, [error,] ...returns).
+                returns = []
+                returns.append("?Protocol.Error")
+                if ("error" in command):
+                    returns.append("%s=" % param_type(domain_name, command["error"]))
+                if (has_return_value):
+                    for out_param in command["returns"]:
+                        if ("optional" in out_param):
+                            returns.append("%s=" % param_type(domain_name, out_param))
+                        else:
+                            returns.append("%s" % param_type(domain_name, out_param))
+                callback_return_type = "void="
+                if explicit_parameters:
+                    callback_return_type = "T"
+                elif promisified:
+                    callback_return_type = "T="
+                output_file.write(" * @param {function(%s):%s} opt_callback\n" % (", ".join(returns), callback_return_type))
+                if (promisified):
+                    output_file.write(" * @return {!Promise.<T>}\n")
+                    output_file.write(" * @template T\n")
+                params.append("opt_callback")
+
+                output_file.write(" */\n")
+                output_file.write("Protocol.%sAgent.prototype.%s = function(%s) {}\n" % (domain_name, command["name"], ", ".join(params)))
+                output_file.write("/** @param {function(%s):void=} opt_callback */\n" % ", ".join(returns))
+                output_file.write("Protocol.%sAgent.prototype.invoke_%s = function(obj, opt_callback) {}\n" % (domain_name, command["name"]))
+
+        output_file.write("\n\n\nvar %sAgent = function(){};\n" % domain_name)
+
+        if "types" in domain:
+            for type in domain["types"]:
+                if type["type"] == "object":
+                    typedef_args = []
+                    if "properties" in type:
+                        for property in type["properties"]:
+                            suffix = ""
+                            if ("optional" in property):
+                                suffix = "|undefined"
+                            if "enum" in property:
+                                # Inline string enums get a named enum type
+                                # alongside the record typedef.
+                                enum_name = "%sAgent.%s%s" % (domain_name, type["id"], to_title_case(property["name"]))
+                                output_file.write(generate_enum(enum_name, property))
+                                typedef_args.append("%s:(%s%s)" % (property["name"], enum_name, suffix))
+                            else:
+                                typedef_args.append("%s:(%s%s)" % (property["name"], param_type(domain_name, property), suffix))
+                    if (typedef_args):
+                        output_file.write("\n/** @typedef {!{%s}} */\n%sAgent.%s;\n" % (", ".join(typedef_args), domain_name, type["id"]))
+                    else:
+                        output_file.write("\n/** @typedef {!Object} */\n%sAgent.%s;\n" % (domain_name, type["id"]))
+                elif type["type"] == "string" and "enum" in type:
+                    output_file.write(generate_enum("%sAgent.%s" % (domain_name, type["id"]), type))
+                elif type["type"] == "array":
+                    output_file.write("\n/** @typedef {!Array.<!%s>} */\n%sAgent.%s;\n" % (param_type(domain_name, type["items"]), domain_name, type["id"]))
+                else:
+                    output_file.write("\n/** @typedef {%s} */\n%sAgent.%s;\n" % (type_traits[type["type"]], domain_name, type["id"]))
+
+        output_file.write("/** @interface */\n")
+        output_file.write("%sAgent.Dispatcher = function() {};\n" % domain_name)
+        if "events" in domain:
+            for event in domain["events"]:
+                params = []
+                if ("parameters" in event):
+                    output_file.write("/**\n")
+                    for param in event["parameters"]:
+                        if ("optional" in param):
+                            params.append("opt_%s" % param["name"])
+                            output_file.write(" * @param {%s=} opt_%s\n" % (param_type(domain_name, param), param["name"]))
+                        else:
+                            params.append(param["name"])
+                            output_file.write(" * @param {%s} %s\n" % (param_type(domain_name, param), param["name"]))
+                    output_file.write(" */\n")
+                output_file.write("%sAgent.Dispatcher.prototype.%s = function(%s) {};\n" % (domain_name, event["name"], ", ".join(params)))
+
+    # Trailing Protocol.Agents scaffolding: accessors and dispatcher
+    # registration stubs for every domain.
+    # NOTE(review): the emitted "this._agentsMap;" is a no-op expression
+    # statement (extern bodies are never executed, so it is harmless), but it
+    # reads like a typo for "this._agentsMap = agentsMap;".
+    output_file.write("\n/** @constructor\n * @param {!Object.<string, !Object>} agentsMap\n */\n")
+    output_file.write("Protocol.Agents = function(agentsMap){this._agentsMap;};\n")
+    output_file.write("/**\n * @param {string} domain\n * @param {!Object} dispatcher\n */\n")
+    output_file.write("Protocol.Agents.prototype.registerDispatcher = function(domain, dispatcher){};\n")
+    for domain in domains:
+        domain_name = domain["domain"]
+        # Lower-case the leading run of capitals to form the accessor name
+        # (e.g. "CSS" -> "cssAgent", "DOMStorage" -> "domStorageAgent").
+        uppercase_length = 0
+        while uppercase_length < len(domain_name) and domain_name[uppercase_length].isupper():
+            uppercase_length += 1
+
+        output_file.write("/** @return {!Protocol.%sAgent}*/\n" % domain_name)
+        output_file.write("Protocol.Agents.prototype.%s = function(){};\n" % (domain_name[:uppercase_length].lower() + domain_name[uppercase_length:] + "Agent"))
+
+        output_file.write("/**\n * @param {!%sAgent.Dispatcher} dispatcher\n */\n" % domain_name)
+        output_file.write("Protocol.Agents.prototype.register%sDispatcher = function(dispatcher) {}\n" % domain_name)
+
+
+    output_file.close()
+
+if __name__ == "__main__":
+    import sys
+    import os.path
+    # CLI entry point: <program> -o OUTPUT_FILE INPUT_FILE
+    program_name = os.path.basename(__file__)
+    if len(sys.argv) < 4 or sys.argv[1] != "-o":
+        sys.stderr.write("Usage: %s -o OUTPUT_FILE INPUT_FILE\n" % program_name)
+        exit(1)
+    output_path = sys.argv[2]
+    input_path = sys.argv[3]
+    generate_protocol_externs(output_path, input_path)
diff --git a/src/inspector/build/rjsmin.py b/src/inspector/build/rjsmin.py
new file mode 100755
index 0000000..8357a6d
--- /dev/null
+++ b/src/inspector/build/rjsmin.py
@@ -0,0 +1,295 @@
+#!/usr/bin/env python
+#
+# Copyright 2011 - 2013
+# Andr\xe9 Malo or his licensors, as applicable
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+r"""
+=====================
+ Javascript Minifier
+=====================
+
+rJSmin is a javascript minifier written in python.
+
+The minifier is based on the semantics of `jsmin.c by Douglas Crockford`_\.
+
+The module is a re-implementation aiming for speed, so it can be used at
+runtime (rather than during a preprocessing step). Usually it produces the
+same results as the original ``jsmin.c``. It differs in the following ways:
+
+- there is no error detection: unterminated string, regex and comment
+  literals are treated as regular javascript code and minified as such.
+- Control characters inside string and regex literals are left untouched; they
+  are not converted to spaces (nor to \n)
+- Newline characters are not allowed inside string and regex literals, except
+  for line continuations in string literals (ECMA-5).
+- "return /regex/" is recognized correctly.
+- "+ +" and "- -" sequences are not collapsed to '++' or '--'
+- Newlines before ! operators are removed more sensibly
+- rJSmin does not handle streams, but only complete strings. (However, the
+  module provides a "streamy" interface).
+
+Since most parts of the logic are handled by the regex engine it's way
+faster than the original python port of ``jsmin.c`` by Baruch Even. The speed
+factor varies between about 6 and 55 depending on input and python version
+(it gets faster the more compressed the input already is). Compared to the
+speed-refactored python port by Dave St.Germain the performance gain is less
+dramatic but still between 1.2 and 7. See the docs/BENCHMARKS file for
+details.
+
+rjsmin.c is a reimplementation of rjsmin.py in C and speeds it up even more.
+
+Both python 2 and python 3 are supported.
+
+.. _jsmin.c by Douglas Crockford:
+   http://www.crockford.com/javascript/jsmin.c
+"""
+__author__ = "Andr\xe9 Malo"
+__author__ = getattr(__author__, 'decode', lambda x: __author__)('latin-1')
+__docformat__ = "restructuredtext en"
+__license__ = "Apache License, Version 2.0"
+__version__ = '1.0.7'
+__all__ = ['jsmin']
+
+import re as _re
+
+
+def _make_jsmin(python_only=False):
+    """
+    Generate JS minifier based on `jsmin.c by Douglas Crockford`_
+
+    .. _jsmin.c by Douglas Crockford:
+       http://www.crockford.com/javascript/jsmin.c
+
+    :Parameters:
+      `python_only` : ``bool``
+        Use only the python variant. If true, the c extension is not even
+        tried to be loaded.
+
+    :Return: Minifier
+    :Rtype: ``callable``
+    """
+    # pylint: disable = R0912, R0914, W0612
+    # Prefer the optional C extension when available; fall back to the pure
+    # python implementation built below.
+    if not python_only:
+        try:
+            import _rjsmin
+        except ImportError:
+            pass
+        else:
+            return _rjsmin.jsmin
+    # Python 2/3 compatibility shim.
+    try:
+        xrange
+    except NameError:
+        xrange = range  # pylint: disable = W0622
+
+    # Regex fragments for the lexical classes of JS: whitespace, comments,
+    # single/double-quoted strings, regex literals. \047 is "'".
+    space_chars = r'[\000-\011\013\014\016-\040]'
+
+    line_comment = r'(?://[^\r\n]*)'
+    space_comment = r'(?:/\*[^*]*\*+(?:[^/*][^*]*\*+)*/)'
+    string1 = \
+        r'(?:\047[^\047\\\r\n]*(?:\\(?:[^\r\n]|\r?\n|\r)[^\047\\\r\n]*)*\047)'
+    string2 = r'(?:"[^"\\\r\n]*(?:\\(?:[^\r\n]|\r?\n|\r)[^"\\\r\n]*)*")'
+    strings = r'(?:%s|%s)' % (string1, string2)
+
+    charclass = r'(?:\[[^\\\]\r\n]*(?:\\[^\r\n][^\\\]\r\n]*)*\])'
+    nospecial = r'[^/\\\[\r\n]'
+    regex = r'(?:/(?![\r\n/*])%s*(?:(?:\\[^\r\n]|%s)%s*)*/)' % (
+        nospecial, charclass, nospecial)
+    space = r'(?:%s|%s)' % (space_chars, space_comment)
+    newline = r'(?:%s?[\r\n])' % line_comment
+
+    def fix_charclass(result):
+        """ Fixup string of chars to fit into a regex char class """
+        # A literal '-' must go last in a character class.
+        pos = result.find('-')
+        if pos >= 0:
+            result = r'%s%s-' % (result[:pos], result[pos + 1:])
+
+        def sequentize(string):
+            """
+            Notate consecutive characters as sequence
+
+            (1-4 instead of 1234)
+            """
+            first, last, result = None, None, []
+            for char in map(ord, string):
+                if last is None:
+                    first = last = char
+                elif last + 1 == char:
+                    last = char
+                else:
+                    result.append((first, last))
+                    first = last = char
+            if last is not None:
+                result.append((first, last))
+            return ''.join(['%s%s%s' % (
+                chr(first),
+                last > first + 1 and '-' or '',
+                last != first and chr(last) or '') for first, last in result])
+
+        return _re.sub(r'([\000-\040\047])',  # for better portability
+            lambda m: '\\%03o' % ord(m.group(1)), (sequentize(result)
+                .replace('\\', '\\\\')
+                .replace('[', '\\[')
+                .replace(']', '\\]')))
+
+    def id_literal_(what):
+        """ Make id_literal like char class """
+        # Negate `what` over ASCII 0-126 and express it as [^...].
+        match = _re.compile(what).match
+        result = ''.join([chr(c) for c in xrange(127) if not match(chr(c))])
+        return '[^%s]' % fix_charclass(result)
+
+    def not_id_literal_(keep):
+        """ Make negated id_literal like char class """
+        match = _re.compile(id_literal_(keep)).match
+        result = ''.join([chr(c) for c in xrange(127) if not match(chr(c))])
+        return r'[%s]' % fix_charclass(result)
+
+    # Contexts in which a '/' starts a regex literal rather than division:
+    # after these punctuators (preregex1) or after the keyword `return`.
+    not_id_literal = not_id_literal_(r'[a-zA-Z0-9_$]')
+    preregex1 = r'[(,=:\[!&|?{};\r\n]'
+    preregex2 = r'%(not_id_literal)sreturn' % locals()
+
+    id_literal = id_literal_(r'[a-zA-Z0-9_$]')
+    id_literal_open = id_literal_(r'[a-zA-Z0-9_${\[(!+-]')
+    id_literal_close = id_literal_(r'[a-zA-Z0-9_$}\])"\047+-]')
+
+    dull = r'[^\047"/\000-\040]'
+
+    # The whole minifier is this one alternation; group index decides what
+    # the match is replaced with in space_subber below.
+    space_sub = _re.compile((
+        r'(%(dull)s+)'
+        r'|(%(strings)s%(dull)s*)'
+        r'|(?<=%(preregex1)s)'
+            r'%(space)s*(?:%(newline)s%(space)s*)*'
+            r'(%(regex)s%(dull)s*)'
+        r'|(?<=%(preregex2)s)'
+            r'%(space)s*(?:%(newline)s%(space)s)*'
+            r'(%(regex)s%(dull)s*)'
+        r'|(?<=%(id_literal_close)s)'
+            r'%(space)s*(?:(%(newline)s)%(space)s*)+'
+            r'(?=%(id_literal_open)s)'
+        r'|(?<=%(id_literal)s)(%(space)s)+(?=%(id_literal)s)'
+        r'|(?<=\+)(%(space)s)+(?=\+)'
+        r'|(?<=-)(%(space)s)+(?=-)'
+        r'|%(space)s+'
+        r'|(?:%(newline)s%(space)s*)+') % locals()).sub
+    #print space_sub.__self__.pattern
+
+    def space_subber(match):
+        """ Substitution callback """
+        # pylint: disable = C0321, R0911
+        # Groups 0-3: verbatim code/strings/regexes; group 4: collapse to one
+        # newline; groups 5-7: collapse to one space; anything else: drop.
+        groups = match.groups()
+        if groups[0]:
+            return groups[0]
+        elif groups[1]:
+            return groups[1]
+        elif groups[2]:
+            return groups[2]
+        elif groups[3]:
+            return groups[3]
+        elif groups[4]:
+            return '\n'
+        elif groups[5] or groups[6] or groups[7]:
+            return ' '
+        else:
+            return ''
+
+    def jsmin(script):  # pylint: disable = W0621
+        r"""
+        Minify javascript based on `jsmin.c by Douglas Crockford`_\.
+
+        Instead of parsing the stream char by char, it uses a regular
+        expression approach which minifies the whole script with one big
+        substitution regex.
+
+        .. _jsmin.c by Douglas Crockford:
+           http://www.crockford.com/javascript/jsmin.c
+
+        :Parameters:
+          `script` : ``str``
+            Script to minify
+
+        :Return: Minified script
+        :Rtype: ``str``
+        """
+        return space_sub(space_subber, '\n%s\n' % script).strip()
+
+    return jsmin
+
+# Module-level minifier instance (C extension if importable, else python).
+jsmin = _make_jsmin()
+
+
+def jsmin_for_posers(script):
+    r"""
+    Minify javascript based on `jsmin.c by Douglas Crockford`_\.
+
+    Instead of parsing the stream char by char, it uses a regular
+    expression approach which minifies the whole script with one big
+    substitution regex.
+
+    .. _jsmin.c by Douglas Crockford:
+       http://www.crockford.com/javascript/jsmin.c
+
+    :Warning: This function is the digest of a _make_jsmin() call. It just
+              utilizes the resulting regex. It's just for fun here and may
+              vanish any time. Use the `jsmin` function instead.
+
+    :Parameters:
+      `script` : ``str``
+        Script to minify
+
+    :Return: Minified script
+    :Rtype: ``str``
+    """
+    def subber(match):
+        """ Substitution callback """
+        # Same group dispatch as _make_jsmin's space_subber, written as a
+        # single short-circuiting expression.
+        groups = match.groups()
+        return (
+            groups[0] or
+            groups[1] or
+            groups[2] or
+            groups[3] or
+            (groups[4] and '\n') or
+            (groups[5] and ' ') or
+            (groups[6] and ' ') or
+            (groups[7] and ' ') or
+            '')
+
+    # The literal below is the fully expanded pattern that _make_jsmin()
+    # assembles programmatically; do not edit it by hand.
+    return _re.sub(
+        r'([^\047"/\000-\040]+)|((?:(?:\047[^\047\\\r\n]*(?:\\(?:[^\r\n]|\r?'
+        r'\n|\r)[^\047\\\r\n]*)*\047)|(?:"[^"\\\r\n]*(?:\\(?:[^\r\n]|\r?\n|'
+        r'\r)[^"\\\r\n]*)*"))[^\047"/\000-\040]*)|(?<=[(,=:\[!&|?{};\r\n])(?'
+        r':[\000-\011\013\014\016-\040]|(?:/\*[^*]*\*+(?:[^/*][^*]*\*+)*/))*'
+        r'(?:(?:(?://[^\r\n]*)?[\r\n])(?:[\000-\011\013\014\016-\040]|(?:/\*'
+        r'[^*]*\*+(?:[^/*][^*]*\*+)*/))*)*((?:/(?![\r\n/*])[^/\\\[\r\n]*(?:('
+        r'?:\\[^\r\n]|(?:\[[^\\\]\r\n]*(?:\\[^\r\n][^\\\]\r\n]*)*\]))[^/\\\['
+        r'\r\n]*)*/)[^\047"/\000-\040]*)|(?<=[\000-#%-,./:-@\[-^`{-~-]return'
+        r')(?:[\000-\011\013\014\016-\040]|(?:/\*[^*]*\*+(?:[^/*][^*]*\*+)*/'
+        r'))*(?:(?:(?://[^\r\n]*)?[\r\n])(?:[\000-\011\013\014\016-\040]|(?:'
+        r'/\*[^*]*\*+(?:[^/*][^*]*\*+)*/)))*((?:/(?![\r\n/*])[^/\\\[\r\n]*(?'
+        r':(?:\\[^\r\n]|(?:\[[^\\\]\r\n]*(?:\\[^\r\n][^\\\]\r\n]*)*\]))[^/'
+        r'\\\[\r\n]*)*/)[^\047"/\000-\040]*)|(?<=[^\000-!#%&(*,./:-@\[\\^`{|'
+        r'~])(?:[\000-\011\013\014\016-\040]|(?:/\*[^*]*\*+(?:[^/*][^*]*\*+)'
+        r'*/))*(?:((?:(?://[^\r\n]*)?[\r\n]))(?:[\000-\011\013\014\016-\040]'
+        r'|(?:/\*[^*]*\*+(?:[^/*][^*]*\*+)*/))*)+(?=[^\000-\040"#%-\047)*,./'
+        r':-@\\-^`|-~])|(?<=[^\000-#%-,./:-@\[-^`{-~-])((?:[\000-\011\013\01'
+        r'4\016-\040]|(?:/\*[^*]*\*+(?:[^/*][^*]*\*+)*/)))+(?=[^\000-#%-,./:'
+        r'-@\[-^`{-~-])|(?<=\+)((?:[\000-\011\013\014\016-\040]|(?:/\*[^*]*'
+        r'\*+(?:[^/*][^*]*\*+)*/)))+(?=\+)|(?<=-)((?:[\000-\011\013\014\016-'
+        r'\040]|(?:/\*[^*]*\*+(?:[^/*][^*]*\*+)*/)))+(?=-)|(?:[\000-\011\013'
+        r'\014\016-\040]|(?:/\*[^*]*\*+(?:[^/*][^*]*\*+)*/))+|(?:(?:(?://[^'
+        r'\r\n]*)?[\r\n])(?:[\000-\011\013\014\016-\040]|(?:/\*[^*]*\*+(?:[^'
+        r'/*][^*]*\*+)*/))*)+', subber, '\n%s\n' % script).strip()
+
+
+if __name__ == '__main__':
+    # Filter mode: minify stdin to stdout.
+    import sys as _sys
+    _sys.stdout.write(jsmin(_sys.stdin.read()))
diff --git a/src/inspector/build/xxd.py b/src/inspector/build/xxd.py
new file mode 100644
index 0000000..5a63a7c
--- /dev/null
+++ b/src/inspector/build/xxd.py
@@ -0,0 +1,28 @@
+# Copyright 2016 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Represent a file as a C++ constant string.
+
+Usage:
+python xxd.py VAR SOURCE DEST
+"""
+
+
+import sys
+import rjsmin
+
+
+def main():
+    # Minify SOURCE with rjsmin, then emit it into DEST as a C char-array
+    # constant named VAR (argv: VAR SOURCE DEST).
+    variable_name, input_filename, output_filename = sys.argv[1:]
+    with open(input_filename) as input_file:
+        input_text = input_file.read()
+    input_text = rjsmin.jsmin(input_text)
+    # Each byte becomes a 0xNN literal; no NUL terminator is appended, so
+    # consumers must use sizeof() rather than strlen().
+    hex_values = ['0x{0:02x}'.format(ord(char)) for char in input_text]
+    const_declaration = 'const char %s[] = {\n%s\n};\n' % (
+        variable_name, ', '.join(hex_values))
+    with open(output_filename, 'w') as output_file:
+        output_file.write(const_declaration)
+
+if __name__ == '__main__':
+    sys.exit(main())
diff --git a/src/inspector/debugger-script.js b/src/inspector/debugger-script.js
new file mode 100644
index 0000000..98910d6
--- /dev/null
+++ b/src/inspector/debugger-script.js
@@ -0,0 +1,712 @@
+/*
+ * Copyright (C) 2010 Google Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ *     * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ *     * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+"use strict";
+
+(function () {
+
+// Namespace object for all debugger glue exposed to the C++ inspector.
+var DebuggerScript = {};
+
+/** @enum */
+const PauseOnExceptionsState = {
+    DontPauseOnExceptions: 0,
+    PauseOnAllExceptions: 1,
+    PauseOnUncaughtExceptions: 2
+};
+DebuggerScript.PauseOnExceptionsState = PauseOnExceptionsState;
+
+// Start in the "don't pause" state and make V8's Debug object agree.
+DebuggerScript._pauseOnExceptionsState = DebuggerScript.PauseOnExceptionsState.DontPauseOnExceptions;
+Debug.clearBreakOnException();
+Debug.clearBreakOnUncaughtException();
+
+/**
+ * Formats the newly compiled script from an after-compile event.
+ * @param {?CompileEvent} eventData
+ * @return {?FormattedScript} null for scripts injected by the debugger itself
+ */
+DebuggerScript.getAfterCompileScript = function(eventData)
+{
+    var script = eventData.script().value();
+    if (!script.is_debugger_script)
+        return DebuggerScript._formatScript(eventData.script().value());
+    return null;
+}
+
+// Maps V8 ScopeType enum values to the protocol's scope-type strings.
+/** @type {!Map<!ScopeType, string>} */
+DebuggerScript._scopeTypeNames = new Map();
+DebuggerScript._scopeTypeNames.set(ScopeType.Global, "global");
+DebuggerScript._scopeTypeNames.set(ScopeType.Local, "local");
+DebuggerScript._scopeTypeNames.set(ScopeType.With, "with");
+DebuggerScript._scopeTypeNames.set(ScopeType.Closure, "closure");
+DebuggerScript._scopeTypeNames.set(ScopeType.Catch, "catch");
+DebuggerScript._scopeTypeNames.set(ScopeType.Block, "block");
+DebuggerScript._scopeTypeNames.set(ScopeType.Script, "script");
+
+/**
+ * Lists the scope chain of a function via its mirror, formatted for the
+ * frontend. Returns null for non-functions and for functions with no scopes.
+ * @param {function()} fun
+ * @return {?Array<!Scope>}
+ */
+DebuggerScript.getFunctionScopes = function(fun)
+{
+    var mirror = MakeMirror(fun);
+    if (!mirror.isFunction())
+        return null;
+    var functionMirror = /** @type {!FunctionMirror} */(mirror);
+    var count = functionMirror.scopeCount();
+    if (count == 0)
+        return null;
+    var result = [];
+    for (var i = 0; i < count; i++) {
+        var scopeDetails = functionMirror.scope(i).details();
+        var scopeObject = DebuggerScript._buildScopeObject(scopeDetails.type(), scopeDetails.object());
+        // Scopes that _buildScopeObject declines to materialize are skipped.
+        if (!scopeObject)
+            continue;
+        result.push({
+            type: /** @type {string} */(DebuggerScript._scopeTypeNames.get(scopeDetails.type())),
+            object: scopeObject,
+            name: scopeDetails.name() || ""
+        });
+    }
+    return result;
+}
+
+/**
+ * Computes the current source location of a generator object, falling back
+ * to the generator function's own location when the generator has not
+ * started. Returns null if `object` is not a generator or has no location.
+ * @param {Object} object
+ * @return {?RawLocation}
+ */
+DebuggerScript.getGeneratorObjectLocation = function(object)
+{
+    var mirror = MakeMirror(object, true /* transient */);
+    if (!mirror.isGenerator())
+        return null;
+    var generatorMirror = /** @type {!GeneratorMirror} */(mirror);
+    var funcMirror = generatorMirror.func();
+    if (!funcMirror.resolved())
+        return null;
+    var location = generatorMirror.sourceLocation() || funcMirror.sourceLocation();
+    var script = funcMirror.script();
+    if (script && location) {
+        return {
+            scriptId: "" + script.id(),
+            lineNumber: location.line,
+            columnNumber: location.column
+        };
+    }
+    return null;
+}
+
+/**
+ * Extracts preview entries from a Map, Set or Iterator.
+ * Note: returns undefined (implicitly) for any other kind of object.
+ * @param {Object} object
+ * @return {!Array<!{value: *}>|undefined}
+ */
+DebuggerScript.getCollectionEntries = function(object)
+{
+    var mirror = MakeMirror(object, true /* transient */);
+    if (mirror.isMap())
+        return /** @type {!MapMirror} */(mirror).entries();
+    if (mirror.isSet() || mirror.isIterator()) {
+        // Sets and iterators are normalized to the same {value} shape that
+        // MapMirror.entries() produces.
+        var result = [];
+        var values = mirror.isSet() ? /** @type {!SetMirror} */(mirror).values() : /** @type {!IteratorMirror} */(mirror).preview();
+        for (var i = 0; i < values.length; ++i)
+            result.push({ value: values[i] });
+        return result;
+    }
+}
+
+/**
+ * Parses the context id out of a script's context-data string, which has the
+ * format "<contextGroupId>,<contextId>,<auxData>". Returns 0 when the data
+ * is missing or malformed.
+ * @param {string|undefined} contextData
+ * @return {number}
+ */
+DebuggerScript._executionContextId = function(contextData)
+{
+    if (!contextData)
+        return 0;
+    var match = contextData.match(/^[^,]*,([^,]*),.*$/);
+    if (!match)
+        return 0;
+    return parseInt(match[1], 10) || 0;
+}
+
+/**
+ * Parses the auxiliary-data suffix out of a script's context-data string
+ * ("<contextGroupId>,<contextId>,<auxData>"); empty string when absent.
+ * @param {string|undefined} contextData
+ * @return {string}
+ */
+DebuggerScript._executionContextAuxData = function(contextData)
+{
+    if (!contextData)
+        return "";
+    var match = contextData.match(/^[^,]*,[^,]*,(.*)$/);
+    return match ? match[1] : "";
+}
+
+/**
+ * Returns all formatted scripts known to the debugger, optionally filtered
+ * to one context group; debugger-internal scripts are always excluded.
+ * @param {string} contextGroupId
+ * @return {!Array<!FormattedScript>}
+ */
+DebuggerScript.getScripts = function(contextGroupId)
+{
+    var result = [];
+    var scripts = Debug.scripts();
+    var contextDataPrefix = null;
+    if (contextGroupId)
+        contextDataPrefix = contextGroupId + ",";
+    for (var i = 0; i < scripts.length; ++i) {
+        var script = scripts[i];
+        if (contextDataPrefix) {
+            if (!script.context_data)
+                continue;
+            // Context data is a string in the following format:
+            // <contextGroupId>,<contextId>,<auxData>
+            if (script.context_data.indexOf(contextDataPrefix) !== 0)
+                continue;
+        }
+        if (script.is_debugger_script)
+            continue;
+        result.push(DebuggerScript._formatScript(script));
+    }
+    return result;
+}
+
+/**
+ * Converts a V8 Script object into the plain record the inspector frontend
+ * consumes, computing the script's end line/column from its line-ends table.
+ * @param {!Script} script
+ * @return {!FormattedScript}
+ */
+DebuggerScript._formatScript = function(script)
+{
+    var lineEnds = script.line_ends;
+    var lineCount = lineEnds.length;
+    var endLine = script.line_offset + lineCount - 1;
+    var endColumn;
+    // V8 will not count last line if script source ends with \n.
+    if (script.source[script.source.length - 1] === '\n') {
+        endLine += 1;
+        endColumn = 0;
+    } else {
+        if (lineCount === 1)
+            endColumn = script.source.length + script.column_offset;
+        else
+            endColumn = script.source.length - (lineEnds[lineCount - 2] + 1);
+    }
+    return {
+        id: script.id,
+        name: script.nameOrSourceURL(),
+        sourceURL: script.source_url,
+        sourceMappingURL: script.source_mapping_url,
+        source: script.source,
+        startLine: script.line_offset,
+        startColumn: script.column_offset,
+        endLine: endLine,
+        endColumn: endColumn,
+        executionContextId: DebuggerScript._executionContextId(script.context_data),
+        // Note that we cannot derive aux data from context id because of compilation cache.
+        executionContextAuxData: DebuggerScript._executionContextAuxData(script.context_data)
+    };
+}
+
+/**
+ * @param {!ExecutionState} execState
+ * @param {!BreakpointInfo} info
+ * @return {string|undefined}
+ */
+DebuggerScript.setBreakpoint = function(execState, info)
+{
+    var breakId = Debug.setScriptBreakPointById(info.sourceID, info.lineNumber, info.columnNumber, info.condition, undefined, Debug.BreakPositionAlignment.Statement);
+    var locations = Debug.findBreakPointActualLocations(breakId);
+    if (!locations.length)
+        return undefined;
+    info.lineNumber = locations[0].line;
+    info.columnNumber = locations[0].column;
+    return breakId.toString();
+}
+
+/**
+ * @param {!ExecutionState} execState
+ * @param {!{breakpointId: number}} info
+ */
+DebuggerScript.removeBreakpoint = function(execState, info)
+{
+    Debug.findBreakPoint(info.breakpointId, true);
+}
+
+/**
+ * @return {number}
+ */
+DebuggerScript.pauseOnExceptionsState = function()
+{
+    return DebuggerScript._pauseOnExceptionsState;
+}
+
+/**
+ * @param {number} newState
+ */
+DebuggerScript.setPauseOnExceptionsState = function(newState)
+{
+    DebuggerScript._pauseOnExceptionsState = newState;
+
+    if (DebuggerScript.PauseOnExceptionsState.PauseOnAllExceptions === newState)
+        Debug.setBreakOnException();
+    else
+        Debug.clearBreakOnException();
+
+    if (DebuggerScript.PauseOnExceptionsState.PauseOnUncaughtExceptions === newState)
+        Debug.setBreakOnUncaughtException();
+    else
+        Debug.clearBreakOnUncaughtException();
+}
+
+/**
+ * @param {!ExecutionState} execState
+ * @param {number} limit
+ * @return {!Array<!JavaScriptCallFrame>}
+ */
+DebuggerScript.currentCallFrames = function(execState, limit)
+{
+    var frames = [];
+    for (var i = 0; i < execState.frameCount() && (!limit || i < limit); ++i)
+        frames.push(DebuggerScript._frameMirrorToJSCallFrame(execState.frame(i)));
+    return frames;
+}
+
+/**
+ * @param {!ExecutionState} execState
+ */
+DebuggerScript.stepIntoStatement = function(execState)
+{
+    execState.prepareStep(Debug.StepAction.StepIn);
+}
+
+/**
+ * @param {!ExecutionState} execState
+ */
+DebuggerScript.stepFrameStatement = function(execState)
+{
+    execState.prepareStep(Debug.StepAction.StepFrame);
+}
+
+/**
+ * @param {!ExecutionState} execState
+ */
+DebuggerScript.stepOverStatement = function(execState)
+{
+    execState.prepareStep(Debug.StepAction.StepNext);
+}
+
+/**
+ * @param {!ExecutionState} execState
+ */
+DebuggerScript.stepOutOfFunction = function(execState)
+{
+    execState.prepareStep(Debug.StepAction.StepOut);
+}
+
+DebuggerScript.clearStepping = function()
+{
+    Debug.clearStepping();
+}
+
+// Returns array in form:
+//      [ 0, <v8_result_report> ] in case of success
+//   or [ 1, <general_error_message>, <compiler_message>, <line_number>, <column_number> ] in case of compile error, numbers are 1-based.
+// or throws exception with message.
+/**
+ * @param {number} scriptId
+ * @param {string} newSource
+ * @param {boolean} preview
+ * @return {!Array<*>}
+ */
+DebuggerScript.liveEditScriptSource = function(scriptId, newSource, preview)
+{
+    var scripts = Debug.scripts();
+    var scriptToEdit = null;
+    for (var i = 0; i < scripts.length; i++) {
+        if (scripts[i].id == scriptId) {
+            scriptToEdit = scripts[i];
+            break;
+        }
+    }
+    if (!scriptToEdit)
+        throw("Script not found");
+
+    var changeLog = [];
+    try {
+        var result = Debug.LiveEdit.SetScriptSource(scriptToEdit, newSource, preview, changeLog);
+        return [0, result.stack_modified];
+    } catch (e) {
+        if (e instanceof Debug.LiveEdit.Failure && "details" in e) {
+            var details = /** @type {!LiveEditErrorDetails} */(e.details);
+            if (details.type === "liveedit_compile_error") {
+                var startPosition = details.position.start;
+                return [1, String(e), String(details.syntaxErrorMessage), Number(startPosition.line), Number(startPosition.column)];
+            }
+        }
+        throw e;
+    }
+}
+
+/**
+ * @param {!ExecutionState} execState
+ */
+DebuggerScript.clearBreakpoints = function(execState)
+{
+    Debug.clearAllBreakPoints();
+}
+
+/**
+ * @param {!ExecutionState} execState
+ * @param {!{enabled: boolean}} info
+ */
+DebuggerScript.setBreakpointsActivated = function(execState, info)
+{
+    Debug.debuggerFlags().breakPointsActive.setValue(info.enabled);
+}
+
+/**
+ * @param {!BreakEvent} eventData
+ */
+DebuggerScript.getBreakpointNumbers = function(eventData)
+{
+    var breakpoints = eventData.breakPointsHit();
+    var numbers = [];
+    if (!breakpoints)
+        return numbers;
+
+    for (var i = 0; i < breakpoints.length; i++) {
+        var breakpoint = breakpoints[i];
+        var scriptBreakPoint = breakpoint.script_break_point();
+        numbers.push(scriptBreakPoint ? scriptBreakPoint.number() : breakpoint.number());
+    }
+    return numbers;
+}
+
+// NOTE: This function is performance critical, as it can be run on every
+// statement that generates an async event (like addEventListener) to support
+// asynchronous call stacks. Thus, when possible, initialize the data lazily.
+/**
+ * @param {!FrameMirror} frameMirror
+ * @return {!JavaScriptCallFrame}
+ */
+DebuggerScript._frameMirrorToJSCallFrame = function(frameMirror)
+{
+    // Stuff that cannot be initialized lazily (i.e. valid while paused with a valid break_id).
+    // The frameMirror and scopeMirror can be accessed only while paused on the debugger.
+    var frameDetails = frameMirror.details();
+
+    var funcObject = frameDetails.func();
+    var sourcePosition = frameDetails.sourcePosition();
+    var thisObject = frameDetails.receiver();
+
+    var isAtReturn = !!frameDetails.isAtReturn();
+    var returnValue = isAtReturn ? frameDetails.returnValue() : undefined;
+
+    var scopeMirrors = frameMirror.allScopes(false);
+    /** @type {!Array<number>} */
+    var scopeTypes = new Array(scopeMirrors.length);
+    /** @type {?Array<!Object>} */
+    var scopeObjects = new Array(scopeMirrors.length);
+    /** @type {!Array<string|undefined>} */
+    var scopeNames = new Array(scopeMirrors.length);
+    /** @type {?Array<number>} */
+    var scopeStartPositions = new Array(scopeMirrors.length);
+    /** @type {?Array<number>} */
+    var scopeEndPositions = new Array(scopeMirrors.length);
+    /** @type {?Array<function()|null>} */
+    var scopeFunctions = new Array(scopeMirrors.length);
+    for (var i = 0; i < scopeMirrors.length; ++i) {
+        var scopeDetails = scopeMirrors[i].details();
+        scopeTypes[i] = scopeDetails.type();
+        scopeObjects[i] = scopeDetails.object();
+        scopeNames[i] = scopeDetails.name();
+        scopeStartPositions[i] = scopeDetails.startPosition ? scopeDetails.startPosition() : 0;
+        scopeEndPositions[i] = scopeDetails.endPosition ? scopeDetails.endPosition() : 0;
+        scopeFunctions[i] = scopeDetails.func ? scopeDetails.func() : null;
+    }
+
+    // Calculated lazily.
+    var scopeChain;
+    var funcMirror;
+    var location;
+    /** @type {!Array<?RawLocation>} */
+    var scopeStartLocations;
+    /** @type {!Array<?RawLocation>} */
+    var scopeEndLocations;
+    var details;
+
+    /**
+     * @param {!ScriptMirror|undefined} script
+     * @param {number} pos
+     * @return {?RawLocation}
+     */
+    function createLocation(script, pos)
+    {
+        if (!script)
+            return null;
+
+        var location = script.locationFromPosition(pos, true);
+        return {
+            "lineNumber": location.line,
+            "columnNumber": location.column,
+            "scriptId": String(script.id())
+        }
+    }
+
+    /**
+     * @return {!Array<!Object>}
+     */
+    function ensureScopeChain()
+    {
+        if (!scopeChain) {
+            scopeChain = [];
+            scopeStartLocations = [];
+            scopeEndLocations = [];
+            for (var i = 0, j = 0; i < scopeObjects.length; ++i) {
+                var scopeObject = DebuggerScript._buildScopeObject(scopeTypes[i], scopeObjects[i]);
+                if (scopeObject) {
+                    scopeTypes[j] = scopeTypes[i];
+                    scopeNames[j] = scopeNames[i];
+                    scopeChain[j] = scopeObject;
+
+                    var funcMirror = scopeFunctions ? MakeMirror(scopeFunctions[i]) : null;
+                    if (!funcMirror || !funcMirror.isFunction())
+                        funcMirror = new UnresolvedFunctionMirror(funcObject);
+
+                    var script = /** @type {!FunctionMirror} */(funcMirror).script();
+                    scopeStartLocations[j] = createLocation(script, scopeStartPositions[i]);
+                    scopeEndLocations[j] = createLocation(script, scopeEndPositions[i]);
+                    ++j;
+                }
+            }
+            scopeTypes.length = scopeChain.length;
+            scopeNames.length = scopeChain.length;
+            scopeObjects = null; // Free for GC.
+            scopeFunctions = null;
+            scopeStartPositions = null;
+            scopeEndPositions = null;
+        }
+        return scopeChain;
+    }
+
+    /**
+     * @return {!JavaScriptCallFrameDetails}
+     */
+    function lazyDetails()
+    {
+        if (!details) {
+            var scopeObjects = ensureScopeChain();
+            var script = ensureFuncMirror().script();
+            /** @type {!Array<Scope>} */
+            var scopes = [];
+            for (var i = 0; i < scopeObjects.length; ++i) {
+                var scope = {
+                    "type": /** @type {string} */(DebuggerScript._scopeTypeNames.get(scopeTypes[i])),
+                    "object": scopeObjects[i],
+                };
+                if (scopeNames[i])
+                    scope.name = scopeNames[i];
+                if (scopeStartLocations[i])
+                    scope.startLocation = /** @type {!RawLocation} */(scopeStartLocations[i]);
+                if (scopeEndLocations[i])
+                    scope.endLocation = /** @type {!RawLocation} */(scopeEndLocations[i]);
+                scopes.push(scope);
+            }
+            details = {
+                "functionName": ensureFuncMirror().debugName(),
+                "location": {
+                    "lineNumber": line(),
+                    "columnNumber": column(),
+                    "scriptId": String(script.id())
+                },
+                "this": thisObject,
+                "scopeChain": scopes
+            };
+            var functionLocation = ensureFuncMirror().sourceLocation();
+            if (functionLocation) {
+                details.functionLocation = {
+                    "lineNumber": functionLocation.line,
+                    "columnNumber": functionLocation.column,
+                    "scriptId": String(script.id())
+                };
+            }
+            if (isAtReturn)
+                details.returnValue = returnValue;
+        }
+        return details;
+    }
+
+    /**
+     * @return {!FunctionMirror}
+     */
+    function ensureFuncMirror()
+    {
+        if (!funcMirror) {
+            funcMirror = MakeMirror(funcObject);
+            if (!funcMirror.isFunction())
+                funcMirror = new UnresolvedFunctionMirror(funcObject);
+        }
+        return /** @type {!FunctionMirror} */(funcMirror);
+    }
+
+    /**
+     * @return {!{line: number, column: number}}
+     */
+    function ensureLocation()
+    {
+        if (!location) {
+            var script = ensureFuncMirror().script();
+            if (script)
+                location = script.locationFromPosition(sourcePosition, true);
+            if (!location)
+                location = { line: 0, column: 0 };
+        }
+        return location;
+    }
+
+    /**
+     * @return {number}
+     */
+    function line()
+    {
+        return ensureLocation().line;
+    }
+
+    /**
+     * @return {number}
+     */
+    function column()
+    {
+        return ensureLocation().column;
+    }
+
+    /**
+     * @return {number}
+     */
+    function contextId()
+    {
+        var mirror = ensureFuncMirror();
+        // Old versions of V8 do not have a context() function on these objects.
+        if (!mirror.context)
+            return DebuggerScript._executionContextId(mirror.script().value().context_data);
+        var context = mirror.context();
+        if (context)
+            return DebuggerScript._executionContextId(context.data());
+        return 0;
+    }
+
+    /**
+     * @return {number|undefined}
+     */
+    function sourceID()
+    {
+        var script = ensureFuncMirror().script();
+        return script && script.id();
+    }
+
+    /**
+     * @param {string} expression
+     * @return {*}
+     */
+    function evaluate(expression)
+    {
+        return frameMirror.evaluate(expression, false).value();
+    }
+
+    /** @return {undefined} */
+    function restart()
+    {
+        return frameMirror.restart();
+    }
+
+    /**
+     * @param {number} scopeNumber
+     * @param {string} variableName
+     * @param {*} newValue
+     */
+    function setVariableValue(scopeNumber, variableName, newValue)
+    {
+        var scopeMirror = frameMirror.scope(scopeNumber);
+        if (!scopeMirror)
+            throw new Error("Incorrect scope index");
+        scopeMirror.setVariableValue(variableName, newValue);
+    }
+
+    return {
+        "sourceID": sourceID,
+        "line": line,
+        "column": column,
+        "contextId": contextId,
+        "thisObject": thisObject,
+        "evaluate": evaluate,
+        "restart": restart,
+        "setVariableValue": setVariableValue,
+        "isAtReturn": isAtReturn,
+        "details": lazyDetails
+    };
+}
+
+/**
+ * @param {number} scopeType
+ * @param {!Object} scopeObject
+ * @return {!Object|undefined}
+ */
+DebuggerScript._buildScopeObject = function(scopeType, scopeObject)
+{
+    var result;
+    switch (scopeType) {
+    case ScopeType.Local:
+    case ScopeType.Closure:
+    case ScopeType.Catch:
+    case ScopeType.Block:
+    case ScopeType.Script:
+        // For transient objects we create a "persistent" copy that contains
+        // the same properties.
+        // Reset scope object prototype to null so that the proto properties
+        // don't appear in the local scope section.
+        var properties = /** @type {!ObjectMirror} */(MakeMirror(scopeObject, true /* transient */)).properties();
+        // Almost always Script scope will be empty, so just filter out that noise.
+        // Also drop empty Block scopes, should we get any.
+        if (!properties.length && (scopeType === ScopeType.Script || scopeType === ScopeType.Block))
+            break;
+        result = { __proto__: null };
+        for (var j = 0; j < properties.length; j++) {
+            var name = properties[j].name();
+            if (name.length === 0 || name.charAt(0) === ".")
+                continue; // Skip internal variables like ".arguments" and variables with an empty name
+            result[name] = properties[j].value_;
+        }
+        break;
+    case ScopeType.Global:
+    case ScopeType.With:
+        result = scopeObject;
+        break;
+    }
+    return result;
+}
+
+// We never resolve Mirror by its handle so to avoid memory leaks caused by Mirrors in the cache we disable it.
+ToggleMirrorCache(false);
+
+return DebuggerScript;
+})();
diff --git a/src/inspector/debugger_script_externs.js b/src/inspector/debugger_script_externs.js
new file mode 100644
index 0000000..c7df61f
--- /dev/null
+++ b/src/inspector/debugger_script_externs.js
@@ -0,0 +1,522 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+/** @typedef {{
+        type: string,
+        object: !Object,
+        name: (string|undefined),
+        startLocation: (!RawLocation|undefined),
+        endLocation: (!RawLocation|undefined)
+    }} */
+var Scope;
+
+/** @typedef {{
+        scriptId: string,
+        lineNumber: number,
+        columnNumber: number
+    }} */
+var RawLocation;
+
+/** @typedef {{
+        id: number,
+        name: string,
+        sourceURL: (string|undefined),
+        sourceMappingURL: (string|undefined),
+        source: string,
+        startLine: number,
+        endLine: number,
+        startColumn: number,
+        endColumn: number,
+        executionContextId: number,
+        executionContextAuxData: string
+    }} */
+var FormattedScript;
+
+/** @typedef {{
+        functionName: string,
+        location: !RawLocation,
+        this: !Object,
+        scopeChain: !Array<!Scope>,
+        functionLocation: (RawLocation|undefined),
+        returnValue: (*|undefined)
+    }} */
+var JavaScriptCallFrameDetails;
+
+/** @typedef {{
+        sourceID: function():(number|undefined),
+        line: function():number,
+        column: function():number,
+        thisObject: !Object,
+        evaluate: function(string):*,
+        restart: function():undefined,
+        setVariableValue: function(number, string, *):undefined,
+        isAtReturn: boolean,
+        details: function():!JavaScriptCallFrameDetails
+    }} */
+var JavaScriptCallFrame;
+
+/**
+ * @const
+ */
+var Debug = {};
+
+Debug.setBreakOnException = function() {}
+
+Debug.clearBreakOnException = function() {}
+
+Debug.setBreakOnUncaughtException = function() {}
+
+/**
+ * @return {undefined}
+ */
+Debug.clearBreakOnUncaughtException = function() {}
+
+Debug.clearStepping = function() {}
+
+Debug.clearAllBreakPoints = function() {}
+
+/** @return {!Array<!Script>} */
+Debug.scripts = function() {}
+
+/**
+ * @param {number} scriptId
+ * @param {number=} line
+ * @param {number=} column
+ * @param {string=} condition
+ * @param {string=} groupId
+ * @param {Debug.BreakPositionAlignment=} positionAlignment
+ */
+Debug.setScriptBreakPointById = function(scriptId, line, column, condition, groupId, positionAlignment) {}
+
+/**
+ * @param {number} breakId
+ * @return {!Array<!SourceLocation>}
+ */
+Debug.findBreakPointActualLocations = function(breakId) {}
+
+/**
+ * @param {number} breakId
+ * @param {boolean} remove
+ * @return {!BreakPoint|undefined}
+ */
+Debug.findBreakPoint = function(breakId, remove) {}
+
+/** @return {!DebuggerFlags} */
+Debug.debuggerFlags = function() {}
+
+
+/** @enum */
+const BreakPositionAlignment = {
+    Statement: 0,
+    BreakPosition: 1
+};
+Debug.BreakPositionAlignment = BreakPositionAlignment;
+
+/** @enum */
+Debug.StepAction = { StepOut: 0,
+                     StepNext: 1,
+                     StepIn: 2,
+                     StepFrame: 3 };
+
+/** @enum */
+const ScriptCompilationType = { Host: 0,
+                              Eval: 1,
+                              JSON: 2 };
+Debug.ScriptCompilationType = ScriptCompilationType;
+
+
+/** @interface */
+function DebuggerFlag() {}
+
+/** @param {boolean} value */
+DebuggerFlag.prototype.setValue = function(value) {}
+
+
+/** @typedef {{
+ *    breakPointsActive: !DebuggerFlag
+ *  }}
+ */
+var DebuggerFlags;
+
+/** @const */
+var LiveEdit = {}
+
+/**
+ * @param {!Script} script
+ * @param {string} newSource
+ * @param {boolean} previewOnly
+ * @return {!{stack_modified: (boolean|undefined)}}
+ */
+LiveEdit.SetScriptSource = function(script, newSource, previewOnly, change_log) {}
+
+/** @constructor */
+function Failure() {}
+LiveEdit.Failure = Failure;
+
+Debug.LiveEdit = LiveEdit;
+
+/** @typedef {{
+ *    type: string,
+ *    syntaxErrorMessage: string,
+ *    position: !{start: !{line: number, column: number}},
+ *  }}
+ */
+var LiveEditErrorDetails;
+
+/** @typedef {{
+ *    breakpointId: number,
+ *    sourceID: number,
+ *    lineNumber: (number|undefined),
+ *    columnNumber: (number|undefined),
+ *    condition: (string|undefined),
+ *    interstatementLocation: (boolean|undefined),
+ *    }}
+ */
+var BreakpointInfo;
+
+
+/** @interface */
+function BreakPoint() {}
+
+/** @return {!BreakPoint|undefined} */
+BreakPoint.prototype.script_break_point = function() {}
+
+/** @return {number} */
+BreakPoint.prototype.number = function() {}
+
+
+/** @interface */
+function CompileEvent() {}
+
+/** @return {!ScriptMirror} */
+CompileEvent.prototype.script = function() {}
+
+
+/** @interface */
+function BreakEvent() {}
+
+/** @return {!Array<!BreakPoint>|undefined} */
+BreakEvent.prototype.breakPointsHit = function() {}
+
+
+/** @interface */
+function ExecutionState() {}
+
+/** @param {!Debug.StepAction} action */
+ExecutionState.prototype.prepareStep = function(action) {}
+
+/**
+ * @param {string} source
+ * @param {boolean} disableBreak
+ * @param {*=} additionalContext
+ */
+ExecutionState.prototype.evaluateGlobal = function(source, disableBreak, additionalContext) {}
+
+/** @return {number} */
+ExecutionState.prototype.frameCount = function() {}
+
+/**
+ * @param {number} index
+ * @return {!FrameMirror}
+ */
+ExecutionState.prototype.frame = function(index) {}
+
+/** @param {number} index */
+ExecutionState.prototype.setSelectedFrame = function(index) {}
+
+/** @return {number} */
+ExecutionState.prototype.selectedFrame = function() {}
+
+
+/** @enum */
+var ScopeType = { Global: 0,
+                  Local: 1,
+                  With: 2,
+                  Closure: 3,
+                  Catch: 4,
+                  Block: 5,
+                  Script: 6 };
+
+
+/** @typedef {{
+ *    script: number,
+ *    position: number,
+ *    line: number,
+ *    column:number,
+ *    start: number,
+ *    end: number,
+ *    }}
+ */
+var SourceLocation;
+
+/** @typedef {{
+ *    id: number,
+ *    context_data: (string|undefined),
+ *    source_url: (string|undefined),
+ *    source_mapping_url: (string|undefined),
+ *    is_debugger_script: boolean,
+ *    source: string,
+ *    line_ends: !Array<number>,
+ *    line_offset: number,
+ *    column_offset: number,
+ *    nameOrSourceURL: function():string,
+ *    compilationType: function():!ScriptCompilationType,
+ *    }}
+ */
+var Script;
+
+/** @interface */
+function ScopeDetails() {}
+
+/** @return {!Object} */
+ScopeDetails.prototype.object = function() {}
+
+/** @return {string|undefined} */
+ScopeDetails.prototype.name = function() {}
+
+/** @return {number} */
+ScopeDetails.prototype.type = function() {}
+
+
+/** @interface */
+function FrameDetails() {}
+
+/** @return {!Object} */
+FrameDetails.prototype.receiver = function() {}
+
+/** @return {function()} */
+FrameDetails.prototype.func = function() {}
+
+/** @return {boolean} */
+FrameDetails.prototype.isAtReturn = function() {}
+
+/** @return {number} */
+FrameDetails.prototype.sourcePosition = function() {}
+
+/** @return {*} */
+FrameDetails.prototype.returnValue = function() {}
+
+/** @return {number} */
+FrameDetails.prototype.scopeCount = function() {}
+
+
+/** @param {boolean} value */
+function ToggleMirrorCache(value) {}
+
+/**
+ * @param {*} value
+ * @param {boolean=} transient
+ * @return {!Mirror}
+ */
+function MakeMirror(value, transient) {}
+
+
+/** @interface */
+function Mirror() {}
+
+/** @return {boolean} */
+Mirror.prototype.isFunction = function() {}
+
+/** @return {boolean} */
+Mirror.prototype.isGenerator = function() {}
+
+/** @return {boolean} */
+Mirror.prototype.isMap = function() {}
+
+/** @return {boolean} */
+Mirror.prototype.isSet = function() {}
+
+/** @return {boolean} */
+Mirror.prototype.isIterator = function() {}
+
+
+/**
+ * @interface
+ * @extends {Mirror}
+ */
+function ObjectMirror() {}
+
+/** @return {!Array<!PropertyMirror>} */
+ObjectMirror.prototype.properties = function() {}
+
+
+/**
+ * @interface
+ * @extends {ObjectMirror}
+ */
+function FunctionMirror () {}
+
+/** @return {number} */
+FunctionMirror.prototype.scopeCount = function() {}
+
+/**
+ * @param {number} index
+ * @return {!ScopeMirror|undefined}
+ */
+FunctionMirror.prototype.scope = function(index) {}
+
+/** @return {boolean} */
+FunctionMirror.prototype.resolved = function() {}
+
+/** @return {function()} */
+FunctionMirror.prototype.value = function() {}
+
+/** @return {string} */
+FunctionMirror.prototype.debugName = function() {}
+
+/** @return {!ScriptMirror|undefined} */
+FunctionMirror.prototype.script = function() {}
+
+/** @return {!SourceLocation|undefined} */
+FunctionMirror.prototype.sourceLocation = function() {}
+
+/** @return {!ContextMirror|undefined} */
+FunctionMirror.prototype.context = function() {}
+
+/**
+ * @constructor
+ * @param {*} value
+ */
+function UnresolvedFunctionMirror(value) {}
+
+
+/**
+ * @interface
+ * @extends {ObjectMirror}
+ */
+function MapMirror () {}
+
+/**
+ * @param {number=} limit
+ * @return {!Array<!{key: *, value: *}>}
+ */
+MapMirror.prototype.entries = function(limit) {}
+
+
+/**
+ * @interface
+ * @extends {ObjectMirror}
+ */
+function SetMirror () {}
+
+/**
+ * @param {number=} limit
+ * @return {!Array<*>}
+ */
+SetMirror.prototype.values = function(limit) {}
+
+
+/**
+ * @interface
+ * @extends {ObjectMirror}
+ */
+function IteratorMirror () {}
+
+/**
+ * @param {number=} limit
+ * @return {!Array<*>}
+ */
+IteratorMirror.prototype.preview = function(limit) {}
+
+
+/**
+ * @interface
+ * @extends {ObjectMirror}
+ */
+function GeneratorMirror () {}
+
+/** @return {string} */
+GeneratorMirror.prototype.status = function() {}
+
+/** @return {!SourceLocation|undefined} */
+GeneratorMirror.prototype.sourceLocation = function() {}
+
+/** @return {!FunctionMirror} */
+GeneratorMirror.prototype.func = function() {}
+
+
+/**
+ * @interface
+ * @extends {Mirror}
+ */
+function PropertyMirror() {}
+
+/** @return {!Mirror} */
+PropertyMirror.prototype.value = function() {}
+
+/** @return {string} */
+PropertyMirror.prototype.name = function() {}
+
+/** @type {*} */
+PropertyMirror.prototype.value_;
+
+/**
+ * @interface
+ * @extends {Mirror}
+ */
+function FrameMirror() {}
+
+/**
+ * @param {boolean=} ignoreNestedScopes
+ * @return {!Array<!ScopeMirror>}
+ */
+FrameMirror.prototype.allScopes = function(ignoreNestedScopes) {}
+
+/** @return {!FrameDetails} */
+FrameMirror.prototype.details = function() {}
+
+/**
+ * @param {string} source
+ * @param {boolean} disableBreak
+ */
+FrameMirror.prototype.evaluate = function(source, disableBreak) {}
+
+FrameMirror.prototype.restart = function() {}
+
+/** @param {number} index */
+FrameMirror.prototype.scope = function(index) {}
+
+
+/**
+ * @interface
+ * @extends {Mirror}
+ */
+function ScriptMirror() {}
+
+/** @return {!Script} */
+ScriptMirror.prototype.value = function() {}
+
+/** @return {number} */
+ScriptMirror.prototype.id = function() {}
+
+/**
+ * @param {number} position
+ * @param {boolean=} includeResourceOffset
+ */
+ScriptMirror.prototype.locationFromPosition = function(position, includeResourceOffset) {}
+
+
+/**
+ * @interface
+ * @extends {Mirror}
+ */
+function ScopeMirror() {}
+
+/** @return {!ScopeDetails} */
+ScopeMirror.prototype.details = function() {}
+
+/**
+ * @param {string} name
+ * @param {*} newValue
+ */
+ScopeMirror.prototype.setVariableValue = function(name, newValue) {}
+
+/**
+ * @interface
+ * @extends {Mirror}
+ */
+function ContextMirror() {}
+
+/** @return {string|undefined} */
+ContextMirror.prototype.data = function() {}
diff --git a/src/inspector/injected-script-native.cc b/src/inspector/injected-script-native.cc
new file mode 100644
index 0000000..fcf2ead
--- /dev/null
+++ b/src/inspector/injected-script-native.cc
@@ -0,0 +1,89 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/inspector/injected-script-native.h"
+
+namespace v8_inspector {
+
+InjectedScriptNative::InjectedScriptNative(v8::Isolate* isolate)
+    : m_lastBoundObjectId(1), m_isolate(isolate) {}
+
+static const char privateKeyName[] = "v8-inspector#injectedScript";
+
+InjectedScriptNative::~InjectedScriptNative() {}
+
+void InjectedScriptNative::setOnInjectedScriptHost(
+    v8::Local<v8::Object> injectedScriptHost) {
+  v8::HandleScope handleScope(m_isolate);
+  v8::Local<v8::External> external = v8::External::New(m_isolate, this);
+  v8::Local<v8::Private> privateKey = v8::Private::ForApi(
+      m_isolate, v8::String::NewFromUtf8(m_isolate, privateKeyName,
+                                         v8::NewStringType::kInternalized)
+                     .ToLocalChecked());
+  injectedScriptHost->SetPrivate(m_isolate->GetCurrentContext(), privateKey,
+                                 external);
+}
+
+InjectedScriptNative* InjectedScriptNative::fromInjectedScriptHost(
+    v8::Isolate* isolate, v8::Local<v8::Object> injectedScriptObject) {
+  v8::HandleScope handleScope(isolate);
+  v8::Local<v8::Context> context = isolate->GetCurrentContext();
+  v8::Local<v8::Private> privateKey = v8::Private::ForApi(
+      isolate, v8::String::NewFromUtf8(isolate, privateKeyName,
+                                       v8::NewStringType::kInternalized)
+                   .ToLocalChecked());
+  v8::Local<v8::Value> value =
+      injectedScriptObject->GetPrivate(context, privateKey).ToLocalChecked();
+  DCHECK(value->IsExternal());
+  v8::Local<v8::External> external = value.As<v8::External>();
+  return static_cast<InjectedScriptNative*>(external->Value());
+}
+
+int InjectedScriptNative::bind(v8::Local<v8::Value> value,
+                               const String16& groupName) {
+  if (m_lastBoundObjectId <= 0) m_lastBoundObjectId = 1;
+  int id = m_lastBoundObjectId++;
+  m_idToWrappedObject[id] =
+      wrapUnique(new v8::Global<v8::Value>(m_isolate, value));
+  addObjectToGroup(id, groupName);
+  return id;
+}
+
+void InjectedScriptNative::unbind(int id) {
+  m_idToWrappedObject.erase(id);
+  m_idToObjectGroupName.erase(id);
+}
+
+v8::Local<v8::Value> InjectedScriptNative::objectForId(int id) {
+  auto iter = m_idToWrappedObject.find(id);
+  return iter != m_idToWrappedObject.end() ? iter->second->Get(m_isolate)
+                                           : v8::Local<v8::Value>();
+}
+
+void InjectedScriptNative::addObjectToGroup(int objectId,
+                                            const String16& groupName) {
+  if (groupName.isEmpty()) return;
+  if (objectId <= 0) return;
+  m_idToObjectGroupName[objectId] = groupName;
+  m_nameToObjectGroup[groupName].push_back(
+      objectId);  // Creates an empty vector if the key is not present
+}
+
+void InjectedScriptNative::releaseObjectGroup(const String16& groupName) {
+  if (groupName.isEmpty()) return;
+  NameToObjectGroup::iterator groupIt = m_nameToObjectGroup.find(groupName);
+  if (groupIt == m_nameToObjectGroup.end()) return;
+  for (int id : groupIt->second) unbind(id);
+  m_nameToObjectGroup.erase(groupIt);
+}
+
+String16 InjectedScriptNative::groupName(int objectId) const {
+  if (objectId <= 0) return String16();
+  IdToObjectGroupName::const_iterator iterator =
+      m_idToObjectGroupName.find(objectId);
+  return iterator != m_idToObjectGroupName.end() ? iterator->second
+                                                 : String16();
+}
+
+}  // namespace v8_inspector
diff --git a/src/inspector/injected-script-native.h b/src/inspector/injected-script-native.h
new file mode 100644
index 0000000..3bdf247
--- /dev/null
+++ b/src/inspector/injected-script-native.h
@@ -0,0 +1,47 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_INSPECTOR_INJECTEDSCRIPTNATIVE_H_
+#define V8_INSPECTOR_INJECTEDSCRIPTNATIVE_H_
+
+#include <vector>
+
+#include "src/inspector/protocol/Protocol.h"
+
+#include "include/v8.h"
+
+namespace v8_inspector {
+
+class InjectedScriptNative final {
+ public:
+  explicit InjectedScriptNative(v8::Isolate*);
+  ~InjectedScriptNative();
+
+  void setOnInjectedScriptHost(v8::Local<v8::Object>);
+  static InjectedScriptNative* fromInjectedScriptHost(v8::Isolate* isolate,
+                                                      v8::Local<v8::Object>);
+
+  int bind(v8::Local<v8::Value>, const String16& groupName);
+  void unbind(int id);
+  v8::Local<v8::Value> objectForId(int id);
+
+  void releaseObjectGroup(const String16& groupName);
+  String16 groupName(int objectId) const;
+
+ private:
+  void addObjectToGroup(int objectId, const String16& groupName);
+
+  int m_lastBoundObjectId;
+  v8::Isolate* m_isolate;
+  protocol::HashMap<int, std::unique_ptr<v8::Global<v8::Value>>>
+      m_idToWrappedObject;
+  typedef protocol::HashMap<int, String16> IdToObjectGroupName;
+  IdToObjectGroupName m_idToObjectGroupName;
+  typedef protocol::HashMap<String16, std::vector<int>> NameToObjectGroup;
+  NameToObjectGroup m_nameToObjectGroup;
+};
+
+}  // namespace v8_inspector
+
+#endif  // V8_INSPECTOR_INJECTEDSCRIPTNATIVE_H_
diff --git a/src/inspector/injected-script-source.js b/src/inspector/injected-script-source.js
new file mode 100644
index 0000000..39c6c9c
--- /dev/null
+++ b/src/inspector/injected-script-source.js
@@ -0,0 +1,1076 @@
+/*
+ * Copyright (C) 2007 Apple Inc.  All rights reserved.
+ * Copyright (C) 2013 Google Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1.  Redistributions of source code must retain the above copyright
+ *     notice, this list of conditions and the following disclaimer.
+ * 2.  Redistributions in binary form must reproduce the above copyright
+ *     notice, this list of conditions and the following disclaimer in the
+ *     documentation and/or other materials provided with the distribution.
+ * 3.  Neither the name of Apple Computer, Inc. ("Apple") nor the names of
+ *     its contributors may be used to endorse or promote products derived
+ *     from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+"use strict";
+
+/**
+ * @param {!InjectedScriptHostClass} InjectedScriptHost
+ * @param {!Window|!WorkerGlobalScope} inspectedGlobalObject
+ * @param {number} injectedScriptId
+ * @suppress {uselessCode}
+ */
+(function (InjectedScriptHost, inspectedGlobalObject, injectedScriptId) {
+
+/**
+ * Protect against Object overwritten by the user code.
+ * @suppress {duplicate}
+ */
+var Object = /** @type {function(new:Object, *=)} */ ({}.constructor);
+
+/**
+ * @param {!Array.<T>} array
+ * @param {...} var_args
+ * @template T
+ */
+function push(array, var_args)
+{
+    for (var i = 1; i < arguments.length; ++i)
+        array[array.length] = arguments[i];
+}
+
+/**
+ * @param {*} obj
+ * @return {string}
+ * @suppress {uselessCode}
+ */
+function toString(obj)
+{
+    // We don't use String(obj) because String could be overridden.
+    // Also the ("" + obj) expression may throw.
+    try {
+        return "" + obj;
+    } catch (e) {
+        var name = InjectedScriptHost.internalConstructorName(obj) || InjectedScriptHost.subtype(obj) || (typeof obj);
+        return "#<" + name + ">";
+    }
+}
+
+/**
+ * @param {*} obj
+ * @return {string}
+ */
+function toStringDescription(obj)
+{
+    if (typeof obj === "number" && obj === 0 && 1 / obj < 0)
+        return "-0"; // Negative zero.
+    return toString(obj);
+}
+
+/**
+ * @param {T} obj
+ * @return {T}
+ * @template T
+ */
+function nullifyObjectProto(obj)
+{
+    if (obj && typeof obj === "object")
+        obj.__proto__ = null;
+    return obj;
+}
+
+/**
+ * @param {number|string} obj
+ * @return {boolean}
+ */
+function isUInt32(obj)
+{
+    if (typeof obj === "number")
+        return obj >>> 0 === obj && (obj > 0 || 1 / obj > 0);
+    return "" + (obj >>> 0) === obj;
+}
+
+/**
+ * FireBug's array detection.
+ * @param {*} obj
+ * @return {boolean}
+ */
+function isArrayLike(obj)
+{
+    if (typeof obj !== "object")
+        return false;
+    try {
+        if (typeof obj.splice === "function") {
+            if (!InjectedScriptHost.objectHasOwnProperty(/** @type {!Object} */ (obj), "length"))
+                return false;
+            var len = obj.length;
+            return typeof len === "number" && isUInt32(len);
+        }
+    } catch (e) {
+    }
+    return false;
+}
+
+/**
+ * @param {number} a
+ * @param {number} b
+ * @return {number}
+ */
+function max(a, b)
+{
+    return a > b ? a : b;
+}
+
+/**
+ * FIXME: Remove once ES6 is supported natively by JS compiler.
+ * @param {*} obj
+ * @return {boolean}
+ */
+function isSymbol(obj)
+{
+    var type = typeof obj;
+    return (type === "symbol");
+}
+
+/**
+ * DOM Attributes which have observable side effect on getter, in the form of
+ *   {interfaceName1: {attributeName1: true,
+ *                     attributeName2: true,
+ *                     ...},
+ *    interfaceName2: {...},
+ *    ...}
+ * @type {!Object<string, !Object<string, boolean>>}
+ * @const
+ */
+var domAttributesWithObservableSideEffectOnGet = nullifyObjectProto({});
+domAttributesWithObservableSideEffectOnGet["Request"] = nullifyObjectProto({});
+domAttributesWithObservableSideEffectOnGet["Request"]["body"] = true;
+domAttributesWithObservableSideEffectOnGet["Response"] = nullifyObjectProto({});
+domAttributesWithObservableSideEffectOnGet["Response"]["body"] = true;
+
+/**
+ * @param {!Object} object
+ * @param {string} attribute
+ * @return {boolean}
+ */
+function doesAttributeHaveObservableSideEffectOnGet(object, attribute)
+{
+    for (var interfaceName in domAttributesWithObservableSideEffectOnGet) {
+        var interfaceFunction = inspectedGlobalObject[interfaceName];
+        // Call to instanceOf looks safe after typeof check.
+        var isInstance = typeof interfaceFunction === "function" && /* suppressBlacklist */ object instanceof interfaceFunction;
+        if (isInstance)
+            return attribute in domAttributesWithObservableSideEffectOnGet[interfaceName];
+    }
+    return false;
+}
+
+/**
+ * @constructor
+ */
+var InjectedScript = function()
+{
+}
+
+/**
+ * @type {!Object.<string, boolean>}
+ * @const
+ */
+InjectedScript.primitiveTypes = {
+    "undefined": true,
+    "boolean": true,
+    "number": true,
+    "string": true,
+    __proto__: null
+}
+
+/**
+ * @type {!Object<string, string>}
+ * @const
+ */
+InjectedScript.closureTypes = { __proto__: null };
+InjectedScript.closureTypes["local"] = "Local";
+InjectedScript.closureTypes["closure"] = "Closure";
+InjectedScript.closureTypes["catch"] = "Catch";
+InjectedScript.closureTypes["block"] = "Block";
+InjectedScript.closureTypes["script"] = "Script";
+InjectedScript.closureTypes["with"] = "With Block";
+InjectedScript.closureTypes["global"] = "Global";
+
+InjectedScript.prototype = {
+    /**
+     * @param {*} object
+     * @return {boolean}
+     */
+    isPrimitiveValue: function(object)
+    {
+        // FIXME(33716): typeof document.all is always 'undefined'.
+        return InjectedScript.primitiveTypes[typeof object] && !this._isHTMLAllCollection(object);
+    },
+
+    /**
+     * @param {*} object
+     * @return {boolean}
+     */
+    _shouldPassByValue: function(object)
+    {
+        return typeof object === "object" && InjectedScriptHost.subtype(object) === "internal#location";
+    },
+
+    /**
+     * @param {*} object
+     * @param {string} groupName
+     * @param {boolean} forceValueType
+     * @param {boolean} generatePreview
+     * @return {!RuntimeAgent.RemoteObject}
+     */
+    wrapObject: function(object, groupName, forceValueType, generatePreview)
+    {
+        return this._wrapObject(object, groupName, forceValueType, generatePreview);
+    },
+
+    /**
+     * @param {!Array<!Object>} array
+     * @param {string} property
+     * @param {string} groupName
+     * @param {boolean} forceValueType
+     * @param {boolean} generatePreview
+     */
+    wrapPropertyInArray: function(array, property, groupName, forceValueType, generatePreview)
+    {
+        for (var i = 0; i < array.length; ++i) {
+            if (typeof array[i] === "object" && property in array[i])
+                array[i][property] = this.wrapObject(array[i][property], groupName, forceValueType, generatePreview);
+        }
+    },
+
+    /**
+     * @param {!Array<*>} array
+     * @param {string} groupName
+     * @param {boolean} forceValueType
+     * @param {boolean} generatePreview
+     */
+    wrapObjectsInArray: function(array, groupName, forceValueType, generatePreview)
+    {
+        for (var i = 0; i < array.length; ++i)
+            array[i] = this.wrapObject(array[i], groupName, forceValueType, generatePreview);
+    },
+
+    /**
+     * @param {!Object} table
+     * @param {!Array.<string>|string|boolean} columns
+     * @return {!RuntimeAgent.RemoteObject}
+     */
+    wrapTable: function(table, columns)
+    {
+        var columnNames = null;
+        if (typeof columns === "string")
+            columns = [columns];
+        if (InjectedScriptHost.subtype(columns) === "array") {
+            columnNames = [];
+            for (var i = 0; i < columns.length; ++i)
+                columnNames[i] = toString(columns[i]);
+        }
+        return this._wrapObject(table, "console", false, true, columnNames, true);
+    },
+
+    /**
+     * This method cannot throw.
+     * @param {*} object
+     * @param {string=} objectGroupName
+     * @param {boolean=} forceValueType
+     * @param {boolean=} generatePreview
+     * @param {?Array.<string>=} columnNames
+     * @param {boolean=} isTable
+     * @param {boolean=} doNotBind
+     * @param {*=} customObjectConfig
+     * @return {!RuntimeAgent.RemoteObject}
+     * @suppress {checkTypes}
+     */
+    _wrapObject: function(object, objectGroupName, forceValueType, generatePreview, columnNames, isTable, doNotBind, customObjectConfig)
+    {
+        try {
+            return new InjectedScript.RemoteObject(object, objectGroupName, doNotBind, forceValueType, generatePreview, columnNames, isTable, undefined, customObjectConfig);
+        } catch (e) {
+            try {
+                var description = injectedScript._describe(e);
+            } catch (ex) {
+                var description = "<failed to convert exception to string>";
+            }
+            return new InjectedScript.RemoteObject(description);
+        }
+    },
+
+    /**
+     * @param {!Object|symbol} object
+     * @param {string=} objectGroupName
+     * @return {string}
+     */
+    _bind: function(object, objectGroupName)
+    {
+        var id = InjectedScriptHost.bind(object, objectGroupName || "");
+        return "{\"injectedScriptId\":" + injectedScriptId + ",\"id\":" + id + "}";
+    },
+
+    /**
+     * @param {!Object} object
+     * @param {string} objectGroupName
+     * @param {boolean} ownProperties
+     * @param {boolean} accessorPropertiesOnly
+     * @param {boolean} generatePreview
+     * @return {!Array<!RuntimeAgent.PropertyDescriptor>|boolean}
+     */
+    getProperties: function(object, objectGroupName, ownProperties, accessorPropertiesOnly, generatePreview)
+    {
+        var subtype = this._subtype(object);
+        if (subtype === "internal#scope") {
+            // Internally, scope contains object with scope variables and additional information like type,
+            // we use additional information for preview and would like to report variables as scope
+            // properties.
+            object = object.object;
+        }
+
+        var descriptors = [];
+        var iter = this._propertyDescriptors(object, ownProperties, accessorPropertiesOnly, undefined);
+        // Go over properties, wrap object values.
+        for (var descriptor of iter) {
+            if (subtype === "internal#scopeList" && descriptor.name === "length")
+                continue;
+            if ("get" in descriptor)
+                descriptor.get = this._wrapObject(descriptor.get, objectGroupName);
+            if ("set" in descriptor)
+                descriptor.set = this._wrapObject(descriptor.set, objectGroupName);
+            if ("value" in descriptor)
+                descriptor.value = this._wrapObject(descriptor.value, objectGroupName, false, generatePreview);
+            if (!("configurable" in descriptor))
+                descriptor.configurable = false;
+            if (!("enumerable" in descriptor))
+                descriptor.enumerable = false;
+            if ("symbol" in descriptor)
+                descriptor.symbol = this._wrapObject(descriptor.symbol, objectGroupName);
+            push(descriptors, descriptor);
+        }
+        return descriptors;
+    },
+
+    /**
+     * @param {!Object} object
+     * @return {?Object}
+     */
+    _objectPrototype: function(object)
+    {
+        if (InjectedScriptHost.subtype(object) === "proxy")
+            return null;
+        try {
+            return Object.getPrototypeOf(object);
+        } catch (e) {
+            return null;
+        }
+    },
+
+    /**
+     * @param {!Object} object
+     * @param {boolean=} ownProperties
+     * @param {boolean=} accessorPropertiesOnly
+     * @param {?Array.<string>=} propertyNamesOnly
+     */
+    _propertyDescriptors: function*(object, ownProperties, accessorPropertiesOnly, propertyNamesOnly)
+    {
+        var propertyProcessed = { __proto__: null };
+
+        /**
+         * @param {?Object} o
+         * @param {!Iterable<string|symbol|number>|!Array<string|number|symbol>} properties
+         */
+        function* process(o, properties)
+        {
+            for (var property of properties) {
+                var name;
+                if (isSymbol(property))
+                    name = /** @type {string} */ (injectedScript._describe(property));
+                else
+                    name = typeof property === "number" ? ("" + property) : /** @type {string} */(property);
+
+                if (propertyProcessed[property])
+                    continue;
+
+                try {
+                    propertyProcessed[property] = true;
+                    var descriptor = nullifyObjectProto(Object.getOwnPropertyDescriptor(o, property));
+                    if (descriptor) {
+                        if (accessorPropertiesOnly && !("get" in descriptor || "set" in descriptor))
+                            continue;
+                        if ("get" in descriptor && "set" in descriptor && name != "__proto__" && InjectedScriptHost.formatAccessorsAsProperties(object, descriptor.get) && !doesAttributeHaveObservableSideEffectOnGet(object, name)) {
+                            descriptor.value = object[property];
+                            descriptor.isOwn = true;
+                            delete descriptor.get;
+                            delete descriptor.set;
+                        }
+                    } else {
+                        // Not all bindings provide proper descriptors. Fall back to the writable, configurable property.
+                        if (accessorPropertiesOnly)
+                            continue;
+                        try {
+                            descriptor = { name: name, value: o[property], writable: false, configurable: false, enumerable: false, __proto__: null };
+                            if (o === object)
+                                descriptor.isOwn = true;
+                            yield descriptor;
+                        } catch (e) {
+                            // Silent catch.
+                        }
+                        continue;
+                    }
+                } catch (e) {
+                    if (accessorPropertiesOnly)
+                        continue;
+                    var descriptor = { __proto__: null };
+                    descriptor.value = e;
+                    descriptor.wasThrown = true;
+                }
+
+                descriptor.name = name;
+                if (o === object)
+                    descriptor.isOwn = true;
+                if (isSymbol(property))
+                    descriptor.symbol = property;
+                yield descriptor;
+            }
+        }
+
+        if (propertyNamesOnly) {
+            for (var i = 0; i < propertyNamesOnly.length; ++i) {
+                var name = propertyNamesOnly[i];
+                for (var o = object; this._isDefined(o); o = this._objectPrototype(o)) {
+                    if (InjectedScriptHost.objectHasOwnProperty(o, name)) {
+                        for (var descriptor of process(o, [name]))
+                            yield descriptor;
+                        break;
+                    }
+                    if (ownProperties)
+                        break;
+                }
+            }
+            return;
+        }
+
+        /**
+         * @param {number} length
+         */
+        function* arrayIndexNames(length)
+        {
+            for (var i = 0; i < length; ++i)
+                yield "" + i;
+        }
+
+        var skipGetOwnPropertyNames;
+        try {
+            skipGetOwnPropertyNames = InjectedScriptHost.subtype(object) === "typedarray" && object.length > 500000;
+        } catch (e) {
+        }
+
+        for (var o = object; this._isDefined(o); o = this._objectPrototype(o)) {
+            if (InjectedScriptHost.subtype(o) === "proxy")
+                continue;
+            if (skipGetOwnPropertyNames && o === object) {
+                // Avoid OOM crashes from getting all own property names of a large TypedArray.
+                for (var descriptor of process(o, arrayIndexNames(o.length)))
+                    yield descriptor;
+            } else {
+                // First call Object.keys() to enforce ordering of the property descriptors.
+                for (var descriptor of process(o, Object.keys(/** @type {!Object} */ (o))))
+                    yield descriptor;
+                for (var descriptor of process(o, Object.getOwnPropertyNames(/** @type {!Object} */ (o))))
+                    yield descriptor;
+            }
+            if (Object.getOwnPropertySymbols) {
+                for (var descriptor of process(o, Object.getOwnPropertySymbols(/** @type {!Object} */ (o))))
+                    yield descriptor;
+            }
+            if (ownProperties) {
+                var proto = this._objectPrototype(o);
+                if (proto && !accessorPropertiesOnly)
+                    yield { name: "__proto__", value: proto, writable: true, configurable: true, enumerable: false, isOwn: true, __proto__: null };
+                break;
+            }
+        }
+    },
+
+    /**
+     * @param {string|undefined} objectGroupName
+     * @param {*} jsonMLObject
+     * @throws {string} error message
+     */
+    _substituteObjectTagsInCustomPreview: function(objectGroupName, jsonMLObject)
+    {
+        var maxCustomPreviewRecursionDepth = 20;
+        this._customPreviewRecursionDepth = (this._customPreviewRecursionDepth || 0) + 1
+        try {
+            if (this._customPreviewRecursionDepth >= maxCustomPreviewRecursionDepth)
+                throw new Error("Too deep hierarchy of inlined custom previews");
+
+            if (!isArrayLike(jsonMLObject))
+                return;
+
+            if (jsonMLObject[0] === "object") {
+                var attributes = jsonMLObject[1];
+                var originObject = attributes["object"];
+                var config = attributes["config"];
+                if (typeof originObject === "undefined")
+                    throw new Error("Illegal format: obligatory attribute \"object\" isn't specified");
+
+                jsonMLObject[1] = this._wrapObject(originObject, objectGroupName, false, false, null, false, false, config);
+                return;
+            }
+
+            for (var i = 0; i < jsonMLObject.length; ++i)
+                this._substituteObjectTagsInCustomPreview(objectGroupName, jsonMLObject[i]);
+        } finally {
+            this._customPreviewRecursionDepth--;
+        }
+    },
+
+    /**
+     * @param {*} object
+     * @return {boolean}
+     */
+    _isDefined: function(object)
+    {
+        return !!object || this._isHTMLAllCollection(object);
+    },
+
+    /**
+     * @param {*} object
+     * @return {boolean}
+     */
+    _isHTMLAllCollection: function(object)
+    {
+        // document.all is reported as undefined, but we still want to process it.
+        return (typeof object === "undefined") && !!InjectedScriptHost.subtype(object);
+    },
+
+    /**
+     * @param {*} obj
+     * @return {?string}
+     */
+    _subtype: function(obj)
+    {
+        if (obj === null)
+            return "null";
+
+        if (this.isPrimitiveValue(obj))
+            return null;
+
+        var subtype = InjectedScriptHost.subtype(obj);
+        if (subtype)
+            return subtype;
+
+        if (isArrayLike(obj))
+            return "array";
+
+        // If owning frame has navigated to somewhere else window properties will be undefined.
+        return null;
+    },
+
+    /**
+     * @param {*} obj
+     * @return {?string}
+     */
+    _describe: function(obj)
+    {
+        if (this.isPrimitiveValue(obj))
+            return null;
+
+        var subtype = this._subtype(obj);
+
+        if (subtype === "regexp")
+            return toString(obj);
+
+        if (subtype === "date")
+            return toString(obj);
+
+        if (subtype === "node") {
+            var description = "";
+            if (obj.nodeName)
+                description = obj.nodeName.toLowerCase();
+            else if (obj.constructor)
+                description = obj.constructor.name.toLowerCase();
+
+            switch (obj.nodeType) {
+            case 1 /* Node.ELEMENT_NODE */:
+                description += obj.id ? "#" + obj.id : "";
+                var className = obj.className;
+                description += (className && typeof className === "string") ? "." + className.trim().replace(/\s+/g, ".") : "";
+                break;
+            case 10 /*Node.DOCUMENT_TYPE_NODE */:
+                description = "<!DOCTYPE " + description + ">";
+                break;
+            }
+            return description;
+        }
+
+        if (subtype === "proxy")
+            return "Proxy";
+
+        var className = InjectedScriptHost.internalConstructorName(obj);
+        if (subtype === "array" || subtype === "typedarray") {
+            if (typeof obj.length === "number")
+                className += "[" + obj.length + "]";
+            return className;
+        }
+
+        if (typeof obj === "function")
+            return toString(obj);
+
+        if (isSymbol(obj)) {
+            try {
+                // It isn't safe, because Symbol.prototype.toString can be overridden.
+                return /* suppressBlacklist */ obj.toString() || "Symbol";
+            } catch (e) {
+                return "Symbol";
+            }
+        }
+
+        if (InjectedScriptHost.subtype(obj) === "error") {
+            try {
+                var stack = obj.stack;
+                var message = obj.message && obj.message.length ? ": " + obj.message : "";
+                var firstCallFrame = /^\s+at\s/m.exec(stack);
+                var stackMessageEnd = firstCallFrame ? firstCallFrame.index : -1;
+                if (stackMessageEnd !== -1) {
+                    var stackTrace = stack.substr(stackMessageEnd);
+                    return className + message + "\n" + stackTrace;
+                }
+                return className + message;
+            } catch(e) {
+            }
+        }
+
+        if (subtype === "internal#entry") {
+            if ("key" in obj)
+                return "{" + this._describeIncludingPrimitives(obj.key) + " => " + this._describeIncludingPrimitives(obj.value) + "}";
+            return this._describeIncludingPrimitives(obj.value);
+        }
+
+        if (subtype === "internal#scopeList")
+            return "Scopes[" + obj.length + "]";
+
+        if (subtype === "internal#scope")
+            return (InjectedScript.closureTypes[obj.type] || "Unknown") + (obj.name ? " (" + obj.name + ")" : "");
+
+        return className;
+    },
+
+    /**
+     * @param {*} value
+     * @return {string}
+     */
+    _describeIncludingPrimitives: function(value)
+    {
+        if (typeof value === "string")
+            return "\"" + value.replace(/\n/g, "\u21B5") + "\"";
+        if (value === null)
+            return "" + value;
+        return this.isPrimitiveValue(value) ? toStringDescription(value) : (this._describe(value) || "");
+    },
+
+    /**
+     * @param {boolean} enabled
+     */
+    setCustomObjectFormatterEnabled: function(enabled)
+    {
+        this._customObjectFormatterEnabled = enabled;
+    }
+}
+
+/**
+ * @type {!InjectedScript}
+ * @const
+ */
+var injectedScript = new InjectedScript();
+
+/**
+ * @constructor
+ * @param {*} object
+ * @param {string=} objectGroupName
+ * @param {boolean=} doNotBind
+ * @param {boolean=} forceValueType
+ * @param {boolean=} generatePreview
+ * @param {?Array.<string>=} columnNames
+ * @param {boolean=} isTable
+ * @param {boolean=} skipEntriesPreview
+ * @param {*=} customObjectConfig
+ */
+InjectedScript.RemoteObject = function(object, objectGroupName, doNotBind, forceValueType, generatePreview, columnNames, isTable, skipEntriesPreview, customObjectConfig)
+{
+    this.type = typeof object;
+    if (this.type === "undefined" && injectedScript._isHTMLAllCollection(object))
+        this.type = "object";
+
+    if (injectedScript.isPrimitiveValue(object) || object === null || forceValueType) {
+        // We don't send undefined values over JSON.
+        if (this.type !== "undefined")
+            this.value = object;
+
+        // Null object is object with 'null' subtype.
+        if (object === null)
+            this.subtype = "null";
+
+        // Provide user-friendly number values.
+        if (this.type === "number") {
+            this.description = toStringDescription(object);
+            switch (this.description) {
+            case "NaN":
+            case "Infinity":
+            case "-Infinity":
+            case "-0":
+                delete this.value;
+                this.unserializableValue = this.description;
+                break;
+            }
+        }
+
+        return;
+    }
+
+    if (injectedScript._shouldPassByValue(object)) {
+        this.value = object;
+        this.subtype = injectedScript._subtype(object);
+        this.description = injectedScript._describeIncludingPrimitives(object);
+        return;
+    }
+
+    object = /** @type {!Object} */ (object);
+
+    if (!doNotBind)
+        this.objectId = injectedScript._bind(object, objectGroupName);
+    var subtype = injectedScript._subtype(object);
+    if (subtype)
+        this.subtype = subtype;
+    var className = InjectedScriptHost.internalConstructorName(object);
+    if (className)
+        this.className = className;
+    this.description = injectedScript._describe(object);
+
+    if (generatePreview && this.type === "object") {
+        if (this.subtype === "proxy")
+            this.preview = this._generatePreview(InjectedScriptHost.proxyTargetValue(object), undefined, columnNames, isTable, skipEntriesPreview);
+        else if (this.subtype !== "node")
+            this.preview = this._generatePreview(object, undefined, columnNames, isTable, skipEntriesPreview);
+    }
+
+    if (injectedScript._customObjectFormatterEnabled) {
+        var customPreview = this._customPreview(object, objectGroupName, customObjectConfig);
+        if (customPreview)
+            this.customPreview = customPreview;
+    }
+}
+
+InjectedScript.RemoteObject.prototype = {
+
+    /**
+     * @param {*} object
+     * @param {string=} objectGroupName
+     * @param {*=} customObjectConfig
+     * @return {?RuntimeAgent.CustomPreview}
+     */
+    _customPreview: function(object, objectGroupName, customObjectConfig)
+    {
+        /**
+         * @param {!Error} error
+         */
+        function logError(error)
+        {
+            // We use user code to generate custom output for the object; we can use user code for reporting errors too.
+            Promise.resolve().then(/* suppressBlacklist */ inspectedGlobalObject.console.error.bind(inspectedGlobalObject.console, "Custom Formatter Failed: " + error.message));
+        }
+
+        /**
+         * @param {*} object
+         * @param {*=} customObjectConfig
+         * @return {*}
+         */
+        function wrap(object, customObjectConfig)
+        {
+            return injectedScript._wrapObject(object, objectGroupName, false, false, null, false, false, customObjectConfig);
+        }
+
+        try {
+            var formatters = inspectedGlobalObject["devtoolsFormatters"];
+            if (!formatters || !isArrayLike(formatters))
+                return null;
+
+            for (var i = 0; i < formatters.length; ++i) {
+                try {
+                    var formatted = formatters[i].header(object, customObjectConfig);
+                    if (!formatted)
+                        continue;
+
+                    var hasBody = formatters[i].hasBody(object, customObjectConfig);
+                    injectedScript._substituteObjectTagsInCustomPreview(objectGroupName, formatted);
+                    var formatterObjectId = injectedScript._bind(formatters[i], objectGroupName);
+                    var bindRemoteObjectFunctionId = injectedScript._bind(wrap, objectGroupName);
+                    var result = {header: JSON.stringify(formatted), hasBody: !!hasBody, formatterObjectId: formatterObjectId, bindRemoteObjectFunctionId: bindRemoteObjectFunctionId};
+                    if (customObjectConfig)
+                        result["configObjectId"] = injectedScript._bind(customObjectConfig, objectGroupName);
+                    return result;
+                } catch (e) {
+                    logError(e);
+                }
+            }
+        } catch (e) {
+            logError(e);
+        }
+        return null;
+    },
+
+    /**
+     * @return {!RuntimeAgent.ObjectPreview} preview
+     */
+    _createEmptyPreview: function()
+    {
+        var preview = {
+            type: /** @type {!RuntimeAgent.ObjectPreviewType.<string>} */ (this.type),
+            description: this.description || toStringDescription(this.value),
+            overflow: false,
+            properties: [],
+            __proto__: null
+        };
+        if (this.subtype)
+            preview.subtype = /** @type {!RuntimeAgent.ObjectPreviewSubtype.<string>} */ (this.subtype);
+        return preview;
+    },
+
+    /**
+     * @param {!Object} object
+     * @param {?Array.<string>=} firstLevelKeys
+     * @param {?Array.<string>=} secondLevelKeys
+     * @param {boolean=} isTable
+     * @param {boolean=} skipEntriesPreview
+     * @return {!RuntimeAgent.ObjectPreview} preview
+     */
+    _generatePreview: function(object, firstLevelKeys, secondLevelKeys, isTable, skipEntriesPreview)
+    {
+        var preview = this._createEmptyPreview();
+        var firstLevelKeysCount = firstLevelKeys ? firstLevelKeys.length : 0;
+
+        var propertiesThreshold = {
+            properties: isTable ? 1000 : max(5, firstLevelKeysCount),
+            indexes: isTable ? 1000 : max(100, firstLevelKeysCount),
+            __proto__: null
+        };
+
+        try {
+            var descriptors = injectedScript._propertyDescriptors(object, undefined, undefined, firstLevelKeys);
+
+            this._appendPropertyDescriptors(preview, descriptors, propertiesThreshold, secondLevelKeys, isTable);
+            if (propertiesThreshold.indexes < 0 || propertiesThreshold.properties < 0)
+                return preview;
+
+            // Add internal properties to preview.
+            var rawInternalProperties = InjectedScriptHost.getInternalProperties(object) || [];
+            var internalProperties = [];
+            var entries = null;
+            for (var i = 0; i < rawInternalProperties.length; i += 2) {
+                if (rawInternalProperties[i] === "[[Entries]]") {
+                    entries = /** @type {!Array<*>} */(rawInternalProperties[i + 1]);
+                    continue;
+                }
+                push(internalProperties, {
+                    name: rawInternalProperties[i],
+                    value: rawInternalProperties[i + 1],
+                    isOwn: true,
+                    enumerable: true,
+                    __proto__: null
+                });
+            }
+            this._appendPropertyDescriptors(preview, internalProperties, propertiesThreshold, secondLevelKeys, isTable);
+
+            if (this.subtype === "map" || this.subtype === "set" || this.subtype === "iterator")
+                this._appendEntriesPreview(entries, preview, skipEntriesPreview);
+
+        } catch (e) {}
+
+        return preview;
+    },
+
+    /**
+     * @param {!RuntimeAgent.ObjectPreview} preview
+     * @param {!Array.<*>|!Iterable.<*>} descriptors
+     * @param {!Object} propertiesThreshold
+     * @param {?Array.<string>=} secondLevelKeys
+     * @param {boolean=} isTable
+     */
+    _appendPropertyDescriptors: function(preview, descriptors, propertiesThreshold, secondLevelKeys, isTable)
+    {
+        for (var descriptor of descriptors) {
+            if (propertiesThreshold.indexes < 0 || propertiesThreshold.properties < 0)
+                break;
+            if (!descriptor || descriptor.wasThrown)
+                continue;
+
+            var name = descriptor.name;
+
+            // Ignore __proto__ property.
+            if (name === "__proto__")
+                continue;
+
+            // Ignore length property of array.
+            if ((this.subtype === "array" || this.subtype === "typedarray") && name === "length")
+                continue;
+
+            // Ignore size property of map, set.
+            if ((this.subtype === "map" || this.subtype === "set") && name === "size")
+                continue;
+
+            // Never preview prototype properties.
+            if (!descriptor.isOwn)
+                continue;
+
+            // Ignore computed properties.
+            if (!("value" in descriptor))
+                continue;
+
+            var value = descriptor.value;
+            var type = typeof value;
+
+            // Never render functions in object preview.
+            if (type === "function" && (this.subtype !== "array" || !isUInt32(name)))
+                continue;
+
+            // Special-case HTMLAll.
+            if (type === "undefined" && injectedScript._isHTMLAllCollection(value))
+                type = "object";
+
+            // Render own properties.
+            if (value === null) {
+                this._appendPropertyPreview(preview, { name: name, type: "object", subtype: "null", value: "null", __proto__: null }, propertiesThreshold);
+                continue;
+            }
+
+            var maxLength = 100;
+            if (InjectedScript.primitiveTypes[type]) {
+                if (type === "string" && value.length > maxLength)
+                    value = this._abbreviateString(value, maxLength, true);
+                this._appendPropertyPreview(preview, { name: name, type: type, value: toStringDescription(value), __proto__: null }, propertiesThreshold);
+                continue;
+            }
+
+            var property = { name: name, type: type, __proto__: null };
+            var subtype = injectedScript._subtype(value);
+            if (subtype)
+                property.subtype = subtype;
+
+            if (secondLevelKeys === null || secondLevelKeys) {
+                var subPreview = this._generatePreview(value, secondLevelKeys || undefined, undefined, isTable);
+                property.valuePreview = subPreview;
+                if (subPreview.overflow)
+                    preview.overflow = true;
+            } else {
+                var description = "";
+                if (type !== "function")
+                    description = this._abbreviateString(/** @type {string} */ (injectedScript._describe(value)), maxLength, subtype === "regexp");
+                property.value = description;
+            }
+            this._appendPropertyPreview(preview, property, propertiesThreshold);
+        }
+    },
+
+    /**
+     * @param {!RuntimeAgent.ObjectPreview} preview
+     * @param {!Object} property
+     * @param {!Object} propertiesThreshold
+     */
+    _appendPropertyPreview: function(preview, property, propertiesThreshold)
+    {
+        if (toString(property.name >>> 0) === property.name)
+            propertiesThreshold.indexes--;
+        else
+            propertiesThreshold.properties--;
+        if (propertiesThreshold.indexes < 0 || propertiesThreshold.properties < 0) {
+            preview.overflow = true;
+        } else {
+            push(preview.properties, property);
+        }
+    },
+
+    /**
+     * @param {?Array<*>} entries
+     * @param {!RuntimeAgent.ObjectPreview} preview
+     * @param {boolean=} skipEntriesPreview
+     */
+    _appendEntriesPreview: function(entries, preview, skipEntriesPreview)
+    {
+        if (!entries)
+            return;
+        if (skipEntriesPreview) {
+            if (entries.length)
+                preview.overflow = true;
+            return;
+        }
+        preview.entries = [];
+        var entriesThreshold = 5;
+        for (var i = 0; i < entries.length; ++i) {
+            if (preview.entries.length >= entriesThreshold) {
+                preview.overflow = true;
+                break;
+            }
+            var entry = nullifyObjectProto(entries[i]);
+            var previewEntry = {
+                value: generateValuePreview(entry.value),
+                __proto__: null
+            };
+            if ("key" in entry)
+                previewEntry.key = generateValuePreview(entry.key);
+            push(preview.entries, previewEntry);
+        }
+
+        /**
+         * @param {*} value
+         * @return {!RuntimeAgent.ObjectPreview}
+         */
+        function generateValuePreview(value)
+        {
+            var remoteObject = new InjectedScript.RemoteObject(value, undefined, true, undefined, true, undefined, undefined, true);
+            var valuePreview = remoteObject.preview || remoteObject._createEmptyPreview();
+            return valuePreview;
+        }
+    },
+
+    /**
+     * @param {string} string
+     * @param {number} maxLength
+     * @param {boolean=} middle
+     * @return {string}
+     */
+    _abbreviateString: function(string, maxLength, middle)
+    {
+        if (string.length <= maxLength)
+            return string;
+        if (middle) {
+            var leftHalf = maxLength >> 1;
+            var rightHalf = maxLength - leftHalf - 1;
+            return string.substr(0, leftHalf) + "\u2026" + string.substr(string.length - rightHalf, rightHalf);
+        }
+        return string.substr(0, maxLength) + "\u2026";
+    },
+
+    __proto__: null
+}
+
+return injectedScript;
+})
diff --git a/src/inspector/injected-script.cc b/src/inspector/injected-script.cc
new file mode 100644
index 0000000..a100dea
--- /dev/null
+++ b/src/inspector/injected-script.cc
@@ -0,0 +1,581 @@
+/*
+ * Copyright (C) 2012 Google Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ *     * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ *     * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "src/inspector/injected-script.h"
+
+#include "src/inspector/injected-script-native.h"
+#include "src/inspector/injected-script-source.h"
+#include "src/inspector/inspected-context.h"
+#include "src/inspector/protocol/Protocol.h"
+#include "src/inspector/remote-object-id.h"
+#include "src/inspector/string-util.h"
+#include "src/inspector/v8-console.h"
+#include "src/inspector/v8-function-call.h"
+#include "src/inspector/v8-injected-script-host.h"
+#include "src/inspector/v8-inspector-impl.h"
+#include "src/inspector/v8-inspector-session-impl.h"
+#include "src/inspector/v8-stack-trace-impl.h"
+#include "src/inspector/v8-value-copier.h"
+
+#include "include/v8-inspector.h"
+
+namespace v8_inspector {
+
+using protocol::Array;
+using protocol::Runtime::PropertyDescriptor;
+using protocol::Runtime::InternalPropertyDescriptor;
+using protocol::Runtime::RemoteObject;
+using protocol::Maybe;
+
+static bool hasInternalError(ErrorString* errorString, bool hasError) {
+  if (hasError) *errorString = "Internal error";
+  return hasError;
+}
+
+std::unique_ptr<InjectedScript> InjectedScript::create(
+    InspectedContext* inspectedContext) {
+  v8::Isolate* isolate = inspectedContext->isolate();
+  v8::HandleScope handles(isolate);
+  v8::Local<v8::Context> context = inspectedContext->context();
+  v8::Context::Scope scope(context);
+
+  std::unique_ptr<InjectedScriptNative> injectedScriptNative(
+      new InjectedScriptNative(isolate));
+  v8::Local<v8::Object> scriptHostWrapper =
+      V8InjectedScriptHost::create(context, inspectedContext->inspector());
+  injectedScriptNative->setOnInjectedScriptHost(scriptHostWrapper);
+
+  // Inject javascript into the context. The compiled script is supposed to
+  // evaluate into
+  // a single anonymous function(it's anonymous to avoid cluttering the global
+  // object with
+  // inspector's stuff) the function is called a few lines below with
+  // InjectedScriptHost wrapper,
+  // injected script id and explicit reference to the inspected global object.
+  // The function is expected
+  // to create and configure InjectedScript instance that is going to be used by
+  // the inspector.
+  String16 injectedScriptSource(
+      reinterpret_cast<const char*>(InjectedScriptSource_js),
+      sizeof(InjectedScriptSource_js));
+  v8::Local<v8::Value> value;
+  if (!inspectedContext->inspector()
+           ->compileAndRunInternalScript(
+               context, toV8String(isolate, injectedScriptSource))
+           .ToLocal(&value))
+    return nullptr;
+  DCHECK(value->IsFunction());
+  v8::Local<v8::Function> function = v8::Local<v8::Function>::Cast(value);
+  v8::Local<v8::Object> windowGlobal = context->Global();
+  v8::Local<v8::Value> info[] = {
+      scriptHostWrapper, windowGlobal,
+      v8::Number::New(isolate, inspectedContext->contextId())};
+  v8::MicrotasksScope microtasksScope(isolate,
+                                      v8::MicrotasksScope::kDoNotRunMicrotasks);
+
+  int contextGroupId = inspectedContext->contextGroupId();
+  int contextId = inspectedContext->contextId();
+  V8InspectorImpl* inspector = inspectedContext->inspector();
+  v8::Local<v8::Value> injectedScriptValue;
+  if (!function->Call(context, windowGlobal, arraysize(info), info)
+           .ToLocal(&injectedScriptValue))
+    return nullptr;
+  if (inspector->getContext(contextGroupId, contextId) != inspectedContext)
+    return nullptr;
+  if (!injectedScriptValue->IsObject()) return nullptr;
+  return wrapUnique(new InjectedScript(inspectedContext,
+                                       injectedScriptValue.As<v8::Object>(),
+                                       std::move(injectedScriptNative)));
+}
+
+InjectedScript::InjectedScript(
+    InspectedContext* context, v8::Local<v8::Object> object,
+    std::unique_ptr<InjectedScriptNative> injectedScriptNative)
+    : m_context(context),
+      m_value(context->isolate(), object),
+      m_native(std::move(injectedScriptNative)) {}
+
+InjectedScript::~InjectedScript() {}
+
+void InjectedScript::getProperties(
+    ErrorString* errorString, v8::Local<v8::Object> object,
+    const String16& groupName, bool ownProperties, bool accessorPropertiesOnly,
+    bool generatePreview,
+    std::unique_ptr<Array<PropertyDescriptor>>* properties,
+    Maybe<protocol::Runtime::ExceptionDetails>* exceptionDetails) {
+  v8::HandleScope handles(m_context->isolate());
+  v8::Local<v8::Context> context = m_context->context();
+  V8FunctionCall function(m_context->inspector(), m_context->context(),
+                          v8Value(), "getProperties");
+  function.appendArgument(object);
+  function.appendArgument(groupName);
+  function.appendArgument(ownProperties);
+  function.appendArgument(accessorPropertiesOnly);
+  function.appendArgument(generatePreview);
+
+  v8::TryCatch tryCatch(m_context->isolate());
+  v8::Local<v8::Value> resultValue = function.callWithoutExceptionHandling();
+  if (tryCatch.HasCaught()) {
+    *exceptionDetails = createExceptionDetails(errorString, tryCatch, groupName,
+                                               generatePreview);
+    // FIXME: make properties optional
+    *properties = Array<PropertyDescriptor>::create();
+    return;
+  }
+  if (hasInternalError(errorString, resultValue.IsEmpty())) return;
+  std::unique_ptr<protocol::Value> protocolValue =
+      toProtocolValue(errorString, context, resultValue);
+  if (!protocolValue) return;
+  protocol::ErrorSupport errors(errorString);
+  std::unique_ptr<Array<PropertyDescriptor>> result =
+      Array<PropertyDescriptor>::parse(protocolValue.get(), &errors);
+  if (!hasInternalError(errorString, errors.hasErrors()))
+    *properties = std::move(result);
+}
+
+void InjectedScript::releaseObject(const String16& objectId) {
+  std::unique_ptr<protocol::Value> parsedObjectId =
+      protocol::parseJSON(objectId);
+  if (!parsedObjectId) return;
+  protocol::DictionaryValue* object =
+      protocol::DictionaryValue::cast(parsedObjectId.get());
+  if (!object) return;
+  int boundId = 0;
+  if (!object->getInteger("id", &boundId)) return;
+  m_native->unbind(boundId);
+}
+
+std::unique_ptr<protocol::Runtime::RemoteObject> InjectedScript::wrapObject(
+    ErrorString* errorString, v8::Local<v8::Value> value,
+    const String16& groupName, bool forceValueType,
+    bool generatePreview) const {
+  v8::HandleScope handles(m_context->isolate());
+  v8::Local<v8::Value> wrappedObject;
+  v8::Local<v8::Context> context = m_context->context();
+  if (!wrapValue(errorString, value, groupName, forceValueType, generatePreview)
+           .ToLocal(&wrappedObject))
+    return nullptr;
+  protocol::ErrorSupport errors;
+  std::unique_ptr<protocol::Value> protocolValue =
+      toProtocolValue(errorString, context, wrappedObject);
+  if (!protocolValue) return nullptr;
+  std::unique_ptr<protocol::Runtime::RemoteObject> remoteObject =
+      protocol::Runtime::RemoteObject::parse(protocolValue.get(), &errors);
+  if (!remoteObject) *errorString = errors.errors();
+  return remoteObject;
+}
+
+bool InjectedScript::wrapObjectProperty(ErrorString* errorString,
+                                        v8::Local<v8::Object> object,
+                                        v8::Local<v8::Name> key,
+                                        const String16& groupName,
+                                        bool forceValueType,
+                                        bool generatePreview) const {
+  v8::Local<v8::Value> property;
+  v8::Local<v8::Context> context = m_context->context();
+  if (hasInternalError(errorString,
+                       !object->Get(context, key).ToLocal(&property)))
+    return false;
+  v8::Local<v8::Value> wrappedProperty;
+  if (!wrapValue(errorString, property, groupName, forceValueType,
+                 generatePreview)
+           .ToLocal(&wrappedProperty))
+    return false;
+  v8::Maybe<bool> success =
+      createDataProperty(context, object, key, wrappedProperty);
+  if (hasInternalError(errorString, success.IsNothing() || !success.FromJust()))
+    return false;
+  return true;
+}
+
+bool InjectedScript::wrapPropertyInArray(ErrorString* errorString,
+                                         v8::Local<v8::Array> array,
+                                         v8::Local<v8::String> property,
+                                         const String16& groupName,
+                                         bool forceValueType,
+                                         bool generatePreview) const {
+  V8FunctionCall function(m_context->inspector(), m_context->context(),
+                          v8Value(), "wrapPropertyInArray");
+  function.appendArgument(array);
+  function.appendArgument(property);
+  function.appendArgument(groupName);
+  function.appendArgument(forceValueType);
+  function.appendArgument(generatePreview);
+  bool hadException = false;
+  function.call(hadException);
+  return !hasInternalError(errorString, hadException);
+}
+
+bool InjectedScript::wrapObjectsInArray(ErrorString* errorString,
+                                        v8::Local<v8::Array> array,
+                                        const String16& groupName,
+                                        bool forceValueType,
+                                        bool generatePreview) const {
+  V8FunctionCall function(m_context->inspector(), m_context->context(),
+                          v8Value(), "wrapObjectsInArray");
+  function.appendArgument(array);
+  function.appendArgument(groupName);
+  function.appendArgument(forceValueType);
+  function.appendArgument(generatePreview);
+  bool hadException = false;
+  function.call(hadException);
+  return !hasInternalError(errorString, hadException);
+}
+
+v8::MaybeLocal<v8::Value> InjectedScript::wrapValue(
+    ErrorString* errorString, v8::Local<v8::Value> value,
+    const String16& groupName, bool forceValueType,
+    bool generatePreview) const {
+  V8FunctionCall function(m_context->inspector(), m_context->context(),
+                          v8Value(), "wrapObject");
+  function.appendArgument(value);
+  function.appendArgument(groupName);
+  function.appendArgument(forceValueType);
+  function.appendArgument(generatePreview);
+  bool hadException = false;
+  v8::Local<v8::Value> r = function.call(hadException);
+  if (hasInternalError(errorString, hadException || r.IsEmpty()))
+    return v8::MaybeLocal<v8::Value>();
+  return r;
+}
+
+std::unique_ptr<protocol::Runtime::RemoteObject> InjectedScript::wrapTable(
+    v8::Local<v8::Value> table, v8::Local<v8::Value> columns) const {
+  v8::HandleScope handles(m_context->isolate());
+  v8::Local<v8::Context> context = m_context->context();
+  V8FunctionCall function(m_context->inspector(), context, v8Value(),
+                          "wrapTable");
+  function.appendArgument(table);
+  if (columns.IsEmpty())
+    function.appendArgument(false);
+  else
+    function.appendArgument(columns);
+  bool hadException = false;
+  v8::Local<v8::Value> r = function.call(hadException);
+  if (hadException || r.IsEmpty()) return nullptr;
+  protocol::ErrorString errorString;
+  std::unique_ptr<protocol::Value> protocolValue =
+      toProtocolValue(&errorString, context, r);
+  if (!protocolValue) return nullptr;
+  protocol::ErrorSupport errors;
+  return protocol::Runtime::RemoteObject::parse(protocolValue.get(), &errors);
+}
+
+bool InjectedScript::findObject(ErrorString* errorString,
+                                const RemoteObjectId& objectId,
+                                v8::Local<v8::Value>* outObject) const {
+  *outObject = m_native->objectForId(objectId.id());
+  if (outObject->IsEmpty())
+    *errorString = "Could not find object with given id";
+  return !outObject->IsEmpty();
+}
+
+String16 InjectedScript::objectGroupName(const RemoteObjectId& objectId) const {
+  return m_native->groupName(objectId.id());
+}
+
+void InjectedScript::releaseObjectGroup(const String16& objectGroup) {
+  m_native->releaseObjectGroup(objectGroup);
+  if (objectGroup == "console") m_lastEvaluationResult.Reset();
+}
+
+void InjectedScript::setCustomObjectFormatterEnabled(bool enabled) {
+  v8::HandleScope handles(m_context->isolate());
+  V8FunctionCall function(m_context->inspector(), m_context->context(),
+                          v8Value(), "setCustomObjectFormatterEnabled");
+  function.appendArgument(enabled);
+  bool hadException = false;
+  function.call(hadException);
+  DCHECK(!hadException);
+}
+
+v8::Local<v8::Value> InjectedScript::v8Value() const {
+  return m_value.Get(m_context->isolate());
+}
+
+v8::Local<v8::Value> InjectedScript::lastEvaluationResult() const {
+  if (m_lastEvaluationResult.IsEmpty())
+    return v8::Undefined(m_context->isolate());
+  return m_lastEvaluationResult.Get(m_context->isolate());
+}
+
+v8::MaybeLocal<v8::Value> InjectedScript::resolveCallArgument(
+    ErrorString* errorString, protocol::Runtime::CallArgument* callArgument) {
+  if (callArgument->hasObjectId()) {
+    std::unique_ptr<RemoteObjectId> remoteObjectId =
+        RemoteObjectId::parse(errorString, callArgument->getObjectId(""));
+    if (!remoteObjectId) return v8::MaybeLocal<v8::Value>();
+    if (remoteObjectId->contextId() != m_context->contextId()) {
+      *errorString =
+          "Argument should belong to the same JavaScript world as target "
+          "object";
+      return v8::MaybeLocal<v8::Value>();
+    }
+    v8::Local<v8::Value> object;
+    if (!findObject(errorString, *remoteObjectId, &object))
+      return v8::MaybeLocal<v8::Value>();
+    return object;
+  }
+  if (callArgument->hasValue() || callArgument->hasUnserializableValue()) {
+    String16 value =
+        callArgument->hasValue()
+            ? callArgument->getValue(nullptr)->toJSONString()
+            : "Number(\"" + callArgument->getUnserializableValue("") + "\")";
+    v8::Local<v8::Value> object;
+    if (!m_context->inspector()
+             ->compileAndRunInternalScript(
+                 m_context->context(), toV8String(m_context->isolate(), value))
+             .ToLocal(&object)) {
+      *errorString = "Couldn't parse value object in call argument";
+      return v8::MaybeLocal<v8::Value>();
+    }
+    return object;
+  }
+  return v8::Undefined(m_context->isolate());
+}
+
+std::unique_ptr<protocol::Runtime::ExceptionDetails>
+InjectedScript::createExceptionDetails(ErrorString* errorString,
+                                       const v8::TryCatch& tryCatch,
+                                       const String16& objectGroup,
+                                       bool generatePreview) {
+  if (!tryCatch.HasCaught()) return nullptr;
+  v8::Local<v8::Message> message = tryCatch.Message();
+  v8::Local<v8::Value> exception = tryCatch.Exception();
+  String16 messageText =
+      message.IsEmpty() ? String16() : toProtocolString(message->Get());
+  std::unique_ptr<protocol::Runtime::ExceptionDetails> exceptionDetails =
+      protocol::Runtime::ExceptionDetails::create()
+          .setExceptionId(m_context->inspector()->nextExceptionId())
+          .setText(exception.IsEmpty() ? messageText : String16("Uncaught"))
+          .setLineNumber(
+              message.IsEmpty()
+                  ? 0
+                  : message->GetLineNumber(m_context->context()).FromMaybe(1) -
+                        1)
+          .setColumnNumber(
+              message.IsEmpty()
+                  ? 0
+                  : message->GetStartColumn(m_context->context()).FromMaybe(0))
+          .build();
+  if (!message.IsEmpty()) {
+    exceptionDetails->setScriptId(String16::fromInteger(
+        static_cast<int>(message->GetScriptOrigin().ScriptID()->Value())));
+    v8::Local<v8::StackTrace> stackTrace = message->GetStackTrace();
+    if (!stackTrace.IsEmpty() && stackTrace->GetFrameCount() > 0)
+      exceptionDetails->setStackTrace(m_context->inspector()
+                                          ->debugger()
+                                          ->createStackTrace(stackTrace)
+                                          ->buildInspectorObjectImpl());
+  }
+  if (!exception.IsEmpty()) {
+    std::unique_ptr<protocol::Runtime::RemoteObject> wrapped = wrapObject(
+        errorString, exception, objectGroup, false /* forceValueType */,
+        generatePreview && !exception->IsNativeError());
+    if (!wrapped) return nullptr;
+    exceptionDetails->setException(std::move(wrapped));
+  }
+  return exceptionDetails;
+}
+
+void InjectedScript::wrapEvaluateResult(
+    ErrorString* errorString, v8::MaybeLocal<v8::Value> maybeResultValue,
+    const v8::TryCatch& tryCatch, const String16& objectGroup,
+    bool returnByValue, bool generatePreview,
+    std::unique_ptr<protocol::Runtime::RemoteObject>* result,
+    Maybe<protocol::Runtime::ExceptionDetails>* exceptionDetails) {
+  v8::Local<v8::Value> resultValue;
+  if (!tryCatch.HasCaught()) {
+    if (hasInternalError(errorString, !maybeResultValue.ToLocal(&resultValue)))
+      return;
+    std::unique_ptr<RemoteObject> remoteObject = wrapObject(
+        errorString, resultValue, objectGroup, returnByValue, generatePreview);
+    if (!remoteObject) return;
+    if (objectGroup == "console")
+      m_lastEvaluationResult.Reset(m_context->isolate(), resultValue);
+    *result = std::move(remoteObject);
+  } else {
+    v8::Local<v8::Value> exception = tryCatch.Exception();
+    std::unique_ptr<RemoteObject> remoteObject =
+        wrapObject(errorString, exception, objectGroup, false,
+                   generatePreview && !exception->IsNativeError());
+    if (!remoteObject) return;
+    // We send exception in result for compatibility reasons, even though it's
+    // accessible through exceptionDetails.exception.
+    *result = std::move(remoteObject);
+    *exceptionDetails = createExceptionDetails(errorString, tryCatch,
+                                               objectGroup, generatePreview);
+  }
+}
+
+v8::Local<v8::Object> InjectedScript::commandLineAPI() {
+  if (m_commandLineAPI.IsEmpty())
+    m_commandLineAPI.Reset(m_context->isolate(),
+                           V8Console::createCommandLineAPI(m_context));
+  return m_commandLineAPI.Get(m_context->isolate());
+}
+
+InjectedScript::Scope::Scope(ErrorString* errorString,
+                             V8InspectorImpl* inspector, int contextGroupId)
+    : m_errorString(errorString),
+      m_inspector(inspector),
+      m_contextGroupId(contextGroupId),
+      m_injectedScript(nullptr),
+      m_handleScope(inspector->isolate()),
+      m_tryCatch(inspector->isolate()),
+      m_ignoreExceptionsAndMuteConsole(false),
+      m_previousPauseOnExceptionsState(V8Debugger::DontPauseOnExceptions),
+      m_userGesture(false) {}
+
+bool InjectedScript::Scope::initialize() {
+  cleanup();
+  // TODO(dgozman): what if we reattach to the same context group during
+  // evaluate? Introduce a session id?
+  V8InspectorSessionImpl* session =
+      m_inspector->sessionForContextGroup(m_contextGroupId);
+  if (!session) {
+    *m_errorString = "Internal error";
+    return false;
+  }
+  findInjectedScript(session);
+  if (!m_injectedScript) return false;
+  m_context = m_injectedScript->context()->context();
+  m_context->Enter();
+  return true;
+}
+
+bool InjectedScript::Scope::installCommandLineAPI() {
+  DCHECK(m_injectedScript && !m_context.IsEmpty() &&
+         !m_commandLineAPIScope.get());
+  m_commandLineAPIScope.reset(new V8Console::CommandLineAPIScope(
+      m_context, m_injectedScript->commandLineAPI(), m_context->Global()));
+  return true;
+}
+
+void InjectedScript::Scope::ignoreExceptionsAndMuteConsole() {
+  DCHECK(!m_ignoreExceptionsAndMuteConsole);
+  m_ignoreExceptionsAndMuteConsole = true;
+  m_inspector->client()->muteMetrics(m_contextGroupId);
+  m_inspector->muteExceptions(m_contextGroupId);
+  m_previousPauseOnExceptionsState =
+      setPauseOnExceptionsState(V8Debugger::DontPauseOnExceptions);
+}
+
+V8Debugger::PauseOnExceptionsState
+InjectedScript::Scope::setPauseOnExceptionsState(
+    V8Debugger::PauseOnExceptionsState newState) {
+  if (!m_inspector->debugger()->enabled()) return newState;
+  V8Debugger::PauseOnExceptionsState presentState =
+      m_inspector->debugger()->getPauseOnExceptionsState();
+  if (presentState != newState)
+    m_inspector->debugger()->setPauseOnExceptionsState(newState);
+  return presentState;
+}
+
+void InjectedScript::Scope::pretendUserGesture() {
+  DCHECK(!m_userGesture);
+  m_userGesture = true;
+  m_inspector->client()->beginUserGesture();
+}
+
+void InjectedScript::Scope::cleanup() {
+  m_commandLineAPIScope.reset();
+  if (!m_context.IsEmpty()) {
+    m_context->Exit();
+    m_context.Clear();
+  }
+}
+
+InjectedScript::Scope::~Scope() {
+  if (m_ignoreExceptionsAndMuteConsole) {
+    setPauseOnExceptionsState(m_previousPauseOnExceptionsState);
+    m_inspector->client()->unmuteMetrics(m_contextGroupId);
+    m_inspector->unmuteExceptions(m_contextGroupId);
+  }
+  if (m_userGesture) m_inspector->client()->endUserGesture();
+  cleanup();
+}
+
+InjectedScript::ContextScope::ContextScope(ErrorString* errorString,
+                                           V8InspectorImpl* inspector,
+                                           int contextGroupId,
+                                           int executionContextId)
+    : InjectedScript::Scope(errorString, inspector, contextGroupId),
+      m_executionContextId(executionContextId) {}
+
+InjectedScript::ContextScope::~ContextScope() {}
+
+void InjectedScript::ContextScope::findInjectedScript(
+    V8InspectorSessionImpl* session) {
+  m_injectedScript =
+      session->findInjectedScript(m_errorString, m_executionContextId);
+}
+
+InjectedScript::ObjectScope::ObjectScope(ErrorString* errorString,
+                                         V8InspectorImpl* inspector,
+                                         int contextGroupId,
+                                         const String16& remoteObjectId)
+    : InjectedScript::Scope(errorString, inspector, contextGroupId),
+      m_remoteObjectId(remoteObjectId) {}
+
+InjectedScript::ObjectScope::~ObjectScope() {}
+
+void InjectedScript::ObjectScope::findInjectedScript(
+    V8InspectorSessionImpl* session) {
+  std::unique_ptr<RemoteObjectId> remoteId =
+      RemoteObjectId::parse(m_errorString, m_remoteObjectId);
+  if (!remoteId) return;
+  InjectedScript* injectedScript =
+      session->findInjectedScript(m_errorString, remoteId.get());
+  if (!injectedScript) return;
+  m_objectGroupName = injectedScript->objectGroupName(*remoteId);
+  if (!injectedScript->findObject(m_errorString, *remoteId, &m_object)) return;
+  m_injectedScript = injectedScript;
+}
+
+InjectedScript::CallFrameScope::CallFrameScope(ErrorString* errorString,
+                                               V8InspectorImpl* inspector,
+                                               int contextGroupId,
+                                               const String16& remoteObjectId)
+    : InjectedScript::Scope(errorString, inspector, contextGroupId),
+      m_remoteCallFrameId(remoteObjectId) {}
+
+InjectedScript::CallFrameScope::~CallFrameScope() {}
+
+void InjectedScript::CallFrameScope::findInjectedScript(
+    V8InspectorSessionImpl* session) {
+  std::unique_ptr<RemoteCallFrameId> remoteId =
+      RemoteCallFrameId::parse(m_errorString, m_remoteCallFrameId);
+  if (!remoteId) return;
+  m_frameOrdinal = static_cast<size_t>(remoteId->frameOrdinal());
+  m_injectedScript = session->findInjectedScript(m_errorString, remoteId.get());
+}
+
+}  // namespace v8_inspector
diff --git a/src/inspector/injected-script.h b/src/inspector/injected-script.h
new file mode 100644
index 0000000..9b324c9
--- /dev/null
+++ b/src/inspector/injected-script.h
@@ -0,0 +1,207 @@
+/*
+ * Copyright (C) 2012 Google Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ *     * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ *     * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef V8_INSPECTOR_INJECTEDSCRIPT_H_
+#define V8_INSPECTOR_INJECTEDSCRIPT_H_
+
+#include "src/base/macros.h"
+#include "src/inspector/injected-script-native.h"
+#include "src/inspector/inspected-context.h"
+#include "src/inspector/protocol/Forward.h"
+#include "src/inspector/protocol/Runtime.h"
+#include "src/inspector/v8-console.h"
+#include "src/inspector/v8-debugger.h"
+
+#include "include/v8.h"
+
+namespace v8_inspector {
+
+class RemoteObjectId;
+class V8FunctionCall;
+class V8InspectorImpl;
+class V8InspectorSessionImpl;
+
+using protocol::ErrorString;
+using protocol::Maybe;
+
+class InjectedScript final {
+ public:
+  static std::unique_ptr<InjectedScript> create(InspectedContext*);
+  ~InjectedScript();
+
+  InspectedContext* context() const { return m_context; }
+
+  void getProperties(
+      ErrorString*, v8::Local<v8::Object>, const String16& groupName,
+      bool ownProperties, bool accessorPropertiesOnly, bool generatePreview,
+      std::unique_ptr<protocol::Array<protocol::Runtime::PropertyDescriptor>>*
+          result,
+      Maybe<protocol::Runtime::ExceptionDetails>*);
+  void releaseObject(const String16& objectId);
+
+  std::unique_ptr<protocol::Runtime::RemoteObject> wrapObject(
+      ErrorString*, v8::Local<v8::Value>, const String16& groupName,
+      bool forceValueType = false, bool generatePreview = false) const;
+  bool wrapObjectProperty(ErrorString*, v8::Local<v8::Object>,
+                          v8::Local<v8::Name> key, const String16& groupName,
+                          bool forceValueType = false,
+                          bool generatePreview = false) const;
+  bool wrapPropertyInArray(ErrorString*, v8::Local<v8::Array>,
+                           v8::Local<v8::String> property,
+                           const String16& groupName,
+                           bool forceValueType = false,
+                           bool generatePreview = false) const;
+  bool wrapObjectsInArray(ErrorString*, v8::Local<v8::Array>,
+                          const String16& groupName,
+                          bool forceValueType = false,
+                          bool generatePreview = false) const;
+  std::unique_ptr<protocol::Runtime::RemoteObject> wrapTable(
+      v8::Local<v8::Value> table, v8::Local<v8::Value> columns) const;
+
+  bool findObject(ErrorString*, const RemoteObjectId&,
+                  v8::Local<v8::Value>*) const;
+  String16 objectGroupName(const RemoteObjectId&) const;
+  void releaseObjectGroup(const String16&);
+  void setCustomObjectFormatterEnabled(bool);
+  v8::MaybeLocal<v8::Value> resolveCallArgument(
+      ErrorString*, protocol::Runtime::CallArgument*);
+
+  std::unique_ptr<protocol::Runtime::ExceptionDetails> createExceptionDetails(
+      ErrorString*, const v8::TryCatch&, const String16& groupName,
+      bool generatePreview);
+  void wrapEvaluateResult(
+      ErrorString*, v8::MaybeLocal<v8::Value> maybeResultValue,
+      const v8::TryCatch&, const String16& objectGroup, bool returnByValue,
+      bool generatePreview,
+      std::unique_ptr<protocol::Runtime::RemoteObject>* result,
+      Maybe<protocol::Runtime::ExceptionDetails>*);
+  v8::Local<v8::Value> lastEvaluationResult() const;
+
+  class Scope {
+   public:
+    bool initialize();
+    bool installCommandLineAPI();
+    void ignoreExceptionsAndMuteConsole();
+    void pretendUserGesture();
+    v8::Local<v8::Context> context() const { return m_context; }
+    InjectedScript* injectedScript() const { return m_injectedScript; }
+    const v8::TryCatch& tryCatch() const { return m_tryCatch; }
+
+   protected:
+    Scope(ErrorString*, V8InspectorImpl*, int contextGroupId);
+    virtual ~Scope();
+    virtual void findInjectedScript(V8InspectorSessionImpl*) = 0;
+
+    ErrorString* m_errorString;
+    V8InspectorImpl* m_inspector;
+    int m_contextGroupId;
+    InjectedScript* m_injectedScript;
+
+   private:
+    void cleanup();
+    V8Debugger::PauseOnExceptionsState setPauseOnExceptionsState(
+        V8Debugger::PauseOnExceptionsState);
+
+    v8::HandleScope m_handleScope;
+    v8::TryCatch m_tryCatch;
+    v8::Local<v8::Context> m_context;
+    std::unique_ptr<V8Console::CommandLineAPIScope> m_commandLineAPIScope;
+    bool m_ignoreExceptionsAndMuteConsole;
+    V8Debugger::PauseOnExceptionsState m_previousPauseOnExceptionsState;
+    bool m_userGesture;
+  };
+
+  class ContextScope : public Scope {
+   public:
+    ContextScope(ErrorString*, V8InspectorImpl*, int contextGroupId,
+                 int executionContextId);
+    ~ContextScope();
+
+   private:
+    void findInjectedScript(V8InspectorSessionImpl*) override;
+    int m_executionContextId;
+
+    DISALLOW_COPY_AND_ASSIGN(ContextScope);
+  };
+
+  class ObjectScope : public Scope {
+   public:
+    ObjectScope(ErrorString*, V8InspectorImpl*, int contextGroupId,
+                const String16& remoteObjectId);
+    ~ObjectScope();
+    const String16& objectGroupName() const { return m_objectGroupName; }
+    v8::Local<v8::Value> object() const { return m_object; }
+
+   private:
+    void findInjectedScript(V8InspectorSessionImpl*) override;
+    String16 m_remoteObjectId;
+    String16 m_objectGroupName;
+    v8::Local<v8::Value> m_object;
+
+    DISALLOW_COPY_AND_ASSIGN(ObjectScope);
+  };
+
+  class CallFrameScope : public Scope {
+   public:
+    CallFrameScope(ErrorString*, V8InspectorImpl*, int contextGroupId,
+                   const String16& remoteCallFrameId);
+    ~CallFrameScope();
+    size_t frameOrdinal() const { return m_frameOrdinal; }
+
+   private:
+    void findInjectedScript(V8InspectorSessionImpl*) override;
+    String16 m_remoteCallFrameId;
+    size_t m_frameOrdinal;
+
+    DISALLOW_COPY_AND_ASSIGN(CallFrameScope);
+  };
+
+ private:
+  InjectedScript(InspectedContext*, v8::Local<v8::Object>,
+                 std::unique_ptr<InjectedScriptNative>);
+  v8::Local<v8::Value> v8Value() const;
+  v8::MaybeLocal<v8::Value> wrapValue(ErrorString*, v8::Local<v8::Value>,
+                                      const String16& groupName,
+                                      bool forceValueType,
+                                      bool generatePreview) const;
+  v8::Local<v8::Object> commandLineAPI();
+
+  InspectedContext* m_context;
+  v8::Global<v8::Value> m_value;
+  v8::Global<v8::Value> m_lastEvaluationResult;
+  std::unique_ptr<InjectedScriptNative> m_native;
+  v8::Global<v8::Object> m_commandLineAPI;
+
+  DISALLOW_COPY_AND_ASSIGN(InjectedScript);
+};
+
+}  // namespace v8_inspector
+
+#endif  // V8_INSPECTOR_INJECTEDSCRIPT_H_
diff --git a/src/inspector/injected_script_externs.js b/src/inspector/injected_script_externs.js
new file mode 100644
index 0000000..b6339c6
--- /dev/null
+++ b/src/inspector/injected_script_externs.js
@@ -0,0 +1,66 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+/** @interface */
+function InjectedScriptHostClass()
+{
+}
+
+/**
+ * @param {*} obj
+ * @return {string}
+ */
+InjectedScriptHostClass.prototype.internalConstructorName = function(obj) {}
+
+/**
+ * @param {*} obj
+ * @param {function()|undefined} func
+ * @return {boolean}
+ */
+InjectedScriptHostClass.prototype.formatAccessorsAsProperties = function(obj, func) {}
+
+/**
+ * @param {*} obj
+ * @return {string}
+ */
+InjectedScriptHostClass.prototype.subtype = function(obj) {}
+
+/**
+ * @param {*} obj
+ * @return {boolean}
+ */
+InjectedScriptHostClass.prototype.isTypedArray = function(obj) {}
+
+/**
+ * @param {*} obj
+ * @return {!Array.<*>}
+ */
+InjectedScriptHostClass.prototype.getInternalProperties = function(obj) {}
+
+/**
+ * @param {!Object} object
+ * @param {string} propertyName
+ * @return {boolean}
+ */
+InjectedScriptHostClass.prototype.objectHasOwnProperty = function(object, propertyName) {}
+
+/**
+ * @param {*} value
+ * @param {string} groupName
+ * @return {number}
+ */
+InjectedScriptHostClass.prototype.bind = function(value, groupName) {}
+
+/**
+ * @param {!Object} object
+ * @return {!Object}
+ */
+InjectedScriptHostClass.prototype.proxyTargetValue = function(object) {}
+
+/** @type {!InjectedScriptHostClass} */
+var InjectedScriptHost;
+/** @type {!Window} */
+var inspectedGlobalObject;
+/** @type {number} */
+var injectedScriptId;
diff --git a/src/inspector/inspected-context.cc b/src/inspector/inspected-context.cc
new file mode 100644
index 0000000..9100f64
--- /dev/null
+++ b/src/inspector/inspected-context.cc
@@ -0,0 +1,88 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/inspector/inspected-context.h"
+
+#include "src/inspector/injected-script.h"
+#include "src/inspector/string-util.h"
+#include "src/inspector/v8-console.h"
+#include "src/inspector/v8-inspector-impl.h"
+#include "src/inspector/v8-value-copier.h"
+
+#include "include/v8-inspector.h"
+
+namespace v8_inspector {
+
+void InspectedContext::weakCallback(
+    const v8::WeakCallbackInfo<InspectedContext>& data) {
+  InspectedContext* context = data.GetParameter();
+  if (!context->m_context.IsEmpty()) {
+    context->m_context.Reset();
+    data.SetSecondPassCallback(&InspectedContext::weakCallback);
+  } else {
+    context->m_inspector->discardInspectedContext(context->m_contextGroupId,
+                                                  context->m_contextId);
+  }
+}
+
+void InspectedContext::consoleWeakCallback(
+    const v8::WeakCallbackInfo<InspectedContext>& data) {
+  data.GetParameter()->m_console.Reset();
+}
+
+InspectedContext::InspectedContext(V8InspectorImpl* inspector,
+                                   const V8ContextInfo& info, int contextId)
+    : m_inspector(inspector),
+      m_context(info.context->GetIsolate(), info.context),
+      m_contextId(contextId),
+      m_contextGroupId(info.contextGroupId),
+      m_origin(toString16(info.origin)),
+      m_humanReadableName(toString16(info.humanReadableName)),
+      m_auxData(toString16(info.auxData)),
+      m_reported(false) {
+  m_context.SetWeak(this, &InspectedContext::weakCallback,
+                    v8::WeakCallbackType::kParameter);
+
+  v8::Isolate* isolate = m_inspector->isolate();
+  v8::Local<v8::Object> global = info.context->Global();
+  v8::Local<v8::Object> console =
+      V8Console::createConsole(this, info.hasMemoryOnConsole);
+  if (!global
+           ->Set(info.context, toV8StringInternalized(isolate, "console"),
+                 console)
+           .FromMaybe(false))
+    return;
+  m_console.Reset(isolate, console);
+  m_console.SetWeak(this, &InspectedContext::consoleWeakCallback,
+                    v8::WeakCallbackType::kParameter);
+}
+
+InspectedContext::~InspectedContext() {
+  if (!m_context.IsEmpty() && !m_console.IsEmpty()) {
+    v8::HandleScope scope(isolate());
+    V8Console::clearInspectedContextIfNeeded(context(),
+                                             m_console.Get(isolate()));
+  }
+}
+
+v8::Local<v8::Context> InspectedContext::context() const {
+  return m_context.Get(isolate());
+}
+
+v8::Isolate* InspectedContext::isolate() const {
+  return m_inspector->isolate();
+}
+
+bool InspectedContext::createInjectedScript() {
+  DCHECK(!m_injectedScript);
+  std::unique_ptr<InjectedScript> injectedScript = InjectedScript::create(this);
+  // InjectedScript::create can destroy |this|.
+  if (!injectedScript) return false;
+  m_injectedScript = std::move(injectedScript);
+  return true;
+}
+
+void InspectedContext::discardInjectedScript() { m_injectedScript.reset(); }
+
+}  // namespace v8_inspector
diff --git a/src/inspector/inspected-context.h b/src/inspector/inspected-context.h
new file mode 100644
index 0000000..d8e72cc
--- /dev/null
+++ b/src/inspector/inspected-context.h
@@ -0,0 +1,64 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_INSPECTOR_INSPECTEDCONTEXT_H_
+#define V8_INSPECTOR_INSPECTEDCONTEXT_H_
+
+#include "src/base/macros.h"
+#include "src/inspector/string-16.h"
+
+#include "include/v8.h"
+
+namespace v8_inspector {
+
+class InjectedScript;
+class InjectedScriptHost;
+class V8ContextInfo;
+class V8InspectorImpl;
+
+class InspectedContext {
+ public:
+  ~InspectedContext();
+
+  v8::Local<v8::Context> context() const;
+  int contextId() const { return m_contextId; }
+  int contextGroupId() const { return m_contextGroupId; }
+  String16 origin() const { return m_origin; }
+  String16 humanReadableName() const { return m_humanReadableName; }
+  String16 auxData() const { return m_auxData; }
+
+  bool isReported() const { return m_reported; }
+  void setReported(bool reported) { m_reported = reported; }
+
+  v8::Isolate* isolate() const;
+  V8InspectorImpl* inspector() const { return m_inspector; }
+
+  InjectedScript* getInjectedScript() { return m_injectedScript.get(); }
+  bool createInjectedScript();
+  void discardInjectedScript();
+
+ private:
+  friend class V8InspectorImpl;
+  InspectedContext(V8InspectorImpl*, const V8ContextInfo&, int contextId);
+  static void weakCallback(const v8::WeakCallbackInfo<InspectedContext>&);
+  static void consoleWeakCallback(
+      const v8::WeakCallbackInfo<InspectedContext>&);
+
+  V8InspectorImpl* m_inspector;
+  v8::Global<v8::Context> m_context;
+  int m_contextId;
+  int m_contextGroupId;
+  const String16 m_origin;
+  const String16 m_humanReadableName;
+  const String16 m_auxData;
+  bool m_reported;
+  std::unique_ptr<InjectedScript> m_injectedScript;
+  v8::Global<v8::Object> m_console;
+
+  DISALLOW_COPY_AND_ASSIGN(InspectedContext);
+};
+
+}  // namespace v8_inspector
+
+#endif  // V8_INSPECTOR_INSPECTEDCONTEXT_H_
diff --git a/src/inspector/inspector.gyp b/src/inspector/inspector.gyp
index 5fc49b1..2d5c7a5 100644
--- a/src/inspector/inspector.gyp
+++ b/src/inspector/inspector.gyp
@@ -2,112 +2,108 @@
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
 
-{ 'variables': {
-    'protocol_path': '../../third_party/WebKit/Source/platform/inspector_protocol',
-    'protocol_sources': [
-      '<(SHARED_INTERMEDIATE_DIR)/inspector/Console.cpp',
-      '<(SHARED_INTERMEDIATE_DIR)/inspector/Console.h',
-      '<(SHARED_INTERMEDIATE_DIR)/inspector/Debugger.cpp',
-      '<(SHARED_INTERMEDIATE_DIR)/inspector/Debugger.h',
-      '<(SHARED_INTERMEDIATE_DIR)/inspector/HeapProfiler.cpp',
-      '<(SHARED_INTERMEDIATE_DIR)/inspector/HeapProfiler.h',
-      '<(SHARED_INTERMEDIATE_DIR)/inspector/Profiler.cpp',
-      '<(SHARED_INTERMEDIATE_DIR)/inspector/Profiler.h',
-      '<(SHARED_INTERMEDIATE_DIR)/inspector/public/Debugger.h',
-      '<(SHARED_INTERMEDIATE_DIR)/inspector/public/Runtime.h',
-      '<(SHARED_INTERMEDIATE_DIR)/inspector/Runtime.cpp',
-      '<(SHARED_INTERMEDIATE_DIR)/inspector/Runtime.h',
-    ]
+{
+  'variables': {
+    'protocol_path': '<(PRODUCT_DIR)/../../third_party/WebKit/Source/platform/inspector_protocol',
   },
+  'includes': [
+    'inspector.gypi',
+    '<(PRODUCT_DIR)/../../../third_party/WebKit/Source/platform/inspector_protocol/inspector_protocol.gypi',
+  ],
   'targets': [
-    { 'target_name': 'inspector_protocol_sources',
+    { 'target_name': 'inspector_injected_script',
       'type': 'none',
-      'variables': {
-        'jinja_module_files': [
-          # jinja2/__init__.py contains version string, so sufficient for package
-          '../third_party/jinja2/__init__.py',
-          '../third_party/markupsafe/__init__.py',  # jinja2 dep
-        ]
-      },
       'actions': [
         {
-          'action_name': 'generate_inspector_protocol_sources',
+          'action_name': 'convert_js_to_cpp_char_array',
           'inputs': [
-            # Source generator script.
-            '<(protocol_path)/CodeGenerator.py',
-            # Source code templates.
-            '<(protocol_path)/Exported_h.template',
-            '<(protocol_path)/Imported_h.template',
-            '<(protocol_path)/TypeBuilder_h.template',
-            '<(protocol_path)/TypeBuilder_cpp.template',
-            # Protocol definition.
+            'build/xxd.py',
+            '<(inspector_injected_script_source)',
+          ],
+          'outputs': [
+            '<(inspector_generated_injected_script)',
+          ],
+          'action': [
+            'python',
+            'build/xxd.py',
+            'InjectedScriptSource_js',
+            'injected-script-source.js',
+            '<@(_outputs)'
+          ],
+        },
+      ],
+      # Since this target generates header files, it needs to be a hard dependency.
+      'hard_dependency': 1,
+    },
+    { 'target_name': 'inspector_debugger_script',
+      'type': 'none',
+      'actions': [
+        {
+          'action_name': 'convert_js_to_cpp_char_array',
+          'inputs': [
+            'build/xxd.py',
+            '<(inspector_debugger_script_source)',
+          ],
+          'outputs': [
+            '<(inspector_generated_debugger_script)',
+          ],
+          'action': [
+            'python',
+            'build/xxd.py',
+            'DebuggerScript_js',
+            'debugger-script.js',
+            '<@(_outputs)'
+          ],
+        },
+      ],
+      # Since this target generates header files, it needs to be a hard dependency.
+      'hard_dependency': 1,
+    },
+    { 'target_name': 'protocol_compatibility',
+      'type': 'none',
+      'actions': [
+        {
+          'action_name': 'protocol_compatibility',
+          'inputs': [
             'js_protocol.json',
           ],
           'outputs': [
-            '<@(protocol_sources)',
+            '<@(SHARED_INTERMEDIATE_DIR)/src/js_protocol.stamp',
+          ],
+          'action': [
+            'python',
+            '<(protocol_path)/CheckProtocolCompatibility.py',
+            '--stamp', '<@(_outputs)',
+            'js_protocol.json',
+          ],
+          'message': 'Generating inspector protocol sources from protocol json definition',
+        },
+      ]
+    },
+    { 'target_name': 'protocol_generated_sources',
+      'type': 'none',
+      'dependencies': [ 'protocol_compatibility' ],
+      'actions': [
+        {
+          'action_name': 'protocol_generated_sources',
+          'inputs': [
+            'js_protocol.json',
+            'inspector_protocol_config.json',
+            '<@(inspector_protocol_files)',
+          ],
+          'outputs': [
+            '<@(inspector_generated_sources)',
           ],
           'action': [
             'python',
             '<(protocol_path)/CodeGenerator.py',
-            '--protocol', 'js_protocol.json',
-            '--string_type', 'String16',
-            '--export_macro', 'PLATFORM_EXPORT',
-            '--output_dir', '<(SHARED_INTERMEDIATE_DIR)/inspector',
-            '--output_package', 'inspector',
-            '--exported_dir', '<(SHARED_INTERMEDIATE_DIR)/inspector/public',
-            '--exported_package', 'inspector/public',
+            '--jinja_dir', '<(PRODUCT_DIR)/../../third_party',
+            '--output_base', '<(SHARED_INTERMEDIATE_DIR)/src/inspector',
+            '--config', 'inspector_protocol_config.json',
           ],
-          'message': 'Generating Inspector protocol backend sources from json definitions',
+          'message': 'Generating inspector protocol sources from protocol json',
         },
       ]
     },
-    { 'target_name': 'inspector_protocol',
-      'type': 'static_library',
-      'dependencies': [
-        'inspector_protocol_sources',
-      ],
-      'include_dirs+': [
-        '<(protocol_path)/../..',
-        '<(SHARED_INTERMEDIATE_DIR)',
-      ],
-      'defines': [
-        'V8_INSPECTOR_USE_STL',
-      ],
-      'msvs_disabled_warnings': [
-        4267,  # Truncation from size_t to int.
-        4305,  # Truncation from 'type1' to 'type2'.
-        4324,  # Struct padded due to declspec(align).
-        4714,  # Function marked forceinline not inlined.
-        4800,  # Value forced to bool.
-        4996,  # Deprecated function call.
-      ],
-      'sources': [
-        '<@(protocol_sources)',
-        '<(protocol_path)/Allocator.h',
-        '<(protocol_path)/Array.h',
-        '<(protocol_path)/BackendCallback.h',
-        '<(protocol_path)/CodeGenerator.py',
-        '<(protocol_path)/Collections.h',
-        '<(protocol_path)/DispatcherBase.cpp',
-        '<(protocol_path)/DispatcherBase.h',
-        '<(protocol_path)/ErrorSupport.cpp',
-        '<(protocol_path)/ErrorSupport.h',
-        '<(protocol_path)/FrontendChannel.h',
-        '<(protocol_path)/Maybe.h',
-        '<(protocol_path)/Object.cpp',
-        '<(protocol_path)/Object.h',
-        '<(protocol_path)/Parser.cpp',
-        '<(protocol_path)/Parser.h',
-        '<(protocol_path)/Platform.h',
-        '<(protocol_path)/PlatformSTL.h',
-        '<(protocol_path)/String16.cpp',
-        '<(protocol_path)/String16.h',
-        '<(protocol_path)/String16STL.cpp',
-        '<(protocol_path)/String16STL.h',
-        '<(protocol_path)/ValueConversions.h',
-        '<(protocol_path)/Values.cpp',
-        '<(protocol_path)/Values.h',
-      ]
-    },
   ],
 }
diff --git a/src/inspector/inspector.gypi b/src/inspector/inspector.gypi
new file mode 100644
index 0000000..863c038
--- /dev/null
+++ b/src/inspector/inspector.gypi
@@ -0,0 +1,95 @@
+# Copyright 2016 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+  'variables': {
+    'inspector_generated_sources': [
+      '<(SHARED_INTERMEDIATE_DIR)/src/inspector/protocol/Forward.h',
+      '<(SHARED_INTERMEDIATE_DIR)/src/inspector/protocol/Protocol.cpp',
+      '<(SHARED_INTERMEDIATE_DIR)/src/inspector/protocol/Protocol.h',
+      '<(SHARED_INTERMEDIATE_DIR)/src/inspector/protocol/Console.cpp',
+      '<(SHARED_INTERMEDIATE_DIR)/src/inspector/protocol/Console.h',
+      '<(SHARED_INTERMEDIATE_DIR)/src/inspector/protocol/Debugger.cpp',
+      '<(SHARED_INTERMEDIATE_DIR)/src/inspector/protocol/Debugger.h',
+      '<(SHARED_INTERMEDIATE_DIR)/src/inspector/protocol/HeapProfiler.cpp',
+      '<(SHARED_INTERMEDIATE_DIR)/src/inspector/protocol/HeapProfiler.h',
+      '<(SHARED_INTERMEDIATE_DIR)/src/inspector/protocol/Profiler.cpp',
+      '<(SHARED_INTERMEDIATE_DIR)/src/inspector/protocol/Profiler.h',
+      '<(SHARED_INTERMEDIATE_DIR)/src/inspector/protocol/Runtime.cpp',
+      '<(SHARED_INTERMEDIATE_DIR)/src/inspector/protocol/Runtime.h',
+      '<(SHARED_INTERMEDIATE_DIR)/src/inspector/protocol/Schema.cpp',
+      '<(SHARED_INTERMEDIATE_DIR)/src/inspector/protocol/Schema.h',
+      '<(SHARED_INTERMEDIATE_DIR)/include/inspector/Debugger.h',
+      '<(SHARED_INTERMEDIATE_DIR)/include/inspector/Runtime.h',
+      '<(SHARED_INTERMEDIATE_DIR)/include/inspector/Schema.h',
+    ],
+
+    'inspector_injected_script_source': 'injected-script-source.js',
+    'inspector_generated_injected_script': '<(SHARED_INTERMEDIATE_DIR)/src/inspector/injected-script-source.h',
+    'inspector_debugger_script_source': 'debugger-script.js',
+    'inspector_generated_debugger_script': '<(SHARED_INTERMEDIATE_DIR)/src/inspector/debugger-script.h',
+
+    'inspector_all_sources': [
+      '<@(inspector_generated_sources)',
+      '<(inspector_generated_injected_script)',
+      '<(inspector_generated_debugger_script)',
+      '../../include/v8-inspector.h',
+      '../../include/v8-inspector-protocol.h',
+      'inspector/injected-script.cc',
+      'inspector/injected-script.h',
+      'inspector/injected-script-native.cc',
+      'inspector/injected-script-native.h',
+      'inspector/inspected-context.cc',
+      'inspector/inspected-context.h',
+      'inspector/java-script-call-frame.cc',
+      'inspector/java-script-call-frame.h',
+      'inspector/protocol-platform.h',
+      'inspector/remote-object-id.cc',
+      'inspector/remote-object-id.h',
+      'inspector/script-breakpoint.h',
+      'inspector/search-util.cc',
+      'inspector/search-util.h',
+      'inspector/string-16.cc',
+      'inspector/string-16.h',
+      'inspector/string-util.cc',
+      'inspector/string-util.h',
+      'inspector/v8-console.cc',
+      'inspector/v8-console.h',
+      'inspector/v8-console-agent-impl.cc',
+      'inspector/v8-console-agent-impl.h',
+      'inspector/v8-console-message.cc',
+      'inspector/v8-console-message.h',
+      'inspector/v8-debugger.cc',
+      'inspector/v8-debugger.h',
+      'inspector/v8-debugger-agent-impl.cc',
+      'inspector/v8-debugger-agent-impl.h',
+      'inspector/v8-debugger-script.cc',
+      'inspector/v8-debugger-script.h',
+      'inspector/v8-function-call.cc',
+      'inspector/v8-function-call.h',
+      'inspector/v8-heap-profiler-agent-impl.cc',
+      'inspector/v8-heap-profiler-agent-impl.h',
+      'inspector/v8-injected-script-host.cc',
+      'inspector/v8-injected-script-host.h',
+      'inspector/v8-inspector-impl.cc',
+      'inspector/v8-inspector-impl.h',
+      'inspector/v8-inspector-session-impl.cc',
+      'inspector/v8-inspector-session-impl.h',
+      'inspector/v8-internal-value-type.cc',
+      'inspector/v8-internal-value-type.h',
+      'inspector/v8-profiler-agent-impl.cc',
+      'inspector/v8-profiler-agent-impl.h',
+      'inspector/v8-regex.cc',
+      'inspector/v8-regex.h',
+      'inspector/v8-runtime-agent-impl.cc',
+      'inspector/v8-runtime-agent-impl.h',
+      'inspector/v8-schema-agent-impl.cc',
+      'inspector/v8-schema-agent-impl.h',
+      'inspector/v8-stack-trace-impl.cc',
+      'inspector/v8-stack-trace-impl.h',
+      'inspector/v8-value-copier.cc',
+      'inspector/v8-value-copier.h',
+    ]
+  }
+}
diff --git a/src/inspector/inspector_protocol_config.json b/src/inspector/inspector_protocol_config.json
new file mode 100644
index 0000000..cb9e669
--- /dev/null
+++ b/src/inspector/inspector_protocol_config.json
@@ -0,0 +1,25 @@
+{
+    "protocol": {
+        "path": "js_protocol.json",
+        "package": "src/inspector/protocol",
+        "output": "protocol",
+        "namespace": ["v8_inspector", "protocol"]
+    },
+
+    "exported": {
+        "package": "include/inspector",
+        "output": "../../include/inspector",
+        "string_header": "v8-inspector.h",
+        "string_in": "StringView",
+        "string_out": "std::unique_ptr<StringBuffer>",
+        "to_string_out": "StringBufferImpl::adopt(%s)",
+        "export_macro": "V8_EXPORT"
+    },
+
+    "lib": {
+        "package": "src/inspector/protocol",
+        "output": "protocol",
+        "string_header": "src/inspector/string-util.h",
+        "platform_header": "src/inspector/protocol-platform.h"
+    }
+}
diff --git a/src/inspector/java-script-call-frame.cc b/src/inspector/java-script-call-frame.cc
new file mode 100644
index 0000000..b70af21
--- /dev/null
+++ b/src/inspector/java-script-call-frame.cc
@@ -0,0 +1,162 @@
+/*
+ * Copyright (c) 2010, Google Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ *     * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ *     * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "src/inspector/java-script-call-frame.h"
+
+#include "src/inspector/string-util.h"
+
+#include "include/v8-debug.h"
+
+namespace v8_inspector {
+
+JavaScriptCallFrame::JavaScriptCallFrame(v8::Local<v8::Context> debuggerContext,
+                                         v8::Local<v8::Object> callFrame)
+    : m_isolate(debuggerContext->GetIsolate()),
+      m_debuggerContext(m_isolate, debuggerContext),
+      m_callFrame(m_isolate, callFrame) {}
+
+JavaScriptCallFrame::~JavaScriptCallFrame() {}
+
+int JavaScriptCallFrame::callV8FunctionReturnInt(const char* name) const {
+  v8::HandleScope handleScope(m_isolate);
+  v8::MicrotasksScope microtasks(m_isolate,
+                                 v8::MicrotasksScope::kDoNotRunMicrotasks);
+  v8::Local<v8::Context> context =
+      v8::Local<v8::Context>::New(m_isolate, m_debuggerContext);
+  v8::Local<v8::Object> callFrame =
+      v8::Local<v8::Object>::New(m_isolate, m_callFrame);
+  v8::Local<v8::Function> func = v8::Local<v8::Function>::Cast(
+      callFrame->Get(context, toV8StringInternalized(m_isolate, name))
+          .ToLocalChecked());
+  v8::Local<v8::Value> result;
+  if (!func->Call(context, callFrame, 0, nullptr).ToLocal(&result) ||
+      !result->IsInt32())
+    return 0;
+  return result.As<v8::Int32>()->Value();
+}
+
+int JavaScriptCallFrame::sourceID() const {
+  return callV8FunctionReturnInt("sourceID");
+}
+
+int JavaScriptCallFrame::line() const {
+  return callV8FunctionReturnInt("line");
+}
+
+int JavaScriptCallFrame::column() const {
+  return callV8FunctionReturnInt("column");
+}
+
+int JavaScriptCallFrame::contextId() const {
+  return callV8FunctionReturnInt("contextId");
+}
+
+bool JavaScriptCallFrame::isAtReturn() const {
+  v8::HandleScope handleScope(m_isolate);
+  v8::Local<v8::Context> context =
+      v8::Local<v8::Context>::New(m_isolate, m_debuggerContext);
+  v8::Local<v8::Object> callFrame =
+      v8::Local<v8::Object>::New(m_isolate, m_callFrame);
+  v8::Local<v8::Value> result;
+  if (!callFrame->Get(context, toV8StringInternalized(m_isolate, "isAtReturn"))
+           .ToLocal(&result) ||
+      !result->IsBoolean())
+    return false;
+  return result.As<v8::Boolean>()->BooleanValue(context).FromMaybe(false);
+}
+
+v8::Local<v8::Object> JavaScriptCallFrame::details() const {
+  v8::MicrotasksScope microtasks(m_isolate,
+                                 v8::MicrotasksScope::kDoNotRunMicrotasks);
+  v8::Local<v8::Context> context =
+      v8::Local<v8::Context>::New(m_isolate, m_debuggerContext);
+  v8::Local<v8::Object> callFrame =
+      v8::Local<v8::Object>::New(m_isolate, m_callFrame);
+  v8::Local<v8::Function> func = v8::Local<v8::Function>::Cast(
+      callFrame->Get(context, toV8StringInternalized(m_isolate, "details"))
+          .ToLocalChecked());
+  return v8::Local<v8::Object>::Cast(
+      func->Call(context, callFrame, 0, nullptr).ToLocalChecked());
+}
+
+v8::MaybeLocal<v8::Value> JavaScriptCallFrame::evaluate(
+    v8::Local<v8::Value> expression) {
+  v8::MicrotasksScope microtasks(m_isolate,
+                                 v8::MicrotasksScope::kRunMicrotasks);
+  v8::Local<v8::Context> context =
+      v8::Local<v8::Context>::New(m_isolate, m_debuggerContext);
+  v8::Local<v8::Object> callFrame =
+      v8::Local<v8::Object>::New(m_isolate, m_callFrame);
+  v8::Local<v8::Function> evalFunction = v8::Local<v8::Function>::Cast(
+      callFrame->Get(context, toV8StringInternalized(m_isolate, "evaluate"))
+          .ToLocalChecked());
+  return evalFunction->Call(context, callFrame, 1, &expression);
+}
+
+v8::MaybeLocal<v8::Value> JavaScriptCallFrame::restart() {
+  v8::MicrotasksScope microtasks(m_isolate,
+                                 v8::MicrotasksScope::kDoNotRunMicrotasks);
+  v8::Local<v8::Context> context =
+      v8::Local<v8::Context>::New(m_isolate, m_debuggerContext);
+  v8::Local<v8::Object> callFrame =
+      v8::Local<v8::Object>::New(m_isolate, m_callFrame);
+  v8::Local<v8::Function> restartFunction = v8::Local<v8::Function>::Cast(
+      callFrame->Get(context, toV8StringInternalized(m_isolate, "restart"))
+          .ToLocalChecked());
+  v8::Debug::SetLiveEditEnabled(m_isolate, true);
+  v8::MaybeLocal<v8::Value> result = restartFunction->Call(
+      m_debuggerContext.Get(m_isolate), callFrame, 0, nullptr);
+  v8::Debug::SetLiveEditEnabled(m_isolate, false);
+  return result;
+}
+
+v8::MaybeLocal<v8::Value> JavaScriptCallFrame::setVariableValue(
+    int scopeNumber, v8::Local<v8::Value> variableName,
+    v8::Local<v8::Value> newValue) {
+  v8::MicrotasksScope microtasks(m_isolate,
+                                 v8::MicrotasksScope::kDoNotRunMicrotasks);
+  v8::Local<v8::Context> context =
+      v8::Local<v8::Context>::New(m_isolate, m_debuggerContext);
+  v8::Local<v8::Object> callFrame =
+      v8::Local<v8::Object>::New(m_isolate, m_callFrame);
+  v8::Local<v8::Function> setVariableValueFunction =
+      v8::Local<v8::Function>::Cast(
+          callFrame
+              ->Get(context,
+                    toV8StringInternalized(m_isolate, "setVariableValue"))
+              .ToLocalChecked());
+  v8::Local<v8::Value> argv[] = {
+      v8::Local<v8::Value>(v8::Integer::New(m_isolate, scopeNumber)),
+      variableName, newValue};
+  return setVariableValueFunction->Call(context, callFrame, arraysize(argv),
+                                        argv);
+}
+
+}  // namespace v8_inspector
diff --git a/src/inspector/java-script-call-frame.h b/src/inspector/java-script-call-frame.h
new file mode 100644
index 0000000..5a4ce19
--- /dev/null
+++ b/src/inspector/java-script-call-frame.h
@@ -0,0 +1,82 @@
+/*
+ * Copyright (c) 2010, Google Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ *     * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ *     * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef V8_INSPECTOR_JAVASCRIPTCALLFRAME_H_
+#define V8_INSPECTOR_JAVASCRIPTCALLFRAME_H_
+
+#include <vector>
+
+#include "src/base/macros.h"
+#include "src/inspector/protocol-platform.h"
+
+#include "include/v8.h"
+
+namespace v8_inspector {
+
+class JavaScriptCallFrame {
+ public:
+  static std::unique_ptr<JavaScriptCallFrame> create(
+      v8::Local<v8::Context> debuggerContext, v8::Local<v8::Object> callFrame) {
+    return wrapUnique(new JavaScriptCallFrame(debuggerContext, callFrame));
+  }
+  ~JavaScriptCallFrame();
+
+  int sourceID() const;
+  int line() const;
+  int column() const;
+  int contextId() const;
+
+  bool isAtReturn() const;
+  v8::Local<v8::Object> details() const;
+
+  v8::MaybeLocal<v8::Value> evaluate(v8::Local<v8::Value> expression);
+  v8::MaybeLocal<v8::Value> restart();
+  v8::MaybeLocal<v8::Value> setVariableValue(int scopeNumber,
+                                             v8::Local<v8::Value> variableName,
+                                             v8::Local<v8::Value> newValue);
+
+ private:
+  JavaScriptCallFrame(v8::Local<v8::Context> debuggerContext,
+                      v8::Local<v8::Object> callFrame);
+
+  int callV8FunctionReturnInt(const char* name) const;
+
+  v8::Isolate* m_isolate;
+  v8::Global<v8::Context> m_debuggerContext;
+  v8::Global<v8::Object> m_callFrame;
+
+  DISALLOW_COPY_AND_ASSIGN(JavaScriptCallFrame);
+};
+
+using JavaScriptCallFrames = std::vector<std::unique_ptr<JavaScriptCallFrame>>;
+
+}  // namespace v8_inspector
+
+#endif  // V8_INSPECTOR_JAVASCRIPTCALLFRAME_H_
diff --git a/src/inspector/js_protocol-1.2.json b/src/inspector/js_protocol-1.2.json
new file mode 100644
index 0000000..aff6806
--- /dev/null
+++ b/src/inspector/js_protocol-1.2.json
@@ -0,0 +1,997 @@
+{
+    "version": { "major": "1", "minor": "2" },
+    "domains": [
+    {
+        "domain": "Schema",
+        "description": "Provides information about the protocol schema.",
+        "types": [
+            {
+                "id": "Domain",
+                "type": "object",
+                "description": "Description of the protocol domain.",
+                "exported": true,
+                "properties": [
+                    { "name": "name", "type": "string", "description": "Domain name." },
+                    { "name": "version", "type": "string", "description": "Domain version." }
+                ]
+            }
+        ],
+        "commands": [
+            {
+                "name": "getDomains",
+                "description": "Returns supported domains.",
+                "handlers": ["browser", "renderer"],
+                "returns": [
+                    { "name": "domains", "type": "array", "items": { "$ref": "Domain" }, "description": "List of supported domains." }
+                ]
+            }
+        ]
+    },
+    {
+        "domain": "Runtime",
+        "description": "Runtime domain exposes JavaScript runtime by means of remote evaluation and mirror objects. Evaluation results are returned as mirror object that expose object type, string representation and unique identifier that can be used for further object reference. Original objects are maintained in memory unless they are either explicitly released or are released along with the other objects in their object group.",
+        "types": [
+            {
+                "id": "ScriptId",
+                "type": "string",
+                "description": "Unique script identifier."
+            },
+            {
+                "id": "RemoteObjectId",
+                "type": "string",
+                "description": "Unique object identifier."
+            },
+            {
+                "id": "UnserializableValue",
+                "type": "string",
+                "enum": ["Infinity", "NaN", "-Infinity", "-0"],
+                "description": "Primitive value which cannot be JSON-stringified."
+            },
+            {
+                "id": "RemoteObject",
+                "type": "object",
+                "description": "Mirror object referencing original JavaScript object.",
+                "exported": true,
+                "properties": [
+                    { "name": "type", "type": "string", "enum": ["object", "function", "undefined", "string", "number", "boolean", "symbol"], "description": "Object type." },
+                    { "name": "subtype", "type": "string", "optional": true, "enum": ["array", "null", "node", "regexp", "date", "map", "set", "iterator", "generator", "error", "proxy", "promise", "typedarray"], "description": "Object subtype hint. Specified for <code>object</code> type values only." },
+                    { "name": "className", "type": "string", "optional": true, "description": "Object class (constructor) name. Specified for <code>object</code> type values only." },
+                    { "name": "value", "type": "any", "optional": true, "description": "Remote object value in case of primitive values or JSON values (if it was requested)." },
+                    { "name": "unserializableValue", "$ref": "UnserializableValue", "optional": true, "description": "Primitive value which can not be JSON-stringified does not have <code>value</code>, but gets this property." },
+                    { "name": "description", "type": "string", "optional": true, "description": "String representation of the object." },
+                    { "name": "objectId", "$ref": "RemoteObjectId", "optional": true, "description": "Unique object identifier (for non-primitive values)." },
+                    { "name": "preview", "$ref": "ObjectPreview", "optional": true, "description": "Preview containing abbreviated property values. Specified for <code>object</code> type values only.", "experimental": true },
+                    { "name": "customPreview", "$ref": "CustomPreview", "optional": true, "experimental": true}
+                ]
+            },
+            {
+                "id": "CustomPreview",
+                "type": "object",
+                "experimental": true,
+                "properties": [
+                    { "name": "header", "type": "string"},
+                    { "name": "hasBody", "type": "boolean"},
+                    { "name": "formatterObjectId", "$ref": "RemoteObjectId"},
+                    { "name": "bindRemoteObjectFunctionId", "$ref": "RemoteObjectId" },
+                    { "name": "configObjectId", "$ref": "RemoteObjectId", "optional": true }
+                ]
+            },
+            {
+                "id": "ObjectPreview",
+                "type": "object",
+                "experimental": true,
+                "description": "Object containing abbreviated remote object value.",
+                "properties": [
+                    { "name": "type", "type": "string", "enum": ["object", "function", "undefined", "string", "number", "boolean", "symbol"], "description": "Object type." },
+                    { "name": "subtype", "type": "string", "optional": true, "enum": ["array", "null", "node", "regexp", "date", "map", "set", "iterator", "generator", "error"], "description": "Object subtype hint. Specified for <code>object</code> type values only." },
+                    { "name": "description", "type": "string", "optional": true, "description": "String representation of the object." },
+                    { "name": "overflow", "type": "boolean", "description": "True iff some of the properties or entries of the original object did not fit." },
+                    { "name": "properties", "type": "array", "items": { "$ref": "PropertyPreview" }, "description": "List of the properties." },
+                    { "name": "entries", "type": "array", "items": { "$ref": "EntryPreview" }, "optional": true, "description": "List of the entries. Specified for <code>map</code> and <code>set</code> subtype values only." }
+                ]
+            },
+            {
+                "id": "PropertyPreview",
+                "type": "object",
+                "experimental": true,
+                "properties": [
+                    { "name": "name", "type": "string", "description": "Property name." },
+                    { "name": "type", "type": "string", "enum": ["object", "function", "undefined", "string", "number", "boolean", "symbol", "accessor"], "description": "Object type. Accessor means that the property itself is an accessor property." },
+                    { "name": "value", "type": "string", "optional": true, "description": "User-friendly property value string." },
+                    { "name": "valuePreview", "$ref": "ObjectPreview", "optional": true, "description": "Nested value preview." },
+                    { "name": "subtype", "type": "string", "optional": true, "enum": ["array", "null", "node", "regexp", "date", "map", "set", "iterator", "generator", "error"], "description": "Object subtype hint. Specified for <code>object</code> type values only." }
+                ]
+            },
+            {
+                "id": "EntryPreview",
+                "type": "object",
+                "experimental": true,
+                "properties": [
+                    { "name": "key", "$ref": "ObjectPreview", "optional": true, "description": "Preview of the key. Specified for map-like collection entries." },
+                    { "name": "value", "$ref": "ObjectPreview", "description": "Preview of the value." }
+                ]
+            },
+            {
+                "id": "PropertyDescriptor",
+                "type": "object",
+                "description": "Object property descriptor.",
+                "properties": [
+                    { "name": "name", "type": "string", "description": "Property name or symbol description." },
+                    { "name": "value", "$ref": "RemoteObject", "optional": true, "description": "The value associated with the property." },
+                    { "name": "writable", "type": "boolean", "optional": true, "description": "True if the value associated with the property may be changed (data descriptors only)." },
+                    { "name": "get", "$ref": "RemoteObject", "optional": true, "description": "A function which serves as a getter for the property, or <code>undefined</code> if there is no getter (accessor descriptors only)." },
+                    { "name": "set", "$ref": "RemoteObject", "optional": true, "description": "A function which serves as a setter for the property, or <code>undefined</code> if there is no setter (accessor descriptors only)." },
+                    { "name": "configurable", "type": "boolean", "description": "True if the type of this property descriptor may be changed and if the property may be deleted from the corresponding object." },
+                    { "name": "enumerable", "type": "boolean", "description": "True if this property shows up during enumeration of the properties on the corresponding object." },
+                    { "name": "wasThrown", "type": "boolean", "optional": true, "description": "True if the result was thrown during the evaluation." },
+                    { "name": "isOwn", "optional": true, "type": "boolean", "description": "True if the property is owned for the object." },
+                    { "name": "symbol", "$ref": "RemoteObject", "optional": true, "description": "Property symbol object, if the property is of the <code>symbol</code> type." }
+                ]
+            },
+            {
+                "id": "InternalPropertyDescriptor",
+                "type": "object",
+                "description": "Object internal property descriptor. This property isn't normally visible in JavaScript code.",
+                "properties": [
+                    { "name": "name", "type": "string", "description": "Conventional property name." },
+                    { "name": "value", "$ref": "RemoteObject", "optional": true, "description": "The value associated with the property." }
+                ]
+            },
+            {
+                "id": "CallArgument",
+                "type": "object",
+                "description": "Represents function call argument. Either remote object id <code>objectId</code>, primitive <code>value</code>, unserializable primitive value or neither of them (for undefined) should be specified.",
+                "properties": [
+                    { "name": "value", "type": "any", "optional": true, "description": "Primitive value." },
+                    { "name": "unserializableValue", "$ref": "UnserializableValue", "optional": true, "description": "Primitive value which can not be JSON-stringified." },
+                    { "name": "objectId", "$ref": "RemoteObjectId", "optional": true, "description": "Remote object handle." }
+                ]
+            },
+            {
+                "id": "ExecutionContextId",
+                "type": "integer",
+                "description": "Id of an execution context."
+            },
+            {
+                "id": "ExecutionContextDescription",
+                "type": "object",
+                "description": "Description of an isolated world.",
+                "properties": [
+                    { "name": "id", "$ref": "ExecutionContextId", "description": "Unique id of the execution context. It can be used to specify in which execution context script evaluation should be performed." },
+                    { "name": "origin", "type": "string", "description": "Execution context origin." },
+                    { "name": "name", "type": "string", "description": "Human readable name describing given context." },
+                    { "name": "auxData", "type": "object", "optional": true, "description": "Embedder-specific auxiliary data." }
+                ]
+            },
+            {
+                "id": "ExceptionDetails",
+                "type": "object",
+                "description": "Detailed information about exception (or error) that was thrown during script compilation or execution.",
+                "properties": [
+                    { "name": "exceptionId", "type": "integer", "description": "Exception id." },
+                    { "name": "text", "type": "string", "description": "Exception text, which should be used together with exception object when available." },
+                    { "name": "lineNumber", "type": "integer", "description": "Line number of the exception location (0-based)." },
+                    { "name": "columnNumber", "type": "integer", "description": "Column number of the exception location (0-based)." },
+                    { "name": "scriptId", "$ref": "ScriptId", "optional": true, "description": "Script ID of the exception location." },
+                    { "name": "url", "type": "string", "optional": true, "description": "URL of the exception location, to be used when the script was not reported." },
+                    { "name": "stackTrace", "$ref": "StackTrace", "optional": true, "description": "JavaScript stack trace if available." },
+                    { "name": "exception", "$ref": "RemoteObject", "optional": true, "description": "Exception object if available." },
+                    { "name": "executionContextId", "$ref": "ExecutionContextId", "optional": true, "description": "Identifier of the context where exception happened." }
+                ]
+            },
+            {
+                "id": "Timestamp",
+                "type": "number",
+                "description": "Number of milliseconds since epoch."
+            },
+            {
+                "id": "CallFrame",
+                "type": "object",
+                "description": "Stack entry for runtime errors and assertions.",
+                "properties": [
+                    { "name": "functionName", "type": "string", "description": "JavaScript function name." },
+                    { "name": "scriptId", "$ref": "ScriptId", "description": "JavaScript script id." },
+                    { "name": "url", "type": "string", "description": "JavaScript script name or url." },
+                    { "name": "lineNumber", "type": "integer", "description": "JavaScript script line number (0-based)." },
+                    { "name": "columnNumber", "type": "integer", "description": "JavaScript script column number (0-based)." }
+                ]
+            },
+            {
+                "id": "StackTrace",
+                "type": "object",
+                "description": "Call frames for assertions or error messages.",
+                "exported": true,
+                "properties": [
+                    { "name": "description", "type": "string", "optional": true, "description": "String label of this stack trace. For async traces this may be a name of the function that initiated the async call." },
+                    { "name": "callFrames", "type": "array", "items": { "$ref": "CallFrame" }, "description": "JavaScript function name." },
+                    { "name": "parent", "$ref": "StackTrace", "optional": true, "description": "Asynchronous JavaScript stack trace that preceded this stack, if available." }
+                ]
+            }
+        ],
+        "commands": [
+            {
+                "name": "evaluate",
+                "async": true,
+                "parameters": [
+                    { "name": "expression", "type": "string", "description": "Expression to evaluate." },
+                    { "name": "objectGroup", "type": "string", "optional": true, "description": "Symbolic group name that can be used to release multiple objects." },
+                    { "name": "includeCommandLineAPI", "type": "boolean", "optional": true, "description": "Determines whether Command Line API should be available during the evaluation." },
+                    { "name": "silent", "type": "boolean", "optional": true, "description": "In silent mode exceptions thrown during evaluation are not reported and do not pause execution. Overrides <code>setPauseOnException</code> state." },
+                    { "name": "contextId", "$ref": "ExecutionContextId", "optional": true, "description": "Specifies in which execution context to perform evaluation. If the parameter is omitted the evaluation will be performed in the context of the inspected page." },
+                    { "name": "returnByValue", "type": "boolean", "optional": true, "description": "Whether the result is expected to be a JSON object that should be sent by value." },
+                    { "name": "generatePreview", "type": "boolean", "optional": true, "experimental": true, "description": "Whether preview should be generated for the result." },
+                    { "name": "userGesture", "type": "boolean", "optional": true, "experimental": true, "description": "Whether execution should be treated as initiated by user in the UI." },
+                    { "name": "awaitPromise", "type": "boolean", "optional":true, "description": "Whether execution should wait for promise to be resolved. If the result of evaluation is not a Promise, it's considered to be an error." }
+                ],
+                "returns": [
+                    { "name": "result", "$ref": "RemoteObject", "description": "Evaluation result." },
+                    { "name": "exceptionDetails", "$ref": "ExceptionDetails", "optional": true, "description": "Exception details."}
+                ],
+                "description": "Evaluates expression on global object."
+            },
+            {
+                "name": "awaitPromise",
+                "async": true,
+                "parameters": [
+                    { "name": "promiseObjectId", "$ref": "RemoteObjectId", "description": "Identifier of the promise." },
+                    { "name": "returnByValue", "type": "boolean", "optional": true, "description": "Whether the result is expected to be a JSON object that should be sent by value." },
+                    { "name": "generatePreview", "type": "boolean", "optional": true, "description": "Whether preview should be generated for the result." }
+                ],
+                "returns": [
+                    { "name": "result", "$ref": "RemoteObject", "description": "Promise result. Will contain rejected value if promise was rejected." },
+                    { "name": "exceptionDetails", "$ref": "ExceptionDetails", "optional": true, "description": "Exception details if stack trace is available."}
+                ],
+                "description": "Add handler to promise with given promise object id."
+            },
+            {
+                "name": "callFunctionOn",
+                "async": true,
+                "parameters": [
+                    { "name": "objectId", "$ref": "RemoteObjectId", "description": "Identifier of the object to call function on." },
+                    { "name": "functionDeclaration", "type": "string", "description": "Declaration of the function to call." },
+                    { "name": "arguments", "type": "array", "items": { "$ref": "CallArgument", "description": "Call argument." }, "optional": true, "description": "Call arguments. All call arguments must belong to the same JavaScript world as the target object." },
+                    { "name": "silent", "type": "boolean", "optional": true, "description": "In silent mode exceptions thrown during evaluation are not reported and do not pause execution. Overrides <code>setPauseOnException</code> state." },
+                    { "name": "returnByValue", "type": "boolean", "optional": true, "description": "Whether the result is expected to be a JSON object which should be sent by value." },
+                    { "name": "generatePreview", "type": "boolean", "optional": true, "experimental": true, "description": "Whether preview should be generated for the result." },
+                    { "name": "userGesture", "type": "boolean", "optional": true, "experimental": true, "description": "Whether execution should be treated as initiated by user in the UI." },
+                    { "name": "awaitPromise", "type": "boolean", "optional":true, "description": "Whether execution should wait for promise to be resolved. If the result of evaluation is not a Promise, it's considered to be an error." }
+                ],
+                "returns": [
+                    { "name": "result", "$ref": "RemoteObject", "description": "Call result." },
+                    { "name": "exceptionDetails", "$ref": "ExceptionDetails", "optional": true, "description": "Exception details."}
+                ],
+                "description": "Calls function with given declaration on the given object. Object group of the result is inherited from the target object."
+            },
+            {
+                "name": "getProperties",
+                "parameters": [
+                    { "name": "objectId", "$ref": "RemoteObjectId", "description": "Identifier of the object to return properties for." },
+                    { "name": "ownProperties", "optional": true, "type": "boolean", "description": "If true, returns properties belonging only to the element itself, not to its prototype chain." },
+                    { "name": "accessorPropertiesOnly", "optional": true, "type": "boolean", "description": "If true, returns accessor properties (with getter/setter) only; internal properties are not returned either.", "experimental": true },
+                    { "name": "generatePreview", "type": "boolean", "optional": true, "experimental": true, "description": "Whether preview should be generated for the results." }
+                ],
+                "returns": [
+                    { "name": "result", "type": "array", "items": { "$ref": "PropertyDescriptor" }, "description": "Object properties." },
+                    { "name": "internalProperties", "optional": true, "type": "array", "items": { "$ref": "InternalPropertyDescriptor" }, "description": "Internal object properties (only of the element itself)." },
+                    { "name": "exceptionDetails", "$ref": "ExceptionDetails", "optional": true, "description": "Exception details."}
+                ],
+                "description": "Returns properties of a given object. Object group of the result is inherited from the target object."
+            },
+            {
+                "name": "releaseObject",
+                "parameters": [
+                    { "name": "objectId", "$ref": "RemoteObjectId", "description": "Identifier of the object to release." }
+                ],
+                "description": "Releases remote object with given id."
+            },
+            {
+                "name": "releaseObjectGroup",
+                "parameters": [
+                    { "name": "objectGroup", "type": "string", "description": "Symbolic object group name." }
+                ],
+                "description": "Releases all remote objects that belong to a given group."
+            },
+            {
+                "name": "runIfWaitingForDebugger",
+                "description": "Tells inspected instance to run if it was waiting for debugger to attach."
+            },
+            {
+                "name": "enable",
+                "description": "Enables reporting of execution contexts creation by means of <code>executionContextCreated</code> event. When the reporting gets enabled the event will be sent immediately for each existing execution context."
+            },
+            {
+                "name": "disable",
+                "description": "Disables reporting of execution contexts creation."
+            },
+            {
+                "name": "discardConsoleEntries",
+                "description": "Discards collected exceptions and console API calls."
+            },
+            {
+                "name": "setCustomObjectFormatterEnabled",
+                "parameters": [
+                    {
+                        "name": "enabled",
+                        "type": "boolean"
+                    }
+                ],
+                "experimental": true
+            },
+            {
+                "name": "compileScript",
+                "parameters": [
+                    { "name": "expression", "type": "string", "description": "Expression to compile." },
+                    { "name": "sourceURL", "type": "string", "description": "Source url to be set for the script." },
+                    { "name": "persistScript", "type": "boolean", "description": "Specifies whether the compiled script should be persisted." },
+                    { "name": "executionContextId", "$ref": "ExecutionContextId", "optional": true, "description": "Specifies in which execution context to perform script run. If the parameter is omitted the evaluation will be performed in the context of the inspected page." }
+                ],
+                "returns": [
+                    { "name": "scriptId", "$ref": "ScriptId", "optional": true, "description": "Id of the script." },
+                    { "name": "exceptionDetails", "$ref": "ExceptionDetails", "optional": true, "description": "Exception details."}
+                ],
+                "description": "Compiles expression."
+            },
+            {
+                "name": "runScript",
+                "async": true,
+                "parameters": [
+                    { "name": "scriptId", "$ref": "ScriptId", "description": "Id of the script to run." },
+                    { "name": "executionContextId", "$ref": "ExecutionContextId", "optional": true, "description": "Specifies in which execution context to perform script run. If the parameter is omitted the evaluation will be performed in the context of the inspected page." },
+                    { "name": "objectGroup", "type": "string", "optional": true, "description": "Symbolic group name that can be used to release multiple objects." },
+                    { "name": "silent", "type": "boolean", "optional": true, "description": "In silent mode exceptions thrown during evaluation are not reported and do not pause execution. Overrides <code>setPauseOnException</code> state." },
+                    { "name": "includeCommandLineAPI", "type": "boolean", "optional": true, "description": "Determines whether Command Line API should be available during the evaluation." },
+                    { "name": "returnByValue", "type": "boolean", "optional": true, "description": "Whether the result is expected to be a JSON object which should be sent by value." },
+                    { "name": "generatePreview", "type": "boolean", "optional": true, "description": "Whether preview should be generated for the result." },
+                    { "name": "awaitPromise", "type": "boolean", "optional": true, "description": "Whether execution should wait for promise to be resolved. If the result of evaluation is not a Promise, it's considered to be an error." }
+                ],
+                "returns": [
+                    { "name": "result", "$ref": "RemoteObject", "description": "Run result." },
+                    { "name": "exceptionDetails", "$ref": "ExceptionDetails", "optional": true, "description": "Exception details."}
+                ],
+                "description": "Runs script with given id in a given context."
+            }
+        ],
+        "events": [
+            {
+                "name": "executionContextCreated",
+                "parameters": [
+                    { "name": "context", "$ref": "ExecutionContextDescription", "description": "A newly created execution contex." }
+                ],
+                "description": "Issued when new execution context is created."
+            },
+            {
+                "name": "executionContextDestroyed",
+                "parameters": [
+                    { "name": "executionContextId", "$ref": "ExecutionContextId", "description": "Id of the destroyed context" }
+                ],
+                "description": "Issued when execution context is destroyed."
+            },
+            {
+                "name": "executionContextsCleared",
+                "description": "Issued when all executionContexts were cleared in browser"
+            },
+            {
+                "name": "exceptionThrown",
+                "description": "Issued when exception was thrown and unhandled.",
+                "parameters": [
+                    { "name": "timestamp", "$ref": "Timestamp", "description": "Timestamp of the exception." },
+                    { "name": "exceptionDetails", "$ref": "ExceptionDetails" }
+                ]
+            },
+            {
+                "name": "exceptionRevoked",
+                "description": "Issued when unhandled exception was revoked.",
+                "parameters": [
+                    { "name": "reason", "type": "string", "description": "Reason describing why exception was revoked." },
+                    { "name": "exceptionId", "type": "integer", "description": "The id of revoked exception, as reported in <code>exceptionUnhandled</code>." }
+                ]
+            },
+            {
+                "name": "consoleAPICalled",
+                "description": "Issued when console API was called.",
+                "parameters": [
+                    { "name": "type", "type": "string", "enum": ["log", "debug", "info", "error", "warning", "dir", "dirxml", "table", "trace", "clear", "startGroup", "startGroupCollapsed", "endGroup", "assert", "profile", "profileEnd"], "description": "Type of the call." },
+                    { "name": "args", "type": "array", "items": { "$ref": "RemoteObject" }, "description": "Call arguments." },
+                    { "name": "executionContextId", "$ref": "ExecutionContextId", "description": "Identifier of the context where the call was made." },
+                    { "name": "timestamp", "$ref": "Timestamp", "description": "Call timestamp." },
+                    { "name": "stackTrace", "$ref": "StackTrace", "optional": true, "description": "Stack trace captured when the call was made." }
+                ]
+            },
+            {
+                "name": "inspectRequested",
+                "description": "Issued when object should be inspected (for example, as a result of inspect() command line API call).",
+                "parameters": [
+                    { "name": "object", "$ref": "RemoteObject" },
+                    { "name": "hints", "type": "object" }
+                ]
+            }
+        ]
+    },
+    {
+        "domain": "Debugger",
+        "description": "Debugger domain exposes JavaScript debugging capabilities. It allows setting and removing breakpoints, stepping through execution, exploring stack traces, etc.",
+        "dependencies": ["Runtime"],
+        "types": [
+            {
+                "id": "BreakpointId",
+                "type": "string",
+                "description": "Breakpoint identifier."
+            },
+            {
+                "id": "CallFrameId",
+                "type": "string",
+                "description": "Call frame identifier."
+            },
+            {
+                "id": "Location",
+                "type": "object",
+                "properties": [
+                    { "name": "scriptId", "$ref": "Runtime.ScriptId", "description": "Script identifier as reported in the <code>Debugger.scriptParsed</code>." },
+                    { "name": "lineNumber", "type": "integer", "description": "Line number in the script (0-based)." },
+                    { "name": "columnNumber", "type": "integer", "optional": true, "description": "Column number in the script (0-based)." }
+                ],
+                "description": "Location in the source code."
+            },
+            {
+                "id": "ScriptPosition",
+                "experimental": true,
+                "type": "object",
+                "properties": [
+                    { "name": "lineNumber", "type": "integer" },
+                    { "name": "columnNumber", "type": "integer" }
+                ],
+                "description": "Location in the source code."
+            },
+            {
+                "id": "CallFrame",
+                "type": "object",
+                "properties": [
+                    { "name": "callFrameId", "$ref": "CallFrameId", "description": "Call frame identifier. This identifier is only valid while the virtual machine is paused." },
+                    { "name": "functionName", "type": "string", "description": "Name of the JavaScript function called on this call frame." },
+                    { "name": "functionLocation", "$ref": "Location", "optional": true, "experimental": true, "description": "Location in the source code." },
+                    { "name": "location", "$ref": "Location", "description": "Location in the source code." },
+                    { "name": "scopeChain", "type": "array", "items": { "$ref": "Scope" }, "description": "Scope chain for this call frame." },
+                    { "name": "this", "$ref": "Runtime.RemoteObject", "description": "<code>this</code> object for this call frame." },
+                    { "name": "returnValue", "$ref": "Runtime.RemoteObject", "optional": true, "description": "The value being returned, if the function is at return point." }
+                ],
+                "description": "JavaScript call frame. Array of call frames form the call stack."
+            },
+            {
+                "id": "Scope",
+                "type": "object",
+                "properties": [
+                    { "name": "type", "type": "string", "enum": ["global", "local", "with", "closure", "catch", "block", "script"], "description": "Scope type." },
+                    { "name": "object", "$ref": "Runtime.RemoteObject", "description": "Object representing the scope. For <code>global</code> and <code>with</code> scopes it represents the actual object; for the rest of the scopes, it is artificial transient object enumerating scope variables as its properties." },
+                    { "name": "name", "type": "string", "optional": true },
+                    { "name": "startLocation", "$ref": "Location", "optional": true, "description": "Location in the source code where scope starts" },
+                    { "name": "endLocation", "$ref": "Location", "optional": true, "description": "Location in the source code where scope ends" }
+                ],
+                "description": "Scope description."
+            },
+            {
+                "id": "SearchMatch",
+                "type": "object",
+                "description": "Search match for resource.",
+                "exported": true,
+                "properties": [
+                    { "name": "lineNumber", "type": "number", "description": "Line number in resource content." },
+                    { "name": "lineContent", "type": "string", "description": "Line with match content." }
+                ],
+                "experimental": true
+            }
+        ],
+        "commands": [
+            {
+                "name": "enable",
+                "description": "Enables debugger for the given page. Clients should not assume that the debugging has been enabled until the result for this command is received."
+            },
+            {
+                "name": "disable",
+                "description": "Disables debugger for given page."
+            },
+            {
+                "name": "setBreakpointsActive",
+                "parameters": [
+                    { "name": "active", "type": "boolean", "description": "New value for breakpoints active state." }
+                ],
+                "description": "Activates / deactivates all breakpoints on the page."
+            },
+            {
+                "name": "setSkipAllPauses",
+                "parameters": [
+                    { "name": "skip", "type": "boolean", "description": "New value for skip pauses state." }
+                ],
+                "description": "Makes page not interrupt on any pauses (breakpoint, exception, dom exception etc)."
+            },
+            {
+                "name": "setBreakpointByUrl",
+                "parameters": [
+                    { "name": "lineNumber", "type": "integer", "description": "Line number to set breakpoint at." },
+                    { "name": "url", "type": "string", "optional": true, "description": "URL of the resources to set breakpoint on." },
+                    { "name": "urlRegex", "type": "string", "optional": true, "description": "Regex pattern for the URLs of the resources to set breakpoints on. Either <code>url</code> or <code>urlRegex</code> must be specified." },
+                    { "name": "columnNumber", "type": "integer", "optional": true, "description": "Offset in the line to set breakpoint at." },
+                    { "name": "condition", "type": "string", "optional": true, "description": "Expression to use as a breakpoint condition. When specified, debugger will only stop on the breakpoint if this expression evaluates to true." }
+                ],
+                "returns": [
+                    { "name": "breakpointId", "$ref": "BreakpointId", "description": "Id of the created breakpoint for further reference." },
+                    { "name": "locations", "type": "array", "items": { "$ref": "Location" }, "description": "List of the locations this breakpoint resolved into upon addition." }
+                ],
+                "description": "Sets JavaScript breakpoint at given location specified either by URL or URL regex. Once this command is issued, all existing parsed scripts will have breakpoints resolved and returned in <code>locations</code> property. Further matching script parsing will result in subsequent <code>breakpointResolved</code> events issued. This logical breakpoint will survive page reloads."
+            },
+            {
+                "name": "setBreakpoint",
+                "parameters": [
+                    { "name": "location", "$ref": "Location", "description": "Location to set breakpoint in." },
+                    { "name": "condition", "type": "string", "optional": true, "description": "Expression to use as a breakpoint condition. When specified, debugger will only stop on the breakpoint if this expression evaluates to true." }
+                ],
+                "returns": [
+                    { "name": "breakpointId", "$ref": "BreakpointId", "description": "Id of the created breakpoint for further reference." },
+                    { "name": "actualLocation", "$ref": "Location", "description": "Location this breakpoint resolved into." }
+                ],
+                "description": "Sets JavaScript breakpoint at a given location."
+            },
+            {
+                "name": "removeBreakpoint",
+                "parameters": [
+                    { "name": "breakpointId", "$ref": "BreakpointId" }
+                ],
+                "description": "Removes JavaScript breakpoint."
+            },
+            {
+                "name": "continueToLocation",
+                "parameters": [
+                    { "name": "location", "$ref": "Location", "description": "Location to continue to." }
+                ],
+                "description": "Continues execution until specific location is reached."
+            },
+            {
+                "name": "stepOver",
+                "description": "Steps over the statement."
+            },
+            {
+                "name": "stepInto",
+                "description": "Steps into the function call."
+            },
+            {
+                "name": "stepOut",
+                "description": "Steps out of the function call."
+            },
+            {
+                "name": "pause",
+                "description": "Stops on the next JavaScript statement."
+            },
+            {
+                "name": "resume",
+                "description": "Resumes JavaScript execution."
+            },
+            {
+                "name": "searchInContent",
+                "parameters": [
+                    { "name": "scriptId", "$ref": "Runtime.ScriptId", "description": "Id of the script to search in." },
+                    { "name": "query", "type": "string", "description": "String to search for."  },
+                    { "name": "caseSensitive", "type": "boolean", "optional": true, "description": "If true, search is case sensitive." },
+                    { "name": "isRegex", "type": "boolean", "optional": true, "description": "If true, treats string parameter as regex." }
+                ],
+                "returns": [
+                    { "name": "result", "type": "array", "items": { "$ref": "SearchMatch" }, "description": "List of search matches." }
+                ],
+                "experimental": true,
+                "description": "Searches for given string in script content."
+            },
+            {
+                "name": "setScriptSource",
+                "parameters": [
+                    { "name": "scriptId", "$ref": "Runtime.ScriptId", "description": "Id of the script to edit." },
+                    { "name": "scriptSource", "type": "string", "description": "New content of the script." },
+                    { "name": "dryRun", "type": "boolean", "optional": true, "description": " If true the change will not actually be applied. Dry run may be used to get result description without actually modifying the code." }
+                ],
+                "returns": [
+                    { "name": "callFrames", "type": "array", "optional": true, "items": { "$ref": "CallFrame" }, "description": "New stack trace in case editing has happened while VM was stopped." },
+                    { "name": "stackChanged", "type": "boolean", "optional": true, "description": "Whether current call stack  was modified after applying the changes." },
+                    { "name": "asyncStackTrace", "$ref": "Runtime.StackTrace", "optional": true, "description": "Async stack trace, if any." },
+                    { "name": "exceptionDetails", "optional": true, "$ref": "Runtime.ExceptionDetails", "description": "Exception details if any." }
+                ],
+                "description": "Edits JavaScript source live."
+            },
+            {
+                "name": "restartFrame",
+                "parameters": [
+                    { "name": "callFrameId", "$ref": "CallFrameId", "description": "Call frame identifier to evaluate on." }
+                ],
+                "returns": [
+                    { "name": "callFrames", "type": "array", "items": { "$ref": "CallFrame" }, "description": "New stack trace." },
+                    { "name": "asyncStackTrace", "$ref": "Runtime.StackTrace", "optional": true, "description": "Async stack trace, if any." }
+                ],
+                "description": "Restarts particular call frame from the beginning."
+            },
+            {
+                "name": "getScriptSource",
+                "parameters": [
+                    { "name": "scriptId", "$ref": "Runtime.ScriptId", "description": "Id of the script to get source for." }
+                ],
+                "returns": [
+                    { "name": "scriptSource", "type": "string", "description": "Script source." }
+                ],
+                "description": "Returns source for the script with given id."
+            },
+            {
+                "name": "setPauseOnExceptions",
+                "parameters": [
+                    { "name": "state", "type": "string", "enum": ["none", "uncaught", "all"], "description": "Pause on exceptions mode." }
+                ],
+                "description": "Defines pause on exceptions state. Can be set to stop on all exceptions, uncaught exceptions or no exceptions. Initial pause on exceptions state is <code>none</code>."
+            },
+            {
+                "name": "evaluateOnCallFrame",
+                "parameters": [
+                    { "name": "callFrameId", "$ref": "CallFrameId", "description": "Call frame identifier to evaluate on." },
+                    { "name": "expression", "type": "string", "description": "Expression to evaluate." },
+                    { "name": "objectGroup", "type": "string", "optional": true, "description": "String object group name to put result into (allows rapid releasing resulting object handles using <code>releaseObjectGroup</code>)." },
+                    { "name": "includeCommandLineAPI", "type": "boolean", "optional": true, "description": "Specifies whether command line API should be available to the evaluated expression, defaults to false." },
+                    { "name": "silent", "type": "boolean", "optional": true, "description": "In silent mode exceptions thrown during evaluation are not reported and do not pause execution. Overrides <code>setPauseOnException</code> state." },
+                    { "name": "returnByValue", "type": "boolean", "optional": true, "description": "Whether the result is expected to be a JSON object that should be sent by value." },
+                    { "name": "generatePreview", "type": "boolean", "optional": true, "experimental": true, "description": "Whether preview should be generated for the result." }
+                ],
+                "returns": [
+                    { "name": "result", "$ref": "Runtime.RemoteObject", "description": "Object wrapper for the evaluation result." },
+                    { "name": "exceptionDetails", "$ref": "Runtime.ExceptionDetails", "optional": true, "description": "Exception details."}
+                ],
+                "description": "Evaluates expression on a given call frame."
+            },
+            {
+                "name": "setVariableValue",
+                "parameters": [
+                    { "name": "scopeNumber", "type": "integer", "description": "0-based number of scope as was listed in scope chain. Only 'local', 'closure' and 'catch' scope types are allowed. Other scopes could be manipulated manually." },
+                    { "name": "variableName", "type": "string", "description": "Variable name." },
+                    { "name": "newValue", "$ref": "Runtime.CallArgument", "description": "New variable value." },
+                    { "name": "callFrameId", "$ref": "CallFrameId", "description": "Id of callframe that holds variable." }
+                ],
+                "description": "Changes value of variable in a callframe. Object-based scopes are not supported and must be mutated manually."
+            },
+            {
+                "name": "setAsyncCallStackDepth",
+                "parameters": [
+                    { "name": "maxDepth", "type": "integer", "description": "Maximum depth of async call stacks. Setting to <code>0</code> will effectively disable collecting async call stacks (default)." }
+                ],
+                "description": "Enables or disables async call stacks tracking."
+            },
+            {
+                "name": "setBlackboxPatterns",
+                "parameters": [
+                    { "name": "patterns", "type": "array", "items": { "type": "string" }, "description": "Array of regexps that will be used to check script url for blackbox state." }
+                ],
+                "experimental": true,
+                "description": "Replace previous blackbox patterns with passed ones. Forces backend to skip stepping/pausing in scripts with url matching one of the patterns. VM will try to leave blackboxed script by performing 'step in' several times, finally resorting to 'step out' if unsuccessful."
+            },
+            {
+                "name": "setBlackboxedRanges",
+                "parameters": [
+                    { "name": "scriptId", "$ref": "Runtime.ScriptId", "description": "Id of the script." },
+                    { "name": "positions", "type": "array", "items": { "$ref": "ScriptPosition" } }
+                ],
+                "experimental": true,
+                "description": "Makes backend skip steps in the script in blackboxed ranges. VM will try leave blacklisted scripts by performing 'step in' several times, finally resorting to 'step out' if unsuccessful. Positions array contains positions where blackbox state is changed. First interval isn't blackboxed. Array should be sorted."
+            }
+        ],
+        "events": [
+            {
+                "name": "scriptParsed",
+                "parameters": [
+                    { "name": "scriptId", "$ref": "Runtime.ScriptId", "description": "Identifier of the script parsed." },
+                    { "name": "url", "type": "string", "description": "URL or name of the script parsed (if any)." },
+                    { "name": "startLine", "type": "integer", "description": "Line offset of the script within the resource with given URL (for script tags)." },
+                    { "name": "startColumn", "type": "integer", "description": "Column offset of the script within the resource with given URL." },
+                    { "name": "endLine", "type": "integer", "description": "Last line of the script." },
+                    { "name": "endColumn", "type": "integer", "description": "Length of the last line of the script." },
+                    { "name": "executionContextId", "$ref": "Runtime.ExecutionContextId", "description": "Specifies script creation context." },
+                    { "name": "hash", "type": "string", "description": "Content hash of the script."},
+                    { "name": "executionContextAuxData", "type": "object", "optional": true, "description": "Embedder-specific auxiliary data." },
+                    { "name": "isLiveEdit", "type": "boolean", "optional": true, "description": "True, if this script is generated as a result of the live edit operation.", "experimental": true },
+                    { "name": "sourceMapURL", "type": "string", "optional": true, "description": "URL of source map associated with script (if any)." },
+                    { "name": "hasSourceURL", "type": "boolean", "optional": true, "description": "True, if this script has sourceURL.", "experimental": true }
+                ],
+                "description": "Fired when virtual machine parses script. This event is also fired for all known and uncollected scripts upon enabling debugger."
+            },
+            {
+                "name": "scriptFailedToParse",
+                "parameters": [
+                    { "name": "scriptId", "$ref": "Runtime.ScriptId", "description": "Identifier of the script parsed." },
+                    { "name": "url", "type": "string", "description": "URL or name of the script parsed (if any)." },
+                    { "name": "startLine", "type": "integer", "description": "Line offset of the script within the resource with given URL (for script tags)." },
+                    { "name": "startColumn", "type": "integer", "description": "Column offset of the script within the resource with given URL." },
+                    { "name": "endLine", "type": "integer", "description": "Last line of the script." },
+                    { "name": "endColumn", "type": "integer", "description": "Length of the last line of the script." },
+                    { "name": "executionContextId", "$ref": "Runtime.ExecutionContextId", "description": "Specifies script creation context." },
+                    { "name": "hash", "type": "string", "description": "Content hash of the script."},
+                    { "name": "executionContextAuxData", "type": "object", "optional": true, "description": "Embedder-specific auxiliary data." },
+                    { "name": "sourceMapURL", "type": "string", "optional": true, "description": "URL of source map associated with script (if any)." },
+                    { "name": "hasSourceURL", "type": "boolean", "optional": true, "description": "True, if this script has sourceURL.", "experimental": true }
+                ],
+                "description": "Fired when virtual machine fails to parse the script."
+            },
+            {
+                "name": "breakpointResolved",
+                "parameters": [
+                    { "name": "breakpointId", "$ref": "BreakpointId", "description": "Breakpoint unique identifier." },
+                    { "name": "location", "$ref": "Location", "description": "Actual breakpoint location." }
+                ],
+                "description": "Fired when breakpoint is resolved to an actual script and location."
+            },
+            {
+                "name": "paused",
+                "parameters": [
+                    { "name": "callFrames", "type": "array", "items": { "$ref": "CallFrame" }, "description": "Call stack the virtual machine stopped on." },
+                    { "name": "reason", "type": "string", "enum": [ "XHR", "DOM", "EventListener", "exception", "assert", "debugCommand", "promiseRejection", "other" ], "description": "Pause reason.", "exported": true },
+                    { "name": "data", "type": "object", "optional": true, "description": "Object containing break-specific auxiliary properties." },
+                    { "name": "hitBreakpoints", "type": "array", "optional": true, "items": { "type": "string" }, "description": "Hit breakpoints IDs" },
+                    { "name": "asyncStackTrace", "$ref": "Runtime.StackTrace", "optional": true, "description": "Async stack trace, if any." }
+                ],
+                "description": "Fired when the virtual machine stopped on breakpoint or exception or any other stop criteria."
+            },
+            {
+                "name": "resumed",
+                "description": "Fired when the virtual machine resumed execution."
+            }
+        ]
+    },
+    {
+        "domain": "Console",
+        "description": "This domain is deprecated - use Runtime or Log instead.",
+        "dependencies": ["Runtime"],
+        "deprecated": true,
+        "types": [
+            {
+                "id": "ConsoleMessage",
+                "type": "object",
+                "description": "Console message.",
+                "properties": [
+                    { "name": "source", "type": "string", "enum": ["xml", "javascript", "network", "console-api", "storage", "appcache", "rendering", "security", "other", "deprecation", "worker"], "description": "Message source." },
+                    { "name": "level", "type": "string", "enum": ["log", "warning", "error", "debug", "info"], "description": "Message severity." },
+                    { "name": "text", "type": "string", "description": "Message text." },
+                    { "name": "url", "type": "string", "optional": true, "description": "URL of the message origin." },
+                    { "name": "line", "type": "integer", "optional": true, "description": "Line number in the resource that generated this message (1-based)." },
+                    { "name": "column", "type": "integer", "optional": true, "description": "Column number in the resource that generated this message (1-based)." }
+                ]
+            }
+        ],
+        "commands": [
+            {
+                "name": "enable",
+                "description": "Enables console domain, sends the messages collected so far to the client by means of the <code>messageAdded</code> notification."
+            },
+            {
+                "name": "disable",
+                "description": "Disables console domain, prevents further console messages from being reported to the client."
+            },
+            {
+                "name": "clearMessages",
+                "description": "Does nothing."
+            }
+        ],
+        "events": [
+            {
+                "name": "messageAdded",
+                "parameters": [
+                    { "name": "message", "$ref": "ConsoleMessage", "description": "Console message that has been added." }
+                ],
+                "description": "Issued when new console message is added."
+            }
+        ]
+    },
+    {
+        "domain": "Profiler",
+        "dependencies": ["Runtime", "Debugger"],
+        "types": [
+            {
+                "id": "ProfileNode",
+                "type": "object",
+                "description": "Profile node. Holds callsite information, execution statistics and child nodes.",
+                "properties": [
+                    { "name": "id", "type": "integer", "description": "Unique id of the node." },
+                    { "name": "callFrame", "$ref": "Runtime.CallFrame", "description": "Function location." },
+                    { "name": "hitCount", "type": "integer", "optional": true, "experimental": true, "description": "Number of samples where this node was on top of the call stack." },
+                    { "name": "children", "type": "array", "items": { "type": "integer" }, "optional": true, "description": "Child node ids." },
+                    { "name": "deoptReason", "type": "string", "optional": true, "description": "The reason of being not optimized. The function may be deoptimized or marked as don't optimize."},
+                    { "name": "positionTicks", "type": "array", "items": { "$ref": "PositionTickInfo" }, "optional": true, "experimental": true, "description": "An array of source position ticks." }
+                ]
+            },
+            {
+                "id": "Profile",
+                "type": "object",
+                "description": "Profile.",
+                "properties": [
+                    { "name": "nodes", "type": "array", "items": { "$ref": "ProfileNode" }, "description": "The list of profile nodes. First item is the root node." },
+                    { "name": "startTime", "type": "number", "description": "Profiling start timestamp in microseconds." },
+                    { "name": "endTime", "type": "number", "description": "Profiling end timestamp in microseconds." },
+                    { "name": "samples", "optional": true, "type": "array", "items": { "type": "integer" }, "description": "Ids of samples top nodes." },
+                    { "name": "timeDeltas", "optional": true, "type": "array", "items": { "type": "integer" }, "description": "Time intervals between adjacent samples in microseconds. The first delta is relative to the profile startTime." }
+                ]
+            },
+            {
+                "id": "PositionTickInfo",
+                "type": "object",
+                "experimental": true,
+                "description": "Specifies a number of samples attributed to a certain source position.",
+                "properties": [
+                    { "name": "line", "type": "integer", "description": "Source line number (1-based)." },
+                    { "name": "ticks", "type": "integer", "description": "Number of samples attributed to the source line." }
+                ]
+            }
+        ],
+        "commands": [
+            {
+                "name": "enable"
+            },
+            {
+                "name": "disable"
+            },
+            {
+                "name": "setSamplingInterval",
+                "parameters": [
+                    { "name": "interval", "type": "integer", "description": "New sampling interval in microseconds." }
+                ],
+                "description": "Changes CPU profiler sampling interval. Must be called before CPU profiles recording started."
+            },
+            {
+                "name": "start"
+            },
+            {
+                "name": "stop",
+                "returns": [
+                    { "name": "profile", "$ref": "Profile", "description": "Recorded profile." }
+                ]
+            }
+        ],
+        "events": [
+            {
+                "name": "consoleProfileStarted",
+                "parameters": [
+                    { "name": "id", "type": "string" },
+                    { "name": "location", "$ref": "Debugger.Location", "description": "Location of console.profile()." },
+                    { "name": "title", "type": "string", "optional": true, "description": "Profile title passed as an argument to console.profile()." }
+                ],
+                "description": "Sent when new profile recording is started using console.profile() call."
+            },
+            {
+                "name": "consoleProfileFinished",
+                "parameters": [
+                    { "name": "id", "type": "string" },
+                    { "name": "location", "$ref": "Debugger.Location", "description": "Location of console.profileEnd()." },
+                    { "name": "profile", "$ref": "Profile" },
+                    { "name": "title", "type": "string", "optional": true, "description": "Profile title passed as an argument to console.profile()." }
+                ]
+            }
+        ]
+    },
+    {
+        "domain": "HeapProfiler",
+        "dependencies": ["Runtime"],
+        "experimental": true,
+        "types": [
+            {
+                "id": "HeapSnapshotObjectId",
+                "type": "string",
+                "description": "Heap snapshot object id."
+            },
+            {
+                "id": "SamplingHeapProfileNode",
+                "type": "object",
+                "description": "Sampling Heap Profile node. Holds callsite information, allocation statistics and child nodes.",
+                "properties": [
+                    { "name": "callFrame", "$ref": "Runtime.CallFrame", "description": "Function location." },
+                    { "name": "selfSize", "type": "number", "description": "Allocations size in bytes for the node excluding children." },
+                    { "name": "children", "type": "array", "items": { "$ref": "SamplingHeapProfileNode" }, "description": "Child nodes." }
+                ]
+            },
+            {
+                "id": "SamplingHeapProfile",
+                "type": "object",
+                "description": "Profile.",
+                "properties": [
+                    { "name": "head", "$ref": "SamplingHeapProfileNode" }
+                ]
+            }
+        ],
+        "commands": [
+            {
+                "name": "enable"
+            },
+            {
+                "name": "disable"
+            },
+            {
+                "name": "startTrackingHeapObjects",
+                "parameters": [
+                    { "name": "trackAllocations", "type": "boolean", "optional": true }
+                ]
+            },
+            {
+                "name": "stopTrackingHeapObjects",
+                "parameters": [
+                    { "name": "reportProgress", "type": "boolean", "optional": true, "description": "If true 'reportHeapSnapshotProgress' events will be generated while snapshot is being taken when the tracking is stopped." }
+                ]
+            },
+            {
+                "name": "takeHeapSnapshot",
+                "parameters": [
+                    { "name": "reportProgress", "type": "boolean", "optional": true, "description": "If true 'reportHeapSnapshotProgress' events will be generated while snapshot is being taken." }
+                ]
+            },
+            {
+                "name": "collectGarbage"
+            },
+            {
+                "name": "getObjectByHeapObjectId",
+                "parameters": [
+                    { "name": "objectId", "$ref": "HeapSnapshotObjectId" },
+                    { "name": "objectGroup", "type": "string", "optional": true, "description": "Symbolic group name that can be used to release multiple objects." }
+                ],
+                "returns": [
+                    { "name": "result", "$ref": "Runtime.RemoteObject", "description": "Evaluation result." }
+                ]
+            },
+            {
+                "name": "addInspectedHeapObject",
+                "parameters": [
+                    { "name": "heapObjectId", "$ref": "HeapSnapshotObjectId", "description": "Heap snapshot object id to be accessible by means of $x command line API." }
+                ],
+                "description": "Enables console to refer to the node with given id via $x (see Command Line API for more details $x functions)."
+            },
+            {
+                "name": "getHeapObjectId",
+                "parameters": [
+                    { "name": "objectId", "$ref": "Runtime.RemoteObjectId", "description": "Identifier of the object to get heap object id for." }
+                ],
+                "returns": [
+                    { "name": "heapSnapshotObjectId", "$ref": "HeapSnapshotObjectId", "description": "Id of the heap snapshot object corresponding to the passed remote object id." }
+                ]
+            },
+            {
+                "name": "startSampling",
+                "parameters": [
+                    { "name": "samplingInterval", "type": "number", "optional": true, "description": "Average sample interval in bytes. Poisson distribution is used for the intervals. The default value is 32768 bytes." }
+                ]
+            },
+            {
+                "name": "stopSampling",
+                "returns": [
+                    { "name": "profile", "$ref": "SamplingHeapProfile", "description": "Recorded sampling heap profile." }
+                ]
+            }
+        ],
+        "events": [
+            {
+                "name": "addHeapSnapshotChunk",
+                "parameters": [
+                    { "name": "chunk", "type": "string" }
+                ]
+            },
+            {
+                "name": "resetProfiles"
+            },
+            {
+                "name": "reportHeapSnapshotProgress",
+                "parameters": [
+                    { "name": "done", "type": "integer" },
+                    { "name": "total", "type": "integer" },
+                    { "name": "finished", "type": "boolean", "optional": true }
+                ]
+            },
+            {
+                "name": "lastSeenObjectId",
+                "description": "If heap objects tracking has been started then backend regularly sends a current value for last seen object id and corresponding timestamp. If there were changes in the heap since last event then one or more heapStatsUpdate events will be sent before a new lastSeenObjectId event.",
+                "parameters": [
+                    { "name": "lastSeenObjectId", "type": "integer" },
+                    { "name": "timestamp", "type": "number" }
+                ]
+            },
+            {
+                "name": "heapStatsUpdate",
+                "description": "If heap objects tracking has been started then backend may send an update for one or more fragments.",
+                "parameters": [
+                    { "name": "statsUpdate", "type": "array", "items": { "type": "integer" }, "description": "An array of triplets. Each triplet describes a fragment. The first integer is the fragment index, the second integer is a total count of objects for the fragment, the third integer is a total size of the objects for the fragment."}
+                ]
+            }
+        ]
+    }]
+}
diff --git a/src/inspector/js_protocol.json b/src/inspector/js_protocol.json
index 314cb5f..aff6806 100644
--- a/src/inspector/js_protocol.json
+++ b/src/inspector/js_protocol.json
@@ -1,6 +1,33 @@
 {
-    "version": { "major": "1", "minor": "1" },
-    "domains": [{
+    "version": { "major": "1", "minor": "2" },
+    "domains": [
+    {
+        "domain": "Schema",
+        "description": "Provides information about the protocol schema.",
+        "types": [
+            {
+                "id": "Domain",
+                "type": "object",
+                "description": "Description of the protocol domain.",
+                "exported": true,
+                "properties": [
+                    { "name": "name", "type": "string", "description": "Domain name." },
+                    { "name": "version", "type": "string", "description": "Domain version." }
+                ]
+            }
+        ],
+        "commands": [
+            {
+                "name": "getDomains",
+                "description": "Returns supported domains.",
+                "handlers": ["browser", "renderer"],
+                "returns": [
+                    { "name": "domains", "type": "array", "items": { "$ref": "Domain" }, "description": "List of supported domains." }
+                ]
+            }
+        ]
+    },
+    {
         "domain": "Runtime",
         "description": "Runtime domain exposes JavaScript runtime by means of remote evaluation and mirror objects. Evaluation results are returned as mirror object that expose object type, string representation and unique identifier that can be used for further object reference. Original objects are maintained in memory unless they are either explicitly released or are released along with the other objects in their object group.",
         "types": [
@@ -15,25 +42,32 @@
                 "description": "Unique object identifier."
             },
             {
+                "id": "UnserializableValue",
+                "type": "string",
+                "enum": ["Infinity", "NaN", "-Infinity", "-0"],
+                "description": "Primitive value which cannot be JSON-stringified."
+            },
+            {
                 "id": "RemoteObject",
                 "type": "object",
                 "description": "Mirror object referencing original JavaScript object.",
                 "exported": true,
                 "properties": [
                     { "name": "type", "type": "string", "enum": ["object", "function", "undefined", "string", "number", "boolean", "symbol"], "description": "Object type." },
-                    { "name": "subtype", "type": "string", "optional": true, "enum": ["array", "null", "node", "regexp", "date", "map", "set", "iterator", "generator", "error"], "description": "Object subtype hint. Specified for <code>object</code> type values only." },
+                    { "name": "subtype", "type": "string", "optional": true, "enum": ["array", "null", "node", "regexp", "date", "map", "set", "iterator", "generator", "error", "proxy", "promise", "typedarray"], "description": "Object subtype hint. Specified for <code>object</code> type values only." },
                     { "name": "className", "type": "string", "optional": true, "description": "Object class (constructor) name. Specified for <code>object</code> type values only." },
-                    { "name": "value", "type": "any", "optional": true, "description": "Remote object value in case of primitive values or JSON values (if it was requested), or description string if the value can not be JSON-stringified (like NaN, Infinity, -Infinity, -0)." },
+                    { "name": "value", "type": "any", "optional": true, "description": "Remote object value in case of primitive values or JSON values (if it was requested)." },
+                    { "name": "unserializableValue", "$ref": "UnserializableValue", "optional": true, "description": "Primitive value which can not be JSON-stringified does not have <code>value</code>, but gets this property." },
                     { "name": "description", "type": "string", "optional": true, "description": "String representation of the object." },
                     { "name": "objectId", "$ref": "RemoteObjectId", "optional": true, "description": "Unique object identifier (for non-primitive values)." },
-                    { "name": "preview", "$ref": "ObjectPreview", "optional": true, "description": "Preview containing abbreviated property values. Specified for <code>object</code> type values only.", "hidden": true },
-                    { "name": "customPreview", "$ref": "CustomPreview", "optional": true, "hidden": true}
+                    { "name": "preview", "$ref": "ObjectPreview", "optional": true, "description": "Preview containing abbreviated property values. Specified for <code>object</code> type values only.", "experimental": true },
+                    { "name": "customPreview", "$ref": "CustomPreview", "optional": true, "experimental": true}
                 ]
             },
             {
                 "id": "CustomPreview",
                 "type": "object",
-                "hidden": true,
+                "experimental": true,
                 "properties": [
                     { "name": "header", "type": "string"},
                     { "name": "hasBody", "type": "boolean"},
@@ -45,7 +79,7 @@
             {
                 "id": "ObjectPreview",
                 "type": "object",
-                "hidden": true,
+                "experimental": true,
                 "description": "Object containing abbreviated remote object value.",
                 "properties": [
                     { "name": "type", "type": "string", "enum": ["object", "function", "undefined", "string", "number", "boolean", "symbol"], "description": "Object type." },
@@ -59,7 +93,7 @@
             {
                 "id": "PropertyPreview",
                 "type": "object",
-                "hidden": true,
+                "experimental": true,
                 "properties": [
                     { "name": "name", "type": "string", "description": "Property name." },
                     { "name": "type", "type": "string", "enum": ["object", "function", "undefined", "string", "number", "boolean", "symbol", "accessor"], "description": "Object type. Accessor means that the property itself is an accessor property." },
@@ -71,7 +105,7 @@
             {
                 "id": "EntryPreview",
                 "type": "object",
-                "hidden": true,
+                "experimental": true,
                 "properties": [
                     { "name": "key", "$ref": "ObjectPreview", "optional": true, "description": "Preview of the key. Specified for map-like collection entries." },
                     { "name": "value", "$ref": "ObjectPreview", "description": "Preview of the value." }
@@ -90,8 +124,8 @@
                     { "name": "configurable", "type": "boolean", "description": "True if the type of this property descriptor may be changed and if the property may be deleted from the corresponding object." },
                     { "name": "enumerable", "type": "boolean", "description": "True if this property shows up during enumeration of the properties on the corresponding object." },
                     { "name": "wasThrown", "type": "boolean", "optional": true, "description": "True if the result was thrown during the evaluation." },
-                    { "name": "isOwn", "optional": true, "type": "boolean", "description": "True if the property is owned for the object.", "hidden": true },
-                    { "name": "symbol", "$ref": "RemoteObject", "optional": true, "description": "Property symbol object, if the property is of the <code>symbol</code> type.", "hidden": true }
+                    { "name": "isOwn", "optional": true, "type": "boolean", "description": "True if the property is owned for the object." },
+                    { "name": "symbol", "$ref": "RemoteObject", "optional": true, "description": "Property symbol object, if the property is of the <code>symbol</code> type." }
                 ]
             },
             {
@@ -101,17 +135,16 @@
                 "properties": [
                     { "name": "name", "type": "string", "description": "Conventional property name." },
                     { "name": "value", "$ref": "RemoteObject", "optional": true, "description": "The value associated with the property." }
-                ],
-                "hidden": true
+                ]
             },
             {
                 "id": "CallArgument",
                 "type": "object",
-                "description": "Represents function call argument. Either remote object id <code>objectId</code> or primitive <code>value</code> or neither of (for undefined) them should be specified.",
+                "description": "Represents function call argument. Either remote object id <code>objectId</code>, primitive <code>value</code>, unserializable primitive value or neither of (for undefined) them should be specified.",
                 "properties": [
-                    { "name": "value", "type": "any", "optional": true, "description": "Primitive value, or description string if the value can not be JSON-stringified (like NaN, Infinity, -Infinity, -0)." },
-                    { "name": "objectId", "$ref": "RemoteObjectId", "optional": true, "description": "Remote object handle." },
-                    { "name": "type", "optional": true, "hidden": true, "type": "string", "enum": ["object", "function", "undefined", "string", "number", "boolean", "symbol"], "description": "Object type." }
+                    { "name": "value", "type": "any", "optional": true, "description": "Primitive value." },
+                    { "name": "unserializableValue", "$ref": "UnserializableValue", "optional": true, "description": "Primitive value which can not be JSON-stringified." },
+                    { "name": "objectId", "$ref": "RemoteObjectId", "optional": true, "description": "Remote object handle." }
                 ]
             },
             {
@@ -125,31 +158,31 @@
                 "description": "Description of an isolated world.",
                 "properties": [
                     { "name": "id", "$ref": "ExecutionContextId", "description": "Unique id of the execution context. It can be used to specify in which execution context script evaluation should be performed." },
-                    { "name": "isDefault", "type": "boolean", "description": "Whether context is the default page context (as opposite to e.g. context of content script).", "hidden": true },
-                    { "name": "origin", "type": "string", "description": "Execution context origin.", "hidden": true},
-                    { "name": "name", "type": "string", "description": "Human readable name describing given context.", "hidden": true},
-                    { "name": "frameId", "type": "string", "description": "Id of the owning frame. May be an empty string if the context is not associated with a frame." }
+                    { "name": "origin", "type": "string", "description": "Execution context origin." },
+                    { "name": "name", "type": "string", "description": "Human readable name describing given context." },
+                    { "name": "auxData", "type": "object", "optional": true, "description": "Embedder-specific auxiliary data." }
                 ]
             },
             {
                 "id": "ExceptionDetails",
                 "type": "object",
-                "hidden": true,
                 "description": "Detailed information about exception (or error) that was thrown during script compilation or execution.",
                 "properties": [
-                    { "name": "text", "type": "string", "description": "Exception text." },
-                    { "name": "scriptId", "$ref": "ScriptId", "description": "Script ID of the exception location." },
+                    { "name": "exceptionId", "type": "integer", "description": "Exception id." },
+                    { "name": "text", "type": "string", "description": "Exception text, which should be used together with exception object when available." },
                     { "name": "lineNumber", "type": "integer", "description": "Line number of the exception location (0-based)." },
                     { "name": "columnNumber", "type": "integer", "description": "Column number of the exception location (0-based)." },
+                    { "name": "scriptId", "$ref": "ScriptId", "optional": true, "description": "Script ID of the exception location." },
                     { "name": "url", "type": "string", "optional": true, "description": "URL of the exception location, to be used when the script was not reported." },
-                    { "name": "stackTrace", "$ref": "StackTrace", "optional": true, "description": "JavaScript stack trace if available." }
+                    { "name": "stackTrace", "$ref": "StackTrace", "optional": true, "description": "JavaScript stack trace if available." },
+                    { "name": "exception", "$ref": "RemoteObject", "optional": true, "description": "Exception object if available." },
+                    { "name": "executionContextId", "$ref": "ExecutionContextId", "optional": true, "description": "Identifier of the context where exception happened." }
                 ]
             },
             {
                 "id": "Timestamp",
                 "type": "number",
-                "description": "Number of milliseconds since epoch.",
-                "hidden": true
+                "description": "Number of milliseconds since epoch."
             },
             {
                 "id": "CallFrame",
@@ -171,7 +204,7 @@
                 "properties": [
                     { "name": "description", "type": "string", "optional": true, "description": "String label of this stack trace. For async traces this may be a name of the function that initiated the async call." },
                     { "name": "callFrames", "type": "array", "items": { "$ref": "CallFrame" }, "description": "JavaScript function name." },
-                    { "name": "parent", "$ref": "StackTrace", "optional": true, "hidden": true, "description": "Asynchronous JavaScript stack trace that preceded this stack, if available." }
+                    { "name": "parent", "$ref": "StackTrace", "optional": true, "description": "Asynchronous JavaScript stack trace that preceded this stack, if available." }
                 ]
             }
         ],
@@ -182,24 +215,22 @@
                 "parameters": [
                     { "name": "expression", "type": "string", "description": "Expression to evaluate." },
                     { "name": "objectGroup", "type": "string", "optional": true, "description": "Symbolic group name that can be used to release multiple objects." },
-                    { "name": "includeCommandLineAPI", "type": "boolean", "optional": true, "description": "Determines whether Command Line API should be available during the evaluation.", "hidden": true },
-                    { "name": "doNotPauseOnExceptionsAndMuteConsole", "type": "boolean", "optional": true, "description": "Specifies whether evaluation should stop on exceptions and mute console. Overrides setPauseOnException state.", "hidden": true },
-                    { "name": "contextId", "$ref": "ExecutionContextId", "optional": true, "description": "Specifies in which isolated context to perform evaluation. Each content script lives in an isolated context and this parameter may be used to specify one of those contexts. If the parameter is omitted or 0 the evaluation will be performed in the context of the inspected page." },
+                    { "name": "includeCommandLineAPI", "type": "boolean", "optional": true, "description": "Determines whether Command Line API should be available during the evaluation." },
+                    { "name": "silent", "type": "boolean", "optional": true, "description": "In silent mode exceptions thrown during evaluation are not reported and do not pause execution. Overrides <code>setPauseOnException</code> state." },
+                    { "name": "contextId", "$ref": "ExecutionContextId", "optional": true, "description": "Specifies in which execution context to perform evaluation. If the parameter is omitted the evaluation will be performed in the context of the inspected page." },
                     { "name": "returnByValue", "type": "boolean", "optional": true, "description": "Whether the result is expected to be a JSON object that should be sent by value." },
-                    { "name": "generatePreview", "type": "boolean", "optional": true, "hidden": true, "description": "Whether preview should be generated for the result." },
-                    { "name": "userGesture", "type": "boolean", "optional": true, "hidden": true, "description": "Whether execution should be treated as initiated by user in the UI." },
-                    { "name": "awaitPromise", "type": "boolean", "optional":true, "hidden": true, "description": "Whether execution should wait for promise to be resolved. If the result of evaluation is not a Promise, it's considered to be an error." }
+                    { "name": "generatePreview", "type": "boolean", "optional": true, "experimental": true, "description": "Whether preview should be generated for the result." },
+                    { "name": "userGesture", "type": "boolean", "optional": true, "experimental": true, "description": "Whether execution should be treated as initiated by user in the UI." },
+                    { "name": "awaitPromise", "type": "boolean", "optional":true, "description": "Whether execution should wait for promise to be resolved. If the result of evaluation is not a Promise, it's considered to be an error." }
                 ],
                 "returns": [
                     { "name": "result", "$ref": "RemoteObject", "description": "Evaluation result." },
-                    { "name": "wasThrown", "type": "boolean", "optional": true, "description": "True if the result was thrown during the evaluation." },
-                    { "name": "exceptionDetails", "$ref": "ExceptionDetails", "optional": true, "hidden": true, "description": "Exception details."}
+                    { "name": "exceptionDetails", "$ref": "ExceptionDetails", "optional": true, "description": "Exception details."}
                 ],
                 "description": "Evaluates expression on global object."
             },
             {
                 "name": "awaitPromise",
-                "hidden": true,
                 "async": true,
                 "parameters": [
                     { "name": "promiseObjectId", "$ref": "RemoteObjectId", "description": "Identifier of the promise." },
@@ -208,25 +239,26 @@
                 ],
                 "returns": [
                     { "name": "result", "$ref": "RemoteObject", "description": "Promise result. Will contain rejected value if promise was rejected." },
-                    { "name": "wasThrown", "type": "boolean", "optional": true, "description": "True if the promise was rejected." },
                     { "name": "exceptionDetails", "$ref": "ExceptionDetails", "optional": true, "description": "Exception details if stack strace is available."}
                 ],
                 "description": "Add handler to promise with given promise object id."
             },
             {
                 "name": "callFunctionOn",
+                "async": true,
                 "parameters": [
                     { "name": "objectId", "$ref": "RemoteObjectId", "description": "Identifier of the object to call function on." },
                     { "name": "functionDeclaration", "type": "string", "description": "Declaration of the function to call." },
                     { "name": "arguments", "type": "array", "items": { "$ref": "CallArgument", "description": "Call argument." }, "optional": true, "description": "Call arguments. All call arguments must belong to the same JavaScript world as the target object." },
-                    { "name": "doNotPauseOnExceptionsAndMuteConsole", "type": "boolean", "optional": true, "description": "Specifies whether function call should stop on exceptions and mute console. Overrides setPauseOnException state.", "hidden": true },
+                    { "name": "silent", "type": "boolean", "optional": true, "description": "In silent mode exceptions thrown during evaluation are not reported and do not pause execution. Overrides <code>setPauseOnException</code> state." },
                     { "name": "returnByValue", "type": "boolean", "optional": true, "description": "Whether the result is expected to be a JSON object which should be sent by value." },
-                    { "name": "generatePreview", "type": "boolean", "optional": true, "hidden": true, "description": "Whether preview should be generated for the result." },
-                    { "name": "userGesture", "type": "boolean", "optional": true, "hidden": true, "description": "Whether execution should be treated as initiated by user in the UI." }
+                    { "name": "generatePreview", "type": "boolean", "optional": true, "experimental": true, "description": "Whether preview should be generated for the result." },
+                    { "name": "userGesture", "type": "boolean", "optional": true, "experimental": true, "description": "Whether execution should be treated as initiated by user in the UI." },
+                    { "name": "awaitPromise", "type": "boolean", "optional": true, "description": "Whether execution should wait for promise to be resolved. If the result of evaluation is not a Promise, it's considered to be an error." }
                 ],
                 "returns": [
                     { "name": "result", "$ref": "RemoteObject", "description": "Call result." },
-                    { "name": "wasThrown", "type": "boolean", "optional": true, "description": "True if the result was thrown during the evaluation." }
+                    { "name": "exceptionDetails", "$ref": "ExceptionDetails", "optional": true, "description": "Exception details."}
                 ],
                 "description": "Calls function with given declaration on the given object. Object group of the result is inherited from the target object."
             },
@@ -235,13 +267,13 @@
                 "parameters": [
                     { "name": "objectId", "$ref": "RemoteObjectId", "description": "Identifier of the object to return properties for." },
                     { "name": "ownProperties", "optional": true, "type": "boolean", "description": "If true, returns properties belonging only to the element itself, not to its prototype chain." },
-                    { "name": "accessorPropertiesOnly", "optional": true, "type": "boolean", "description": "If true, returns accessor properties (with getter/setter) only; internal properties are not returned either.", "hidden": true },
-                    { "name": "generatePreview", "type": "boolean", "optional": true, "hidden": true, "description": "Whether preview should be generated for the results." }
+                    { "name": "accessorPropertiesOnly", "optional": true, "type": "boolean", "description": "If true, returns accessor properties (with getter/setter) only; internal properties are not returned either.", "experimental": true },
+                    { "name": "generatePreview", "type": "boolean", "optional": true, "experimental": true, "description": "Whether preview should be generated for the results." }
                 ],
                 "returns": [
                     { "name": "result", "type": "array", "items": { "$ref": "PropertyDescriptor" }, "description": "Object properties." },
-                    { "name": "internalProperties", "optional": true, "type": "array", "items": { "$ref": "InternalPropertyDescriptor" }, "description": "Internal object properties (only of the element itself).", "hidden": true },
-                    { "name": "exceptionDetails", "$ref": "ExceptionDetails", "optional": true, "hidden": true, "description": "Exception details."}
+                    { "name": "internalProperties", "optional": true, "type": "array", "items": { "$ref": "InternalPropertyDescriptor" }, "description": "Internal object properties (only of the element itself)." },
+                    { "name": "exceptionDetails", "$ref": "ExceptionDetails", "optional": true, "description": "Exception details."}
                 ],
                 "description": "Returns properties of a given object. Object group of the result is inherited from the target object."
             },
@@ -260,9 +292,8 @@
                 "description": "Releases all remote objects that belong to a given group."
             },
             {
-                "name": "run",
-                "hidden": true,
-                "description": "Tells inspected instance(worker or page) that it can run in case it was started paused."
+                "name": "runIfWaitingForDebugger",
+                "description": "Tells inspected instance to run if it was waiting for debugger to attach."
             },
             {
                 "name": "enable",
@@ -270,12 +301,10 @@
             },
             {
                 "name": "disable",
-                "hidden": true,
                 "description": "Disables reporting of execution contexts creation."
             },
             {
                 "name": "discardConsoleEntries",
-                "hidden": true,
                 "description": "Discards collected exceptions and console API calls."
             },
             {
@@ -286,16 +315,15 @@
                         "type": "boolean"
                     }
                 ],
-                "hidden": true
+                "experimental": true
             },
             {
                 "name": "compileScript",
-                "hidden": true,
                 "parameters": [
                     { "name": "expression", "type": "string", "description": "Expression to compile." },
                     { "name": "sourceURL", "type": "string", "description": "Source url to be set for the script." },
                     { "name": "persistScript", "type": "boolean", "description": "Specifies whether the compiled script should be persisted." },
-                    { "name": "executionContextId", "$ref": "ExecutionContextId", "description": "Specifies in which isolated context to perform script run. Each content script lives in an isolated context and this parameter is used to specify one of those contexts." }
+                    { "name": "executionContextId", "$ref": "ExecutionContextId", "optional": true, "description": "Specifies in which execution context to perform script run. If the parameter is omitted the evaluation will be performed in the context of the inspected page." }
                 ],
                 "returns": [
                     { "name": "scriptId", "$ref": "ScriptId", "optional": true, "description": "Id of the script." },
@@ -305,13 +333,16 @@
             },
             {
                 "name": "runScript",
-                "hidden": true,
+                "async": true,
                 "parameters": [
                     { "name": "scriptId", "$ref": "ScriptId", "description": "Id of the script to run." },
-                    { "name": "executionContextId", "$ref": "ExecutionContextId", "description": "Specifies in which isolated context to perform script run. Each content script lives in an isolated context and this parameter is used to specify one of those contexts." },
+                    { "name": "executionContextId", "$ref": "ExecutionContextId", "optional": true, "description": "Specifies in which execution context to perform script run. If the parameter is omitted the evaluation will be performed in the context of the inspected page." },
                     { "name": "objectGroup", "type": "string", "optional": true, "description": "Symbolic group name that can be used to release multiple objects." },
-                    { "name": "doNotPauseOnExceptionsAndMuteConsole", "type": "boolean", "optional": true, "description": "Specifies whether script run should stop on exceptions and mute console. Overrides setPauseOnException state." },
-                    { "name": "includeCommandLineAPI", "type": "boolean", "optional": true, "description": "Determines whether Command Line API should be available during the evaluation." }
+                    { "name": "silent", "type": "boolean", "optional": true, "description": "In silent mode exceptions thrown during evaluation are not reported and do not pause execution. Overrides <code>setPauseOnException</code> state." },
+                    { "name": "includeCommandLineAPI", "type": "boolean", "optional": true, "description": "Determines whether Command Line API should be available during the evaluation." },
+                    { "name": "returnByValue", "type": "boolean", "optional": true, "description": "Whether the result is expected to be a JSON object which should be sent by value." },
+                    { "name": "generatePreview", "type": "boolean", "optional": true, "description": "Whether preview should be generated for the result." },
+                    { "name": "awaitPromise", "type": "boolean", "optional": true, "description": "Whether execution should wait for promise to be resolved. If the result of evaluation is not a Promise, it's considered to be an error." }
                 ],
                 "returns": [
                     { "name": "result", "$ref": "RemoteObject", "description": "Run result." },
@@ -343,22 +374,17 @@
                 "name": "exceptionThrown",
                 "description": "Issued when exception was thrown and unhandled.",
                 "parameters": [
-                    { "name": "exceptionId", "type": "integer", "description": "Exception id." },
                     { "name": "timestamp", "$ref": "Timestamp", "description": "Timestamp of the exception." },
-                    { "name": "details", "$ref": "ExceptionDetails" },
-                    { "name": "exception", "$ref": "RemoteObject", "optional": true, "description": "Exception object." },
-                    { "name": "executionContextId", "$ref": "ExecutionContextId", "optional": true, "description": "Identifier of the context where exception happened." }
-                ],
-                "hidden": true
+                    { "name": "exceptionDetails", "$ref": "ExceptionDetails" }
+                ]
             },
             {
                 "name": "exceptionRevoked",
                 "description": "Issued when unhandled exception was revoked.",
                 "parameters": [
-                    { "name": "message", "type": "string", "description": "Message describing why exception was revoked." },
+                    { "name": "reason", "type": "string", "description": "Reason describing why exception was revoked." },
                     { "name": "exceptionId", "type": "integer", "description": "The id of revoked exception, as reported in <code>exceptionUnhandled</code>." }
-                ],
-                "hidden": true
+                ]
             },
             {
                 "name": "consoleAPICalled",
@@ -369,16 +395,15 @@
                     { "name": "executionContextId", "$ref": "ExecutionContextId", "description": "Identifier of the context where the call was made." },
                     { "name": "timestamp", "$ref": "Timestamp", "description": "Call timestamp." },
                     { "name": "stackTrace", "$ref": "StackTrace", "optional": true, "description": "Stack trace captured when the call was made." }
-                ],
-                "hidden": true
+                ]
             },
             {
                 "name": "inspectRequested",
+                "description": "Issued when object should be inspected (for example, as a result of inspect() command line API call).",
                 "parameters": [
                     { "name": "object", "$ref": "RemoteObject" },
                     { "name": "hints", "type": "object" }
-                ],
-                "hidden": true
+                ]
             }
         ]
     },
@@ -409,7 +434,7 @@
             },
             {
                 "id": "ScriptPosition",
-                "hidden": true,
+                "experimental": true,
                 "type": "object",
                 "properties": [
                     { "name": "lineNumber", "type": "integer" },
@@ -423,11 +448,11 @@
                 "properties": [
                     { "name": "callFrameId", "$ref": "CallFrameId", "description": "Call frame identifier. This identifier is only valid while the virtual machine is paused." },
                     { "name": "functionName", "type": "string", "description": "Name of the JavaScript function called on this call frame." },
-                    { "name": "functionLocation", "$ref": "Location", "optional": true, "hidden": true, "description": "Location in the source code." },
+                    { "name": "functionLocation", "$ref": "Location", "optional": true, "experimental": true, "description": "Location in the source code." },
                     { "name": "location", "$ref": "Location", "description": "Location in the source code." },
                     { "name": "scopeChain", "type": "array", "items": { "$ref": "Scope" }, "description": "Scope chain for this call frame." },
                     { "name": "this", "$ref": "Runtime.RemoteObject", "description": "<code>this</code> object for this call frame." },
-                    { "name": "returnValue", "$ref": "Runtime.RemoteObject", "optional": true, "hidden": true, "description": "The value being returned, if the function is at return point." }
+                    { "name": "returnValue", "$ref": "Runtime.RemoteObject", "optional": true, "description": "The value being returned, if the function is at return point." }
                 ],
                 "description": "JavaScript call frame. Array of call frames form the call stack."
             },
@@ -437,9 +462,9 @@
                 "properties": [
                     { "name": "type", "type": "string", "enum": ["global", "local", "with", "closure", "catch", "block", "script"], "description": "Scope type." },
                     { "name": "object", "$ref": "Runtime.RemoteObject", "description": "Object representing the scope. For <code>global</code> and <code>with</code> scopes it represents the actual object; for the rest of the scopes, it is artificial transient object enumerating scope variables as its properties." },
-                    { "name": "name", "type": "string", "optional": true, "hidden": true },
-                    { "name": "startLocation", "$ref": "Location", "optional": true, "hidden": true, "description": "Location in the source code where scope starts" },
-                    { "name": "endLocation", "$ref": "Location", "optional": true, "hidden": true, "description": "Location in the source code where scope ends" }
+                    { "name": "name", "type": "string", "optional": true },
+                    { "name": "startLocation", "$ref": "Location", "optional": true, "description": "Location in the source code where scope starts" },
+                    { "name": "endLocation", "$ref": "Location", "optional": true, "description": "Location in the source code where scope ends" }
                 ],
                 "description": "Scope description."
             },
@@ -452,7 +477,7 @@
                     { "name": "lineNumber", "type": "number", "description": "Line number in resource content." },
                     { "name": "lineContent", "type": "string", "description": "Line with match content." }
                 ],
-                "hidden": true
+                "experimental": true
             }
         ],
         "commands": [
@@ -473,9 +498,8 @@
             },
             {
                 "name": "setSkipAllPauses",
-                "hidden": true,
                 "parameters": [
-                    { "name": "skipped", "type": "boolean", "description": "New value for skip pauses state." }
+                    { "name": "skip", "type": "boolean", "description": "New value for skip pauses state." }
                 ],
                 "description": "Makes page not interrupt on any pauses (breakpoint, exception, dom exception etc)."
             },
@@ -516,8 +540,7 @@
             {
                 "name": "continueToLocation",
                 "parameters": [
-                    { "name": "location", "$ref": "Location", "description": "Location to continue to." },
-                    { "name": "interstatementLocation", "type": "boolean", "optional": true, "hidden": true, "description": "Allows breakpoints at the intemediate positions inside statements." }
+                    { "name": "location", "$ref": "Location", "description": "Location to continue to." }
                 ],
                 "description": "Continues execution until specific location is reached."
             },
@@ -552,27 +575,21 @@
                 "returns": [
                     { "name": "result", "type": "array", "items": { "$ref": "SearchMatch" }, "description": "List of search matches." }
                 ],
+                "experimental": true,
                 "description": "Searches for given string in script content."
             },
             {
-                "name": "canSetScriptSource",
-                "returns": [
-                    { "name": "result", "type": "boolean", "description": "True if <code>setScriptSource</code> is supported." }
-                ],
-                "description": "Always returns true."
-            },
-            {
                 "name": "setScriptSource",
                 "parameters": [
                     { "name": "scriptId", "$ref": "Runtime.ScriptId", "description": "Id of the script to edit." },
                     { "name": "scriptSource", "type": "string", "description": "New content of the script." },
-                    { "name": "preview", "type": "boolean", "optional": true, "description": " If true the change will not actually be applied. Preview mode may be used to get result description without actually modifying the code.", "hidden": true }
+                    { "name": "dryRun", "type": "boolean", "optional": true, "description": "If true the change will not actually be applied. Dry run may be used to get result description without actually modifying the code." }
                 ],
                 "returns": [
                     { "name": "callFrames", "type": "array", "optional": true, "items": { "$ref": "CallFrame" }, "description": "New stack trace in case editing has happened while VM was stopped." },
-                    { "name": "stackChanged", "type": "boolean", "optional": true, "description": "Whether current call stack  was modified after applying the changes.", "hidden": true },
-                    { "name": "asyncStackTrace", "$ref": "Runtime.StackTrace", "optional": true, "description": "Async stack trace, if any.", "hidden": true },
-                    { "name": "compileError", "optional": true, "$ref": "Runtime.ExceptionDetails", "description": "Error data if any." }
+                    { "name": "stackChanged", "type": "boolean", "optional": true, "description": "Whether current call stack was modified after applying the changes." },
+                    { "name": "asyncStackTrace", "$ref": "Runtime.StackTrace", "optional": true, "description": "Async stack trace, if any." },
+                    { "name": "exceptionDetails", "optional": true, "$ref": "Runtime.ExceptionDetails", "description": "Exception details if any." }
                 ],
                 "description": "Edits JavaScript source live."
             },
@@ -585,7 +602,6 @@
                     { "name": "callFrames", "type": "array", "items": { "$ref": "CallFrame" }, "description": "New stack trace." },
                     { "name": "asyncStackTrace", "$ref": "Runtime.StackTrace", "optional": true, "description": "Async stack trace, if any." }
                 ],
-                "hidden": true,
                 "description": "Restarts particular call frame from the beginning."
             },
             {
@@ -611,15 +627,14 @@
                     { "name": "callFrameId", "$ref": "CallFrameId", "description": "Call frame identifier to evaluate on." },
                     { "name": "expression", "type": "string", "description": "Expression to evaluate." },
                     { "name": "objectGroup", "type": "string", "optional": true, "description": "String object group name to put result into (allows rapid releasing resulting object handles using <code>releaseObjectGroup</code>)." },
-                    { "name": "includeCommandLineAPI", "type": "boolean", "optional": true, "description": "Specifies whether command line API should be available to the evaluated expression, defaults to false.", "hidden": true },
-                    { "name": "doNotPauseOnExceptionsAndMuteConsole", "type": "boolean", "optional": true, "description": "Specifies whether evaluation should stop on exceptions and mute console. Overrides setPauseOnException state.", "hidden": true },
+                    { "name": "includeCommandLineAPI", "type": "boolean", "optional": true, "description": "Specifies whether command line API should be available to the evaluated expression, defaults to false." },
+                    { "name": "silent", "type": "boolean", "optional": true, "description": "In silent mode exceptions thrown during evaluation are not reported and do not pause execution. Overrides <code>setPauseOnException</code> state." },
                     { "name": "returnByValue", "type": "boolean", "optional": true, "description": "Whether the result is expected to be a JSON object that should be sent by value." },
-                    { "name": "generatePreview", "type": "boolean", "optional": true, "hidden": true, "description": "Whether preview should be generated for the result." }
+                    { "name": "generatePreview", "type": "boolean", "optional": true, "experimental": true, "description": "Whether preview should be generated for the result." }
                 ],
                 "returns": [
                     { "name": "result", "$ref": "Runtime.RemoteObject", "description": "Object wrapper for the evaluation result." },
-                    { "name": "wasThrown", "type": "boolean", "optional": true, "description": "True if the result was thrown during the evaluation." },
-                    { "name": "exceptionDetails", "$ref": "Runtime.ExceptionDetails", "optional": true, "hidden": true, "description": "Exception details."}
+                    { "name": "exceptionDetails", "$ref": "Runtime.ExceptionDetails", "optional": true, "description": "Exception details."}
                 ],
                 "description": "Evaluates expression on a given call frame."
             },
@@ -631,24 +646,13 @@
                     { "name": "newValue", "$ref": "Runtime.CallArgument", "description": "New variable value." },
                     { "name": "callFrameId", "$ref": "CallFrameId", "description": "Id of callframe that holds variable." }
                 ],
-                "hidden": true,
                 "description": "Changes value of variable in a callframe. Object-based scopes are not supported and must be mutated manually."
             },
             {
-                "name": "getBacktrace",
-                "returns": [
-                    { "name": "callFrames", "type": "array", "items": { "$ref": "CallFrame" }, "description": "Call stack the virtual machine stopped on." },
-                    { "name": "asyncStackTrace", "$ref": "Runtime.StackTrace", "optional": true, "description": "Async stack trace, if any." }
-                ],
-                "hidden": true,
-                "description": "Returns call stack including variables changed since VM was paused. VM must be paused."
-            },
-            {
                 "name": "setAsyncCallStackDepth",
                 "parameters": [
                     { "name": "maxDepth", "type": "integer", "description": "Maximum depth of async call stacks. Setting to <code>0</code> will effectively disable collecting async call stacks (default)." }
                 ],
-                "hidden": true,
                 "description": "Enables or disables async call stacks tracking."
             },
             {
@@ -656,7 +660,7 @@
                 "parameters": [
                     { "name": "patterns", "type": "array", "items": { "type": "string" }, "description": "Array of regexps that will be used to check script url for blackbox state." }
                 ],
-                "hidden": true,
+                "experimental": true,
                 "description": "Replace previous blackbox patterns with passed ones. Forces backend to skip stepping/pausing in scripts with url matching one of the patterns. VM will try to leave blackboxed script by performing 'step in' several times, finally resorting to 'step out' if unsuccessful."
             },
             {
@@ -665,7 +669,7 @@
                     { "name": "scriptId", "$ref": "Runtime.ScriptId", "description": "Id of the script." },
                     { "name": "positions", "type": "array", "items": { "$ref": "ScriptPosition" } }
                 ],
-                "hidden": true,
+                "experimental": true,
                 "description": "Makes backend skip steps in the script in blackboxed ranges. VM will try leave blacklisted scripts by performing 'step in' several times, finally resorting to 'step out' if unsuccessful. Positions array contains positions where blackbox state is changed. First interval isn't blackboxed. Array should be sorted."
             }
         ],
@@ -679,14 +683,12 @@
                     { "name": "startColumn", "type": "integer", "description": "Column offset of the script within the resource with given URL." },
                     { "name": "endLine", "type": "integer", "description": "Last line of the script." },
                     { "name": "endColumn", "type": "integer", "description": "Length of the last line of the script." },
-                    { "name": "executionContextId", "$ref": "Runtime.ExecutionContextId", "description": "Specifies script creation context.", "hidden": true },
-                    { "name": "hash", "type": "string", "hidden": true, "description": "Content hash of the script."},
-                    { "name": "isContentScript", "type": "boolean", "optional": true, "description": "Determines whether this script is a user extension script." },
-                    { "name": "isInternalScript", "type": "boolean", "optional": true, "description": "Determines whether this script is an internal script.", "hidden": true },
-                    { "name": "isLiveEdit", "type": "boolean", "optional": true, "description": "True, if this script is generated as a result of the live edit operation.", "hidden": true },
+                    { "name": "executionContextId", "$ref": "Runtime.ExecutionContextId", "description": "Specifies script creation context." },
+                    { "name": "hash", "type": "string", "description": "Content hash of the script."},
+                    { "name": "executionContextAuxData", "type": "object", "optional": true, "description": "Embedder-specific auxiliary data." },
+                    { "name": "isLiveEdit", "type": "boolean", "optional": true, "description": "True, if this script is generated as a result of the live edit operation.", "experimental": true },
                     { "name": "sourceMapURL", "type": "string", "optional": true, "description": "URL of source map associated with script (if any)." },
-                    { "name": "hasSourceURL", "type": "boolean", "optional": true, "description": "True, if this script has sourceURL.", "hidden": true },
-                    { "name": "deprecatedCommentWasUsed", "type": "boolean", "optional": true, "hidden": true, "description": "True, if '//@ sourceURL' or '//@ sourceMappingURL' was used."}
+                    { "name": "hasSourceURL", "type": "boolean", "optional": true, "description": "True, if this script has sourceURL.", "experimental": true }
                 ],
                 "description": "Fired when virtual machine parses script. This event is also fired for all known and uncollected scripts upon enabling debugger."
             },
@@ -699,13 +701,11 @@
                     { "name": "startColumn", "type": "integer", "description": "Column offset of the script within the resource with given URL." },
                     { "name": "endLine", "type": "integer", "description": "Last line of the script." },
                     { "name": "endColumn", "type": "integer", "description": "Length of the last line of the script." },
-                    { "name": "executionContextId", "$ref": "Runtime.ExecutionContextId", "description": "Specifies script creation context.", "hidden": true },
-                    { "name": "hash", "type": "string", "hidden": true, "description": "Content hash of the script."},
-                    { "name": "isContentScript", "type": "boolean", "optional": true, "description": "Determines whether this script is a user extension script." },
-                    { "name": "isInternalScript", "type": "boolean", "optional": true, "description": "Determines whether this script is an internal script.", "hidden": true },
+                    { "name": "executionContextId", "$ref": "Runtime.ExecutionContextId", "description": "Specifies script creation context." },
+                    { "name": "hash", "type": "string", "description": "Content hash of the script."},
+                    { "name": "executionContextAuxData", "type": "object", "optional": true, "description": "Embedder-specific auxiliary data." },
                     { "name": "sourceMapURL", "type": "string", "optional": true, "description": "URL of source map associated with script (if any)." },
-                    { "name": "hasSourceURL", "type": "boolean", "optional": true, "description": "True, if this script has sourceURL.", "hidden": true },
-                    { "name": "deprecatedCommentWasUsed", "type": "boolean", "optional": true, "hidden": true, "description": "True, if '//@ sourceURL' or '//@ sourceMappingURL' was used."}
+                    { "name": "hasSourceURL", "type": "boolean", "optional": true, "description": "True, if this script has sourceURL.", "experimental": true }
                 ],
                 "description": "Fired when virtual machine fails to parse the script."
             },
@@ -723,8 +723,8 @@
                     { "name": "callFrames", "type": "array", "items": { "$ref": "CallFrame" }, "description": "Call stack the virtual machine stopped on." },
                     { "name": "reason", "type": "string", "enum": [ "XHR", "DOM", "EventListener", "exception", "assert", "debugCommand", "promiseRejection", "other" ], "description": "Pause reason.", "exported": true },
                     { "name": "data", "type": "object", "optional": true, "description": "Object containing break-specific auxiliary properties." },
-                    { "name": "hitBreakpoints", "type": "array", "optional": true, "items": { "type": "string" }, "description": "Hit breakpoints IDs", "hidden": true },
-                    { "name": "asyncStackTrace", "$ref": "Runtime.StackTrace", "optional": true, "description": "Async stack trace, if any.", "hidden": true }
+                    { "name": "hitBreakpoints", "type": "array", "optional": true, "items": { "type": "string" }, "description": "Hit breakpoints IDs" },
+                    { "name": "asyncStackTrace", "$ref": "Runtime.StackTrace", "optional": true, "description": "Async stack trace, if any." }
                 ],
                 "description": "Fired when the virtual machine stopped on breakpoint or exception or any other stop criteria."
             },
@@ -775,56 +775,42 @@
                     { "name": "message", "$ref": "ConsoleMessage", "description": "Console message that has been added." }
                 ],
                 "description": "Issued when new console message is added."
-            },
-            {
-                "name": "messageRepeatCountUpdated",
-                "parameters": [
-                    { "name": "count", "type": "integer", "description": "New repeat count value." },
-                    { "name": "timestamp", "$ref": "Runtime.Timestamp", "description": "Timestamp of most recent message in batch.", "hidden": true }
-                ],
-                "description": "Not issued.",
-                "deprecated": true
-            },
-            {
-                "name": "messagesCleared",
-                "description": "Not issued.",
-                "deprecated": true
             }
         ]
     },
     {
         "domain": "Profiler",
         "dependencies": ["Runtime", "Debugger"],
-        "hidden": true,
         "types": [
             {
-                "id": "CPUProfileNode",
+                "id": "ProfileNode",
                 "type": "object",
-                "description": "CPU Profile node. Holds callsite information, execution statistics and child nodes.",
+                "description": "Profile node. Holds callsite information, execution statistics and child nodes.",
                 "properties": [
-                    { "name": "callFrame", "$ref": "Runtime.CallFrame", "description": "Function location." },
-                    { "name": "hitCount", "type": "integer", "description": "Number of samples where this node was on top of the call stack." },
-                    { "name": "children", "type": "array", "items": { "$ref": "CPUProfileNode" }, "description": "Child nodes." },
-                    { "name": "deoptReason", "type": "string", "description": "The reason of being not optimized. The function may be deoptimized or marked as don't optimize."},
                     { "name": "id", "type": "integer", "description": "Unique id of the node." },
-                    { "name": "positionTicks", "type": "array", "items": { "$ref": "PositionTickInfo" }, "description": "An array of source position ticks." }
+                    { "name": "callFrame", "$ref": "Runtime.CallFrame", "description": "Function location." },
+                    { "name": "hitCount", "type": "integer", "optional": true, "experimental": true, "description": "Number of samples where this node was on top of the call stack." },
+                    { "name": "children", "type": "array", "items": { "type": "integer" }, "optional": true, "description": "Child node ids." },
+                    { "name": "deoptReason", "type": "string", "optional": true, "description": "The reason of being not optimized. The function may be deoptimized or marked as don't optimize."},
+                    { "name": "positionTicks", "type": "array", "items": { "$ref": "PositionTickInfo" }, "optional": true, "experimental": true, "description": "An array of source position ticks." }
                 ]
             },
             {
-                "id": "CPUProfile",
+                "id": "Profile",
                 "type": "object",
                 "description": "Profile.",
                 "properties": [
-                    { "name": "head", "$ref": "CPUProfileNode" },
-                    { "name": "startTime", "type": "number", "description": "Profiling start time in seconds." },
-                    { "name": "endTime", "type": "number", "description": "Profiling end time in seconds." },
+                    { "name": "nodes", "type": "array", "items": { "$ref": "ProfileNode" }, "description": "The list of profile nodes. First item is the root node." },
+                    { "name": "startTime", "type": "number", "description": "Profiling start timestamp in microseconds." },
+                    { "name": "endTime", "type": "number", "description": "Profiling end timestamp in microseconds." },
                     { "name": "samples", "optional": true, "type": "array", "items": { "type": "integer" }, "description": "Ids of samples top nodes." },
-                    { "name": "timestamps", "optional": true, "type": "array", "items": { "type": "number" }, "description": "Timestamps of the samples in microseconds." }
+                    { "name": "timeDeltas", "optional": true, "type": "array", "items": { "type": "integer" }, "description": "Time intervals between adjacent samples in microseconds. The first delta is relative to the profile startTime." }
                 ]
             },
             {
                 "id": "PositionTickInfo",
                 "type": "object",
+                "experimental": true,
                 "description": "Specifies a number of samples attributed to a certain source position.",
                 "properties": [
                     { "name": "line", "type": "integer", "description": "Source line number (1-based)." },
@@ -852,7 +838,7 @@
             {
                 "name": "stop",
                 "returns": [
-                    { "name": "profile", "$ref": "CPUProfile", "description": "Recorded profile." }
+                    { "name": "profile", "$ref": "Profile", "description": "Recorded profile." }
                 ]
             }
         ],
@@ -862,7 +848,7 @@
                 "parameters": [
                     { "name": "id", "type": "string" },
                     { "name": "location", "$ref": "Debugger.Location", "description": "Location of console.profile()." },
-                    { "name": "title", "type": "string", "optional": true, "description": "Profile title passed as argument to console.profile()." }
+                    { "name": "title", "type": "string", "optional": true, "description": "Profile title passed as an argument to console.profile()." }
                 ],
                 "description": "Sent when new profile recodring is started using console.profile() call."
             },
@@ -871,8 +857,8 @@
                 "parameters": [
                     { "name": "id", "type": "string" },
                     { "name": "location", "$ref": "Debugger.Location", "description": "Location of console.profileEnd()." },
-                    { "name": "profile", "$ref": "CPUProfile" },
-                    { "name": "title", "type": "string", "optional": true, "description": "Profile title passed as argunet to console.profile()." }
+                    { "name": "profile", "$ref": "Profile" },
+                    { "name": "title", "type": "string", "optional": true, "description": "Profile title passed as an argument to console.profile()." }
                 ]
             }
         ]
@@ -880,7 +866,7 @@
     {
         "domain": "HeapProfiler",
         "dependencies": ["Runtime"],
-        "hidden": true,
+        "experimental": true,
         "types": [
             {
                 "id": "HeapSnapshotObjectId",
diff --git a/src/inspector/protocol-platform.h b/src/inspector/protocol-platform.h
new file mode 100644
index 0000000..c772393
--- /dev/null
+++ b/src/inspector/protocol-platform.h
@@ -0,0 +1,21 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_INSPECTOR_PROTOCOLPLATFORM_H_
+#define V8_INSPECTOR_PROTOCOLPLATFORM_H_
+
+#include <memory>
+
+#include "src/base/logging.h"
+
+namespace v8_inspector {
+
+template <typename T>
+std::unique_ptr<T> wrapUnique(T* ptr) {
+  return std::unique_ptr<T>(ptr);
+}
+
+}  // namespace v8_inspector
+
+#endif  // V8_INSPECTOR_PROTOCOLPLATFORM_H_
diff --git a/src/inspector/remote-object-id.cc b/src/inspector/remote-object-id.cc
new file mode 100644
index 0000000..d83020c
--- /dev/null
+++ b/src/inspector/remote-object-id.cc
@@ -0,0 +1,76 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/inspector/remote-object-id.h"
+
+#include "src/inspector/protocol/Protocol.h"
+#include "src/inspector/string-util.h"
+
+namespace v8_inspector {
+
+RemoteObjectIdBase::RemoteObjectIdBase() : m_injectedScriptId(0) {}
+
+std::unique_ptr<protocol::DictionaryValue>
+RemoteObjectIdBase::parseInjectedScriptId(const String16& objectId) {
+  std::unique_ptr<protocol::Value> parsedValue = protocol::parseJSON(objectId);
+  if (!parsedValue || parsedValue->type() != protocol::Value::TypeObject)
+    return nullptr;
+
+  std::unique_ptr<protocol::DictionaryValue> parsedObjectId(
+      protocol::DictionaryValue::cast(parsedValue.release()));
+  bool success =
+      parsedObjectId->getInteger("injectedScriptId", &m_injectedScriptId);
+  if (success) return parsedObjectId;
+  return nullptr;
+}
+
+RemoteObjectId::RemoteObjectId() : RemoteObjectIdBase(), m_id(0) {}
+
+std::unique_ptr<RemoteObjectId> RemoteObjectId::parse(
+    ErrorString* errorString, const String16& objectId) {
+  std::unique_ptr<RemoteObjectId> result(new RemoteObjectId());
+  std::unique_ptr<protocol::DictionaryValue> parsedObjectId =
+      result->parseInjectedScriptId(objectId);
+  if (!parsedObjectId) {
+    *errorString = "Invalid remote object id";
+    return nullptr;
+  }
+
+  bool success = parsedObjectId->getInteger("id", &result->m_id);
+  if (!success) {
+    *errorString = "Invalid remote object id";
+    return nullptr;
+  }
+  return result;
+}
+
+RemoteCallFrameId::RemoteCallFrameId()
+    : RemoteObjectIdBase(), m_frameOrdinal(0) {}
+
+std::unique_ptr<RemoteCallFrameId> RemoteCallFrameId::parse(
+    ErrorString* errorString, const String16& objectId) {
+  std::unique_ptr<RemoteCallFrameId> result(new RemoteCallFrameId());
+  std::unique_ptr<protocol::DictionaryValue> parsedObjectId =
+      result->parseInjectedScriptId(objectId);
+  if (!parsedObjectId) {
+    *errorString = "Invalid call frame id";
+    return nullptr;
+  }
+
+  bool success = parsedObjectId->getInteger("ordinal", &result->m_frameOrdinal);
+  if (!success) {
+    *errorString = "Invalid call frame id";
+    return nullptr;
+  }
+
+  return result;
+}
+
+String16 RemoteCallFrameId::serialize(int injectedScriptId, int frameOrdinal) {
+  return "{\"ordinal\":" + String16::fromInteger(frameOrdinal) +
+         ",\"injectedScriptId\":" + String16::fromInteger(injectedScriptId) +
+         "}";
+}
+
+}  // namespace v8_inspector
diff --git a/src/inspector/remote-object-id.h b/src/inspector/remote-object-id.h
new file mode 100644
index 0000000..a32f568
--- /dev/null
+++ b/src/inspector/remote-object-id.h
@@ -0,0 +1,58 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_INSPECTOR_REMOTEOBJECTID_H_
+#define V8_INSPECTOR_REMOTEOBJECTID_H_
+
+#include "src/inspector/protocol/Forward.h"
+
+namespace v8_inspector {
+
+using protocol::ErrorString;
+
+class RemoteObjectIdBase {
+ public:
+  int contextId() const { return m_injectedScriptId; }
+
+ protected:
+  RemoteObjectIdBase();
+  ~RemoteObjectIdBase() {}
+
+  std::unique_ptr<protocol::DictionaryValue> parseInjectedScriptId(
+      const String16&);
+
+  int m_injectedScriptId;
+};
+
+class RemoteObjectId final : public RemoteObjectIdBase {
+ public:
+  static std::unique_ptr<RemoteObjectId> parse(ErrorString*, const String16&);
+  ~RemoteObjectId() {}
+  int id() const { return m_id; }
+
+ private:
+  RemoteObjectId();
+
+  int m_id;
+};
+
+class RemoteCallFrameId final : public RemoteObjectIdBase {
+ public:
+  static std::unique_ptr<RemoteCallFrameId> parse(ErrorString*,
+                                                  const String16&);
+  ~RemoteCallFrameId() {}
+
+  int frameOrdinal() const { return m_frameOrdinal; }
+
+  static String16 serialize(int injectedScriptId, int frameOrdinal);
+
+ private:
+  RemoteCallFrameId();
+
+  int m_frameOrdinal;
+};
+
+}  // namespace v8_inspector
+
+#endif  // V8_INSPECTOR_REMOTEOBJECTID_H_
diff --git a/src/inspector/script-breakpoint.h b/src/inspector/script-breakpoint.h
new file mode 100644
index 0000000..025233d
--- /dev/null
+++ b/src/inspector/script-breakpoint.h
@@ -0,0 +1,52 @@
+/*
+ * Copyright (C) 2009 Apple Inc. All rights reserved.
+ * Copyright (C) 2009 Google Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1.  Redistributions of source code must retain the above copyright
+ *     notice, this list of conditions and the following disclaimer.
+ * 2.  Redistributions in binary form must reproduce the above copyright
+ *     notice, this list of conditions and the following disclaimer in the
+ *     documentation and/or other materials provided with the distribution.
+ * 3.  Neither the name of Apple Computer, Inc. ("Apple") nor the names of
+ *     its contributors may be used to endorse or promote products derived
+ *     from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef V8_INSPECTOR_SCRIPTBREAKPOINT_H_
+#define V8_INSPECTOR_SCRIPTBREAKPOINT_H_
+
+#include "src/inspector/string-16.h"
+
+namespace v8_inspector {
+
+struct ScriptBreakpoint {
+  ScriptBreakpoint() : ScriptBreakpoint(0, 0, String16()) {}
+
+  ScriptBreakpoint(int lineNumber, int columnNumber, const String16& condition)
+      : lineNumber(lineNumber),
+        columnNumber(columnNumber),
+        condition(condition) {}
+
+  int lineNumber;
+  int columnNumber;
+  String16 condition;
+};
+
+}  // namespace v8_inspector
+
+#endif  // V8_INSPECTOR_SCRIPTBREAKPOINT_H_
diff --git a/src/inspector/search-util.cc b/src/inspector/search-util.cc
new file mode 100644
index 0000000..a6fba06
--- /dev/null
+++ b/src/inspector/search-util.cc
@@ -0,0 +1,164 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/inspector/search-util.h"
+
+#include "src/inspector/protocol/Protocol.h"
+#include "src/inspector/v8-inspector-impl.h"
+#include "src/inspector/v8-inspector-session-impl.h"
+#include "src/inspector/v8-regex.h"
+
+namespace v8_inspector {
+
+namespace {
+
+String16 findMagicComment(const String16& content, const String16& name,
+                          bool multiline) {
+  DCHECK(name.find("=") == String16::kNotFound);
+  size_t length = content.length();
+  size_t nameLength = name.length();
+
+  size_t pos = length;
+  size_t equalSignPos = 0;
+  size_t closingCommentPos = 0;
+  while (true) {
+    pos = content.reverseFind(name, pos);
+    if (pos == String16::kNotFound) return String16();
+
+    // Check for a /\/[\/*][@#][ \t]/ regexp (length of 4) before found name.
+    if (pos < 4) return String16();
+    pos -= 4;
+    if (content[pos] != '/') continue;
+    if ((content[pos + 1] != '/' || multiline) &&
+        (content[pos + 1] != '*' || !multiline))
+      continue;
+    if (content[pos + 2] != '#' && content[pos + 2] != '@') continue;
+    if (content[pos + 3] != ' ' && content[pos + 3] != '\t') continue;
+    equalSignPos = pos + 4 + nameLength;
+    if (equalSignPos < length && content[equalSignPos] != '=') continue;
+    if (multiline) {
+      closingCommentPos = content.find("*/", equalSignPos + 1);
+      if (closingCommentPos == String16::kNotFound) return String16();
+    }
+
+    break;
+  }
+
+  DCHECK(equalSignPos);
+  DCHECK(!multiline || closingCommentPos);
+  size_t urlPos = equalSignPos + 1;
+  String16 match = multiline
+                       ? content.substring(urlPos, closingCommentPos - urlPos)
+                       : content.substring(urlPos);
+
+  size_t newLine = match.find("\n");
+  if (newLine != String16::kNotFound) match = match.substring(0, newLine);
+  match = match.stripWhiteSpace();
+
+  for (size_t i = 0; i < match.length(); ++i) {
+    UChar c = match[i];
+    if (c == '"' || c == '\'' || c == ' ' || c == '\t') return "";
+  }
+
+  return match;
+}
+
+String16 createSearchRegexSource(const String16& text) {
+  String16Builder result;
+
+  for (size_t i = 0; i < text.length(); i++) {
+    UChar c = text[i];
+    if (c == '[' || c == ']' || c == '(' || c == ')' || c == '{' || c == '}' ||
+        c == '+' || c == '-' || c == '*' || c == '.' || c == ',' || c == '?' ||
+        c == '\\' || c == '^' || c == '$' || c == '|') {
+      result.append('\\');
+    }
+    result.append(c);
+  }
+
+  return result.toString();
+}
+
+std::unique_ptr<std::vector<size_t>> lineEndings(const String16& text) {
+  std::unique_ptr<std::vector<size_t>> result(new std::vector<size_t>());
+
+  const String16 lineEndString = "\n";
+  size_t start = 0;
+  while (start < text.length()) {
+    size_t lineEnd = text.find(lineEndString, start);
+    if (lineEnd == String16::kNotFound) break;
+
+    result->push_back(lineEnd);
+    start = lineEnd + 1;
+  }
+  result->push_back(text.length());
+
+  return result;
+}
+
+std::vector<std::pair<int, String16>> scriptRegexpMatchesByLines(
+    const V8Regex& regex, const String16& text) {
+  std::vector<std::pair<int, String16>> result;
+  if (text.isEmpty()) return result;
+
+  std::unique_ptr<std::vector<size_t>> endings(lineEndings(text));
+  size_t size = endings->size();
+  size_t start = 0;
+  for (size_t lineNumber = 0; lineNumber < size; ++lineNumber) {
+    size_t lineEnd = endings->at(lineNumber);
+    String16 line = text.substring(start, lineEnd - start);
+    if (line.length() && line[line.length() - 1] == '\r')
+      line = line.substring(0, line.length() - 1);
+
+    int matchLength;
+    if (regex.match(line, 0, &matchLength) != -1)
+      result.push_back(std::pair<int, String16>(lineNumber, line));
+
+    start = lineEnd + 1;
+  }
+  return result;
+}
+
+std::unique_ptr<protocol::Debugger::SearchMatch> buildObjectForSearchMatch(
+    int lineNumber, const String16& lineContent) {
+  return protocol::Debugger::SearchMatch::create()
+      .setLineNumber(lineNumber)
+      .setLineContent(lineContent)
+      .build();
+}
+
+std::unique_ptr<V8Regex> createSearchRegex(V8InspectorImpl* inspector,
+                                           const String16& query,
+                                           bool caseSensitive, bool isRegex) {
+  String16 regexSource = isRegex ? query : createSearchRegexSource(query);
+  return wrapUnique(new V8Regex(inspector, regexSource, caseSensitive));
+}
+
+}  // namespace
+
+std::vector<std::unique_ptr<protocol::Debugger::SearchMatch>>
+searchInTextByLinesImpl(V8InspectorSession* session, const String16& text,
+                        const String16& query, const bool caseSensitive,
+                        const bool isRegex) {
+  std::unique_ptr<V8Regex> regex = createSearchRegex(
+      static_cast<V8InspectorSessionImpl*>(session)->inspector(), query,
+      caseSensitive, isRegex);
+  std::vector<std::pair<int, String16>> matches =
+      scriptRegexpMatchesByLines(*regex.get(), text);
+
+  std::vector<std::unique_ptr<protocol::Debugger::SearchMatch>> result;
+  for (const auto& match : matches)
+    result.push_back(buildObjectForSearchMatch(match.first, match.second));
+  return result;
+}
+
+String16 findSourceURL(const String16& content, bool multiline) {
+  return findMagicComment(content, "sourceURL", multiline);
+}
+
+String16 findSourceMapURL(const String16& content, bool multiline) {
+  return findMagicComment(content, "sourceMappingURL", multiline);
+}
+
+}  // namespace v8_inspector
diff --git a/src/inspector/search-util.h b/src/inspector/search-util.h
new file mode 100644
index 0000000..8f5753b
--- /dev/null
+++ b/src/inspector/search-util.h
@@ -0,0 +1,24 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_INSPECTOR_SEARCHUTIL_H_
+#define V8_INSPECTOR_SEARCHUTIL_H_
+
+#include "src/inspector/protocol/Debugger.h"
+#include "src/inspector/string-util.h"
+
+namespace v8_inspector {
+
+class V8InspectorSession;
+
+String16 findSourceURL(const String16& content, bool multiline);
+String16 findSourceMapURL(const String16& content, bool multiline);
+std::vector<std::unique_ptr<protocol::Debugger::SearchMatch>>
+searchInTextByLinesImpl(V8InspectorSession*, const String16& text,
+                        const String16& query, bool caseSensitive,
+                        bool isRegex);
+
+}  // namespace v8_inspector
+
+#endif  // V8_INSPECTOR_SEARCHUTIL_H_
diff --git a/src/inspector/string-16.cc b/src/inspector/string-16.cc
new file mode 100644
index 0000000..f608460
--- /dev/null
+++ b/src/inspector/string-16.cc
@@ -0,0 +1,518 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/inspector/string-16.h"
+
+#include <algorithm>
+#include <cctype>
+#include <cstdlib>
+#include <cstring>
+#include <iomanip>
+#include <limits>
+#include <locale>
+#include <sstream>
+#include <string>
+
+#include "src/base/platform/platform.h"
+#include "src/inspector/protocol-platform.h"
+
+namespace v8_inspector {
+
+namespace {
+
+bool isASCII(UChar c) { return !(c & ~0x7F); }
+
+bool isSpaceOrNewLine(UChar c) {
+  return isASCII(c) && c <= ' ' && (c == ' ' || (c <= 0xD && c >= 0x9));
+}
+
+int charactersToInteger(const UChar* characters, size_t length,
+                        bool* ok = nullptr) {
+  std::vector<char> buffer;
+  buffer.reserve(length + 1);
+  for (size_t i = 0; i < length; ++i) {
+    if (!isASCII(characters[i])) {
+      if (ok) *ok = false;
+      return 0;
+    }
+    buffer.push_back(static_cast<char>(characters[i]));
+  }
+  buffer.push_back('\0');
+
+  char* endptr;
+  int64_t result =
+      static_cast<int64_t>(std::strtol(buffer.data(), &endptr, 10));
+  if (ok) {
+    *ok = !(*endptr) && result <= std::numeric_limits<int>::max() &&
+          result >= std::numeric_limits<int>::min();
+  }
+  return static_cast<int>(result);
+}
+
+const UChar replacementCharacter = 0xFFFD;
+using UChar32 = uint32_t;
+
+inline int inlineUTF8SequenceLengthNonASCII(char b0) {
+  if ((b0 & 0xC0) != 0xC0) return 0;
+  if ((b0 & 0xE0) == 0xC0) return 2;
+  if ((b0 & 0xF0) == 0xE0) return 3;
+  if ((b0 & 0xF8) == 0xF0) return 4;
+  return 0;
+}
+
+inline int inlineUTF8SequenceLength(char b0) {
+  return isASCII(b0) ? 1 : inlineUTF8SequenceLengthNonASCII(b0);
+}
+
+// Once the bits are split out into bytes of UTF-8, this is a mask OR-ed
+// into the first byte, depending on how many bytes follow.  There are
+// as many entries in this table as there are UTF-8 sequence types.
+// (I.e., one byte sequence, two byte... etc.). Remember that sequences
+// for *legal* UTF-8 will be 4 or fewer bytes total.
+static const unsigned char firstByteMark[7] = {0x00, 0x00, 0xC0, 0xE0,
+                                               0xF0, 0xF8, 0xFC};
+
+typedef enum {
+  conversionOK,     // conversion successful
+  sourceExhausted,  // partial character in source, but hit end
+  targetExhausted,  // insuff. room in target for conversion
+  sourceIllegal     // source sequence is illegal/malformed
+} ConversionResult;
+
+ConversionResult convertUTF16ToUTF8(const UChar** sourceStart,
+                                    const UChar* sourceEnd, char** targetStart,
+                                    char* targetEnd, bool strict) {
+  ConversionResult result = conversionOK;
+  const UChar* source = *sourceStart;
+  char* target = *targetStart;
+  while (source < sourceEnd) {
+    UChar32 ch;
+    uint32_t bytesToWrite = 0;
+    const UChar32 byteMask = 0xBF;
+    const UChar32 byteMark = 0x80;
+    const UChar* oldSource =
+        source;  // In case we have to back up because of target overflow.
+    ch = static_cast<uint16_t>(*source++);
+    // If we have a surrogate pair, convert to UChar32 first.
+    if (ch >= 0xD800 && ch <= 0xDBFF) {
+      // If the 16 bits following the high surrogate are in the source buffer...
+      if (source < sourceEnd) {
+        UChar32 ch2 = static_cast<uint16_t>(*source);
+        // If it's a low surrogate, convert to UChar32.
+        if (ch2 >= 0xDC00 && ch2 <= 0xDFFF) {
+          ch = ((ch - 0xD800) << 10) + (ch2 - 0xDC00) + 0x0010000;
+          ++source;
+        } else if (strict) {  // it's an unpaired high surrogate
+          --source;           // return to the illegal value itself
+          result = sourceIllegal;
+          break;
+        }
+      } else {     // We don't have the 16 bits following the high surrogate.
+        --source;  // return to the high surrogate
+        result = sourceExhausted;
+        break;
+      }
+    } else if (strict) {
+      // UTF-16 surrogate values are illegal in UTF-32
+      if (ch >= 0xDC00 && ch <= 0xDFFF) {
+        --source;  // return to the illegal value itself
+        result = sourceIllegal;
+        break;
+      }
+    }
+    // Figure out how many bytes the result will require
+    if (ch < (UChar32)0x80) {
+      bytesToWrite = 1;
+    } else if (ch < (UChar32)0x800) {
+      bytesToWrite = 2;
+    } else if (ch < (UChar32)0x10000) {
+      bytesToWrite = 3;
+    } else if (ch < (UChar32)0x110000) {
+      bytesToWrite = 4;
+    } else {
+      bytesToWrite = 3;
+      ch = replacementCharacter;
+    }
+
+    target += bytesToWrite;
+    if (target > targetEnd) {
+      source = oldSource;  // Back up source pointer!
+      target -= bytesToWrite;
+      result = targetExhausted;
+      break;
+    }
+    switch (bytesToWrite) {  // note: everything falls through.
+      case 4:
+        *--target = static_cast<char>((ch | byteMark) & byteMask);
+        ch >>= 6;
+      case 3:
+        *--target = static_cast<char>((ch | byteMark) & byteMask);
+        ch >>= 6;
+      case 2:
+        *--target = static_cast<char>((ch | byteMark) & byteMask);
+        ch >>= 6;
+      case 1:
+        *--target = static_cast<char>(ch | firstByteMark[bytesToWrite]);
+    }
+    target += bytesToWrite;
+  }
+  *sourceStart = source;
+  *targetStart = target;
+  return result;
+}
+
+/**
+ * Is this code point a BMP code point (U+0000..U+ffff)?
+ * @param c 32-bit code point
+ * @return TRUE or FALSE
+ * @stable ICU 2.8
+ */
+#define U_IS_BMP(c) ((uint32_t)(c) <= 0xffff)
+
+/**
+ * Is this code point a supplementary code point (U+10000..U+10ffff)?
+ * @param c 32-bit code point
+ * @return TRUE or FALSE
+ * @stable ICU 2.8
+ */
+#define U_IS_SUPPLEMENTARY(c) ((uint32_t)((c)-0x10000) <= 0xfffff)
+
+/**
+ * Is this code point a surrogate (U+d800..U+dfff)?
+ * @param c 32-bit code point
+ * @return TRUE or FALSE
+ * @stable ICU 2.4
+ */
+#define U_IS_SURROGATE(c) (((c)&0xfffff800) == 0xd800)
+
+/**
+ * Get the lead surrogate (0xd800..0xdbff) for a
+ * supplementary code point (0x10000..0x10ffff).
+ * @param supplementary 32-bit code point (U+10000..U+10ffff)
+ * @return lead surrogate (U+d800..U+dbff) for supplementary
+ * @stable ICU 2.4
+ */
+#define U16_LEAD(supplementary) (UChar)(((supplementary) >> 10) + 0xd7c0)
+
+/**
+ * Get the trail surrogate (0xdc00..0xdfff) for a
+ * supplementary code point (0x10000..0x10ffff).
+ * @param supplementary 32-bit code point (U+10000..U+10ffff)
+ * @return trail surrogate (U+dc00..U+dfff) for supplementary
+ * @stable ICU 2.4
+ */
+#define U16_TRAIL(supplementary) (UChar)(((supplementary)&0x3ff) | 0xdc00)
+
+// This must be called with the length pre-determined by the first byte.
+// If presented with a length > 4, this returns false.  The Unicode
+// definition of UTF-8 goes up to 4-byte sequences.
+static bool isLegalUTF8(const unsigned char* source, int length) {
+  unsigned char a;
+  const unsigned char* srcptr = source + length;
+  switch (length) {
+    default:
+      return false;
+    // Everything else falls through when "true"...
+    case 4:
+      if ((a = (*--srcptr)) < 0x80 || a > 0xBF) return false;
+    case 3:
+      if ((a = (*--srcptr)) < 0x80 || a > 0xBF) return false;
+    case 2:
+      if ((a = (*--srcptr)) > 0xBF) return false;
+
+      // no fall-through in this inner switch
+      switch (*source) {
+        case 0xE0:
+          if (a < 0xA0) return false;
+          break;
+        case 0xED:
+          if (a > 0x9F) return false;
+          break;
+        case 0xF0:
+          if (a < 0x90) return false;
+          break;
+        case 0xF4:
+          if (a > 0x8F) return false;
+          break;
+        default:
+          if (a < 0x80) return false;
+      }
+
+    case 1:
+      if (*source >= 0x80 && *source < 0xC2) return false;
+  }
+  if (*source > 0xF4) return false;
+  return true;
+}
+
+// Magic values subtracted from a buffer value during UTF8 conversion.
+// This table contains as many values as there might be trailing bytes
+// in a UTF-8 sequence.
+static const UChar32 offsetsFromUTF8[6] = {0x00000000UL,
+                                           0x00003080UL,
+                                           0x000E2080UL,
+                                           0x03C82080UL,
+                                           static_cast<UChar32>(0xFA082080UL),
+                                           static_cast<UChar32>(0x82082080UL)};
+
+static inline UChar32 readUTF8Sequence(const char*& sequence, size_t length) {
+  UChar32 character = 0;
+
+  // The cases all fall through.
+  switch (length) {
+    case 6:
+      character += static_cast<unsigned char>(*sequence++);
+      character <<= 6;
+    case 5:
+      character += static_cast<unsigned char>(*sequence++);
+      character <<= 6;
+    case 4:
+      character += static_cast<unsigned char>(*sequence++);
+      character <<= 6;
+    case 3:
+      character += static_cast<unsigned char>(*sequence++);
+      character <<= 6;
+    case 2:
+      character += static_cast<unsigned char>(*sequence++);
+      character <<= 6;
+    case 1:
+      character += static_cast<unsigned char>(*sequence++);
+  }
+
+  return character - offsetsFromUTF8[length - 1];
+}
+
// Converts the UTF-8 bytes in [*sourceStart, sourceEnd) into UTF-16 code
// units written to [*targetStart, targetEnd). On return both start
// pointers are advanced past what was consumed/produced, so a caller can
// detect partial conversion and resume. When |strict| is true, conversion
// stops at the first ill-formed sequence or surrogate code point
// (sourceIllegal); otherwise such input becomes U+FFFD. If
// |sourceAllASCII| is non-null it is set to whether every unit written
// was 7-bit ASCII.
ConversionResult convertUTF8ToUTF16(const char** sourceStart,
                                    const char* sourceEnd, UChar** targetStart,
                                    UChar* targetEnd, bool* sourceAllASCII,
                                    bool strict) {
  ConversionResult result = conversionOK;
  const char* source = *sourceStart;
  UChar* target = *targetStart;
  UChar orAllData = 0;  // OR of every unit written; drives the ASCII check.
  while (source < sourceEnd) {
    int utf8SequenceLength = inlineUTF8SequenceLength(*source);
    if (sourceEnd - source < utf8SequenceLength) {
      // Input ends in the middle of a multi-byte sequence; the caller may
      // supply the remaining bytes later.
      result = sourceExhausted;
      break;
    }
    // Do this check whether lenient or strict
    if (!isLegalUTF8(reinterpret_cast<const unsigned char*>(source),
                     utf8SequenceLength)) {
      result = sourceIllegal;
      break;
    }

    // Advances |source| past the whole sequence.
    UChar32 character = readUTF8Sequence(source, utf8SequenceLength);

    if (target >= targetEnd) {
      source -= utf8SequenceLength;  // Back up source pointer!
      result = targetExhausted;
      break;
    }

    if (U_IS_BMP(character)) {
      // UTF-16 surrogate values are illegal in UTF-32
      if (U_IS_SURROGATE(character)) {
        if (strict) {
          source -= utf8SequenceLength;  // return to the illegal value itself
          result = sourceIllegal;
          break;
        }
        *target++ = replacementCharacter;
        orAllData |= replacementCharacter;
      } else {
        *target++ = static_cast<UChar>(character);  // normal case
        orAllData |= character;
      }
    } else if (U_IS_SUPPLEMENTARY(character)) {
      // target is a character in range 0xFFFF - 0x10FFFF
      if (target + 1 >= targetEnd) {
        // Need room for a full surrogate pair; don't emit half of one.
        source -= utf8SequenceLength;  // Back up source pointer!
        result = targetExhausted;
        break;
      }
      *target++ = U16_LEAD(character);
      *target++ = U16_TRAIL(character);
      orAllData = 0xffff;  // Supplementary output is never ASCII.
    } else {
      // Outside the UTF-16-encodable range.
      if (strict) {
        source -= utf8SequenceLength;  // return to the start
        result = sourceIllegal;
        break;  // Bail out; shouldn't continue
      } else {
        *target++ = replacementCharacter;
        orAllData |= replacementCharacter;
      }
    }
  }
  *sourceStart = source;
  *targetStart = target;

  if (sourceAllASCII) *sourceAllASCII = !(orAllData & ~0x7f);

  return result;
}
+
+// Helper to write a three-byte UTF-8 code point to the buffer, caller must
+// check room is available.
+static inline void putUTF8Triple(char*& buffer, UChar ch) {
+  *buffer++ = static_cast<char>(((ch >> 12) & 0x0F) | 0xE0);
+  *buffer++ = static_cast<char>(((ch >> 6) & 0x3F) | 0x80);
+  *buffer++ = static_cast<char>((ch & 0x3F) | 0x80);
+}
+
+}  // namespace
+
+// static
+String16 String16::fromInteger(int number) {
+  const size_t kBufferSize = 50;
+  char buffer[kBufferSize];
+  v8::base::OS::SNPrintF(buffer, kBufferSize, "%d", number);
+  return String16(buffer);
+}
+
+// static
+String16 String16::fromInteger(size_t number) {
+  const size_t kBufferSize = 50;
+  char buffer[kBufferSize];
+  v8::base::OS::SNPrintF(buffer, kBufferSize, "%zu", number);
+  return String16(buffer);
+}
+
+// static
+String16 String16::fromDouble(double number) {
+  std::ostringstream s;
+  s.imbue(std::locale("C"));
+  s << std::fixed << std::setprecision(std::numeric_limits<double>::digits10)
+    << number;
+  return String16(s.str().c_str());
+}
+
+// static
+String16 String16::fromDouble(double number, int precision) {
+  std::ostringstream s;
+  s.imbue(std::locale("C"));
+  s << std::fixed << std::setprecision(precision) << number;
+  return String16(s.str().c_str());
+}
+
// Parses this string as an integer via charactersToInteger() (defined
// earlier in this file); |ok|, when non-null, receives whether the parse
// succeeded.
int String16::toInteger(bool* ok) const {
  return charactersToInteger(characters16(), length(), ok);
}
+
+String16 String16::stripWhiteSpace() const {
+  if (!length()) return String16();
+
+  size_t start = 0;
+  size_t end = length() - 1;
+
+  // skip white space from start
+  while (start <= end && isSpaceOrNewLine(characters16()[start])) ++start;
+
+  // only white space
+  if (start > end) return String16();
+
+  // skip white space from end
+  while (end && isSpaceOrNewLine(characters16()[end])) --end;
+
+  if (!start && end == length() - 1) return *this;
+  return String16(characters16() + start, end + 1 - start);
+}
+
String16Builder::String16Builder() {}

// Appends the full contents of |s|.
void String16Builder::append(const String16& s) {
  m_buffer.insert(m_buffer.end(), s.characters16(),
                  s.characters16() + s.length());
}

void String16Builder::append(UChar c) { m_buffer.push_back(c); }

// Widens one byte to a UTF-16 unit; no UTF-8 decoding takes place.
// NOTE(review): on platforms where char is signed, bytes > 0x7F
// sign-extend through this conversion — confirm callers pass ASCII only.
void String16Builder::append(char c) {
  UChar u = c;
  m_buffer.push_back(u);
}

void String16Builder::append(const UChar* characters, size_t length) {
  m_buffer.insert(m_buffer.end(), characters, characters + length);
}

// Each byte becomes one UTF-16 unit (same widening caveat as append(char)).
void String16Builder::append(const char* characters, size_t length) {
  m_buffer.insert(m_buffer.end(), characters, characters + length);
}

// Copies the accumulated units into a new String16; the builder keeps its
// contents and may continue to be appended to.
String16 String16Builder::toString() {
  return String16(m_buffer.data(), m_buffer.size());
}

void String16Builder::reserveCapacity(size_t capacity) {
  m_buffer.reserve(capacity);
}
+
+String16 String16::fromUTF8(const char* stringStart, size_t length) {
+  if (!stringStart || !length) return String16();
+
+  std::vector<UChar> buffer(length);
+  UChar* bufferStart = buffer.data();
+
+  UChar* bufferCurrent = bufferStart;
+  const char* stringCurrent = stringStart;
+  if (convertUTF8ToUTF16(&stringCurrent, stringStart + length, &bufferCurrent,
+                         bufferCurrent + buffer.size(), 0,
+                         true) != conversionOK)
+    return String16();
+
+  size_t utf16Length = bufferCurrent - bufferStart;
+  return String16(bufferStart, utf16Length);
+}
+
// Encodes this string as UTF-8. An unpaired trailing high surrogate is
// not an error: it is encoded directly as a three-byte sequence, matching
// what lenient conversion does with unpaired surrogates mid-string.
std::string String16::utf8() const {
  size_t length = this->length();

  if (!length) return std::string("");

  // Allocate a buffer big enough to hold all the characters
  // (an individual UTF-16 UChar can only expand to 3 UTF-8 bytes).
  // Optimization ideas, if we find this function is hot:
  //  * We could speculatively create a CStringBuffer to contain 'length'
  //    characters, and resize if necessary (i.e. if the buffer contains
  //    non-ascii characters). (Alternatively, scan the buffer first for
  //    ascii characters, so we know this will be sufficient).
  //  * We could allocate a CStringBuffer with an appropriate size to
  //    have a good chance of being able to write the string into the
  //    buffer without reallocing (say, 1.5 x length).
  if (length > std::numeric_limits<unsigned>::max() / 3) return std::string();
  std::vector<char> bufferVector(length * 3);
  char* buffer = bufferVector.data();
  const UChar* characters = m_impl.data();

  // Lenient (non-strict) conversion: unpaired surrogates don't abort.
  ConversionResult result =
      convertUTF16ToUTF8(&characters, characters + length, &buffer,
                         buffer + bufferVector.size(), false);
  // NOTE(review): |result| is only consumed by DCHECKs below; confirm
  // DCHECK still references its argument in release builds, else this
  // may trigger an unused-variable warning.
  DCHECK(
      result !=
      targetExhausted);  // (length * 3) should be sufficient for any conversion

  // Only produced from strict conversion.
  DCHECK(result != sourceIllegal);

  // Check for an unconverted high surrogate.
  if (result == sourceExhausted) {
    // This should be one unpaired high surrogate. Treat it the same
    // was as an unpaired high surrogate would have been handled in
    // the middle of a string with non-strict conversion - which is
    // to say, simply encode it to UTF-8.
    DCHECK((characters + 1) == (m_impl.data() + length));
    DCHECK((*characters >= 0xD800) && (*characters <= 0xDBFF));
    // There should be room left, since one UChar hasn't been
    // converted.
    DCHECK((buffer + 3) <= (buffer + bufferVector.size()));
    putUTF8Triple(buffer, *characters);
  }

  // |buffer| now points one past the last byte written.
  return std::string(bufferVector.data(), buffer - bufferVector.data());
}
+}
+
+}  // namespace v8_inspector
diff --git a/src/inspector/string-16.h b/src/inspector/string-16.h
new file mode 100644
index 0000000..6dc7759
--- /dev/null
+++ b/src/inspector/string-16.h
@@ -0,0 +1,133 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_INSPECTOR_STRING16_H_
+#define V8_INSPECTOR_STRING16_H_
+
+#include <stdint.h>
+#include <cctype>
+#include <climits>
+#include <cstring>
+#include <string>
+#include <vector>
+
+namespace v8_inspector {
+
using UChar = uint16_t;

// UTF-16 string value type used throughout the inspector, backed by
// std::basic_string<UChar>, with a lazily computed, cached hash code.
class String16 {
 public:
  static const size_t kNotFound = static_cast<size_t>(-1);

  String16() {}
  String16(const String16& other) : m_impl(other.m_impl) {}
  String16(const UChar* characters, size_t size) : m_impl(characters, size) {}
  String16(const UChar* characters)  // NOLINT(runtime/explicit)
      : m_impl(characters) {}
  String16(const char* characters)  // NOLINT(runtime/explicit)
      : String16(characters, std::strlen(characters)) {}
  // Widens each byte to one UTF-16 unit; no UTF-8 decoding (see fromUTF8).
  String16(const char* characters, size_t size) {
    m_impl.resize(size);
    for (size_t i = 0; i < size; ++i) m_impl[i] = characters[i];
  }

  static String16 fromInteger(int);
  static String16 fromInteger(size_t);
  static String16 fromDouble(double);
  static String16 fromDouble(double, int precision);

  int toInteger(bool* ok = nullptr) const;
  String16 stripWhiteSpace() const;
  const UChar* characters16() const { return m_impl.c_str(); }
  size_t length() const { return m_impl.length(); }
  bool isEmpty() const { return !m_impl.length(); }
  UChar operator[](size_t index) const { return m_impl[index]; }
  String16 substring(size_t pos, size_t len = UINT_MAX) const {
    return String16(m_impl.substr(pos, len));
  }
  size_t find(const String16& str, size_t start = 0) const {
    return m_impl.find(str.m_impl, start);
  }
  size_t reverseFind(const String16& str, size_t start = UINT_MAX) const {
    return m_impl.rfind(str.m_impl, start);
  }
  void swap(String16& other) { m_impl.swap(other.m_impl); }

  // Convenience methods.
  std::string utf8() const;
  static String16 fromUTF8(const char* stringStart, size_t length);

  const std::basic_string<UChar>& impl() const { return m_impl; }
  explicit String16(const std::basic_string<UChar>& impl) : m_impl(impl) {}

  // Java-style 31-based hash, computed on first use and cached.
  // NOTE(review): the cache writes are unsynchronized — assumes the first
  // call does not race with another thread; confirm usage.
  std::size_t hash() const {
    if (!m_hasHash) {
      size_t result = 0;
      for (size_t i = 0; i < length(); ++i) result = 31 * result + m_impl[i];
      m_hashCode = result;
      m_hasHash = true;
    }
    return m_hashCode;
  }

 private:
  std::basic_string<UChar> m_impl;
  // m_-prefixed for consistency with the member-naming convention used
  // everywhere else in these classes (m_impl, m_buffer, ...).
  mutable bool m_hasHash = false;
  mutable std::size_t m_hashCode = 0;
};
+
// Comparison and concatenation operators; all delegate to the underlying
// std::basic_string<UChar>.
inline bool operator==(const String16& a, const String16& b) {
  return a.impl() == b.impl();
}
inline bool operator<(const String16& a, const String16& b) {
  return a.impl() < b.impl();
}
inline bool operator!=(const String16& a, const String16& b) {
  return a.impl() != b.impl();
}
inline bool operator==(const String16& a, const char* b) {
  return a.impl() == String16(b).impl();
}
inline String16 operator+(const String16& a, const char* b) {
  return String16(a.impl() + String16(b).impl());
}
inline String16 operator+(const char* a, const String16& b) {
  return String16(String16(a).impl() + b.impl());
}
inline String16 operator+(const String16& a, const String16& b) {
  return String16(a.impl() + b.impl());
}
+
// Incremental builder for String16: accumulates UTF-16 units in a vector
// and copies them out in toString(). Definitions live in string-16.cc.
class String16Builder {
 public:
  String16Builder();
  void append(const String16&);
  void append(UChar);
  void append(char);
  void append(const UChar*, size_t);
  void append(const char*, size_t);
  String16 toString();
  void reserveCapacity(size_t);

 private:
  std::vector<UChar> m_buffer;
};
+
+}  // namespace v8_inspector
+
// Make String16 usable as a key in std::unordered_map and friends by
// delegating to its cached hash().
// NOTE(review): skipped on Apple builds that are not using libc++ —
// presumably the old libstdc++ shipped there rejects this specialization;
// confirm before changing the guard.
#if !defined(__APPLE__) || defined(_LIBCPP_VERSION)

namespace std {
template <>
struct hash<v8_inspector::String16> {
  std::size_t operator()(const v8_inspector::String16& string) const {
    return string.hash();
  }
};

}  // namespace std

#endif  // !defined(__APPLE__) || defined(_LIBCPP_VERSION)
+
+#endif  // V8_INSPECTOR_STRING16_H_
diff --git a/src/inspector/string-util.cc b/src/inspector/string-util.cc
new file mode 100644
index 0000000..e6b83a5
--- /dev/null
+++ b/src/inspector/string-util.cc
@@ -0,0 +1,218 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/inspector/string-util.h"
+
+#include "src/inspector/protocol/Protocol.h"
+
+namespace v8_inspector {
+
// Copies |string| into a new two-byte v8::String in |isolate|.
// Requires string.length() < v8::String::kMaxLength.
v8::Local<v8::String> toV8String(v8::Isolate* isolate, const String16& string) {
  if (string.isEmpty()) return v8::String::Empty(isolate);
  DCHECK(string.length() < v8::String::kMaxLength);
  return v8::String::NewFromTwoByte(
             isolate, reinterpret_cast<const uint16_t*>(string.characters16()),
             v8::NewStringType::kNormal, static_cast<int>(string.length()))
      .ToLocalChecked();
}
+
// As toV8String, but produces an internalized (deduplicated) string —
// useful for property names that are looked up repeatedly.
v8::Local<v8::String> toV8StringInternalized(v8::Isolate* isolate,
                                             const String16& string) {
  if (string.isEmpty()) return v8::String::Empty(isolate);
  DCHECK(string.length() < v8::String::kMaxLength);
  return v8::String::NewFromTwoByte(
             isolate, reinterpret_cast<const uint16_t*>(string.characters16()),
             v8::NewStringType::kInternalized,
             static_cast<int>(string.length()))
      .ToLocalChecked();
}

// Internalized-string overload for UTF-8 C string literals.
v8::Local<v8::String> toV8StringInternalized(v8::Isolate* isolate,
                                             const char* str) {
  return v8::String::NewFromUtf8(isolate, str, v8::NewStringType::kInternalized)
      .ToLocalChecked();
}
+
// Copies a StringView into a new v8::String, preserving its one-byte or
// two-byte representation.
v8::Local<v8::String> toV8String(v8::Isolate* isolate,
                                 const StringView& string) {
  if (!string.length()) return v8::String::Empty(isolate);
  DCHECK(string.length() < v8::String::kMaxLength);
  if (string.is8Bit())
    return v8::String::NewFromOneByte(
               isolate, reinterpret_cast<const uint8_t*>(string.characters8()),
               v8::NewStringType::kNormal, static_cast<int>(string.length()))
        .ToLocalChecked();
  return v8::String::NewFromTwoByte(
             isolate, reinterpret_cast<const uint16_t*>(string.characters16()),
             v8::NewStringType::kNormal, static_cast<int>(string.length()))
      .ToLocalChecked();
}
+
// Copies a v8::String's two-byte contents into a String16.
// NOTE(review): IsNull()/IsUndefined() can never hold for a non-empty
// Local<v8::String>; these checks look vestigial — confirm before removing.
String16 toProtocolString(v8::Local<v8::String> value) {
  if (value.IsEmpty() || value->IsNull() || value->IsUndefined())
    return String16();
  std::unique_ptr<UChar[]> buffer(new UChar[value->Length()]);
  value->Write(reinterpret_cast<uint16_t*>(buffer.get()), 0, value->Length());
  return String16(buffer.get(), value->Length());
}

// As above, but tolerates arbitrary values: non-strings become the empty
// string instead of being converted.
String16 toProtocolStringWithTypeCheck(v8::Local<v8::Value> value) {
  if (value.IsEmpty() || !value->IsString()) return String16();
  return toProtocolString(value.As<v8::String>());
}
+
// Copies a StringView into an owning String16, widening one-byte data.
String16 toString16(const StringView& string) {
  if (!string.length()) return String16();
  if (string.is8Bit())
    return String16(reinterpret_cast<const char*>(string.characters8()),
                    string.length());
  return String16(reinterpret_cast<const UChar*>(string.characters16()),
                  string.length());
}

// Returns a non-owning view of |string|; valid only while |string| lives
// and is not mutated.
StringView toStringView(const String16& string) {
  if (string.isEmpty()) return StringView();
  return StringView(reinterpret_cast<const uint16_t*>(string.characters16()),
                    string.length());
}
+
+bool stringViewStartsWith(const StringView& string, const char* prefix) {
+  if (!string.length()) return !(*prefix);
+  if (string.is8Bit()) {
+    for (size_t i = 0, j = 0; prefix[j] && i < string.length(); ++i, ++j) {
+      if (string.characters8()[i] != prefix[j]) return false;
+    }
+  } else {
+    for (size_t i = 0, j = 0; prefix[j] && i < string.length(); ++i, ++j) {
+      if (string.characters16()[i] != prefix[j]) return false;
+    }
+  }
+  return true;
+}
+
namespace protocol {

// Parses |string| as JSON into a protocol::Value tree, dispatching on the
// view's character width. Returns nullptr for empty input; other outcomes
// come from the protocol::parseJSON overloads defined elsewhere.
std::unique_ptr<protocol::Value> parseJSON(const StringView& string) {
  if (!string.length()) return nullptr;
  if (string.is8Bit()) {
    return protocol::parseJSON(string.characters8(),
                               static_cast<int>(string.length()));
  }
  return protocol::parseJSON(string.characters16(),
                             static_cast<int>(string.length()));
}

// As above, for String16 input (always two-byte).
std::unique_ptr<protocol::Value> parseJSON(const String16& string) {
  if (!string.length()) return nullptr;
  return protocol::parseJSON(string.characters16(),
                             static_cast<int>(string.length()));
}

}  // namespace protocol
+
// Recursively converts a V8 value into a protocol::Value tree for
// serialization over the inspector protocol. On failure sets
// |errorString| and returns nullptr. |maxDepth| bounds the recursion so
// cyclic or very deep object graphs terminate with an error instead of
// overflowing the stack.
std::unique_ptr<protocol::Value> toProtocolValue(protocol::String* errorString,
                                                 v8::Local<v8::Context> context,
                                                 v8::Local<v8::Value> value,
                                                 int maxDepth) {
  if (value.IsEmpty()) {
    UNREACHABLE();
    return nullptr;
  }

  if (!maxDepth) {
    *errorString = "Object reference chain is too long";
    return nullptr;
  }
  maxDepth--;

  if (value->IsNull() || value->IsUndefined()) return protocol::Value::null();
  if (value->IsBoolean())
    return protocol::FundamentalValue::create(value.As<v8::Boolean>()->Value());
  if (value->IsNumber()) {
    double doubleValue = value.As<v8::Number>()->Value();
    // Prefer the integer representation when it round-trips exactly.
    // NOTE(review): casting a double outside int range is undefined
    // behavior before the comparison even runs — confirm inputs or guard
    // the range first.
    int intValue = static_cast<int>(doubleValue);
    if (intValue == doubleValue)
      return protocol::FundamentalValue::create(intValue);
    return protocol::FundamentalValue::create(doubleValue);
  }
  if (value->IsString())
    return protocol::StringValue::create(
        toProtocolString(value.As<v8::String>()));
  if (value->IsArray()) {
    v8::Local<v8::Array> array = value.As<v8::Array>();
    std::unique_ptr<protocol::ListValue> inspectorArray =
        protocol::ListValue::create();
    uint32_t length = array->Length();
    for (uint32_t i = 0; i < length; i++) {
      // NOTE(review): this inner |value| shadows the function parameter —
      // consider renaming for clarity.
      v8::Local<v8::Value> value;
      if (!array->Get(context, i).ToLocal(&value)) {
        *errorString = "Internal error";
        return nullptr;
      }
      std::unique_ptr<protocol::Value> element =
          toProtocolValue(errorString, context, value, maxDepth);
      if (!element) return nullptr;
      inspectorArray->pushValue(std::move(element));
    }
    return std::move(inspectorArray);
  }
  if (value->IsObject()) {
    std::unique_ptr<protocol::DictionaryValue> jsonObject =
        protocol::DictionaryValue::create();
    v8::Local<v8::Object> object = v8::Local<v8::Object>::Cast(value);
    v8::Local<v8::Array> propertyNames;
    if (!object->GetPropertyNames(context).ToLocal(&propertyNames)) {
      *errorString = "Internal error";
      return nullptr;
    }
    uint32_t length = propertyNames->Length();
    for (uint32_t i = 0; i < length; i++) {
      v8::Local<v8::Value> name;
      if (!propertyNames->Get(context, i).ToLocal(&name)) {
        *errorString = "Internal error";
        return nullptr;
      }
      // FIXME(yurys): v8::Object should support GetOwnPropertyNames
      if (name->IsString()) {
        // Skip properties that only exist via interceptors/prototypes.
        v8::Maybe<bool> hasRealNamedProperty = object->HasRealNamedProperty(
            context, v8::Local<v8::String>::Cast(name));
        if (!hasRealNamedProperty.IsJust() || !hasRealNamedProperty.FromJust())
          continue;
      }
      v8::Local<v8::String> propertyName;
      if (!name->ToString(context).ToLocal(&propertyName)) continue;
      v8::Local<v8::Value> property;
      if (!object->Get(context, name).ToLocal(&property)) {
        *errorString = "Internal error";
        return nullptr;
      }
      std::unique_ptr<protocol::Value> propertyValue =
          toProtocolValue(errorString, context, property, maxDepth);
      if (!propertyValue) return nullptr;
      jsonObject->setValue(toProtocolString(propertyName),
                           std::move(propertyValue));
    }
    return std::move(jsonObject);
  }
  *errorString = "Object couldn't be returned by value";
  return nullptr;
}
+
// static
// Makes an owning copy of |string|'s characters.
std::unique_ptr<StringBuffer> StringBuffer::create(const StringView& string) {
  String16 owner = toString16(string);
  return StringBufferImpl::adopt(owner);
}

// static
// Takes ownership of |string|'s contents; the argument is left empty.
std::unique_ptr<StringBufferImpl> StringBufferImpl::adopt(String16& string) {
  return wrapUnique(new StringBufferImpl(string));
}

// Swaps the characters out of |string| into m_owner and points m_string
// at the stolen storage, so the view stays valid for this object's life.
StringBufferImpl::StringBufferImpl(String16& string) {
  m_owner.swap(string);
  m_string = toStringView(m_owner);
}
+
+}  // namespace v8_inspector
diff --git a/src/inspector/string-util.h b/src/inspector/string-util.h
new file mode 100644
index 0000000..30137b8
--- /dev/null
+++ b/src/inspector/string-util.h
@@ -0,0 +1,75 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_INSPECTOR_STRINGUTIL_H_
+#define V8_INSPECTOR_STRINGUTIL_H_
+
+#include "src/base/macros.h"
+#include "src/inspector/string-16.h"
+
+#include "include/v8-inspector.h"
+
+namespace v8_inspector {
+
+namespace protocol {
+
+class Value;
+
+using String = v8_inspector::String16;
+using StringBuilder = v8_inspector::String16Builder;
+
// Adapter giving the generated protocol code a uniform, static string API
// on top of String16/String16Builder.
class StringUtil {
 public:
  static String substring(const String& s, size_t pos, size_t len) {
    return s.substring(pos, len);
  }
  static String fromInteger(int number) { return String::fromInteger(number); }
  static String fromInteger(size_t number) {
    return String::fromInteger(number);
  }
  static String fromDouble(double number) { return String::fromDouble(number); }
  static const size_t kNotFound = String::kNotFound;
  static void builderReserve(StringBuilder& builder, size_t capacity) {
    builder.reserveCapacity(capacity);
  }
};
+
+std::unique_ptr<protocol::Value> parseJSON(const StringView& json);
+std::unique_ptr<protocol::Value> parseJSON(const String16& json);
+
+}  // namespace protocol
+
+std::unique_ptr<protocol::Value> toProtocolValue(protocol::String* errorString,
+                                                 v8::Local<v8::Context>,
+                                                 v8::Local<v8::Value>,
+                                                 int maxDepth = 1000);
+
+v8::Local<v8::String> toV8String(v8::Isolate*, const String16&);
+v8::Local<v8::String> toV8StringInternalized(v8::Isolate*, const String16&);
+v8::Local<v8::String> toV8StringInternalized(v8::Isolate*, const char*);
+v8::Local<v8::String> toV8String(v8::Isolate*, const StringView&);
+// TODO(dgozman): rename to toString16.
+String16 toProtocolString(v8::Local<v8::String>);
+String16 toProtocolStringWithTypeCheck(v8::Local<v8::Value>);
+String16 toString16(const StringView&);
+StringView toStringView(const String16&);
+bool stringViewStartsWith(const StringView&, const char*);
+
// StringBuffer implementation that owns its characters in a String16 and
// exposes them through the public StringView interface.
class StringBufferImpl : public StringBuffer {
 public:
  // Destroys string's content.
  static std::unique_ptr<StringBufferImpl> adopt(String16&);
  const StringView& string() override { return m_string; }

 private:
  explicit StringBufferImpl(String16&);
  String16 m_owner;   // Owns the characters.
  StringView m_string;  // Non-owning view into m_owner.

  DISALLOW_COPY_AND_ASSIGN(StringBufferImpl);
};
+
+}  //  namespace v8_inspector
+
+#endif  // V8_INSPECTOR_STRINGUTIL_H_
diff --git a/src/inspector/v8-console-agent-impl.cc b/src/inspector/v8-console-agent-impl.cc
new file mode 100644
index 0000000..8eb883c
--- /dev/null
+++ b/src/inspector/v8-console-agent-impl.cc
@@ -0,0 +1,79 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/inspector/v8-console-agent-impl.h"
+
+#include "src/inspector/protocol/Protocol.h"
+#include "src/inspector/v8-console-message.h"
+#include "src/inspector/v8-inspector-impl.h"
+#include "src/inspector/v8-inspector-session-impl.h"
+#include "src/inspector/v8-stack-trace-impl.h"
+
+namespace v8_inspector {
+
namespace ConsoleAgentState {
// Session-state key recording whether the Console domain was enabled, so
// restore() can re-enable it after a frontend reconnect.
static const char consoleEnabled[] = "consoleEnabled";
}

V8ConsoleAgentImpl::V8ConsoleAgentImpl(
    V8InspectorSessionImpl* session, protocol::FrontendChannel* frontendChannel,
    protocol::DictionaryValue* state)
    : m_session(session),
      m_state(state),
      m_frontend(frontendChannel),
      m_enabled(false) {}

V8ConsoleAgentImpl::~V8ConsoleAgentImpl() {}
+
// Enables the Console domain: persists the flag, turns on stack capturing
// and replays all buffered console messages to the frontend.
// |errorString| is unused — enabling cannot fail.
void V8ConsoleAgentImpl::enable(ErrorString* errorString) {
  if (m_enabled) return;
  m_state->setBoolean(ConsoleAgentState::consoleEnabled, true);
  m_enabled = true;
  m_session->inspector()->enableStackCapturingIfNeeded();
  reportAllMessages();
}

// Disables the Console domain and releases the stack-capturing request.
void V8ConsoleAgentImpl::disable(ErrorString* errorString) {
  if (!m_enabled) return;
  m_session->inspector()->disableStackCapturingIfNeeded();
  m_state->setBoolean(ConsoleAgentState::consoleEnabled, false);
  m_enabled = false;
}
+
// Intentionally a no-op: message storage is managed elsewhere.
// NOTE(review): confirm clearing is handled by the owning storage.
void V8ConsoleAgentImpl::clearMessages(ErrorString* errorString) {}

// Re-enables the domain after a reconnect if the persisted session state
// says it was enabled before.
void V8ConsoleAgentImpl::restore() {
  if (!m_state->booleanProperty(ConsoleAgentState::consoleEnabled, false))
    return;
  ErrorString ignored;
  enable(&ignored);
}
+
// Forwards a newly stored console message to the frontend, but only while
// the domain is enabled.
void V8ConsoleAgentImpl::messageAdded(V8ConsoleMessage* message) {
  if (m_enabled) reportMessage(message, true);
}

bool V8ConsoleAgentImpl::enabled() { return m_enabled; }
+
// Replays every buffered console-origin message to the frontend.
// reportMessage() returns false when the message storage was destroyed
// while reporting; stop iterating in that case — presumably the container
// being iterated is gone too (TODO confirm).
void V8ConsoleAgentImpl::reportAllMessages() {
  V8ConsoleMessageStorage* storage =
      m_session->inspector()->ensureConsoleMessageStorage(
          m_session->contextGroupId());
  for (const auto& message : storage->messages()) {
    if (message->origin() == V8MessageOrigin::kConsole) {
      if (!reportMessage(message.get(), false)) return;
    }
  }
}
+
// Sends one console-origin message to the frontend and flushes.
// Returns whether the console message storage for this context group still
// exists afterwards (reporting can run arbitrary code).
bool V8ConsoleAgentImpl::reportMessage(V8ConsoleMessage* message,
                                       bool generatePreview) {
  DCHECK(message->origin() == V8MessageOrigin::kConsole);
  message->reportToFrontend(&m_frontend);
  m_frontend.flush();
  return m_session->inspector()->hasConsoleMessageStorage(
      m_session->contextGroupId());
}
+
+}  // namespace v8_inspector
diff --git a/src/inspector/v8-console-agent-impl.h b/src/inspector/v8-console-agent-impl.h
new file mode 100644
index 0000000..f3d598b
--- /dev/null
+++ b/src/inspector/v8-console-agent-impl.h
@@ -0,0 +1,48 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_INSPECTOR_V8CONSOLEAGENTIMPL_H_
+#define V8_INSPECTOR_V8CONSOLEAGENTIMPL_H_
+
+#include "src/base/macros.h"
+#include "src/inspector/protocol/Console.h"
+#include "src/inspector/protocol/Forward.h"
+
+namespace v8_inspector {
+
+class V8ConsoleMessage;
+class V8InspectorSessionImpl;
+
+using protocol::ErrorString;
+
// Backend for the inspector protocol's Console domain: tracks enablement
// per session and forwards stored console messages to the frontend.
class V8ConsoleAgentImpl : public protocol::Console::Backend {
 public:
  V8ConsoleAgentImpl(V8InspectorSessionImpl*, protocol::FrontendChannel*,
                     protocol::DictionaryValue* state);
  ~V8ConsoleAgentImpl() override;

  void enable(ErrorString*) override;
  void disable(ErrorString*) override;
  void clearMessages(ErrorString*) override;

  // Re-applies persisted enablement after a frontend reconnect.
  void restore();
  // Called when a new console message is stored for this context group.
  void messageAdded(V8ConsoleMessage*);
  // NOTE(review): declared but no definition appears in the accompanying
  // .cc shown — confirm it exists or remove the declaration.
  void reset();
  bool enabled();

 private:
  void reportAllMessages();
  bool reportMessage(V8ConsoleMessage*, bool generatePreview);

  V8InspectorSessionImpl* m_session;       // Owning session (not owned here).
  protocol::DictionaryValue* m_state;      // Persisted session state.
  protocol::Console::Frontend m_frontend;  // Channel to the frontend.
  bool m_enabled;

  DISALLOW_COPY_AND_ASSIGN(V8ConsoleAgentImpl);
};
+
+}  // namespace v8_inspector
+
+#endif  // V8_INSPECTOR_V8CONSOLEAGENTIMPL_H_
diff --git a/src/inspector/v8-console-message.cc b/src/inspector/v8-console-message.cc
new file mode 100644
index 0000000..63f1d49
--- /dev/null
+++ b/src/inspector/v8-console-message.cc
@@ -0,0 +1,485 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/inspector/v8-console-message.h"
+
+#include "src/inspector/inspected-context.h"
+#include "src/inspector/protocol/Protocol.h"
+#include "src/inspector/string-util.h"
+#include "src/inspector/v8-console-agent-impl.h"
+#include "src/inspector/v8-inspector-impl.h"
+#include "src/inspector/v8-inspector-session-impl.h"
+#include "src/inspector/v8-runtime-agent-impl.h"
+#include "src/inspector/v8-stack-trace-impl.h"
+
+#include "include/v8-inspector.h"
+
+namespace v8_inspector {
+
+namespace {
+
+String16 consoleAPITypeValue(ConsoleAPIType type) {
+  switch (type) {
+    case ConsoleAPIType::kLog:
+      return protocol::Runtime::ConsoleAPICalled::TypeEnum::Log;
+    case ConsoleAPIType::kDebug:
+      return protocol::Runtime::ConsoleAPICalled::TypeEnum::Debug;
+    case ConsoleAPIType::kInfo:
+      return protocol::Runtime::ConsoleAPICalled::TypeEnum::Info;
+    case ConsoleAPIType::kError:
+      return protocol::Runtime::ConsoleAPICalled::TypeEnum::Error;
+    case ConsoleAPIType::kWarning:
+      return protocol::Runtime::ConsoleAPICalled::TypeEnum::Warning;
+    case ConsoleAPIType::kClear:
+      return protocol::Runtime::ConsoleAPICalled::TypeEnum::Clear;
+    case ConsoleAPIType::kDir:
+      return protocol::Runtime::ConsoleAPICalled::TypeEnum::Dir;
+    case ConsoleAPIType::kDirXML:
+      return protocol::Runtime::ConsoleAPICalled::TypeEnum::Dirxml;
+    case ConsoleAPIType::kTable:
+      return protocol::Runtime::ConsoleAPICalled::TypeEnum::Table;
+    case ConsoleAPIType::kTrace:
+      return protocol::Runtime::ConsoleAPICalled::TypeEnum::Trace;
+    case ConsoleAPIType::kStartGroup:
+      return protocol::Runtime::ConsoleAPICalled::TypeEnum::StartGroup;
+    case ConsoleAPIType::kStartGroupCollapsed:
+      return protocol::Runtime::ConsoleAPICalled::TypeEnum::StartGroupCollapsed;
+    case ConsoleAPIType::kEndGroup:
+      return protocol::Runtime::ConsoleAPICalled::TypeEnum::EndGroup;
+    case ConsoleAPIType::kAssert:
+      return protocol::Runtime::ConsoleAPICalled::TypeEnum::Assert;
+    case ConsoleAPIType::kTimeEnd:
+      return protocol::Runtime::ConsoleAPICalled::TypeEnum::Debug;
+    case ConsoleAPIType::kCount:
+      return protocol::Runtime::ConsoleAPICalled::TypeEnum::Debug;
+  }
+  return protocol::Runtime::ConsoleAPICalled::TypeEnum::Log;
+}
+
+const unsigned maxConsoleMessageCount = 1000;
+const unsigned maxArrayItemsLimit = 10000;
+const unsigned maxStackDepthLimit = 32;
+
+class V8ValueStringBuilder {
+ public:
+  static String16 toString(v8::Local<v8::Value> value,
+                           v8::Local<v8::Context> context) {
+    V8ValueStringBuilder builder(context);
+    if (!builder.append(value)) return String16();
+    return builder.toString();
+  }
+
+ private:
+  enum {
+    IgnoreNull = 1 << 0,
+    IgnoreUndefined = 1 << 1,
+  };
+
+  explicit V8ValueStringBuilder(v8::Local<v8::Context> context)
+      : m_arrayLimit(maxArrayItemsLimit),
+        m_isolate(context->GetIsolate()),
+        m_tryCatch(context->GetIsolate()),
+        m_context(context) {}
+
+  bool append(v8::Local<v8::Value> value, unsigned ignoreOptions = 0) {
+    if (value.IsEmpty()) return true;
+    if ((ignoreOptions & IgnoreNull) && value->IsNull()) return true;
+    if ((ignoreOptions & IgnoreUndefined) && value->IsUndefined()) return true;
+    if (value->IsString()) return append(v8::Local<v8::String>::Cast(value));
+    if (value->IsStringObject())
+      return append(v8::Local<v8::StringObject>::Cast(value)->ValueOf());
+    if (value->IsSymbol()) return append(v8::Local<v8::Symbol>::Cast(value));
+    if (value->IsSymbolObject())
+      return append(v8::Local<v8::SymbolObject>::Cast(value)->ValueOf());
+    if (value->IsNumberObject()) {
+      m_builder.append(String16::fromDouble(
+          v8::Local<v8::NumberObject>::Cast(value)->ValueOf(), 6));
+      return true;
+    }
+    if (value->IsBooleanObject()) {
+      m_builder.append(v8::Local<v8::BooleanObject>::Cast(value)->ValueOf()
+                           ? "true"
+                           : "false");
+      return true;
+    }
+    if (value->IsArray()) return append(v8::Local<v8::Array>::Cast(value));
+    if (value->IsProxy()) {
+      m_builder.append("[object Proxy]");
+      return true;
+    }
+    if (value->IsObject() && !value->IsDate() && !value->IsFunction() &&
+        !value->IsNativeError() && !value->IsRegExp()) {
+      v8::Local<v8::Object> object = v8::Local<v8::Object>::Cast(value);
+      v8::Local<v8::String> stringValue;
+      if (object->ObjectProtoToString(m_isolate->GetCurrentContext())
+              .ToLocal(&stringValue))
+        return append(stringValue);
+    }
+    v8::Local<v8::String> stringValue;
+    if (!value->ToString(m_isolate->GetCurrentContext()).ToLocal(&stringValue))
+      return false;
+    return append(stringValue);
+  }
+
+  bool append(v8::Local<v8::Array> array) {
+    for (const auto& it : m_visitedArrays) {
+      if (it == array) return true;
+    }
+    uint32_t length = array->Length();
+    if (length > m_arrayLimit) return false;
+    if (m_visitedArrays.size() > maxStackDepthLimit) return false;
+
+    bool result = true;
+    m_arrayLimit -= length;
+    m_visitedArrays.push_back(array);
+    for (uint32_t i = 0; i < length; ++i) {
+      if (i) m_builder.append(',');
+      v8::Local<v8::Value> value;
+      if (!array->Get(m_context, i).ToLocal(&value)) continue;
+      if (!append(value, IgnoreNull | IgnoreUndefined)) {
+        result = false;
+        break;
+      }
+    }
+    m_visitedArrays.pop_back();
+    return result;
+  }
+
+  bool append(v8::Local<v8::Symbol> symbol) {
+    m_builder.append("Symbol(");
+    bool result = append(symbol->Name(), IgnoreUndefined);
+    m_builder.append(')');
+    return result;
+  }
+
+  bool append(v8::Local<v8::String> string) {
+    if (m_tryCatch.HasCaught()) return false;
+    if (!string.IsEmpty()) m_builder.append(toProtocolString(string));
+    return true;
+  }
+
+  String16 toString() {
+    if (m_tryCatch.HasCaught()) return String16();
+    return m_builder.toString();
+  }
+
+  uint32_t m_arrayLimit;
+  v8::Isolate* m_isolate;
+  String16Builder m_builder;
+  std::vector<v8::Local<v8::Array>> m_visitedArrays;
+  v8::TryCatch m_tryCatch;
+  v8::Local<v8::Context> m_context;
+};
+
+}  // namespace
+
+V8ConsoleMessage::V8ConsoleMessage(V8MessageOrigin origin, double timestamp,
+                                   const String16& message)
+    : m_origin(origin),
+      m_timestamp(timestamp),
+      m_message(message),
+      m_lineNumber(0),
+      m_columnNumber(0),
+      m_scriptId(0),
+      m_contextId(0),
+      m_type(ConsoleAPIType::kLog),
+      m_exceptionId(0),
+      m_revokedExceptionId(0) {}
+
+V8ConsoleMessage::~V8ConsoleMessage() {}
+
+void V8ConsoleMessage::setLocation(const String16& url, unsigned lineNumber,
+                                   unsigned columnNumber,
+                                   std::unique_ptr<V8StackTraceImpl> stackTrace,
+                                   int scriptId) {
+  m_url = url;
+  m_lineNumber = lineNumber;
+  m_columnNumber = columnNumber;
+  m_stackTrace = std::move(stackTrace);
+  m_scriptId = scriptId;
+}
+
+void V8ConsoleMessage::reportToFrontend(
+    protocol::Console::Frontend* frontend) const {
+  DCHECK(m_origin == V8MessageOrigin::kConsole);
+  String16 level = protocol::Console::ConsoleMessage::LevelEnum::Log;
+  if (m_type == ConsoleAPIType::kDebug || m_type == ConsoleAPIType::kCount ||
+      m_type == ConsoleAPIType::kTimeEnd)
+    level = protocol::Console::ConsoleMessage::LevelEnum::Debug;
+  else if (m_type == ConsoleAPIType::kError ||
+           m_type == ConsoleAPIType::kAssert)
+    level = protocol::Console::ConsoleMessage::LevelEnum::Error;
+  else if (m_type == ConsoleAPIType::kWarning)
+    level = protocol::Console::ConsoleMessage::LevelEnum::Warning;
+  else if (m_type == ConsoleAPIType::kInfo)
+    level = protocol::Console::ConsoleMessage::LevelEnum::Info;
+  std::unique_ptr<protocol::Console::ConsoleMessage> result =
+      protocol::Console::ConsoleMessage::create()
+          .setSource(protocol::Console::ConsoleMessage::SourceEnum::ConsoleApi)
+          .setLevel(level)
+          .setText(m_message)
+          .build();
+  result->setLine(static_cast<int>(m_lineNumber));
+  result->setColumn(static_cast<int>(m_columnNumber));
+  result->setUrl(m_url);
+  frontend->messageAdded(std::move(result));
+}
+
+std::unique_ptr<protocol::Array<protocol::Runtime::RemoteObject>>
+V8ConsoleMessage::wrapArguments(V8InspectorSessionImpl* session,
+                                bool generatePreview) const {
+  V8InspectorImpl* inspector = session->inspector();
+  int contextGroupId = session->contextGroupId();
+  int contextId = m_contextId;
+  if (!m_arguments.size() || !contextId) return nullptr;
+  InspectedContext* inspectedContext =
+      inspector->getContext(contextGroupId, contextId);
+  if (!inspectedContext) return nullptr;
+
+  v8::Isolate* isolate = inspectedContext->isolate();
+  v8::HandleScope handles(isolate);
+  v8::Local<v8::Context> context = inspectedContext->context();
+
+  std::unique_ptr<protocol::Array<protocol::Runtime::RemoteObject>> args =
+      protocol::Array<protocol::Runtime::RemoteObject>::create();
+  if (m_type == ConsoleAPIType::kTable && generatePreview) {
+    v8::Local<v8::Value> table = m_arguments[0]->Get(isolate);
+    v8::Local<v8::Value> columns = m_arguments.size() > 1
+                                       ? m_arguments[1]->Get(isolate)
+                                       : v8::Local<v8::Value>();
+    std::unique_ptr<protocol::Runtime::RemoteObject> wrapped =
+        session->wrapTable(context, table, columns);
+    inspectedContext = inspector->getContext(contextGroupId, contextId);
+    if (!inspectedContext) return nullptr;
+    if (wrapped)
+      args->addItem(std::move(wrapped));
+    else
+      args = nullptr;
+  } else {
+    for (size_t i = 0; i < m_arguments.size(); ++i) {
+      std::unique_ptr<protocol::Runtime::RemoteObject> wrapped =
+          session->wrapObject(context, m_arguments[i]->Get(isolate), "console",
+                              generatePreview);
+      inspectedContext = inspector->getContext(contextGroupId, contextId);
+      if (!inspectedContext) return nullptr;
+      if (!wrapped) {
+        args = nullptr;
+        break;
+      }
+      args->addItem(std::move(wrapped));
+    }
+  }
+  return args;
+}
+
+void V8ConsoleMessage::reportToFrontend(protocol::Runtime::Frontend* frontend,
+                                        V8InspectorSessionImpl* session,
+                                        bool generatePreview) const {
+  int contextGroupId = session->contextGroupId();
+  V8InspectorImpl* inspector = session->inspector();
+
+  if (m_origin == V8MessageOrigin::kException) {
+    std::unique_ptr<protocol::Runtime::RemoteObject> exception =
+        wrapException(session, generatePreview);
+    if (!inspector->hasConsoleMessageStorage(contextGroupId)) return;
+    std::unique_ptr<protocol::Runtime::ExceptionDetails> exceptionDetails =
+        protocol::Runtime::ExceptionDetails::create()
+            .setExceptionId(m_exceptionId)
+            .setText(exception ? m_message : m_detailedMessage)
+            .setLineNumber(m_lineNumber ? m_lineNumber - 1 : 0)
+            .setColumnNumber(m_columnNumber ? m_columnNumber - 1 : 0)
+            .build();
+    if (m_scriptId)
+      exceptionDetails->setScriptId(String16::fromInteger(m_scriptId));
+    if (!m_url.isEmpty()) exceptionDetails->setUrl(m_url);
+    if (m_stackTrace)
+      exceptionDetails->setStackTrace(m_stackTrace->buildInspectorObjectImpl());
+    if (m_contextId) exceptionDetails->setExecutionContextId(m_contextId);
+    if (exception) exceptionDetails->setException(std::move(exception));
+    frontend->exceptionThrown(m_timestamp, std::move(exceptionDetails));
+    return;
+  }
+  if (m_origin == V8MessageOrigin::kRevokedException) {
+    frontend->exceptionRevoked(m_message, m_revokedExceptionId);
+    return;
+  }
+  if (m_origin == V8MessageOrigin::kConsole) {
+    std::unique_ptr<protocol::Array<protocol::Runtime::RemoteObject>>
+        arguments = wrapArguments(session, generatePreview);
+    if (!inspector->hasConsoleMessageStorage(contextGroupId)) return;
+    if (!arguments) {
+      arguments = protocol::Array<protocol::Runtime::RemoteObject>::create();
+      if (!m_message.isEmpty()) {
+        std::unique_ptr<protocol::Runtime::RemoteObject> messageArg =
+            protocol::Runtime::RemoteObject::create()
+                .setType(protocol::Runtime::RemoteObject::TypeEnum::String)
+                .build();
+        messageArg->setValue(protocol::StringValue::create(m_message));
+        arguments->addItem(std::move(messageArg));
+      }
+    }
+    frontend->consoleAPICalled(
+        consoleAPITypeValue(m_type), std::move(arguments), m_contextId,
+        m_timestamp,
+        m_stackTrace ? m_stackTrace->buildInspectorObjectImpl() : nullptr);
+    return;
+  }
+  UNREACHABLE();
+}
+
+std::unique_ptr<protocol::Runtime::RemoteObject>
+V8ConsoleMessage::wrapException(V8InspectorSessionImpl* session,
+                                bool generatePreview) const {
+  if (!m_arguments.size() || !m_contextId) return nullptr;
+  DCHECK_EQ(1u, m_arguments.size());
+  InspectedContext* inspectedContext =
+      session->inspector()->getContext(session->contextGroupId(), m_contextId);
+  if (!inspectedContext) return nullptr;
+
+  v8::Isolate* isolate = inspectedContext->isolate();
+  v8::HandleScope handles(isolate);
+  // TODO(dgozman): should we use different object group?
+  return session->wrapObject(inspectedContext->context(),
+                             m_arguments[0]->Get(isolate), "console",
+                             generatePreview);
+}
+
+V8MessageOrigin V8ConsoleMessage::origin() const { return m_origin; }
+
+ConsoleAPIType V8ConsoleMessage::type() const { return m_type; }
+
+// static
+std::unique_ptr<V8ConsoleMessage> V8ConsoleMessage::createForConsoleAPI(
+    double timestamp, ConsoleAPIType type,
+    const std::vector<v8::Local<v8::Value>>& arguments,
+    std::unique_ptr<V8StackTraceImpl> stackTrace,
+    InspectedContext* inspectedContext) {
+  v8::Isolate* isolate = inspectedContext->isolate();
+  int contextId = inspectedContext->contextId();
+  int contextGroupId = inspectedContext->contextGroupId();
+  V8InspectorImpl* inspector = inspectedContext->inspector();
+  v8::Local<v8::Context> context = inspectedContext->context();
+
+  std::unique_ptr<V8ConsoleMessage> message = wrapUnique(
+      new V8ConsoleMessage(V8MessageOrigin::kConsole, timestamp, String16()));
+  if (stackTrace && !stackTrace->isEmpty()) {
+    message->m_url = toString16(stackTrace->topSourceURL());
+    message->m_lineNumber = stackTrace->topLineNumber();
+    message->m_columnNumber = stackTrace->topColumnNumber();
+  }
+  message->m_stackTrace = std::move(stackTrace);
+  message->m_type = type;
+  message->m_contextId = contextId;
+  for (size_t i = 0; i < arguments.size(); ++i)
+    message->m_arguments.push_back(
+        wrapUnique(new v8::Global<v8::Value>(isolate, arguments.at(i))));
+  if (arguments.size())
+    message->m_message = V8ValueStringBuilder::toString(arguments[0], context);
+
+  V8ConsoleAPIType clientType = V8ConsoleAPIType::kLog;
+  if (type == ConsoleAPIType::kDebug || type == ConsoleAPIType::kCount ||
+      type == ConsoleAPIType::kTimeEnd)
+    clientType = V8ConsoleAPIType::kDebug;
+  else if (type == ConsoleAPIType::kError || type == ConsoleAPIType::kAssert)
+    clientType = V8ConsoleAPIType::kError;
+  else if (type == ConsoleAPIType::kWarning)
+    clientType = V8ConsoleAPIType::kWarning;
+  else if (type == ConsoleAPIType::kInfo)
+    clientType = V8ConsoleAPIType::kInfo;
+  else if (type == ConsoleAPIType::kClear)
+    clientType = V8ConsoleAPIType::kClear;
+  inspector->client()->consoleAPIMessage(
+      contextGroupId, clientType, toStringView(message->m_message),
+      toStringView(message->m_url), message->m_lineNumber,
+      message->m_columnNumber, message->m_stackTrace.get());
+
+  return message;
+}
+
+// static
+std::unique_ptr<V8ConsoleMessage> V8ConsoleMessage::createForException(
+    double timestamp, const String16& detailedMessage, const String16& url,
+    unsigned lineNumber, unsigned columnNumber,
+    std::unique_ptr<V8StackTraceImpl> stackTrace, int scriptId,
+    v8::Isolate* isolate, const String16& message, int contextId,
+    v8::Local<v8::Value> exception, unsigned exceptionId) {
+  std::unique_ptr<V8ConsoleMessage> consoleMessage = wrapUnique(
+      new V8ConsoleMessage(V8MessageOrigin::kException, timestamp, message));
+  consoleMessage->setLocation(url, lineNumber, columnNumber,
+                              std::move(stackTrace), scriptId);
+  consoleMessage->m_exceptionId = exceptionId;
+  consoleMessage->m_detailedMessage = detailedMessage;
+  if (contextId && !exception.IsEmpty()) {
+    consoleMessage->m_contextId = contextId;
+    consoleMessage->m_arguments.push_back(
+        wrapUnique(new v8::Global<v8::Value>(isolate, exception)));
+  }
+  return consoleMessage;
+}
+
+// static
+std::unique_ptr<V8ConsoleMessage> V8ConsoleMessage::createForRevokedException(
+    double timestamp, const String16& messageText,
+    unsigned revokedExceptionId) {
+  std::unique_ptr<V8ConsoleMessage> message = wrapUnique(new V8ConsoleMessage(
+      V8MessageOrigin::kRevokedException, timestamp, messageText));
+  message->m_revokedExceptionId = revokedExceptionId;
+  return message;
+}
+
+void V8ConsoleMessage::contextDestroyed(int contextId) {
+  if (contextId != m_contextId) return;
+  m_contextId = 0;
+  if (m_message.isEmpty()) m_message = "<message collected>";
+  Arguments empty;
+  m_arguments.swap(empty);
+}
+
+// ------------------------ V8ConsoleMessageStorage ----------------------------
+
+V8ConsoleMessageStorage::V8ConsoleMessageStorage(V8InspectorImpl* inspector,
+                                                 int contextGroupId)
+    : m_inspector(inspector),
+      m_contextGroupId(contextGroupId),
+      m_expiredCount(0) {}
+
+V8ConsoleMessageStorage::~V8ConsoleMessageStorage() { clear(); }
+
+void V8ConsoleMessageStorage::addMessage(
+    std::unique_ptr<V8ConsoleMessage> message) {
+  int contextGroupId = m_contextGroupId;
+  V8InspectorImpl* inspector = m_inspector;
+  if (message->type() == ConsoleAPIType::kClear) clear();
+
+  V8InspectorSessionImpl* session =
+      inspector->sessionForContextGroup(contextGroupId);
+  if (session) {
+    if (message->origin() == V8MessageOrigin::kConsole)
+      session->consoleAgent()->messageAdded(message.get());
+    session->runtimeAgent()->messageAdded(message.get());
+  }
+  if (!inspector->hasConsoleMessageStorage(contextGroupId)) return;
+
+  DCHECK(m_messages.size() <= maxConsoleMessageCount);
+  if (m_messages.size() == maxConsoleMessageCount) {
+    ++m_expiredCount;
+    m_messages.pop_front();
+  }
+  m_messages.push_back(std::move(message));
+}
+
+void V8ConsoleMessageStorage::clear() {
+  m_messages.clear();
+  m_expiredCount = 0;
+  if (V8InspectorSessionImpl* session =
+          m_inspector->sessionForContextGroup(m_contextGroupId))
+    session->releaseObjectGroup("console");
+}
+
+void V8ConsoleMessageStorage::contextDestroyed(int contextId) {
+  for (size_t i = 0; i < m_messages.size(); ++i)
+    m_messages[i]->contextDestroyed(contextId);
+}
+
+}  // namespace v8_inspector
diff --git a/src/inspector/v8-console-message.h b/src/inspector/v8-console-message.h
new file mode 100644
index 0000000..a6e9eaf
--- /dev/null
+++ b/src/inspector/v8-console-message.h
@@ -0,0 +1,120 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_INSPECTOR_V8CONSOLEMESSAGE_H_
+#define V8_INSPECTOR_V8CONSOLEMESSAGE_H_
+
+#include <deque>
+#include "include/v8.h"
+#include "src/inspector/protocol/Console.h"
+#include "src/inspector/protocol/Forward.h"
+#include "src/inspector/protocol/Runtime.h"
+
+namespace v8_inspector {
+
+class InspectedContext;
+class V8InspectorImpl;
+class V8InspectorSessionImpl;
+class V8StackTraceImpl;
+
+enum class V8MessageOrigin { kConsole, kException, kRevokedException };
+
+enum class ConsoleAPIType {
+  kLog,
+  kDebug,
+  kInfo,
+  kError,
+  kWarning,
+  kDir,
+  kDirXML,
+  kTable,
+  kTrace,
+  kStartGroup,
+  kStartGroupCollapsed,
+  kEndGroup,
+  kClear,
+  kAssert,
+  kTimeEnd,
+  kCount
+};
+
+class V8ConsoleMessage {
+ public:
+  ~V8ConsoleMessage();
+
+  static std::unique_ptr<V8ConsoleMessage> createForConsoleAPI(
+      double timestamp, ConsoleAPIType,
+      const std::vector<v8::Local<v8::Value>>& arguments,
+      std::unique_ptr<V8StackTraceImpl>, InspectedContext*);
+
+  static std::unique_ptr<V8ConsoleMessage> createForException(
+      double timestamp, const String16& detailedMessage, const String16& url,
+      unsigned lineNumber, unsigned columnNumber,
+      std::unique_ptr<V8StackTraceImpl>, int scriptId, v8::Isolate*,
+      const String16& message, int contextId, v8::Local<v8::Value> exception,
+      unsigned exceptionId);
+
+  static std::unique_ptr<V8ConsoleMessage> createForRevokedException(
+      double timestamp, const String16& message, unsigned revokedExceptionId);
+
+  V8MessageOrigin origin() const;
+  void reportToFrontend(protocol::Console::Frontend*) const;
+  void reportToFrontend(protocol::Runtime::Frontend*, V8InspectorSessionImpl*,
+                        bool generatePreview) const;
+  ConsoleAPIType type() const;
+  void contextDestroyed(int contextId);
+
+ private:
+  V8ConsoleMessage(V8MessageOrigin, double timestamp, const String16& message);
+
+  using Arguments = std::vector<std::unique_ptr<v8::Global<v8::Value>>>;
+  std::unique_ptr<protocol::Array<protocol::Runtime::RemoteObject>>
+  wrapArguments(V8InspectorSessionImpl*, bool generatePreview) const;
+  std::unique_ptr<protocol::Runtime::RemoteObject> wrapException(
+      V8InspectorSessionImpl*, bool generatePreview) const;
+  void setLocation(const String16& url, unsigned lineNumber,
+                   unsigned columnNumber, std::unique_ptr<V8StackTraceImpl>,
+                   int scriptId);
+
+  V8MessageOrigin m_origin;
+  double m_timestamp;
+  String16 m_message;
+  String16 m_url;
+  unsigned m_lineNumber;
+  unsigned m_columnNumber;
+  std::unique_ptr<V8StackTraceImpl> m_stackTrace;
+  int m_scriptId;
+  int m_contextId;
+  ConsoleAPIType m_type;
+  unsigned m_exceptionId;
+  unsigned m_revokedExceptionId;
+  Arguments m_arguments;
+  String16 m_detailedMessage;
+};
+
+class V8ConsoleMessageStorage {
+ public:
+  V8ConsoleMessageStorage(V8InspectorImpl*, int contextGroupId);
+  ~V8ConsoleMessageStorage();
+
+  int contextGroupId() { return m_contextGroupId; }
+  int expiredCount() { return m_expiredCount; }
+  const std::deque<std::unique_ptr<V8ConsoleMessage>>& messages() const {
+    return m_messages;
+  }
+
+  void addMessage(std::unique_ptr<V8ConsoleMessage>);
+  void contextDestroyed(int contextId);
+  void clear();
+
+ private:
+  V8InspectorImpl* m_inspector;
+  int m_contextGroupId;
+  int m_expiredCount;
+  std::deque<std::unique_ptr<V8ConsoleMessage>> m_messages;
+};
+
+}  // namespace v8_inspector
+
+#endif  // V8_INSPECTOR_V8CONSOLEMESSAGE_H_
diff --git a/src/inspector/v8-console.cc b/src/inspector/v8-console.cc
new file mode 100644
index 0000000..ddd4bf6
--- /dev/null
+++ b/src/inspector/v8-console.cc
@@ -0,0 +1,922 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/inspector/v8-console.h"
+
+#include "src/base/macros.h"
+#include "src/inspector/injected-script.h"
+#include "src/inspector/inspected-context.h"
+#include "src/inspector/string-util.h"
+#include "src/inspector/v8-console-message.h"
+#include "src/inspector/v8-debugger-agent-impl.h"
+#include "src/inspector/v8-inspector-impl.h"
+#include "src/inspector/v8-inspector-session-impl.h"
+#include "src/inspector/v8-profiler-agent-impl.h"
+#include "src/inspector/v8-runtime-agent-impl.h"
+#include "src/inspector/v8-stack-trace-impl.h"
+#include "src/inspector/v8-value-copier.h"
+
+#include "include/v8-inspector.h"
+
+namespace v8_inspector {
+
+namespace {
+
+v8::Local<v8::Private> inspectedContextPrivateKey(v8::Isolate* isolate) {
+  return v8::Private::ForApi(
+      isolate, toV8StringInternalized(isolate, "V8Console#InspectedContext"));
+}
+
+class ConsoleHelper {
+ public:
+  explicit ConsoleHelper(const v8::FunctionCallbackInfo<v8::Value>& info)
+      : m_info(info),
+        m_isolate(info.GetIsolate()),
+        m_context(info.GetIsolate()->GetCurrentContext()),
+        m_inspectedContext(nullptr),
+        m_inspectorClient(nullptr) {}
+
+  v8::Local<v8::Object> ensureConsole() {
+    if (m_console.IsEmpty()) {
+      DCHECK(!m_info.Data().IsEmpty());
+      DCHECK(!m_info.Data()->IsUndefined());
+      m_console = m_info.Data().As<v8::Object>();
+    }
+    return m_console;
+  }
+
+  InspectedContext* ensureInspectedContext() {
+    if (m_inspectedContext) return m_inspectedContext;
+    v8::Local<v8::Object> console = ensureConsole();
+
+    v8::Local<v8::Private> key = inspectedContextPrivateKey(m_isolate);
+    v8::Local<v8::Value> inspectedContextValue;
+    if (!console->GetPrivate(m_context, key).ToLocal(&inspectedContextValue))
+      return nullptr;
+    DCHECK(inspectedContextValue->IsExternal());
+    m_inspectedContext = static_cast<InspectedContext*>(
+        inspectedContextValue.As<v8::External>()->Value());
+    return m_inspectedContext;
+  }
+
+  V8InspectorClient* ensureDebuggerClient() {
+    if (m_inspectorClient) return m_inspectorClient;
+    InspectedContext* inspectedContext = ensureInspectedContext();
+    if (!inspectedContext) return nullptr;
+    m_inspectorClient = inspectedContext->inspector()->client();
+    return m_inspectorClient;
+  }
+
+  void reportCall(ConsoleAPIType type) {
+    if (!m_info.Length()) return;
+    std::vector<v8::Local<v8::Value>> arguments;
+    for (int i = 0; i < m_info.Length(); ++i) arguments.push_back(m_info[i]);
+    reportCall(type, arguments);
+  }
+
+  void reportCallWithDefaultArgument(ConsoleAPIType type,
+                                     const String16& message) {
+    std::vector<v8::Local<v8::Value>> arguments;
+    for (int i = 0; i < m_info.Length(); ++i) arguments.push_back(m_info[i]);
+    if (!m_info.Length()) arguments.push_back(toV8String(m_isolate, message));
+    reportCall(type, arguments);
+  }
+
+  void reportCallWithArgument(ConsoleAPIType type, const String16& message) {
+    std::vector<v8::Local<v8::Value>> arguments(1,
+                                                toV8String(m_isolate, message));
+    reportCall(type, arguments);
+  }
+
+  void reportCall(ConsoleAPIType type,
+                  const std::vector<v8::Local<v8::Value>>& arguments) {
+    InspectedContext* inspectedContext = ensureInspectedContext();
+    if (!inspectedContext) return;
+    int contextGroupId = inspectedContext->contextGroupId();
+    V8InspectorImpl* inspector = inspectedContext->inspector();
+    std::unique_ptr<V8ConsoleMessage> message =
+        V8ConsoleMessage::createForConsoleAPI(
+            inspector->client()->currentTimeMS(), type, arguments,
+            inspector->debugger()->captureStackTrace(false), inspectedContext);
+    inspector->ensureConsoleMessageStorage(contextGroupId)
+        ->addMessage(std::move(message));
+  }
+
+  void reportDeprecatedCall(const char* id, const String16& message) {
+    if (checkAndSetPrivateFlagOnConsole(id, false)) return;
+    std::vector<v8::Local<v8::Value>> arguments(1,
+                                                toV8String(m_isolate, message));
+    reportCall(ConsoleAPIType::kWarning, arguments);
+  }
+
+  bool firstArgToBoolean(bool defaultValue) {
+    if (m_info.Length() < 1) return defaultValue;
+    if (m_info[0]->IsBoolean()) return m_info[0].As<v8::Boolean>()->Value();
+    return m_info[0]->BooleanValue(m_context).FromMaybe(defaultValue);
+  }
+
+  String16 firstArgToString(const String16& defaultValue) {
+    if (m_info.Length() < 1) return defaultValue;
+    v8::Local<v8::String> titleValue;
+    if (m_info[0]->IsObject()) {
+      if (!m_info[0].As<v8::Object>()->ObjectProtoToString(m_context).ToLocal(
+              &titleValue))
+        return defaultValue;
+    } else {
+      if (!m_info[0]->ToString(m_context).ToLocal(&titleValue))
+        return defaultValue;
+    }
+    return toProtocolString(titleValue);
+  }
+
+  v8::MaybeLocal<v8::Object> firstArgAsObject() {
+    if (m_info.Length() < 1 || !m_info[0]->IsObject())
+      return v8::MaybeLocal<v8::Object>();
+    return m_info[0].As<v8::Object>();
+  }
+
+  v8::MaybeLocal<v8::Function> firstArgAsFunction() {
+    if (m_info.Length() < 1 || !m_info[0]->IsFunction())
+      return v8::MaybeLocal<v8::Function>();
+    v8::Local<v8::Function> func = m_info[0].As<v8::Function>();
+    while (func->GetBoundFunction()->IsFunction())
+      func = func->GetBoundFunction().As<v8::Function>();
+    return func;
+  }
+
+  v8::MaybeLocal<v8::Map> privateMap(const char* name) {
+    v8::Local<v8::Object> console = ensureConsole();
+    v8::Local<v8::Private> privateKey =
+        v8::Private::ForApi(m_isolate, toV8StringInternalized(m_isolate, name));
+    v8::Local<v8::Value> mapValue;
+    if (!console->GetPrivate(m_context, privateKey).ToLocal(&mapValue))
+      return v8::MaybeLocal<v8::Map>();
+    if (mapValue->IsUndefined()) {
+      v8::Local<v8::Map> map = v8::Map::New(m_isolate);
+      if (!console->SetPrivate(m_context, privateKey, map).FromMaybe(false))
+        return v8::MaybeLocal<v8::Map>();
+      return map;
+    }
+    return mapValue->IsMap() ? mapValue.As<v8::Map>()
+                             : v8::MaybeLocal<v8::Map>();
+  }
+
+  int32_t getIntFromMap(v8::Local<v8::Map> map, const String16& key,
+                        int32_t defaultValue) {
+    v8::Local<v8::String> v8Key = toV8String(m_isolate, key);
+    if (!map->Has(m_context, v8Key).FromMaybe(false)) return defaultValue;
+    v8::Local<v8::Value> intValue;
+    if (!map->Get(m_context, v8Key).ToLocal(&intValue)) return defaultValue;
+    return static_cast<int32_t>(intValue.As<v8::Integer>()->Value());
+  }
+
+  void setIntOnMap(v8::Local<v8::Map> map, const String16& key, int32_t value) {
+    v8::Local<v8::String> v8Key = toV8String(m_isolate, key);
+    if (!map->Set(m_context, v8Key, v8::Integer::New(m_isolate, value))
+             .ToLocal(&map))
+      return;
+  }
+
+  double getDoubleFromMap(v8::Local<v8::Map> map, const String16& key,
+                          double defaultValue) {
+    v8::Local<v8::String> v8Key = toV8String(m_isolate, key);
+    if (!map->Has(m_context, v8Key).FromMaybe(false)) return defaultValue;
+    v8::Local<v8::Value> intValue;
+    if (!map->Get(m_context, v8Key).ToLocal(&intValue)) return defaultValue;
+    return intValue.As<v8::Number>()->Value();
+  }
+
+  void setDoubleOnMap(v8::Local<v8::Map> map, const String16& key,
+                      double value) {
+    v8::Local<v8::String> v8Key = toV8String(m_isolate, key);
+    if (!map->Set(m_context, v8Key, v8::Number::New(m_isolate, value))
+             .ToLocal(&map))
+      return;
+  }
+
+  V8ProfilerAgentImpl* profilerAgent() {
+    if (V8InspectorSessionImpl* session = currentSession()) {
+      if (session && session->profilerAgent()->enabled())
+        return session->profilerAgent();
+    }
+    return nullptr;
+  }
+
+  // Returns the current session's debugger agent, or nullptr when there is
+  // no session or the agent is not enabled.
+  V8DebuggerAgentImpl* debuggerAgent() {
+    if (V8InspectorSessionImpl* session = currentSession()) {
+      // NOTE(review): the inner |session &&| check is redundant — the
+      // if-initializer above already guarantees |session| is non-null.
+      if (session && session->debuggerAgent()->enabled())
+        return session->debuggerAgent();
+    }
+    return nullptr;
+  }
+
+  // Looks up the inspector session attached to the inspected context's
+  // context group; nullptr when no context is being inspected.
+  V8InspectorSessionImpl* currentSession() {
+    InspectedContext* inspectedContext = ensureInspectedContext();
+    if (!inspectedContext) return nullptr;
+    return inspectedContext->inspector()->sessionForContextGroup(
+        inspectedContext->contextGroupId());
+  }
+
+ private:
+  // Callback arguments for the console call being serviced.
+  const v8::FunctionCallbackInfo<v8::Value>& m_info;
+  v8::Isolate* m_isolate;
+  v8::Local<v8::Context> m_context;
+  // Lazily-resolved console object (see ensureConsole); may start empty.
+  v8::Local<v8::Object> m_console;
+  // Lazily-resolved inspected context / client; presumably cached by the
+  // ensure*() helpers — confirm against the constructor (outside this view).
+  InspectedContext* m_inspectedContext;
+  V8InspectorClient* m_inspectorClient;
+  // One-shot flag stored as a private property on the console object.
+  // Returns true if the flag named |name| was already set; otherwise sets
+  // it to true and returns false. On any V8 failure returns |defaultValue|.
+  // Used to report deprecation warnings only once per console object.
+  bool checkAndSetPrivateFlagOnConsole(const char* name, bool defaultValue) {
+    v8::Local<v8::Object> console = ensureConsole();
+    v8::Local<v8::Private> key =
+        v8::Private::ForApi(m_isolate, toV8StringInternalized(m_isolate, name));
+    v8::Local<v8::Value> flagValue;
+    if (!console->GetPrivate(m_context, key).ToLocal(&flagValue))
+      return defaultValue;
+    // The flag is only ever absent (undefined) or true.
+    DCHECK(flagValue->IsUndefined() || flagValue->IsBoolean());
+    if (flagValue->IsBoolean()) {
+      DCHECK(flagValue.As<v8::Boolean>()->Value());
+      return true;
+    }
+    // First time: record the flag so subsequent calls return true.
+    if (!console->SetPrivate(m_context, key, v8::True(m_isolate))
+             .FromMaybe(false))
+      return defaultValue;
+    return false;
+  }
+
+  DISALLOW_COPY_AND_ASSIGN(ConsoleHelper);
+};
+
+// Returns the function's bound data verbatim. Used below as a custom
+// "toString" whose data is the human-readable description string.
+void returnDataCallback(const v8::FunctionCallbackInfo<v8::Value>& info) {
+  info.GetReturnValue().Set(info.Data());
+}
+
+// Installs a native function named |name| on |console|, with |console|
+// bound as the function's data. If |description| is given, a custom
+// toString returning that text is attached so the function pretty-prints
+// (e.g. "function dir(value) { [Command Line API] }").
+void createBoundFunctionProperty(v8::Local<v8::Context> context,
+                                 v8::Local<v8::Object> console,
+                                 const char* name,
+                                 v8::FunctionCallback callback,
+                                 const char* description = nullptr) {
+  v8::Local<v8::String> funcName =
+      toV8StringInternalized(context->GetIsolate(), name);
+  v8::Local<v8::Function> func;
+  // kThrow: calling these with |new| throws instead of constructing.
+  if (!v8::Function::New(context, callback, console, 0,
+                         v8::ConstructorBehavior::kThrow)
+           .ToLocal(&func))
+    return;
+  func->SetName(funcName);
+  if (description) {
+    v8::Local<v8::String> returnValue =
+        toV8String(context->GetIsolate(), description);
+    v8::Local<v8::Function> toStringFunction;
+    if (v8::Function::New(context, returnDataCallback, returnValue, 0,
+                          v8::ConstructorBehavior::kThrow)
+            .ToLocal(&toStringFunction))
+      createDataProperty(context, func, toV8StringInternalized(
+                                            context->GetIsolate(), "toString"),
+                         toStringFunction);
+  }
+  createDataProperty(context, console, funcName, func);
+}
+
+}  // namespace
+
+// The following console.* entry points forward straight to ConsoleHelper,
+// which reports the call with the matching ConsoleAPIType. The group/trace
+// variants supply a default message used when no arguments were passed.
+void V8Console::debugCallback(const v8::FunctionCallbackInfo<v8::Value>& info) {
+  ConsoleHelper(info).reportCall(ConsoleAPIType::kDebug);
+}
+
+void V8Console::errorCallback(const v8::FunctionCallbackInfo<v8::Value>& info) {
+  ConsoleHelper(info).reportCall(ConsoleAPIType::kError);
+}
+
+void V8Console::infoCallback(const v8::FunctionCallbackInfo<v8::Value>& info) {
+  ConsoleHelper(info).reportCall(ConsoleAPIType::kInfo);
+}
+
+void V8Console::logCallback(const v8::FunctionCallbackInfo<v8::Value>& info) {
+  ConsoleHelper(info).reportCall(ConsoleAPIType::kLog);
+}
+
+void V8Console::warnCallback(const v8::FunctionCallbackInfo<v8::Value>& info) {
+  ConsoleHelper(info).reportCall(ConsoleAPIType::kWarning);
+}
+
+void V8Console::dirCallback(const v8::FunctionCallbackInfo<v8::Value>& info) {
+  ConsoleHelper(info).reportCall(ConsoleAPIType::kDir);
+}
+
+void V8Console::dirxmlCallback(
+    const v8::FunctionCallbackInfo<v8::Value>& info) {
+  ConsoleHelper(info).reportCall(ConsoleAPIType::kDirXML);
+}
+
+void V8Console::tableCallback(const v8::FunctionCallbackInfo<v8::Value>& info) {
+  ConsoleHelper(info).reportCall(ConsoleAPIType::kTable);
+}
+
+void V8Console::traceCallback(const v8::FunctionCallbackInfo<v8::Value>& info) {
+  ConsoleHelper(info).reportCallWithDefaultArgument(ConsoleAPIType::kTrace,
+                                                    String16("console.trace"));
+}
+
+void V8Console::groupCallback(const v8::FunctionCallbackInfo<v8::Value>& info) {
+  ConsoleHelper(info).reportCallWithDefaultArgument(ConsoleAPIType::kStartGroup,
+                                                    String16("console.group"));
+}
+
+void V8Console::groupCollapsedCallback(
+    const v8::FunctionCallbackInfo<v8::Value>& info) {
+  ConsoleHelper(info).reportCallWithDefaultArgument(
+      ConsoleAPIType::kStartGroupCollapsed, String16("console.groupCollapsed"));
+}
+
+void V8Console::groupEndCallback(
+    const v8::FunctionCallbackInfo<v8::Value>& info) {
+  ConsoleHelper(info).reportCallWithDefaultArgument(
+      ConsoleAPIType::kEndGroup, String16("console.groupEnd"));
+}
+
+void V8Console::clearCallback(const v8::FunctionCallbackInfo<v8::Value>& info) {
+  ConsoleHelper(info).reportCallWithDefaultArgument(ConsoleAPIType::kClear,
+                                                    String16("console.clear"));
+}
+
+// console.count([title]): increments and reports a per-identifier counter
+// kept in a private map on the console object.
+void V8Console::countCallback(const v8::FunctionCallbackInfo<v8::Value>& info) {
+  ConsoleHelper helper(info);
+
+  String16 title = helper.firstArgToString(String16());
+  String16 identifier;
+  if (title.isEmpty()) {
+    // No title: key the counter on the call site (url:line) taken from the
+    // top frame of a 1-frame stack capture.
+    std::unique_ptr<V8StackTraceImpl> stackTrace =
+        V8StackTraceImpl::capture(nullptr, 0, 1);
+    if (stackTrace && !stackTrace->isEmpty()) {
+      identifier = toString16(stackTrace->topSourceURL()) + ":" +
+                   String16::fromInteger(stackTrace->topLineNumber());
+    }
+  } else {
+    // "@" suffix keeps titled keys from colliding with url:line keys.
+    identifier = title + "@";
+  }
+
+  v8::Local<v8::Map> countMap;
+  if (!helper.privateMap("V8Console#countMap").ToLocal(&countMap)) return;
+  int32_t count = helper.getIntFromMap(countMap, identifier, 0) + 1;
+  helper.setIntOnMap(countMap, identifier, count);
+  helper.reportCallWithArgument(ConsoleAPIType::kCount,
+                                title + ": " + String16::fromInteger(count));
+}
+
+// console.assert(condition, ...args): no-op when |condition| is truthy.
+// Otherwise reports the remaining arguments (or "console.assert" when none
+// were given) and asks the debugger agent to pause as an Assert.
+void V8Console::assertCallback(
+    const v8::FunctionCallbackInfo<v8::Value>& info) {
+  ConsoleHelper helper(info);
+  if (helper.firstArgToBoolean(false)) return;
+
+  // Forward everything after the condition as the reported message.
+  std::vector<v8::Local<v8::Value>> arguments;
+  for (int i = 1; i < info.Length(); ++i) arguments.push_back(info[i]);
+  if (info.Length() < 2)
+    arguments.push_back(
+        toV8String(info.GetIsolate(), String16("console.assert")));
+  helper.reportCall(ConsoleAPIType::kAssert, arguments);
+
+  if (V8DebuggerAgentImpl* debuggerAgent = helper.debuggerAgent())
+    debuggerAgent->breakProgramOnException(
+        protocol::Debugger::Paused::ReasonEnum::Assert, nullptr);
+}
+
+// Deprecated alias for console.timeStamp; warns once, then delegates.
+void V8Console::markTimelineCallback(
+    const v8::FunctionCallbackInfo<v8::Value>& info) {
+  ConsoleHelper(info).reportDeprecatedCall("V8Console#markTimelineDeprecated",
+                                           "'console.markTimeline' is "
+                                           "deprecated. Please use "
+                                           "'console.timeStamp' instead.");
+  timeStampCallback(info);
+}
+
+// console.profile / console.profileEnd: forwarded to the profiler agent
+// (no-ops when no session exists or the agent is disabled).
+void V8Console::profileCallback(
+    const v8::FunctionCallbackInfo<v8::Value>& info) {
+  ConsoleHelper helper(info);
+  if (V8ProfilerAgentImpl* profilerAgent = helper.profilerAgent())
+    profilerAgent->consoleProfile(helper.firstArgToString(String16()));
+}
+
+void V8Console::profileEndCallback(
+    const v8::FunctionCallbackInfo<v8::Value>& info) {
+  ConsoleHelper helper(info);
+  if (V8ProfilerAgentImpl* profilerAgent = helper.profilerAgent())
+    profilerAgent->consoleProfileEnd(helper.firstArgToString(String16()));
+}
+
+// Shared body of console.time / console.timeline: notifies the client and
+// records the start timestamp under the title in a private per-console map.
+// |timelinePrefix| wraps the title for the deprecated timeline variants.
+static void timeFunction(const v8::FunctionCallbackInfo<v8::Value>& info,
+                         bool timelinePrefix) {
+  ConsoleHelper helper(info);
+  if (V8InspectorClient* client = helper.ensureDebuggerClient()) {
+    String16 protocolTitle = helper.firstArgToString("default");
+    if (timelinePrefix) protocolTitle = "Timeline '" + protocolTitle + "'";
+    client->consoleTime(toStringView(protocolTitle));
+
+    v8::Local<v8::Map> timeMap;
+    if (!helper.privateMap("V8Console#timeMap").ToLocal(&timeMap)) return;
+    helper.setDoubleOnMap(timeMap, protocolTitle, client->currentTimeMS());
+  }
+}
+
+// Shared body of console.timeEnd / console.timelineEnd: reports the time
+// elapsed since the matching timeFunction() call with the same title.
+// NOTE(review): if console.time was never called for this title, the map
+// lookup falls back to 0.0, so the reported elapsed time is the absolute
+// clock value; the map entry is also not removed here — confirm intended.
+static void timeEndFunction(const v8::FunctionCallbackInfo<v8::Value>& info,
+                            bool timelinePrefix) {
+  ConsoleHelper helper(info);
+  if (V8InspectorClient* client = helper.ensureDebuggerClient()) {
+    String16 protocolTitle = helper.firstArgToString("default");
+    if (timelinePrefix) protocolTitle = "Timeline '" + protocolTitle + "'";
+    client->consoleTimeEnd(toStringView(protocolTitle));
+
+    v8::Local<v8::Map> timeMap;
+    if (!helper.privateMap("V8Console#timeMap").ToLocal(&timeMap)) return;
+    double elapsed = client->currentTimeMS() -
+                     helper.getDoubleFromMap(timeMap, protocolTitle, 0.0);
+    String16 message =
+        protocolTitle + ": " + String16::fromDouble(elapsed, 3) + "ms";
+    helper.reportCallWithArgument(ConsoleAPIType::kTimeEnd, message);
+  }
+}
+
+// Deprecated console.timeline / timelineEnd warn once, then share the
+// time/timeEnd implementation with a "Timeline '...'" title prefix.
+void V8Console::timelineCallback(
+    const v8::FunctionCallbackInfo<v8::Value>& info) {
+  ConsoleHelper(info).reportDeprecatedCall(
+      "V8Console#timeline",
+      "'console.timeline' is deprecated. Please use 'console.time' instead.");
+  timeFunction(info, true);
+}
+
+void V8Console::timelineEndCallback(
+    const v8::FunctionCallbackInfo<v8::Value>& info) {
+  ConsoleHelper(info).reportDeprecatedCall("V8Console#timelineEnd",
+                                           "'console.timelineEnd' is "
+                                           "deprecated. Please use "
+                                           "'console.timeEnd' instead.");
+  timeEndFunction(info, true);
+}
+
+void V8Console::timeCallback(const v8::FunctionCallbackInfo<v8::Value>& info) {
+  timeFunction(info, false);
+}
+
+void V8Console::timeEndCallback(
+    const v8::FunctionCallbackInfo<v8::Value>& info) {
+  timeEndFunction(info, false);
+}
+
+// console.timeStamp(label): forwards the label to the embedder client.
+void V8Console::timeStampCallback(
+    const v8::FunctionCallbackInfo<v8::Value>& info) {
+  ConsoleHelper helper(info);
+  if (V8InspectorClient* client = helper.ensureDebuggerClient()) {
+    String16 title = helper.firstArgToString(String16());
+    client->consoleTimeStamp(toStringView(title));
+  }
+}
+
+// Getter for console.memory: value is supplied by the embedder client;
+// returns nothing when the client provides no memory info.
+void V8Console::memoryGetterCallback(
+    const v8::FunctionCallbackInfo<v8::Value>& info) {
+  if (V8InspectorClient* client = ConsoleHelper(info).ensureDebuggerClient()) {
+    v8::Local<v8::Value> memoryValue;
+    if (!client
+             ->memoryInfo(info.GetIsolate(),
+                          info.GetIsolate()->GetCurrentContext())
+             .ToLocal(&memoryValue))
+      return;
+    info.GetReturnValue().Set(memoryValue);
+  }
+}
+
+void V8Console::memorySetterCallback(
+    const v8::FunctionCallbackInfo<v8::Value>& info) {
+  // We can't make the attribute readonly as it breaks existing code that relies
+  // on being able to assign to console.memory in strict mode. Instead, the
+  // setter just ignores the passed value.  http://crbug.com/468611
+}
+
+// Command Line API keys(object): returns the object's own property names.
+// An empty array is pre-set so failures still yield a sensible result.
+void V8Console::keysCallback(const v8::FunctionCallbackInfo<v8::Value>& info) {
+  v8::Isolate* isolate = info.GetIsolate();
+  info.GetReturnValue().Set(v8::Array::New(isolate));
+
+  ConsoleHelper helper(info);
+  v8::Local<v8::Object> obj;
+  if (!helper.firstArgAsObject().ToLocal(&obj)) return;
+  v8::Local<v8::Array> names;
+  if (!obj->GetOwnPropertyNames(isolate->GetCurrentContext()).ToLocal(&names))
+    return;
+  info.GetReturnValue().Set(names);
+}
+
+// Command Line API values(object): returns the values of the object's own
+// properties. Individual property failures are skipped (leaving holes);
+// an empty array is pre-set for the early-return paths.
+void V8Console::valuesCallback(
+    const v8::FunctionCallbackInfo<v8::Value>& info) {
+  v8::Isolate* isolate = info.GetIsolate();
+  info.GetReturnValue().Set(v8::Array::New(isolate));
+
+  ConsoleHelper helper(info);
+  v8::Local<v8::Object> obj;
+  if (!helper.firstArgAsObject().ToLocal(&obj)) return;
+  v8::Local<v8::Array> names;
+  v8::Local<v8::Context> context = isolate->GetCurrentContext();
+  if (!obj->GetOwnPropertyNames(context).ToLocal(&names)) return;
+  v8::Local<v8::Array> values = v8::Array::New(isolate, names->Length());
+  for (uint32_t i = 0; i < names->Length(); ++i) {
+    v8::Local<v8::Value> key;
+    if (!names->Get(context, i).ToLocal(&key)) continue;
+    v8::Local<v8::Value> value;
+    if (!obj->Get(context, key).ToLocal(&value)) continue;
+    createDataProperty(context, values, i, value);
+  }
+  info.GetReturnValue().Set(values);
+}
+
+// Sets or removes (per |enable|) a breakpoint at |function|'s declaration
+// position, optionally guarded by |condition|. Shared by the debug/undebug
+// and monitor/unmonitor Command Line API implementations. No-op when the
+// debugger agent is unavailable or the function has no script position.
+static void setFunctionBreakpoint(ConsoleHelper& helper,
+                                  v8::Local<v8::Function> function,
+                                  V8DebuggerAgentImpl::BreakpointSource source,
+                                  const String16& condition, bool enable) {
+  V8DebuggerAgentImpl* debuggerAgent = helper.debuggerAgent();
+  if (!debuggerAgent) return;
+  String16 scriptId = String16::fromInteger(function->ScriptId());
+  int lineNumber = function->GetScriptLineNumber();
+  int columnNumber = function->GetScriptColumnNumber();
+  if (lineNumber == v8::Function::kLineOffsetNotFound ||
+      columnNumber == v8::Function::kLineOffsetNotFound)
+    return;
+  if (enable)
+    debuggerAgent->setBreakpointAt(scriptId, lineNumber, columnNumber, source,
+                                   condition);
+  else
+    debuggerAgent->removeBreakpointAt(scriptId, lineNumber, columnNumber,
+                                      source);
+}
+
+// Command Line API debug(fn) / undebug(fn): toggle an unconditional
+// breakpoint at the function's declaration position.
+void V8Console::debugFunctionCallback(
+    const v8::FunctionCallbackInfo<v8::Value>& info) {
+  ConsoleHelper helper(info);
+  v8::Local<v8::Function> function;
+  if (!helper.firstArgAsFunction().ToLocal(&function)) return;
+  setFunctionBreakpoint(helper, function,
+                        V8DebuggerAgentImpl::DebugCommandBreakpointSource,
+                        String16(), true);
+}
+
+void V8Console::undebugFunctionCallback(
+    const v8::FunctionCallbackInfo<v8::Value>& info) {
+  ConsoleHelper helper(info);
+  v8::Local<v8::Function> function;
+  if (!helper.firstArgAsFunction().ToLocal(&function)) return;
+  setFunctionBreakpoint(helper, function,
+                        V8DebuggerAgentImpl::DebugCommandBreakpointSource,
+                        String16(), false);
+}
+
+// Command Line API monitor(fn): installs a conditional breakpoint whose
+// condition logs the call (name + arguments) and evaluates to false, so
+// execution never actually pauses.
+void V8Console::monitorFunctionCallback(
+    const v8::FunctionCallbackInfo<v8::Value>& info) {
+  ConsoleHelper helper(info);
+  v8::Local<v8::Function> function;
+  if (!helper.firstArgAsFunction().ToLocal(&function)) return;
+  // Prefer the explicit name; fall back to V8's inferred name.
+  v8::Local<v8::Value> name = function->GetName();
+  if (!name->IsString() || !v8::Local<v8::String>::Cast(name)->Length())
+    name = function->GetInferredName();
+  String16 functionName = toProtocolStringWithTypeCheck(name);
+  // Build the JS condition: console.log(...) && false.
+  String16Builder builder;
+  builder.append("console.log(\"function ");
+  if (functionName.isEmpty())
+    builder.append("(anonymous function)");
+  else
+    builder.append(functionName);
+  builder.append(
+      " called\" + (arguments.length > 0 ? \" with arguments: \" + "
+      "Array.prototype.join.call(arguments, \", \") : \"\")) && false");
+  setFunctionBreakpoint(helper, function,
+                        V8DebuggerAgentImpl::MonitorCommandBreakpointSource,
+                        builder.toString(), true);
+}
+
+// Command Line API unmonitor(fn): removes the monitor() breakpoint.
+void V8Console::unmonitorFunctionCallback(
+    const v8::FunctionCallbackInfo<v8::Value>& info) {
+  ConsoleHelper helper(info);
+  v8::Local<v8::Function> function;
+  if (!helper.firstArgAsFunction().ToLocal(&function)) return;
+  setFunctionBreakpoint(helper, function,
+                        V8DebuggerAgentImpl::MonitorCommandBreakpointSource,
+                        String16(), false);
+}
+
+// Command Line API $_: returns the last evaluation result recorded by the
+// injected script; returns nothing when unavailable.
+void V8Console::lastEvaluationResultCallback(
+    const v8::FunctionCallbackInfo<v8::Value>& info) {
+  ConsoleHelper helper(info);
+  InspectedContext* context = helper.ensureInspectedContext();
+  if (!context) return;
+  if (InjectedScript* injectedScript = context->getInjectedScript())
+    info.GetReturnValue().Set(injectedScript->lastEvaluationResult());
+}
+
+// Shared body of inspect(object) and copy(value): wraps the first argument
+// as a protocol RemoteObject and forwards it to the runtime agent's inspect
+// notification. copy() adds the "copyToClipboard" hint and, unlike
+// inspect(), does not echo the argument back as the return value.
+static void inspectImpl(const v8::FunctionCallbackInfo<v8::Value>& info,
+                        bool copyToClipboard) {
+  if (info.Length() < 1) return;
+  if (!copyToClipboard) info.GetReturnValue().Set(info[0]);
+
+  ConsoleHelper helper(info);
+  InspectedContext* context = helper.ensureInspectedContext();
+  if (!context) return;
+  InjectedScript* injectedScript = context->getInjectedScript();
+  if (!injectedScript) return;
+  ErrorString errorString;
+  std::unique_ptr<protocol::Runtime::RemoteObject> wrappedObject =
+      injectedScript->wrapObject(&errorString, info[0], "",
+                                 false /** forceValueType */,
+                                 false /** generatePreview */);
+  if (!wrappedObject || !errorString.isEmpty()) return;
+
+  std::unique_ptr<protocol::DictionaryValue> hints =
+      protocol::DictionaryValue::create();
+  if (copyToClipboard) hints->setBoolean("copyToClipboard", true);
+  if (V8InspectorSessionImpl* session = helper.currentSession())
+    session->runtimeAgent()->inspect(std::move(wrappedObject),
+                                     std::move(hints));
+}
+
+// Command Line API inspect(object) / copy(value); see inspectImpl above.
+void V8Console::inspectCallback(
+    const v8::FunctionCallbackInfo<v8::Value>& info) {
+  inspectImpl(info, false);
+}
+
+void V8Console::copyCallback(const v8::FunctionCallbackInfo<v8::Value>& info) {
+  inspectImpl(info, true);
+}
+
+// Backs the $0..$4 Command Line API getters: returns the |num|-th recently
+// inspected object from the session buffer, or undefined when absent.
+void V8Console::inspectedObject(const v8::FunctionCallbackInfo<v8::Value>& info,
+                                unsigned num) {
+  DCHECK(num < V8InspectorSessionImpl::kInspectedObjectBufferSize);
+  ConsoleHelper helper(info);
+  if (V8InspectorSessionImpl* session = helper.currentSession()) {
+    V8InspectorSession::Inspectable* object = session->inspectedObject(num);
+    v8::Isolate* isolate = info.GetIsolate();
+    if (object)
+      info.GetReturnValue().Set(object->get(isolate->GetCurrentContext()));
+    else
+      info.GetReturnValue().Set(v8::Undefined(isolate));
+  }
+}
+
+// Builds the console object for |inspectedContext|: a plain object with all
+// console.* methods bound, an optional "memory" accessor (when the embedder
+// supports it), and a private back-pointer to the InspectedContext.
+v8::Local<v8::Object> V8Console::createConsole(
+    InspectedContext* inspectedContext, bool hasMemoryAttribute) {
+  v8::Local<v8::Context> context = inspectedContext->context();
+  v8::Context::Scope contextScope(context);
+  v8::Isolate* isolate = context->GetIsolate();
+  // Don't let microtasks run while we mutate objects in the target context.
+  v8::MicrotasksScope microtasksScope(isolate,
+                                      v8::MicrotasksScope::kDoNotRunMicrotasks);
+
+  v8::Local<v8::Object> console = v8::Object::New(isolate);
+  // Give console its own prototype object so user code can extend it
+  // without touching Object.prototype.
+  bool success =
+      console->SetPrototype(context, v8::Object::New(isolate)).FromMaybe(false);
+  DCHECK(success);
+  USE(success);
+
+  createBoundFunctionProperty(context, console, "debug",
+                              V8Console::debugCallback);
+  createBoundFunctionProperty(context, console, "error",
+                              V8Console::errorCallback);
+  createBoundFunctionProperty(context, console, "info",
+                              V8Console::infoCallback);
+  createBoundFunctionProperty(context, console, "log", V8Console::logCallback);
+  createBoundFunctionProperty(context, console, "warn",
+                              V8Console::warnCallback);
+  createBoundFunctionProperty(context, console, "dir", V8Console::dirCallback);
+  createBoundFunctionProperty(context, console, "dirxml",
+                              V8Console::dirxmlCallback);
+  createBoundFunctionProperty(context, console, "table",
+                              V8Console::tableCallback);
+  createBoundFunctionProperty(context, console, "trace",
+                              V8Console::traceCallback);
+  createBoundFunctionProperty(context, console, "group",
+                              V8Console::groupCallback);
+  createBoundFunctionProperty(context, console, "groupCollapsed",
+                              V8Console::groupCollapsedCallback);
+  createBoundFunctionProperty(context, console, "groupEnd",
+                              V8Console::groupEndCallback);
+  createBoundFunctionProperty(context, console, "clear",
+                              V8Console::clearCallback);
+  createBoundFunctionProperty(context, console, "count",
+                              V8Console::countCallback);
+  createBoundFunctionProperty(context, console, "assert",
+                              V8Console::assertCallback);
+  createBoundFunctionProperty(context, console, "markTimeline",
+                              V8Console::markTimelineCallback);
+  createBoundFunctionProperty(context, console, "profile",
+                              V8Console::profileCallback);
+  createBoundFunctionProperty(context, console, "profileEnd",
+                              V8Console::profileEndCallback);
+  createBoundFunctionProperty(context, console, "timeline",
+                              V8Console::timelineCallback);
+  createBoundFunctionProperty(context, console, "timelineEnd",
+                              V8Console::timelineEndCallback);
+  createBoundFunctionProperty(context, console, "time",
+                              V8Console::timeCallback);
+  createBoundFunctionProperty(context, console, "timeEnd",
+                              V8Console::timeEndCallback);
+  createBoundFunctionProperty(context, console, "timeStamp",
+                              V8Console::timeStampCallback);
+
+  // "memory" is an accessor: the getter delegates to the embedder, the
+  // setter intentionally ignores writes (see memorySetterCallback).
+  if (hasMemoryAttribute)
+    console->SetAccessorProperty(
+        toV8StringInternalized(isolate, "memory"),
+        v8::Function::New(context, V8Console::memoryGetterCallback, console, 0,
+                          v8::ConstructorBehavior::kThrow)
+            .ToLocalChecked(),
+        v8::Function::New(context, V8Console::memorySetterCallback,
+                          v8::Local<v8::Value>(), 0,
+                          v8::ConstructorBehavior::kThrow)
+            .ToLocalChecked(),
+        static_cast<v8::PropertyAttribute>(v8::None), v8::DEFAULT);
+
+  // Private back-pointer used by the helpers to find the InspectedContext.
+  console->SetPrivate(context, inspectedContextPrivateKey(isolate),
+                      v8::External::New(isolate, inspectedContext));
+  return console;
+}
+
+// Severs the console's private back-pointer when the InspectedContext goes
+// away, so later console calls can't touch a destroyed context.
+void V8Console::clearInspectedContextIfNeeded(v8::Local<v8::Context> context,
+                                              v8::Local<v8::Object> console) {
+  v8::Isolate* isolate = context->GetIsolate();
+  console->SetPrivate(context, inspectedContextPrivateKey(isolate),
+                      v8::External::New(isolate, nullptr));
+}
+
+// Builds the Command Line API object (dir, keys, inspect, $0..$4, ...)
+// exposed to console evaluations, with a null prototype so its names can't
+// be shadowed via Object.prototype. The embedder may install additional
+// entries; a private back-pointer links it to the InspectedContext.
+v8::Local<v8::Object> V8Console::createCommandLineAPI(
+    InspectedContext* inspectedContext) {
+  v8::Local<v8::Context> context = inspectedContext->context();
+  v8::Isolate* isolate = context->GetIsolate();
+  // Don't let microtasks run while we mutate objects in the target context.
+  v8::MicrotasksScope microtasksScope(isolate,
+                                      v8::MicrotasksScope::kDoNotRunMicrotasks);
+
+  v8::Local<v8::Object> commandLineAPI = v8::Object::New(isolate);
+  bool success =
+      commandLineAPI->SetPrototype(context, v8::Null(isolate)).FromMaybe(false);
+  DCHECK(success);
+  USE(success);
+
+  // Each entry carries a description string shown by its custom toString.
+  createBoundFunctionProperty(context, commandLineAPI, "dir",
+                              V8Console::dirCallback,
+                              "function dir(value) { [Command Line API] }");
+  createBoundFunctionProperty(context, commandLineAPI, "dirxml",
+                              V8Console::dirxmlCallback,
+                              "function dirxml(value) { [Command Line API] }");
+  createBoundFunctionProperty(context, commandLineAPI, "profile",
+                              V8Console::profileCallback,
+                              "function profile(title) { [Command Line API] }");
+  createBoundFunctionProperty(
+      context, commandLineAPI, "profileEnd", V8Console::profileEndCallback,
+      "function profileEnd(title) { [Command Line API] }");
+  createBoundFunctionProperty(context, commandLineAPI, "clear",
+                              V8Console::clearCallback,
+                              "function clear() { [Command Line API] }");
+  createBoundFunctionProperty(
+      context, commandLineAPI, "table", V8Console::tableCallback,
+      "function table(data, [columns]) { [Command Line API] }");
+
+  createBoundFunctionProperty(context, commandLineAPI, "keys",
+                              V8Console::keysCallback,
+                              "function keys(object) { [Command Line API] }");
+  createBoundFunctionProperty(context, commandLineAPI, "values",
+                              V8Console::valuesCallback,
+                              "function values(object) { [Command Line API] }");
+  createBoundFunctionProperty(
+      context, commandLineAPI, "debug", V8Console::debugFunctionCallback,
+      "function debug(function) { [Command Line API] }");
+  createBoundFunctionProperty(
+      context, commandLineAPI, "undebug", V8Console::undebugFunctionCallback,
+      "function undebug(function) { [Command Line API] }");
+  createBoundFunctionProperty(
+      context, commandLineAPI, "monitor", V8Console::monitorFunctionCallback,
+      "function monitor(function) { [Command Line API] }");
+  createBoundFunctionProperty(
+      context, commandLineAPI, "unmonitor",
+      V8Console::unmonitorFunctionCallback,
+      "function unmonitor(function) { [Command Line API] }");
+  createBoundFunctionProperty(
+      context, commandLineAPI, "inspect", V8Console::inspectCallback,
+      "function inspect(object) { [Command Line API] }");
+  createBoundFunctionProperty(context, commandLineAPI, "copy",
+                              V8Console::copyCallback,
+                              "function copy(value) { [Command Line API] }");
+  createBoundFunctionProperty(context, commandLineAPI, "$_",
+                              V8Console::lastEvaluationResultCallback);
+  createBoundFunctionProperty(context, commandLineAPI, "$0",
+                              V8Console::inspectedObject0);
+  createBoundFunctionProperty(context, commandLineAPI, "$1",
+                              V8Console::inspectedObject1);
+  createBoundFunctionProperty(context, commandLineAPI, "$2",
+                              V8Console::inspectedObject2);
+  createBoundFunctionProperty(context, commandLineAPI, "$3",
+                              V8Console::inspectedObject3);
+  createBoundFunctionProperty(context, commandLineAPI, "$4",
+                              V8Console::inspectedObject4);
+
+  // Let the embedder add its own command line helpers.
+  inspectedContext->inspector()->client()->installAdditionalCommandLineAPI(
+      context, commandLineAPI);
+
+  commandLineAPI->SetPrivate(context, inspectedContextPrivateKey(isolate),
+                             v8::External::New(isolate, inspectedContext));
+  return commandLineAPI;
+}
+
+// True for the value-like Command Line API names ($0..$4, $_), which are
+// invoked implicitly by the accessor getter rather than returned as
+// functions.
+static bool isCommandLineAPIGetter(const String16& name) {
+  if (name.length() != 2) return false;
+  // $0 ... $4, $_
+  return name[0] == '$' &&
+         ((name[1] >= '0' && name[1] <= '4') || name[1] == '_');
+}
+
+// Getter installed on the global object for each Command Line API name.
+// Normally returns the API's value; for $0..$4 and $_ it calls the bound
+// function so the user sees the value rather than the function. During
+// scope teardown (m_cleanup set by ~CommandLineAPIScope) it instead
+// deletes the property from the holder — reading is how cleanup happens.
+void V8Console::CommandLineAPIScope::accessorGetterCallback(
+    v8::Local<v8::Name> name, const v8::PropertyCallbackInfo<v8::Value>& info) {
+  CommandLineAPIScope* scope = static_cast<CommandLineAPIScope*>(
+      info.Data().As<v8::External>()->Value());
+  DCHECK(scope);
+
+  v8::Local<v8::Context> context = info.GetIsolate()->GetCurrentContext();
+  if (scope->m_cleanup) {
+    bool removed = info.Holder()->Delete(context, name).FromMaybe(false);
+    DCHECK(removed);
+    USE(removed);
+    return;
+  }
+  v8::Local<v8::Object> commandLineAPI = scope->m_commandLineAPI;
+
+  v8::Local<v8::Value> value;
+  if (!commandLineAPI->Get(context, name).ToLocal(&value)) return;
+  if (isCommandLineAPIGetter(toProtocolStringWithTypeCheck(name))) {
+    DCHECK(value->IsFunction());
+    v8::MicrotasksScope microtasks(info.GetIsolate(),
+                                   v8::MicrotasksScope::kDoNotRunMicrotasks);
+    if (value.As<v8::Function>()
+            ->Call(context, commandLineAPI, 0, nullptr)
+            .ToLocal(&value))
+      info.GetReturnValue().Set(value);
+  } else {
+    info.GetReturnValue().Set(value);
+  }
+}
+
+// Setter installed on the global object for each Command Line API name.
+// A user assignment permanently replaces the accessor with a plain data
+// property holding the assigned value, and unregisters the name from the
+// installed-methods set so teardown won't try to remove it.
+void V8Console::CommandLineAPIScope::accessorSetterCallback(
+    v8::Local<v8::Name> name, v8::Local<v8::Value> value,
+    const v8::PropertyCallbackInfo<void>& info) {
+  CommandLineAPIScope* scope = static_cast<CommandLineAPIScope*>(
+      info.Data().As<v8::External>()->Value());
+  v8::Local<v8::Context> context = info.GetIsolate()->GetCurrentContext();
+  if (!info.Holder()->Delete(context, name).FromMaybe(false)) return;
+  if (!info.Holder()->CreateDataProperty(context, name, value).FromMaybe(false))
+    return;
+  bool removed =
+      scope->m_installedMethods->Delete(context, name).FromMaybe(false);
+  DCHECK(removed);
+  USE(removed);
+}
+
+// Temporarily exposes every Command Line API name on |global| as a
+// DontEnum accessor pair, tracking which names were actually installed in
+// m_installedMethods. Names already present on the global are left alone.
+V8Console::CommandLineAPIScope::CommandLineAPIScope(
+    v8::Local<v8::Context> context, v8::Local<v8::Object> commandLineAPI,
+    v8::Local<v8::Object> global)
+    : m_context(context),
+      m_commandLineAPI(commandLineAPI),
+      m_global(global),
+      m_installedMethods(v8::Set::New(context->GetIsolate())),
+      m_cleanup(false) {
+  v8::Local<v8::Array> names;
+  if (!m_commandLineAPI->GetOwnPropertyNames(context).ToLocal(&names)) return;
+  v8::Local<v8::External> externalThis =
+      v8::External::New(context->GetIsolate(), this);
+  for (uint32_t i = 0; i < names->Length(); ++i) {
+    v8::Local<v8::Value> name;
+    if (!names->Get(context, i).ToLocal(&name) || !name->IsName()) continue;
+    // Never shadow an existing global property.
+    if (m_global->Has(context, name).FromMaybe(true)) continue;
+    if (!m_installedMethods->Add(context, name).ToLocal(&m_installedMethods))
+      continue;
+    if (!m_global
+             ->SetAccessor(context, v8::Local<v8::Name>::Cast(name),
+                           CommandLineAPIScope::accessorGetterCallback,
+                           CommandLineAPIScope::accessorSetterCallback,
+                           externalThis, v8::DEFAULT, v8::DontEnum)
+             .FromMaybe(false)) {
+      // Installation failed: don't leave the name in the installed set,
+      // or the destructor would try to clean up a property that isn't ours.
+      bool removed = m_installedMethods->Delete(context, name).FromMaybe(false);
+      DCHECK(removed);
+      USE(removed);
+      continue;
+    }
+  }
+}
+
+// Removes the accessors installed by the constructor. With m_cleanup set,
+// merely reading each property (via GetOwnPropertyDescriptor, which invokes
+// accessorGetterCallback) makes the getter delete it from the global — the
+// descriptor value itself is discarded on purpose.
+V8Console::CommandLineAPIScope::~CommandLineAPIScope() {
+  m_cleanup = true;
+  v8::Local<v8::Array> names = m_installedMethods->AsArray();
+  for (uint32_t i = 0; i < names->Length(); ++i) {
+    v8::Local<v8::Value> name;
+    if (!names->Get(m_context, i).ToLocal(&name) || !name->IsName()) continue;
+    if (name->IsString()) {
+      v8::Local<v8::Value> descriptor;
+      // The read below triggers the self-deleting getter; see above.
+      bool success = m_global
+                         ->GetOwnPropertyDescriptor(
+                             m_context, v8::Local<v8::String>::Cast(name))
+                         .ToLocal(&descriptor);
+      DCHECK(success);
+      USE(success);
+    }
+  }
+}
+
+}  // namespace v8_inspector
diff --git a/src/inspector/v8-console.h b/src/inspector/v8-console.h
new file mode 100644
index 0000000..c643d49
--- /dev/null
+++ b/src/inspector/v8-console.h
@@ -0,0 +1,119 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_INSPECTOR_V8CONSOLE_H_
+#define V8_INSPECTOR_V8CONSOLE_H_
+
+#include "src/base/macros.h"
+
+#include "include/v8.h"
+
+namespace v8_inspector {
+
+class InspectedContext;
+
+// Console API
+// https://console.spec.whatwg.org/#console-interface
+class V8Console {
+ public:
+  // Builds the console object for the given context; hasMemoryAttribute
+  // controls whether the non-standard "memory" getter/setter is exposed.
+  static v8::Local<v8::Object> createConsole(InspectedContext*,
+                                             bool hasMemoryAttribute);
+  static void clearInspectedContextIfNeeded(v8::Local<v8::Context>,
+                                            v8::Local<v8::Object> console);
+  static v8::Local<v8::Object> createCommandLineAPI(InspectedContext*);
+
+  // RAII helper: while alive, exposes the command-line-API methods as
+  // non-enumerable accessors on the context's global object (skipping names
+  // the global already defines); the destructor removes them again.
+  class CommandLineAPIScope {
+   public:
+    CommandLineAPIScope(v8::Local<v8::Context>,
+                        v8::Local<v8::Object> commandLineAPI,
+                        v8::Local<v8::Object> global);
+    ~CommandLineAPIScope();
+
+   private:
+    static void accessorGetterCallback(
+        v8::Local<v8::Name>, const v8::PropertyCallbackInfo<v8::Value>&);
+    static void accessorSetterCallback(v8::Local<v8::Name>,
+                                       v8::Local<v8::Value>,
+                                       const v8::PropertyCallbackInfo<void>&);
+
+    v8::Local<v8::Context> m_context;
+    v8::Local<v8::Object> m_commandLineAPI;
+    v8::Local<v8::Object> m_global;
+    // Names this scope actually installed (and therefore must remove).
+    v8::Local<v8::Set> m_installedMethods;
+    // True while the destructor is running; checked by the accessor
+    // callbacks.
+    bool m_cleanup;
+
+    DISALLOW_COPY_AND_ASSIGN(CommandLineAPIScope);
+  };
+
+ private:
+  // v8::FunctionCallback handlers backing the console.* methods.
+  static void debugCallback(const v8::FunctionCallbackInfo<v8::Value>&);
+  static void errorCallback(const v8::FunctionCallbackInfo<v8::Value>&);
+  static void infoCallback(const v8::FunctionCallbackInfo<v8::Value>&);
+  static void logCallback(const v8::FunctionCallbackInfo<v8::Value>&);
+  static void warnCallback(const v8::FunctionCallbackInfo<v8::Value>&);
+  static void dirCallback(const v8::FunctionCallbackInfo<v8::Value>&);
+  static void dirxmlCallback(const v8::FunctionCallbackInfo<v8::Value>&);
+  static void tableCallback(const v8::FunctionCallbackInfo<v8::Value>&);
+  static void traceCallback(const v8::FunctionCallbackInfo<v8::Value>&);
+  static void groupCallback(const v8::FunctionCallbackInfo<v8::Value>&);
+  static void groupCollapsedCallback(
+      const v8::FunctionCallbackInfo<v8::Value>&);
+  static void groupEndCallback(const v8::FunctionCallbackInfo<v8::Value>&);
+  static void clearCallback(const v8::FunctionCallbackInfo<v8::Value>&);
+  static void countCallback(const v8::FunctionCallbackInfo<v8::Value>&);
+  static void assertCallback(const v8::FunctionCallbackInfo<v8::Value>&);
+  static void markTimelineCallback(const v8::FunctionCallbackInfo<v8::Value>&);
+  static void profileCallback(const v8::FunctionCallbackInfo<v8::Value>&);
+  static void profileEndCallback(const v8::FunctionCallbackInfo<v8::Value>&);
+  static void timelineCallback(const v8::FunctionCallbackInfo<v8::Value>&);
+  static void timelineEndCallback(const v8::FunctionCallbackInfo<v8::Value>&);
+  static void timeCallback(const v8::FunctionCallbackInfo<v8::Value>&);
+  static void timeEndCallback(const v8::FunctionCallbackInfo<v8::Value>&);
+  static void timeStampCallback(const v8::FunctionCallbackInfo<v8::Value>&);
+  // TODO(foolip): There is no spec for the Memory Info API, see blink-dev:
+  // https://groups.google.com/a/chromium.org/d/msg/blink-dev/g5YRCGpC9vs/b4OJz71NmPwJ
+  static void memoryGetterCallback(const v8::FunctionCallbackInfo<v8::Value>&);
+  static void memorySetterCallback(const v8::FunctionCallbackInfo<v8::Value>&);
+
+  // CommandLineAPI
+  static void keysCallback(const v8::FunctionCallbackInfo<v8::Value>&);
+  static void valuesCallback(const v8::FunctionCallbackInfo<v8::Value>&);
+  static void debugFunctionCallback(const v8::FunctionCallbackInfo<v8::Value>&);
+  static void undebugFunctionCallback(
+      const v8::FunctionCallbackInfo<v8::Value>&);
+  static void monitorFunctionCallback(
+      const v8::FunctionCallbackInfo<v8::Value>&);
+  static void unmonitorFunctionCallback(
+      const v8::FunctionCallbackInfo<v8::Value>&);
+  static void lastEvaluationResultCallback(
+      const v8::FunctionCallbackInfo<v8::Value>&);
+  static void inspectCallback(const v8::FunctionCallbackInfo<v8::Value>&);
+  static void copyCallback(const v8::FunctionCallbackInfo<v8::Value>&);
+  // Shared implementation for the numbered handlers below; `num` selects
+  // which recently-inspected object to return (bound as $0..$4 in the
+  // command line API -- TODO(review): confirm at the binding site).
+  static void inspectedObject(const v8::FunctionCallbackInfo<v8::Value>&,
+                              unsigned num);
+  static void inspectedObject0(
+      const v8::FunctionCallbackInfo<v8::Value>& info) {
+    inspectedObject(info, 0);
+  }
+  static void inspectedObject1(
+      const v8::FunctionCallbackInfo<v8::Value>& info) {
+    inspectedObject(info, 1);
+  }
+  static void inspectedObject2(
+      const v8::FunctionCallbackInfo<v8::Value>& info) {
+    inspectedObject(info, 2);
+  }
+  static void inspectedObject3(
+      const v8::FunctionCallbackInfo<v8::Value>& info) {
+    inspectedObject(info, 3);
+  }
+  static void inspectedObject4(
+      const v8::FunctionCallbackInfo<v8::Value>& info) {
+    inspectedObject(info, 4);
+  }
+};
+
+}  // namespace v8_inspector
+
+#endif  // V8_INSPECTOR_V8CONSOLE_H_
diff --git a/src/inspector/v8-debugger-agent-impl.cc b/src/inspector/v8-debugger-agent-impl.cc
new file mode 100644
index 0000000..80e2611
--- /dev/null
+++ b/src/inspector/v8-debugger-agent-impl.cc
@@ -0,0 +1,1255 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/inspector/v8-debugger-agent-impl.h"
+
+#include <algorithm>
+
+#include "src/inspector/injected-script.h"
+#include "src/inspector/inspected-context.h"
+#include "src/inspector/java-script-call-frame.h"
+#include "src/inspector/protocol/Protocol.h"
+#include "src/inspector/remote-object-id.h"
+#include "src/inspector/script-breakpoint.h"
+#include "src/inspector/search-util.h"
+#include "src/inspector/string-util.h"
+#include "src/inspector/v8-debugger-script.h"
+#include "src/inspector/v8-debugger.h"
+#include "src/inspector/v8-inspector-impl.h"
+#include "src/inspector/v8-inspector-session-impl.h"
+#include "src/inspector/v8-regex.h"
+#include "src/inspector/v8-runtime-agent-impl.h"
+#include "src/inspector/v8-stack-trace-impl.h"
+
+#include "include/v8-inspector.h"
+
+namespace v8_inspector {
+
+using protocol::Array;
+using protocol::Maybe;
+using protocol::Debugger::BreakpointId;
+using protocol::Debugger::CallFrame;
+using protocol::Runtime::ExceptionDetails;
+using protocol::Runtime::ScriptId;
+using protocol::Runtime::StackTrace;
+using protocol::Runtime::RemoteObject;
+
+// Keys under which the agent persists its state in m_state
+// (protocol::DictionaryValue), so it can be restored across sessions.
+namespace DebuggerAgentState {
+// NOTE(review): the stored value string contains a typo ("Breakopints").
+// Because this is a persisted state key, correcting it would orphan state
+// saved by earlier versions -- leave as-is unless a migration is added.
+static const char javaScriptBreakpoints[] = "javaScriptBreakopints";
+static const char pauseOnExceptionsState[] = "pauseOnExceptionsState";
+static const char asyncCallStackDepth[] = "asyncCallStackDepth";
+static const char blackboxPattern[] = "blackboxPattern";
+static const char debuggerEnabled[] = "debuggerEnabled";
+
+// Breakpoint properties.
+static const char url[] = "url";
+static const char isRegex[] = "isRegex";
+static const char lineNumber[] = "lineNumber";
+static const char columnNumber[] = "columnNumber";
+static const char condition[] = "condition";
+static const char skipAllPauses[] = "skipAllPauses";
+
+}  // namespace DebuggerAgentState
+
+// Cap on consecutive blackboxed frames stepped through before falling back
+// to a step-out (see shouldSkipStepPause).
+static const int maxSkipStepFrameCount = 128;
+// Object group owning RemoteObjects created for paused call frames;
+// released on resume/step.
+static const char backtraceObjectGroup[] = "backtrace";
+
+// Maps a breakpoint source to the suffix embedded in its generated id so
+// that user breakpoints and debug()/monitor() command breakpoints at the
+// same location get distinct ids.
+static String16 breakpointIdSuffix(
+    V8DebuggerAgentImpl::BreakpointSource source) {
+  switch (source) {
+    case V8DebuggerAgentImpl::UserBreakpointSource:
+      break;
+    case V8DebuggerAgentImpl::DebugCommandBreakpointSource:
+      return ":debug";
+    case V8DebuggerAgentImpl::MonitorCommandBreakpointSource:
+      return ":monitor";
+  }
+  return String16();
+}
+
+// Builds the canonical breakpoint id "<scriptId>:<line>:<column>[suffix]".
+static String16 generateBreakpointId(
+    const String16& scriptId, int lineNumber, int columnNumber,
+    V8DebuggerAgentImpl::BreakpointSource source) {
+  return scriptId + ":" + String16::fromInteger(lineNumber) + ":" +
+         String16::fromInteger(columnNumber) + breakpointIdSuffix(source);
+}
+
+// Orders (line, column) pairs lexicographically; used with lower_bound over
+// the sorted blackboxed-position ranges.
+static bool positionComparator(const std::pair<int, int>& a,
+                               const std::pair<int, int>& b) {
+  if (a.first != b.first) return a.first < b.first;
+  return a.second < b.second;
+}
+
+// Sets a generic "Internal error" message when hasError is true; returns
+// hasError so it can be used directly in an if-condition.
+static bool hasInternalError(ErrorString* errorString, bool hasError) {
+  if (hasError) *errorString = "Internal error";
+  return hasError;
+}
+
+// Convenience builder for a protocol Debugger.Location object.
+static std::unique_ptr<protocol::Debugger::Location> buildProtocolLocation(
+    const String16& scriptId, int lineNumber, int columnNumber) {
+  return protocol::Debugger::Location::create()
+      .setScriptId(scriptId)
+      .setLineNumber(lineNumber)
+      .setColumnNumber(columnNumber)
+      .build();
+}
+
+// Constructs the agent in disabled state; `state` is the session-owned
+// dictionary used to persist settings across sessions (see
+// DebuggerAgentState keys above).
+V8DebuggerAgentImpl::V8DebuggerAgentImpl(
+    V8InspectorSessionImpl* session, protocol::FrontendChannel* frontendChannel,
+    protocol::DictionaryValue* state)
+    : m_inspector(session->inspector()),
+      m_debugger(m_inspector->debugger()),
+      m_session(session),
+      m_enabled(false),
+      m_state(state),
+      m_frontend(frontendChannel),
+      m_isolate(m_inspector->isolate()),
+      m_breakReason(protocol::Debugger::Paused::ReasonEnum::Other),
+      m_scheduledDebuggerStep(NoStep),
+      m_skipNextDebuggerStepOut(false),
+      m_javaScriptPauseScheduled(false),
+      m_steppingFromFramework(false),
+      m_pausingOnNativeEvent(false),
+      m_skippedStepFrameCount(0),
+      m_recursionLevelForStepOut(0),
+      m_recursionLevelForStepFrame(0),
+      m_skipAllPauses(false) {
+  clearBreakDetails();
+}
+
+V8DebuggerAgentImpl::~V8DebuggerAgentImpl() {}
+
+// Returns true when the agent is enabled; otherwise sets the standard
+// error message and returns false. Used as a guard by protocol methods.
+bool V8DebuggerAgentImpl::checkEnabled(ErrorString* errorString) {
+  if (enabled()) return true;
+  *errorString = "Debugger agent is not enabled";
+  return false;
+}
+
+// Internal enable: flips state, turns the debugger on, and reports all
+// already-compiled scripts for this context group to the frontend.
+void V8DebuggerAgentImpl::enable() {
+  // m_inspector->addListener may result in reporting all parsed scripts to
+  // the agent so it should already be in enabled state by then.
+  m_enabled = true;
+  m_state->setBoolean(DebuggerAgentState::debuggerEnabled, true);
+  m_debugger->enable();
+
+  std::vector<std::unique_ptr<V8DebuggerScript>> compiledScripts;
+  m_debugger->getCompiledScripts(m_session->contextGroupId(), compiledScripts);
+  for (size_t i = 0; i < compiledScripts.size(); i++)
+    didParseSource(std::move(compiledScripts[i]), true);
+
+  // FIXME(WK44513): breakpoints activated flag should be synchronized between
+  // all front-ends
+  m_debugger->setBreakpointsActivated(true);
+}
+
+bool V8DebuggerAgentImpl::enabled() { return m_enabled; }
+
+// Protocol Debugger.enable: no-op if already enabled; refuses when script
+// execution is prohibited in this context group.
+void V8DebuggerAgentImpl::enable(ErrorString* errorString) {
+  if (enabled()) return;
+
+  if (!m_inspector->client()->canExecuteScripts(m_session->contextGroupId())) {
+    *errorString = "Script execution is prohibited";
+    return;
+  }
+
+  enable();
+}
+
+// Protocol Debugger.disable: resumes if paused, resets persisted state to
+// defaults, and clears all in-memory script/breakpoint bookkeeping.
+void V8DebuggerAgentImpl::disable(ErrorString*) {
+  if (!enabled()) return;
+
+  m_state->setObject(DebuggerAgentState::javaScriptBreakpoints,
+                     protocol::DictionaryValue::create());
+  m_state->setInteger(DebuggerAgentState::pauseOnExceptionsState,
+                      V8Debugger::DontPauseOnExceptions);
+  m_state->setInteger(DebuggerAgentState::asyncCallStackDepth, 0);
+
+  if (!m_pausedContext.IsEmpty()) m_debugger->continueProgram();
+  m_debugger->disable();
+  m_pausedContext.Reset();
+  // swap with an empty container releases the paused frames.
+  JavaScriptCallFrames emptyCallFrames;
+  m_pausedCallFrames.swap(emptyCallFrames);
+  m_scripts.clear();
+  m_blackboxedPositions.clear();
+  m_breakpointIdToDebuggerBreakpointIds.clear();
+  m_debugger->setAsyncCallStackDepth(this, 0);
+  m_continueToLocationBreakpointId = String16();
+  clearBreakDetails();
+  m_scheduledDebuggerStep = NoStep;
+  m_skipNextDebuggerStepOut = false;
+  m_javaScriptPauseScheduled = false;
+  m_steppingFromFramework = false;
+  m_pausingOnNativeEvent = false;
+  m_skippedStepFrameCount = 0;
+  m_recursionLevelForStepFrame = 0;
+  m_skipAllPauses = false;
+  m_blackboxPattern = nullptr;
+  m_state->remove(DebuggerAgentState::blackboxPattern);
+  m_enabled = false;
+  m_state->setBoolean(DebuggerAgentState::debuggerEnabled, false);
+}
+
+// Re-enables the agent from persisted state (e.g. after a frontend
+// reconnect): restores pause-on-exceptions, skip-all-pauses, async stack
+// depth and the blackbox pattern.
+void V8DebuggerAgentImpl::restore() {
+  DCHECK(!m_enabled);
+  if (!m_state->booleanProperty(DebuggerAgentState::debuggerEnabled, false))
+    return;
+  if (!m_inspector->client()->canExecuteScripts(m_session->contextGroupId()))
+    return;
+
+  enable();
+  ErrorString error;
+
+  int pauseState = V8Debugger::DontPauseOnExceptions;
+  m_state->getInteger(DebuggerAgentState::pauseOnExceptionsState, &pauseState);
+  setPauseOnExceptionsImpl(&error, pauseState);
+  DCHECK(error.isEmpty());
+
+  m_skipAllPauses =
+      m_state->booleanProperty(DebuggerAgentState::skipAllPauses, false);
+
+  int asyncCallStackDepth = 0;
+  m_state->getInteger(DebuggerAgentState::asyncCallStackDepth,
+                      &asyncCallStackDepth);
+  m_debugger->setAsyncCallStackDepth(this, asyncCallStackDepth);
+
+  String16 blackboxPattern;
+  if (m_state->getString(DebuggerAgentState::blackboxPattern,
+                         &blackboxPattern)) {
+    // The persisted pattern compiled before, so it must compile again.
+    if (!setBlackboxPattern(&error, blackboxPattern)) UNREACHABLE();
+  }
+}
+
+// Protocol Debugger.setBreakpointsActive.
+void V8DebuggerAgentImpl::setBreakpointsActive(ErrorString* errorString,
+                                               bool active) {
+  if (!checkEnabled(errorString)) return;
+  m_debugger->setBreakpointsActivated(active);
+}
+
+// Protocol Debugger.setSkipAllPauses; the flag is persisted so it survives
+// a session restore.
+void V8DebuggerAgentImpl::setSkipAllPauses(ErrorString*, bool skip) {
+  m_skipAllPauses = skip;
+  m_state->setBoolean(DebuggerAgentState::skipAllPauses, m_skipAllPauses);
+}
+
+// Serializes a URL-based breakpoint into the dictionary stored under
+// DebuggerAgentState::javaScriptBreakpoints.
+static std::unique_ptr<protocol::DictionaryValue>
+buildObjectForBreakpointCookie(const String16& url, int lineNumber,
+                               int columnNumber, const String16& condition,
+                               bool isRegex) {
+  std::unique_ptr<protocol::DictionaryValue> breakpointObject =
+      protocol::DictionaryValue::create();
+  breakpointObject->setString(DebuggerAgentState::url, url);
+  breakpointObject->setInteger(DebuggerAgentState::lineNumber, lineNumber);
+  breakpointObject->setInteger(DebuggerAgentState::columnNumber, columnNumber);
+  breakpointObject->setString(DebuggerAgentState::condition, condition);
+  breakpointObject->setBoolean(DebuggerAgentState::isRegex, isRegex);
+  return breakpointObject;
+}
+
+// True when `url` matches `pattern` -- as a regex when isRegex, otherwise
+// by exact string equality.
+static bool matches(V8InspectorImpl* inspector, const String16& url,
+                    const String16& pattern, bool isRegex) {
+  if (isRegex) {
+    V8Regex regex(inspector, pattern, true);
+    return regex.match(url) != -1;
+  }
+  return url == pattern;
+}
+
+// Protocol Debugger.setBreakpointByUrl: registers a breakpoint keyed by URL
+// or URL regex (exactly one must be given), persists it as a cookie so it
+// survives reloads, and immediately resolves it in every already-known
+// script whose sourceURL matches.
+void V8DebuggerAgentImpl::setBreakpointByUrl(
+    ErrorString* errorString, int lineNumber,
+    const Maybe<String16>& optionalURL, const Maybe<String16>& optionalURLRegex,
+    const Maybe<int>& optionalColumnNumber,
+    const Maybe<String16>& optionalCondition, String16* outBreakpointId,
+    std::unique_ptr<protocol::Array<protocol::Debugger::Location>>* locations) {
+  *locations = Array<protocol::Debugger::Location>::create();
+  if (optionalURL.isJust() == optionalURLRegex.isJust()) {
+    *errorString = "Either url or urlRegex must be specified.";
+    return;
+  }
+
+  String16 url = optionalURL.isJust() ? optionalURL.fromJust()
+                                      : optionalURLRegex.fromJust();
+  int columnNumber = 0;
+  if (optionalColumnNumber.isJust()) {
+    columnNumber = optionalColumnNumber.fromJust();
+    if (columnNumber < 0) {
+      *errorString = "Incorrect column number";
+      return;
+    }
+  }
+  String16 condition = optionalCondition.fromMaybe("");
+  bool isRegex = optionalURLRegex.isJust();
+
+  // Regex-based ids are wrapped in slashes to keep them distinct from
+  // plain-URL ids.
+  String16 breakpointId = (isRegex ? "/" + url + "/" : url) + ":" +
+                          String16::fromInteger(lineNumber) + ":" +
+                          String16::fromInteger(columnNumber);
+  protocol::DictionaryValue* breakpointsCookie =
+      m_state->getObject(DebuggerAgentState::javaScriptBreakpoints);
+  if (!breakpointsCookie) {
+    std::unique_ptr<protocol::DictionaryValue> newValue =
+        protocol::DictionaryValue::create();
+    breakpointsCookie = newValue.get();
+    m_state->setObject(DebuggerAgentState::javaScriptBreakpoints,
+                       std::move(newValue));
+  }
+  if (breakpointsCookie->get(breakpointId)) {
+    *errorString = "Breakpoint at specified location already exists.";
+    return;
+  }
+
+  breakpointsCookie->setObject(
+      breakpointId, buildObjectForBreakpointCookie(
+                        url, lineNumber, columnNumber, condition, isRegex));
+
+  ScriptBreakpoint breakpoint(lineNumber, columnNumber, condition);
+  for (const auto& script : m_scripts) {
+    if (!matches(m_inspector, script.second->sourceURL(), url, isRegex))
+      continue;
+    std::unique_ptr<protocol::Debugger::Location> location = resolveBreakpoint(
+        breakpointId, script.first, breakpoint, UserBreakpointSource);
+    if (location) (*locations)->addItem(std::move(location));
+  }
+
+  *outBreakpointId = breakpointId;
+}
+
+// Unpacks a protocol Debugger.Location into its components; column defaults
+// to 0 when absent.
+// NOTE(review): errorString is currently unused and the function always
+// returns true, yet callers still check the result -- presumably kept for a
+// future validation path; confirm before removing the parameter.
+static bool parseLocation(
+    ErrorString* errorString,
+    std::unique_ptr<protocol::Debugger::Location> location, String16* scriptId,
+    int* lineNumber, int* columnNumber) {
+  *scriptId = location->getScriptId();
+  *lineNumber = location->getLineNumber();
+  *columnNumber = location->getColumnNumber(0);
+  return true;
+}
+
+// Protocol Debugger.setBreakpoint: sets a breakpoint at an exact script
+// location; fails if an identical user breakpoint already exists.
+void V8DebuggerAgentImpl::setBreakpoint(
+    ErrorString* errorString,
+    std::unique_ptr<protocol::Debugger::Location> location,
+    const Maybe<String16>& optionalCondition, String16* outBreakpointId,
+    std::unique_ptr<protocol::Debugger::Location>* actualLocation) {
+  String16 scriptId;
+  int lineNumber;
+  int columnNumber;
+
+  if (!parseLocation(errorString, std::move(location), &scriptId, &lineNumber,
+                     &columnNumber))
+    return;
+
+  String16 condition = optionalCondition.fromMaybe("");
+
+  String16 breakpointId = generateBreakpointId(
+      scriptId, lineNumber, columnNumber, UserBreakpointSource);
+  if (m_breakpointIdToDebuggerBreakpointIds.find(breakpointId) !=
+      m_breakpointIdToDebuggerBreakpointIds.end()) {
+    *errorString = "Breakpoint at specified location already exists.";
+    return;
+  }
+  ScriptBreakpoint breakpoint(lineNumber, columnNumber, condition);
+  *actualLocation = resolveBreakpoint(breakpointId, scriptId, breakpoint,
+                                      UserBreakpointSource);
+  if (*actualLocation)
+    *outBreakpointId = breakpointId;
+  else
+    *errorString = "Could not resolve breakpoint";
+}
+
+// Protocol Debugger.removeBreakpoint: drops the persisted cookie (if any)
+// and delegates to the private overload for the live breakpoints.
+void V8DebuggerAgentImpl::removeBreakpoint(ErrorString* errorString,
+                                           const String16& breakpointId) {
+  if (!checkEnabled(errorString)) return;
+  protocol::DictionaryValue* breakpointsCookie =
+      m_state->getObject(DebuggerAgentState::javaScriptBreakpoints);
+  if (breakpointsCookie) breakpointsCookie->remove(breakpointId);
+  removeBreakpoint(breakpointId);
+}
+
+// Removes every V8-side breakpoint registered under this protocol-level
+// breakpoint id (one per script it resolved in).
+void V8DebuggerAgentImpl::removeBreakpoint(const String16& breakpointId) {
+  DCHECK(enabled());
+  BreakpointIdToDebuggerBreakpointIdsMap::iterator
+      debuggerBreakpointIdsIterator =
+          m_breakpointIdToDebuggerBreakpointIds.find(breakpointId);
+  if (debuggerBreakpointIdsIterator ==
+      m_breakpointIdToDebuggerBreakpointIds.end())
+    return;
+  const std::vector<String16>& ids = debuggerBreakpointIdsIterator->second;
+  for (size_t i = 0; i < ids.size(); ++i) {
+    const String16& debuggerBreakpointId = ids[i];
+
+    m_debugger->removeBreakpoint(debuggerBreakpointId);
+    m_serverBreakpoints.erase(debuggerBreakpointId);
+  }
+  m_breakpointIdToDebuggerBreakpointIds.erase(breakpointId);
+}
+
+// Protocol Debugger.continueToLocation: replaces any previous
+// continue-to-location breakpoint with an unconditional one at the target
+// and resumes execution.
+void V8DebuggerAgentImpl::continueToLocation(
+    ErrorString* errorString,
+    std::unique_ptr<protocol::Debugger::Location> location) {
+  if (!checkEnabled(errorString)) return;
+  if (!m_continueToLocationBreakpointId.isEmpty()) {
+    m_debugger->removeBreakpoint(m_continueToLocationBreakpointId);
+    m_continueToLocationBreakpointId = "";
+  }
+
+  String16 scriptId;
+  int lineNumber;
+  int columnNumber;
+
+  if (!parseLocation(errorString, std::move(location), &scriptId, &lineNumber,
+                     &columnNumber))
+    return;
+
+  ScriptBreakpoint breakpoint(lineNumber, columnNumber, "");
+  m_continueToLocationBreakpointId = m_debugger->setBreakpoint(
+      scriptId, breakpoint, &lineNumber, &columnNumber);
+  resume(errorString);
+}
+
+// True when every frame of the current call stack is blackboxed (or the
+// stack is empty -- the loop then never returns false).
+bool V8DebuggerAgentImpl::isCurrentCallStackEmptyOrBlackboxed() {
+  DCHECK(enabled());
+  JavaScriptCallFrames callFrames = m_debugger->currentCallFrames();
+  for (size_t index = 0; index < callFrames.size(); ++index) {
+    if (!isCallFrameWithUnknownScriptOrBlackboxed(callFrames[index].get()))
+      return false;
+  }
+  return true;
+}
+
+// True when the top paused frame is blackboxed (or there is no paused
+// frame at all).
+bool V8DebuggerAgentImpl::isTopPausedCallFrameBlackboxed() {
+  DCHECK(enabled());
+  JavaScriptCallFrame* frame =
+      m_pausedCallFrames.size() ? m_pausedCallFrames[0].get() : nullptr;
+  return isCallFrameWithUnknownScriptOrBlackboxed(frame);
+}
+
+// Decides whether `frame` should be treated as blackboxed: unknown scripts
+// are blackboxed; then the URL-regex blackbox pattern is checked; finally
+// the per-script blackboxed position ranges are consulted.
+bool V8DebuggerAgentImpl::isCallFrameWithUnknownScriptOrBlackboxed(
+    JavaScriptCallFrame* frame) {
+  if (!frame) return true;
+  ScriptsMap::iterator it =
+      m_scripts.find(String16::fromInteger(frame->sourceID()));
+  if (it == m_scripts.end()) {
+    // Unknown scripts are blackboxed.
+    return true;
+  }
+  if (m_blackboxPattern) {
+    const String16& scriptSourceURL = it->second->sourceURL();
+    if (!scriptSourceURL.isEmpty() &&
+        m_blackboxPattern->match(scriptSourceURL) != -1)
+      return true;
+  }
+  auto itBlackboxedPositions =
+      m_blackboxedPositions.find(String16::fromInteger(frame->sourceID()));
+  if (itBlackboxedPositions == m_blackboxedPositions.end()) return false;
+
+  const std::vector<std::pair<int, int>>& ranges =
+      itBlackboxedPositions->second;
+  auto itRange = std::lower_bound(
+      ranges.begin(), ranges.end(),
+      std::make_pair(frame->line(), frame->column()), positionComparator);
+  // Ranges array contains positions in script where blackbox state is changed.
+  // [(0,0) ... ranges[0]) isn't blackboxed, [ranges[0] ... ranges[1]) is
+  // blackboxed...
+  return std::distance(ranges.begin(), itRange) % 2;
+}
+
+// Policy for exception pauses: skip (continue) when the throwing frame is
+// blackboxed, unless the user is already stepping from framework code.
+V8DebuggerAgentImpl::SkipPauseRequest
+V8DebuggerAgentImpl::shouldSkipExceptionPause(
+    JavaScriptCallFrame* topCallFrame) {
+  if (m_steppingFromFramework) return RequestNoSkip;
+  if (isCallFrameWithUnknownScriptOrBlackboxed(topCallFrame))
+    return RequestContinue;
+  return RequestNoSkip;
+}
+
+// Policy for step pauses: step-frame through blackboxed frames, bounded by
+// maxSkipStepFrameCount, after which a step-out is requested instead.
+V8DebuggerAgentImpl::SkipPauseRequest V8DebuggerAgentImpl::shouldSkipStepPause(
+    JavaScriptCallFrame* topCallFrame) {
+  if (m_steppingFromFramework) return RequestNoSkip;
+
+  if (m_skipNextDebuggerStepOut) {
+    m_skipNextDebuggerStepOut = false;
+    if (m_scheduledDebuggerStep == StepOut) return RequestStepOut;
+  }
+
+  if (!isCallFrameWithUnknownScriptOrBlackboxed(topCallFrame))
+    return RequestNoSkip;
+
+  if (m_skippedStepFrameCount >= maxSkipStepFrameCount) return RequestStepOut;
+
+  if (!m_skippedStepFrameCount) m_recursionLevelForStepFrame = 1;
+
+  ++m_skippedStepFrameCount;
+  return RequestStepFrame;
+}
+
+// Materializes one protocol-level breakpoint in one concrete script.
+// Returns the actual (possibly shifted) location, or nullptr when the
+// script is unknown, the line is out of the script's range, or V8 refuses
+// the breakpoint. On success records the V8 breakpoint id in both lookup
+// maps.
+std::unique_ptr<protocol::Debugger::Location>
+V8DebuggerAgentImpl::resolveBreakpoint(const String16& breakpointId,
+                                       const String16& scriptId,
+                                       const ScriptBreakpoint& breakpoint,
+                                       BreakpointSource source) {
+  DCHECK(enabled());
+  // FIXME: remove these checks once crbug.com/520702 is resolved.
+  CHECK(!breakpointId.isEmpty());
+  CHECK(!scriptId.isEmpty());
+  ScriptsMap::iterator scriptIterator = m_scripts.find(scriptId);
+  if (scriptIterator == m_scripts.end()) return nullptr;
+  if (breakpoint.lineNumber < scriptIterator->second->startLine() ||
+      scriptIterator->second->endLine() < breakpoint.lineNumber)
+    return nullptr;
+
+  int actualLineNumber;
+  int actualColumnNumber;
+  String16 debuggerBreakpointId = m_debugger->setBreakpoint(
+      scriptId, breakpoint, &actualLineNumber, &actualColumnNumber);
+  if (debuggerBreakpointId.isEmpty()) return nullptr;
+
+  m_serverBreakpoints[debuggerBreakpointId] =
+      std::make_pair(breakpointId, source);
+  CHECK(!breakpointId.isEmpty());
+
+  m_breakpointIdToDebuggerBreakpointIds[breakpointId].push_back(
+      debuggerBreakpointId);
+  return buildProtocolLocation(scriptId, actualLineNumber, actualColumnNumber);
+}
+
+// Protocol Debugger.searchInContent: runs a text/regex search over a known
+// script's source and returns the matches.
+void V8DebuggerAgentImpl::searchInContent(
+    ErrorString* error, const String16& scriptId, const String16& query,
+    const Maybe<bool>& optionalCaseSensitive,
+    const Maybe<bool>& optionalIsRegex,
+    std::unique_ptr<Array<protocol::Debugger::SearchMatch>>* results) {
+  v8::HandleScope handles(m_isolate);
+  ScriptsMap::iterator it = m_scripts.find(scriptId);
+  if (it == m_scripts.end()) {
+    *error = String16("No script for id: " + scriptId);
+    return;
+  }
+
+  std::vector<std::unique_ptr<protocol::Debugger::SearchMatch>> matches =
+      searchInTextByLinesImpl(m_session,
+                              toProtocolString(it->second->source(m_isolate)),
+                              query, optionalCaseSensitive.fromMaybe(false),
+                              optionalIsRegex.fromMaybe(false));
+  *results = protocol::Array<protocol::Debugger::SearchMatch>::create();
+  for (size_t i = 0; i < matches.size(); ++i)
+    (*results)->addItem(std::move(matches[i]));
+}
+
+// Protocol Debugger.setScriptSource (live edit): applies the new source via
+// the debugger (or only validates it when dryRun), updates the cached
+// script, and reports the refreshed paused call stack.
+void V8DebuggerAgentImpl::setScriptSource(
+    ErrorString* errorString, const String16& scriptId,
+    const String16& newContent, const Maybe<bool>& dryRun,
+    Maybe<protocol::Array<protocol::Debugger::CallFrame>>* newCallFrames,
+    Maybe<bool>* stackChanged, Maybe<StackTrace>* asyncStackTrace,
+    Maybe<protocol::Runtime::ExceptionDetails>* optOutCompileError) {
+  if (!checkEnabled(errorString)) return;
+
+  v8::HandleScope handles(m_isolate);
+  v8::Local<v8::String> newSource = toV8String(m_isolate, newContent);
+  if (!m_debugger->setScriptSource(scriptId, newSource, dryRun.fromMaybe(false),
+                                   errorString, optOutCompileError,
+                                   &m_pausedCallFrames, stackChanged))
+    return;
+
+  ScriptsMap::iterator it = m_scripts.find(scriptId);
+  if (it != m_scripts.end()) it->second->setSource(m_isolate, newSource);
+
+  std::unique_ptr<Array<CallFrame>> callFrames = currentCallFrames(errorString);
+  if (!callFrames) return;
+  *newCallFrames = std::move(callFrames);
+  *asyncStackTrace = currentAsyncStackTrace();
+}
+
+// Protocol Debugger.restartFrame: restarts execution of the identified
+// paused frame and returns the resulting (refreshed) call stack.
+void V8DebuggerAgentImpl::restartFrame(
+    ErrorString* errorString, const String16& callFrameId,
+    std::unique_ptr<Array<CallFrame>>* newCallFrames,
+    Maybe<StackTrace>* asyncStackTrace) {
+  if (!assertPaused(errorString)) return;
+  InjectedScript::CallFrameScope scope(
+      errorString, m_inspector, m_session->contextGroupId(), callFrameId);
+  if (!scope.initialize()) return;
+  if (scope.frameOrdinal() >= m_pausedCallFrames.size()) {
+    *errorString = "Could not find call frame with given id";
+    return;
+  }
+
+  // restart() must yield a true boolean without throwing; anything else is
+  // treated as an internal failure.
+  v8::Local<v8::Value> resultValue;
+  v8::Local<v8::Boolean> result;
+  if (!m_pausedCallFrames[scope.frameOrdinal()]->restart().ToLocal(
+          &resultValue) ||
+      scope.tryCatch().HasCaught() ||
+      !resultValue->ToBoolean(scope.context()).ToLocal(&result) ||
+      !result->Value()) {
+    *errorString = "Internal error";
+    return;
+  }
+  JavaScriptCallFrames frames = m_debugger->currentCallFrames();
+  m_pausedCallFrames.swap(frames);
+
+  *newCallFrames = currentCallFrames(errorString);
+  if (!*newCallFrames) return;
+  *asyncStackTrace = currentAsyncStackTrace();
+}
+
+// Protocol Debugger.getScriptSource: returns the cached source of a known
+// script.
+void V8DebuggerAgentImpl::getScriptSource(ErrorString* error,
+                                          const String16& scriptId,
+                                          String16* scriptSource) {
+  if (!checkEnabled(error)) return;
+  ScriptsMap::iterator it = m_scripts.find(scriptId);
+  if (it == m_scripts.end()) {
+    *error = "No script for id: " + scriptId;
+    return;
+  }
+  v8::HandleScope handles(m_isolate);
+  *scriptSource = toProtocolString(it->second->source(m_isolate));
+}
+
+// Arms a pause on the next statement for a native event (e.g. DOM/event
+// callback), recording the break reason/data to report when the pause
+// actually happens. No-op while stepping into, already pausing, paused, or
+// with breakpoints deactivated.
+void V8DebuggerAgentImpl::schedulePauseOnNextStatement(
+    const String16& breakReason,
+    std::unique_ptr<protocol::DictionaryValue> data) {
+  if (!enabled() || m_scheduledDebuggerStep == StepInto ||
+      m_javaScriptPauseScheduled || m_debugger->isPaused() ||
+      !m_debugger->breakpointsActivated())
+    return;
+  m_breakReason = breakReason;
+  m_breakAuxData = std::move(data);
+  m_pausingOnNativeEvent = true;
+  m_skipNextDebuggerStepOut = false;
+  m_debugger->setPauseOnNextStatement(true);
+}
+
+// Arms a pause only when a step-into is pending, so the step lands inside
+// the upcoming callback rather than after it.
+void V8DebuggerAgentImpl::schedulePauseOnNextStatementIfSteppingInto() {
+  DCHECK(enabled());
+  if (m_scheduledDebuggerStep != StepInto || m_javaScriptPauseScheduled ||
+      m_debugger->isPaused())
+    return;
+  clearBreakDetails();
+  m_pausingOnNativeEvent = false;
+  m_skippedStepFrameCount = 0;
+  m_recursionLevelForStepFrame = 0;
+  m_debugger->setPauseOnNextStatement(true);
+}
+
+// Disarms a previously scheduled pause -- unless the user explicitly asked
+// to pause or we are already paused.
+void V8DebuggerAgentImpl::cancelPauseOnNextStatement() {
+  if (m_javaScriptPauseScheduled || m_debugger->isPaused()) return;
+  clearBreakDetails();
+  m_pausingOnNativeEvent = false;
+  m_debugger->setPauseOnNextStatement(false);
+}
+
+// Protocol Debugger.pause: requests a user-initiated pause on the next
+// statement.
+void V8DebuggerAgentImpl::pause(ErrorString* errorString) {
+  if (!checkEnabled(errorString)) return;
+  if (m_javaScriptPauseScheduled || m_debugger->isPaused()) return;
+  clearBreakDetails();
+  m_javaScriptPauseScheduled = true;
+  m_scheduledDebuggerStep = NoStep;
+  m_skippedStepFrameCount = 0;
+  m_steppingFromFramework = false;
+  m_debugger->setPauseOnNextStatement(true);
+}
+
+// Protocol Debugger.resume: releases backtrace RemoteObjects and continues.
+void V8DebuggerAgentImpl::resume(ErrorString* errorString) {
+  if (!assertPaused(errorString)) return;
+  m_scheduledDebuggerStep = NoStep;
+  m_steppingFromFramework = false;
+  m_session->releaseObjectGroup(backtraceObjectGroup);
+  m_debugger->continueProgram();
+}
+
+// Protocol Debugger.stepOver.
+void V8DebuggerAgentImpl::stepOver(ErrorString* errorString) {
+  if (!assertPaused(errorString)) return;
+  // StepOver at function return point should fallback to StepInto.
+  JavaScriptCallFrame* frame =
+      !m_pausedCallFrames.empty() ? m_pausedCallFrames[0].get() : nullptr;
+  if (frame && frame->isAtReturn()) {
+    stepInto(errorString);
+    return;
+  }
+  m_scheduledDebuggerStep = StepOver;
+  m_steppingFromFramework = isTopPausedCallFrameBlackboxed();
+  m_session->releaseObjectGroup(backtraceObjectGroup);
+  m_debugger->stepOverStatement();
+}
+
+// Protocol Debugger.stepInto.
+void V8DebuggerAgentImpl::stepInto(ErrorString* errorString) {
+  if (!assertPaused(errorString)) return;
+  m_scheduledDebuggerStep = StepInto;
+  m_steppingFromFramework = isTopPausedCallFrameBlackboxed();
+  m_session->releaseObjectGroup(backtraceObjectGroup);
+  m_debugger->stepIntoStatement();
+}
+
+// Protocol Debugger.stepOut.
+void V8DebuggerAgentImpl::stepOut(ErrorString* errorString) {
+  if (!assertPaused(errorString)) return;
+  m_scheduledDebuggerStep = StepOut;
+  m_skipNextDebuggerStepOut = false;
+  m_recursionLevelForStepOut = 1;
+  m_steppingFromFramework = isTopPausedCallFrameBlackboxed();
+  m_session->releaseObjectGroup(backtraceObjectGroup);
+  m_debugger->stepOutOfFunction();
+}
+
+// Protocol Debugger.setPauseOnExceptions: translates the protocol string
+// ("none" | "all" | "uncaught") to the V8Debugger enum and applies it.
+void V8DebuggerAgentImpl::setPauseOnExceptions(
+    ErrorString* errorString, const String16& stringPauseState) {
+  if (!checkEnabled(errorString)) return;
+  V8Debugger::PauseOnExceptionsState pauseState;
+  if (stringPauseState == "none") {
+    pauseState = V8Debugger::DontPauseOnExceptions;
+  } else if (stringPauseState == "all") {
+    pauseState = V8Debugger::PauseOnAllExceptions;
+  } else if (stringPauseState == "uncaught") {
+    pauseState = V8Debugger::PauseOnUncaughtExceptions;
+  } else {
+    *errorString = "Unknown pause on exceptions mode: " + stringPauseState;
+    return;
+  }
+  setPauseOnExceptionsImpl(errorString, pauseState);
+}
+
+// Applies the state, verifies the debugger accepted it by reading it back,
+// and persists it on success.
+void V8DebuggerAgentImpl::setPauseOnExceptionsImpl(ErrorString* errorString,
+                                                   int pauseState) {
+  m_debugger->setPauseOnExceptionsState(
+      static_cast<V8Debugger::PauseOnExceptionsState>(pauseState));
+  if (m_debugger->getPauseOnExceptionsState() != pauseState)
+    *errorString = "Internal error. Could not change pause on exceptions state";
+  else
+    m_state->setInteger(DebuggerAgentState::pauseOnExceptionsState, pauseState);
+}
+
+void V8DebuggerAgentImpl::evaluateOnCallFrame(
+    ErrorString* errorString, const String16& callFrameId,
+    const String16& expression, const Maybe<String16>& objectGroup,
+    const Maybe<bool>& includeCommandLineAPI, const Maybe<bool>& silent,
+    const Maybe<bool>& returnByValue, const Maybe<bool>& generatePreview,
+    std::unique_ptr<RemoteObject>* result,
+    Maybe<protocol::Runtime::ExceptionDetails>* exceptionDetails) {
+  if (!assertPaused(errorString)) return;
+  InjectedScript::CallFrameScope scope(
+      errorString, m_inspector, m_session->contextGroupId(), callFrameId);
+  if (!scope.initialize()) return;
+  if (scope.frameOrdinal() >= m_pausedCallFrames.size()) {
+    *errorString = "Could not find call frame with given id";
+    return;
+  }
+
+  if (includeCommandLineAPI.fromMaybe(false) && !scope.installCommandLineAPI())
+    return;
+  if (silent.fromMaybe(false)) scope.ignoreExceptionsAndMuteConsole();
+
+  v8::MaybeLocal<v8::Value> maybeResultValue =
+      m_pausedCallFrames[scope.frameOrdinal()]->evaluate(
+          toV8String(m_isolate, expression));
+
+  // Re-initialize after running client's code, as it could have destroyed
+  // context or session.
+  if (!scope.initialize()) return;
+  scope.injectedScript()->wrapEvaluateResult(
+      errorString, maybeResultValue, scope.tryCatch(),
+      objectGroup.fromMaybe(""), returnByValue.fromMaybe(false),
+      generatePreview.fromMaybe(false), result, exceptionDetails);
+}
+
+void V8DebuggerAgentImpl::setVariableValue(
+    ErrorString* errorString, int scopeNumber, const String16& variableName,
+    std::unique_ptr<protocol::Runtime::CallArgument> newValueArgument,
+    const String16& callFrameId) {
+  if (!checkEnabled(errorString)) return;
+  if (!assertPaused(errorString)) return;
+  InjectedScript::CallFrameScope scope(
+      errorString, m_inspector, m_session->contextGroupId(), callFrameId);
+  if (!scope.initialize()) return;
+
+  v8::Local<v8::Value> newValue;
+  if (!scope.injectedScript()
+           ->resolveCallArgument(errorString, newValueArgument.get())
+           .ToLocal(&newValue))
+    return;
+
+  if (scope.frameOrdinal() >= m_pausedCallFrames.size()) {
+    *errorString = "Could not find call frame with given id";
+    return;
+  }
+  v8::MaybeLocal<v8::Value> result =
+      m_pausedCallFrames[scope.frameOrdinal()]->setVariableValue(
+          scopeNumber, toV8String(m_isolate, variableName), newValue);
+  if (scope.tryCatch().HasCaught() || result.IsEmpty()) {
+    *errorString = "Internal error";
+    return;
+  }
+}
+
+void V8DebuggerAgentImpl::setAsyncCallStackDepth(ErrorString* errorString,
+                                                 int depth) {
+  if (!checkEnabled(errorString)) return;
+  m_state->setInteger(DebuggerAgentState::asyncCallStackDepth, depth);
+  m_debugger->setAsyncCallStackDepth(this, depth);
+}
+
+void V8DebuggerAgentImpl::setBlackboxPatterns(
+    ErrorString* errorString,
+    std::unique_ptr<protocol::Array<String16>> patterns) {
+  if (!patterns->length()) {
+    m_blackboxPattern = nullptr;
+    m_state->remove(DebuggerAgentState::blackboxPattern);
+    return;
+  }
+
+  String16Builder patternBuilder;
+  patternBuilder.append('(');
+  for (size_t i = 0; i < patterns->length() - 1; ++i) {
+    patternBuilder.append(patterns->get(i));
+    patternBuilder.append("|");
+  }
+  patternBuilder.append(patterns->get(patterns->length() - 1));
+  patternBuilder.append(')');
+  String16 pattern = patternBuilder.toString();
+  if (!setBlackboxPattern(errorString, pattern)) return;
+  m_state->setString(DebuggerAgentState::blackboxPattern, pattern);
+}
+
+bool V8DebuggerAgentImpl::setBlackboxPattern(ErrorString* errorString,
+                                             const String16& pattern) {
+  std::unique_ptr<V8Regex> regex(new V8Regex(
+      m_inspector, pattern, true /** caseSensitive */, false /** multiline */));
+  if (!regex->isValid()) {
+    *errorString = "Pattern parser error: " + regex->errorMessage();
+    return false;
+  }
+  m_blackboxPattern = std::move(regex);
+  return true;
+}
+
+void V8DebuggerAgentImpl::setBlackboxedRanges(
+    ErrorString* error, const String16& scriptId,
+    std::unique_ptr<protocol::Array<protocol::Debugger::ScriptPosition>>
+        inPositions) {
+  if (m_scripts.find(scriptId) == m_scripts.end()) {
+    *error = "No script with passed id.";
+    return;
+  }
+
+  if (!inPositions->length()) {
+    m_blackboxedPositions.erase(scriptId);
+    return;
+  }
+
+  std::vector<std::pair<int, int>> positions;
+  positions.reserve(inPositions->length());
+  for (size_t i = 0; i < inPositions->length(); ++i) {
+    protocol::Debugger::ScriptPosition* position = inPositions->get(i);
+    if (position->getLineNumber() < 0) {
+      *error = "Position missing 'line' or 'line' < 0.";
+      return;
+    }
+    if (position->getColumnNumber() < 0) {
+      *error = "Position missing 'column' or 'column' < 0.";
+      return;
+    }
+    positions.push_back(
+        std::make_pair(position->getLineNumber(), position->getColumnNumber()));
+  }
+
+  for (size_t i = 1; i < positions.size(); ++i) {
+    if (positions[i - 1].first < positions[i].first) continue;
+    if (positions[i - 1].first == positions[i].first &&
+        positions[i - 1].second < positions[i].second)
+      continue;
+    *error =
+        "Input positions array is not sorted or contains duplicate values.";
+    return;
+  }
+
+  m_blackboxedPositions[scriptId] = positions;
+}
+
+void V8DebuggerAgentImpl::willExecuteScript(int scriptId) {
+  changeJavaScriptRecursionLevel(+1);
+  // Fast return.
+  if (m_scheduledDebuggerStep != StepInto) return;
+  schedulePauseOnNextStatementIfSteppingInto();
+}
+
+void V8DebuggerAgentImpl::didExecuteScript() {
+  changeJavaScriptRecursionLevel(-1);
+}
+
+void V8DebuggerAgentImpl::changeJavaScriptRecursionLevel(int step) {
+  if (m_javaScriptPauseScheduled && !m_skipAllPauses &&
+      !m_debugger->isPaused()) {
+    // Do not ever lose the user's pause request until we have actually paused.
+    m_debugger->setPauseOnNextStatement(true);
+  }
+  if (m_scheduledDebuggerStep == StepOut) {
+    m_recursionLevelForStepOut += step;
+    if (!m_recursionLevelForStepOut) {
+      // When StepOut crosses a task boundary (i.e. js -> c++) relative to
+      // where it was requested, switch stepping to step into the next JS
+      // task, as if we had exited to a blackboxed framework, rather than
+      // continuing the StepOut.
+      m_scheduledDebuggerStep = StepInto;
+      m_skipNextDebuggerStepOut = false;
+    }
+  }
+  if (m_recursionLevelForStepFrame) {
+    m_recursionLevelForStepFrame += step;
+    if (!m_recursionLevelForStepFrame) {
+      // We have walked through a blackboxed framework and got back to where we
+      // started.
+      // If there was no stepping scheduled, we should cancel the stepping
+      // explicitly,
+      // since there may be a scheduled StepFrame left.
+      // Otherwise, if we were stepping in/over, the StepFrame will stop at the
+      // right location,
+      // whereas if we were stepping out, we should continue doing so after
+      // debugger pauses
+      // from the old StepFrame.
+      m_skippedStepFrameCount = 0;
+      if (m_scheduledDebuggerStep == NoStep)
+        m_debugger->clearStepping();
+      else if (m_scheduledDebuggerStep == StepOut)
+        m_skipNextDebuggerStepOut = true;
+    }
+  }
+}
+
+std::unique_ptr<Array<CallFrame>> V8DebuggerAgentImpl::currentCallFrames(
+    ErrorString* errorString) {
+  if (m_pausedContext.IsEmpty() || !m_pausedCallFrames.size())
+    return Array<CallFrame>::create();
+  ErrorString ignored;
+  v8::HandleScope handles(m_isolate);
+  v8::Local<v8::Context> debuggerContext =
+      v8::Debug::GetDebugContext(m_isolate);
+  v8::Context::Scope contextScope(debuggerContext);
+
+  v8::Local<v8::Array> objects = v8::Array::New(m_isolate);
+
+  for (size_t frameOrdinal = 0; frameOrdinal < m_pausedCallFrames.size();
+       ++frameOrdinal) {
+    const std::unique_ptr<JavaScriptCallFrame>& currentCallFrame =
+        m_pausedCallFrames[frameOrdinal];
+
+    v8::Local<v8::Object> details = currentCallFrame->details();
+    if (hasInternalError(errorString, details.IsEmpty()))
+      return Array<CallFrame>::create();
+
+    int contextId = currentCallFrame->contextId();
+    InjectedScript* injectedScript =
+        contextId ? m_session->findInjectedScript(&ignored, contextId)
+                  : nullptr;
+
+    String16 callFrameId =
+        RemoteCallFrameId::serialize(contextId, static_cast<int>(frameOrdinal));
+    if (hasInternalError(
+            errorString,
+            !details
+                 ->Set(debuggerContext,
+                       toV8StringInternalized(m_isolate, "callFrameId"),
+                       toV8String(m_isolate, callFrameId))
+                 .FromMaybe(false)))
+      return Array<CallFrame>::create();
+
+    if (injectedScript) {
+      v8::Local<v8::Value> scopeChain;
+      if (hasInternalError(
+              errorString,
+              !details->Get(debuggerContext,
+                            toV8StringInternalized(m_isolate, "scopeChain"))
+                      .ToLocal(&scopeChain) ||
+                  !scopeChain->IsArray()))
+        return Array<CallFrame>::create();
+      v8::Local<v8::Array> scopeChainArray = scopeChain.As<v8::Array>();
+      if (!injectedScript->wrapPropertyInArray(
+              errorString, scopeChainArray,
+              toV8StringInternalized(m_isolate, "object"),
+              backtraceObjectGroup))
+        return Array<CallFrame>::create();
+      if (!injectedScript->wrapObjectProperty(
+              errorString, details, toV8StringInternalized(m_isolate, "this"),
+              backtraceObjectGroup))
+        return Array<CallFrame>::create();
+      if (details
+              ->Has(debuggerContext,
+                    toV8StringInternalized(m_isolate, "returnValue"))
+              .FromMaybe(false)) {
+        if (!injectedScript->wrapObjectProperty(
+                errorString, details,
+                toV8StringInternalized(m_isolate, "returnValue"),
+                backtraceObjectGroup))
+          return Array<CallFrame>::create();
+      }
+    } else {
+      if (hasInternalError(errorString, !details
+                                             ->Set(debuggerContext,
+                                                   toV8StringInternalized(
+                                                       m_isolate, "scopeChain"),
+                                                   v8::Array::New(m_isolate, 0))
+                                             .FromMaybe(false)))
+        return Array<CallFrame>::create();
+      v8::Local<v8::Object> remoteObject = v8::Object::New(m_isolate);
+      if (hasInternalError(
+              errorString,
+              !remoteObject
+                   ->Set(debuggerContext,
+                         toV8StringInternalized(m_isolate, "type"),
+                         toV8StringInternalized(m_isolate, "undefined"))
+                   .FromMaybe(false)))
+        return Array<CallFrame>::create();
+      if (hasInternalError(errorString,
+                           !details
+                                ->Set(debuggerContext,
+                                      toV8StringInternalized(m_isolate, "this"),
+                                      remoteObject)
+                                .FromMaybe(false)))
+        return Array<CallFrame>::create();
+      if (hasInternalError(
+              errorString,
+              !details
+                   ->Delete(debuggerContext,
+                            toV8StringInternalized(m_isolate, "returnValue"))
+                   .FromMaybe(false)))
+        return Array<CallFrame>::create();
+    }
+
+    if (hasInternalError(
+            errorString,
+            !objects
+                 ->Set(debuggerContext, static_cast<int>(frameOrdinal), details)
+                 .FromMaybe(false)))
+      return Array<CallFrame>::create();
+  }
+
+  std::unique_ptr<protocol::Value> protocolValue =
+      toProtocolValue(errorString, debuggerContext, objects);
+  if (!protocolValue) return Array<CallFrame>::create();
+  protocol::ErrorSupport errorSupport;
+  std::unique_ptr<Array<CallFrame>> callFrames =
+      Array<CallFrame>::parse(protocolValue.get(), &errorSupport);
+  if (hasInternalError(errorString, !callFrames))
+    return Array<CallFrame>::create();
+  return callFrames;
+}
+
+std::unique_ptr<StackTrace> V8DebuggerAgentImpl::currentAsyncStackTrace() {
+  if (m_pausedContext.IsEmpty()) return nullptr;
+  V8StackTraceImpl* stackTrace = m_debugger->currentAsyncCallChain();
+  return stackTrace ? stackTrace->buildInspectorObjectForTail(m_debugger)
+                    : nullptr;
+}
+
+void V8DebuggerAgentImpl::didParseSource(
+    std::unique_ptr<V8DebuggerScript> script, bool success) {
+  v8::HandleScope handles(m_isolate);
+  String16 scriptSource = toProtocolString(script->source(m_isolate));
+  if (!success) script->setSourceURL(findSourceURL(scriptSource, false));
+  if (!success)
+    script->setSourceMappingURL(findSourceMapURL(scriptSource, false));
+
+  std::unique_ptr<protocol::DictionaryValue> executionContextAuxData;
+  if (!script->executionContextAuxData().isEmpty())
+    executionContextAuxData = protocol::DictionaryValue::cast(
+        protocol::parseJSON(script->executionContextAuxData()));
+  bool isLiveEdit = script->isLiveEdit();
+  bool hasSourceURL = script->hasSourceURL();
+  String16 scriptId = script->scriptId();
+  String16 scriptURL = script->sourceURL();
+
+  const Maybe<String16>& sourceMapURLParam = script->sourceMappingURL();
+  const Maybe<protocol::DictionaryValue>& executionContextAuxDataParam(
+      std::move(executionContextAuxData));
+  const bool* isLiveEditParam = isLiveEdit ? &isLiveEdit : nullptr;
+  const bool* hasSourceURLParam = hasSourceURL ? &hasSourceURL : nullptr;
+  if (success)
+    m_frontend.scriptParsed(
+        scriptId, scriptURL, script->startLine(), script->startColumn(),
+        script->endLine(), script->endColumn(), script->executionContextId(),
+        script->hash(), executionContextAuxDataParam, isLiveEditParam,
+        sourceMapURLParam, hasSourceURLParam);
+  else
+    m_frontend.scriptFailedToParse(
+        scriptId, scriptURL, script->startLine(), script->startColumn(),
+        script->endLine(), script->endColumn(), script->executionContextId(),
+        script->hash(), executionContextAuxDataParam, sourceMapURLParam,
+        hasSourceURLParam);
+
+  m_scripts[scriptId] = std::move(script);
+
+  if (scriptURL.isEmpty() || !success) return;
+
+  protocol::DictionaryValue* breakpointsCookie =
+      m_state->getObject(DebuggerAgentState::javaScriptBreakpoints);
+  if (!breakpointsCookie) return;
+
+  for (size_t i = 0; i < breakpointsCookie->size(); ++i) {
+    auto cookie = breakpointsCookie->at(i);
+    protocol::DictionaryValue* breakpointObject =
+        protocol::DictionaryValue::cast(cookie.second);
+    bool isRegex;
+    breakpointObject->getBoolean(DebuggerAgentState::isRegex, &isRegex);
+    String16 url;
+    breakpointObject->getString(DebuggerAgentState::url, &url);
+    if (!matches(m_inspector, scriptURL, url, isRegex)) continue;
+    ScriptBreakpoint breakpoint;
+    breakpointObject->getInteger(DebuggerAgentState::lineNumber,
+                                 &breakpoint.lineNumber);
+    breakpointObject->getInteger(DebuggerAgentState::columnNumber,
+                                 &breakpoint.columnNumber);
+    breakpointObject->getString(DebuggerAgentState::condition,
+                                &breakpoint.condition);
+    std::unique_ptr<protocol::Debugger::Location> location = resolveBreakpoint(
+        cookie.first, scriptId, breakpoint, UserBreakpointSource);
+    if (location)
+      m_frontend.breakpointResolved(cookie.first, std::move(location));
+  }
+}
+
+V8DebuggerAgentImpl::SkipPauseRequest V8DebuggerAgentImpl::didPause(
+    v8::Local<v8::Context> context, v8::Local<v8::Value> exception,
+    const std::vector<String16>& hitBreakpoints, bool isPromiseRejection) {
+  JavaScriptCallFrames callFrames = m_debugger->currentCallFrames(1);
+  JavaScriptCallFrame* topCallFrame =
+      !callFrames.empty() ? callFrames.begin()->get() : nullptr;
+
+  V8DebuggerAgentImpl::SkipPauseRequest result;
+  if (m_skipAllPauses)
+    result = RequestContinue;
+  else if (!hitBreakpoints.empty())
+    result = RequestNoSkip;  // Don't skip explicit breakpoints even if set in
+                             // frameworks.
+  else if (!exception.IsEmpty())
+    result = shouldSkipExceptionPause(topCallFrame);
+  else if (m_scheduledDebuggerStep != NoStep || m_javaScriptPauseScheduled ||
+           m_pausingOnNativeEvent)
+    result = shouldSkipStepPause(topCallFrame);
+  else
+    result = RequestNoSkip;
+
+  m_skipNextDebuggerStepOut = false;
+  if (result != RequestNoSkip) return result;
+  // Skip pauses inside V8 internal scripts and on syntax errors.
+  if (!topCallFrame) return RequestContinue;
+
+  DCHECK(m_pausedContext.IsEmpty());
+  JavaScriptCallFrames frames = m_debugger->currentCallFrames();
+  m_pausedCallFrames.swap(frames);
+  m_pausedContext.Reset(m_isolate, context);
+  v8::HandleScope handles(m_isolate);
+
+  if (!exception.IsEmpty()) {
+    ErrorString ignored;
+    InjectedScript* injectedScript =
+        m_session->findInjectedScript(&ignored, V8Debugger::contextId(context));
+    if (injectedScript) {
+      m_breakReason =
+          isPromiseRejection
+              ? protocol::Debugger::Paused::ReasonEnum::PromiseRejection
+              : protocol::Debugger::Paused::ReasonEnum::Exception;
+      ErrorString errorString;
+      auto obj = injectedScript->wrapObject(&errorString, exception,
+                                            backtraceObjectGroup);
+      m_breakAuxData = obj ? obj->serialize() : nullptr;
+      // m_breakAuxData might be null after this.
+    }
+  }
+
+  std::unique_ptr<Array<String16>> hitBreakpointIds = Array<String16>::create();
+
+  for (const auto& point : hitBreakpoints) {
+    DebugServerBreakpointToBreakpointIdAndSourceMap::iterator
+        breakpointIterator = m_serverBreakpoints.find(point);
+    if (breakpointIterator != m_serverBreakpoints.end()) {
+      const String16& localId = breakpointIterator->second.first;
+      hitBreakpointIds->addItem(localId);
+
+      BreakpointSource source = breakpointIterator->second.second;
+      if (m_breakReason == protocol::Debugger::Paused::ReasonEnum::Other &&
+          source == DebugCommandBreakpointSource)
+        m_breakReason = protocol::Debugger::Paused::ReasonEnum::DebugCommand;
+    }
+  }
+
+  ErrorString errorString;
+  m_frontend.paused(currentCallFrames(&errorString), m_breakReason,
+                    std::move(m_breakAuxData), std::move(hitBreakpointIds),
+                    currentAsyncStackTrace());
+  m_scheduledDebuggerStep = NoStep;
+  m_javaScriptPauseScheduled = false;
+  m_steppingFromFramework = false;
+  m_pausingOnNativeEvent = false;
+  m_skippedStepFrameCount = 0;
+  m_recursionLevelForStepFrame = 0;
+
+  if (!m_continueToLocationBreakpointId.isEmpty()) {
+    m_debugger->removeBreakpoint(m_continueToLocationBreakpointId);
+    m_continueToLocationBreakpointId = "";
+  }
+  return result;
+}
+
+void V8DebuggerAgentImpl::didContinue() {
+  m_pausedContext.Reset();
+  JavaScriptCallFrames emptyCallFrames;
+  m_pausedCallFrames.swap(emptyCallFrames);
+  clearBreakDetails();
+  m_frontend.resumed();
+}
+
+void V8DebuggerAgentImpl::breakProgram(
+    const String16& breakReason,
+    std::unique_ptr<protocol::DictionaryValue> data) {
+  if (!enabled() || m_skipAllPauses || !m_pausedContext.IsEmpty() ||
+      isCurrentCallStackEmptyOrBlackboxed() ||
+      !m_debugger->breakpointsActivated())
+    return;
+  m_breakReason = breakReason;
+  m_breakAuxData = std::move(data);
+  m_scheduledDebuggerStep = NoStep;
+  m_steppingFromFramework = false;
+  m_pausingOnNativeEvent = false;
+  m_debugger->breakProgram();
+}
+
+void V8DebuggerAgentImpl::breakProgramOnException(
+    const String16& breakReason,
+    std::unique_ptr<protocol::DictionaryValue> data) {
+  if (!enabled() ||
+      m_debugger->getPauseOnExceptionsState() ==
+          V8Debugger::DontPauseOnExceptions)
+    return;
+  breakProgram(breakReason, std::move(data));
+}
+
+bool V8DebuggerAgentImpl::assertPaused(ErrorString* errorString) {
+  if (m_pausedContext.IsEmpty()) {
+    *errorString = "Can only perform operation while paused.";
+    return false;
+  }
+  return true;
+}
+
+void V8DebuggerAgentImpl::clearBreakDetails() {
+  m_breakReason = protocol::Debugger::Paused::ReasonEnum::Other;
+  m_breakAuxData = nullptr;
+}
+
+void V8DebuggerAgentImpl::setBreakpointAt(const String16& scriptId,
+                                          int lineNumber, int columnNumber,
+                                          BreakpointSource source,
+                                          const String16& condition) {
+  String16 breakpointId =
+      generateBreakpointId(scriptId, lineNumber, columnNumber, source);
+  ScriptBreakpoint breakpoint(lineNumber, columnNumber, condition);
+  resolveBreakpoint(breakpointId, scriptId, breakpoint, source);
+}
+
+void V8DebuggerAgentImpl::removeBreakpointAt(const String16& scriptId,
+                                             int lineNumber, int columnNumber,
+                                             BreakpointSource source) {
+  removeBreakpoint(
+      generateBreakpointId(scriptId, lineNumber, columnNumber, source));
+}
+
+void V8DebuggerAgentImpl::reset() {
+  if (!enabled()) return;
+  m_scheduledDebuggerStep = NoStep;
+  m_scripts.clear();
+  m_blackboxedPositions.clear();
+  m_breakpointIdToDebuggerBreakpointIds.clear();
+}
+
+}  // namespace v8_inspector
diff --git a/src/inspector/v8-debugger-agent-impl.h b/src/inspector/v8-debugger-agent-impl.h
new file mode 100644
index 0000000..62aa67b
--- /dev/null
+++ b/src/inspector/v8-debugger-agent-impl.h
@@ -0,0 +1,224 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_INSPECTOR_V8DEBUGGERAGENTIMPL_H_
+#define V8_INSPECTOR_V8DEBUGGERAGENTIMPL_H_
+
+#include <vector>
+
+#include "src/base/macros.h"
+#include "src/inspector/java-script-call-frame.h"
+#include "src/inspector/protocol/Debugger.h"
+#include "src/inspector/protocol/Forward.h"
+
+namespace v8_inspector {
+
+struct ScriptBreakpoint;
+class JavaScriptCallFrame;
+class PromiseTracker;
+class V8Debugger;
+class V8DebuggerScript;
+class V8InspectorImpl;
+class V8InspectorSessionImpl;
+class V8Regex;
+class V8StackTraceImpl;
+
+using protocol::ErrorString;
+using protocol::Maybe;
+
+class V8DebuggerAgentImpl : public protocol::Debugger::Backend {
+ public:
+  enum SkipPauseRequest {
+    RequestNoSkip,
+    RequestContinue,
+    RequestStepInto,
+    RequestStepOut,
+    RequestStepFrame
+  };
+
+  enum BreakpointSource {
+    UserBreakpointSource,
+    DebugCommandBreakpointSource,
+    MonitorCommandBreakpointSource
+  };
+
+  V8DebuggerAgentImpl(V8InspectorSessionImpl*, protocol::FrontendChannel*,
+                      protocol::DictionaryValue* state);
+  ~V8DebuggerAgentImpl() override;
+  void restore();
+
+  // Part of the protocol.
+  void enable(ErrorString*) override;
+  void disable(ErrorString*) override;
+  void setBreakpointsActive(ErrorString*, bool active) override;
+  void setSkipAllPauses(ErrorString*, bool skip) override;
+  void setBreakpointByUrl(
+      ErrorString*, int lineNumber, const Maybe<String16>& optionalURL,
+      const Maybe<String16>& optionalURLRegex,
+      const Maybe<int>& optionalColumnNumber,
+      const Maybe<String16>& optionalCondition, String16*,
+      std::unique_ptr<protocol::Array<protocol::Debugger::Location>>* locations)
+      override;
+  void setBreakpoint(
+      ErrorString*, std::unique_ptr<protocol::Debugger::Location>,
+      const Maybe<String16>& optionalCondition, String16*,
+      std::unique_ptr<protocol::Debugger::Location>* actualLocation) override;
+  void removeBreakpoint(ErrorString*, const String16& breakpointId) override;
+  void continueToLocation(
+      ErrorString*, std::unique_ptr<protocol::Debugger::Location>) override;
+  void searchInContent(
+      ErrorString*, const String16& scriptId, const String16& query,
+      const Maybe<bool>& optionalCaseSensitive,
+      const Maybe<bool>& optionalIsRegex,
+      std::unique_ptr<protocol::Array<protocol::Debugger::SearchMatch>>*)
+      override;
+  void setScriptSource(
+      ErrorString*, const String16& inScriptId, const String16& inScriptSource,
+      const Maybe<bool>& dryRun,
+      Maybe<protocol::Array<protocol::Debugger::CallFrame>>* optOutCallFrames,
+      Maybe<bool>* optOutStackChanged,
+      Maybe<protocol::Runtime::StackTrace>* optOutAsyncStackTrace,
+      Maybe<protocol::Runtime::ExceptionDetails>* optOutCompileError) override;
+  void restartFrame(
+      ErrorString*, const String16& callFrameId,
+      std::unique_ptr<protocol::Array<protocol::Debugger::CallFrame>>*
+          newCallFrames,
+      Maybe<protocol::Runtime::StackTrace>* asyncStackTrace) override;
+  void getScriptSource(ErrorString*, const String16& scriptId,
+                       String16* scriptSource) override;
+  void pause(ErrorString*) override;
+  void resume(ErrorString*) override;
+  void stepOver(ErrorString*) override;
+  void stepInto(ErrorString*) override;
+  void stepOut(ErrorString*) override;
+  void setPauseOnExceptions(ErrorString*, const String16& pauseState) override;
+  void evaluateOnCallFrame(
+      ErrorString*, const String16& callFrameId, const String16& expression,
+      const Maybe<String16>& objectGroup,
+      const Maybe<bool>& includeCommandLineAPI, const Maybe<bool>& silent,
+      const Maybe<bool>& returnByValue, const Maybe<bool>& generatePreview,
+      std::unique_ptr<protocol::Runtime::RemoteObject>* result,
+      Maybe<protocol::Runtime::ExceptionDetails>*) override;
+  void setVariableValue(
+      ErrorString*, int scopeNumber, const String16& variableName,
+      std::unique_ptr<protocol::Runtime::CallArgument> newValue,
+      const String16& callFrame) override;
+  void setAsyncCallStackDepth(ErrorString*, int depth) override;
+  void setBlackboxPatterns(
+      ErrorString*,
+      std::unique_ptr<protocol::Array<String16>> patterns) override;
+  void setBlackboxedRanges(
+      ErrorString*, const String16& scriptId,
+      std::unique_ptr<protocol::Array<protocol::Debugger::ScriptPosition>>
+          positions) override;
+
+  bool enabled();
+
+  void setBreakpointAt(const String16& scriptId, int lineNumber,
+                       int columnNumber, BreakpointSource,
+                       const String16& condition = String16());
+  void removeBreakpointAt(const String16& scriptId, int lineNumber,
+                          int columnNumber, BreakpointSource);
+  void schedulePauseOnNextStatement(
+      const String16& breakReason,
+      std::unique_ptr<protocol::DictionaryValue> data);
+  void cancelPauseOnNextStatement();
+  void breakProgram(const String16& breakReason,
+                    std::unique_ptr<protocol::DictionaryValue> data);
+  void breakProgramOnException(const String16& breakReason,
+                               std::unique_ptr<protocol::DictionaryValue> data);
+
+  void reset();
+
+  // Interface for V8InspectorImpl
+  SkipPauseRequest didPause(v8::Local<v8::Context>,
+                            v8::Local<v8::Value> exception,
+                            const std::vector<String16>& hitBreakpoints,
+                            bool isPromiseRejection);
+  void didContinue();
+  void didParseSource(std::unique_ptr<V8DebuggerScript>, bool success);
+  void willExecuteScript(int scriptId);
+  void didExecuteScript();
+
+  v8::Isolate* isolate() { return m_isolate; }
+
+ private:
+  bool checkEnabled(ErrorString*);
+  void enable();
+
+  SkipPauseRequest shouldSkipExceptionPause(JavaScriptCallFrame* topCallFrame);
+  SkipPauseRequest shouldSkipStepPause(JavaScriptCallFrame* topCallFrame);
+
+  void schedulePauseOnNextStatementIfSteppingInto();
+
+  std::unique_ptr<protocol::Array<protocol::Debugger::CallFrame>>
+  currentCallFrames(ErrorString*);
+  std::unique_ptr<protocol::Runtime::StackTrace> currentAsyncStackTrace();
+
+  void changeJavaScriptRecursionLevel(int step);
+
+  void setPauseOnExceptionsImpl(ErrorString*, int);
+
+  std::unique_ptr<protocol::Debugger::Location> resolveBreakpoint(
+      const String16& breakpointId, const String16& scriptId,
+      const ScriptBreakpoint&, BreakpointSource);
+  void removeBreakpoint(const String16& breakpointId);
+  bool assertPaused(ErrorString*);
+  void clearBreakDetails();
+
+  bool isCurrentCallStackEmptyOrBlackboxed();
+  bool isTopPausedCallFrameBlackboxed();
+  bool isCallFrameWithUnknownScriptOrBlackboxed(JavaScriptCallFrame*);
+
+  void internalSetAsyncCallStackDepth(int);
+  void increaseCachedSkipStackGeneration();
+
+  bool setBlackboxPattern(ErrorString*, const String16& pattern);
+
+  using ScriptsMap =
+      protocol::HashMap<String16, std::unique_ptr<V8DebuggerScript>>;
+  using BreakpointIdToDebuggerBreakpointIdsMap =
+      protocol::HashMap<String16, std::vector<String16>>;
+  using DebugServerBreakpointToBreakpointIdAndSourceMap =
+      protocol::HashMap<String16, std::pair<String16, BreakpointSource>>;
+  using MuteBreakpoins = protocol::HashMap<String16, std::pair<String16, int>>;
+
+  enum DebuggerStep { NoStep = 0, StepInto, StepOver, StepOut };
+
+  V8InspectorImpl* m_inspector;
+  V8Debugger* m_debugger;
+  V8InspectorSessionImpl* m_session;
+  bool m_enabled;
+  protocol::DictionaryValue* m_state;
+  protocol::Debugger::Frontend m_frontend;
+  v8::Isolate* m_isolate;
+  v8::Global<v8::Context> m_pausedContext;
+  JavaScriptCallFrames m_pausedCallFrames;
+  ScriptsMap m_scripts;
+  BreakpointIdToDebuggerBreakpointIdsMap m_breakpointIdToDebuggerBreakpointIds;
+  DebugServerBreakpointToBreakpointIdAndSourceMap m_serverBreakpoints;
+  String16 m_continueToLocationBreakpointId;
+  String16 m_breakReason;
+  std::unique_ptr<protocol::DictionaryValue> m_breakAuxData;
+  DebuggerStep m_scheduledDebuggerStep;
+  bool m_skipNextDebuggerStepOut;
+  bool m_javaScriptPauseScheduled;
+  bool m_steppingFromFramework;
+  bool m_pausingOnNativeEvent;
+
+  int m_skippedStepFrameCount;
+  int m_recursionLevelForStepOut;
+  int m_recursionLevelForStepFrame;
+  bool m_skipAllPauses;
+
+  std::unique_ptr<V8Regex> m_blackboxPattern;
+  protocol::HashMap<String16, std::vector<std::pair<int, int>>>
+      m_blackboxedPositions;
+
+  DISALLOW_COPY_AND_ASSIGN(V8DebuggerAgentImpl);
+};
+
+}  // namespace v8_inspector
+
+#endif  // V8_INSPECTOR_V8DEBUGGERAGENTIMPL_H_
diff --git a/src/inspector/v8-debugger-script.cc b/src/inspector/v8-debugger-script.cc
new file mode 100644
index 0000000..485188a
--- /dev/null
+++ b/src/inspector/v8-debugger-script.cc
@@ -0,0 +1,140 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/inspector/v8-debugger-script.h"
+
+#include "src/inspector/protocol-platform.h"
+#include "src/inspector/string-util.h"
+
+namespace v8_inspector {
+
+static const char hexDigits[17] = "0123456789ABCDEF";
+
+static void appendUnsignedAsHex(uint64_t number, String16Builder* destination) {
+  for (size_t i = 0; i < 8; ++i) {
+    UChar c = hexDigits[number & 0xF];
+    destination->append(c);
+    number >>= 4;
+  }
+}
+
+// Hash algorithm for substrings is described in "Über die Komplexität der
+// Multiplikation in
+// eingeschränkten Branchingprogrammmodellen" by Woelfel.
+// http://opendatastructures.org/versions/edition-0.1d/ods-java/node33.html#SECTION00832000000000000000
+static String16 calculateHash(const String16& str) {
+  static uint64_t prime[] = {0x3FB75161, 0xAB1F4E4F, 0x82675BC5, 0xCD924D35,
+                             0x81ABE279};
+  static uint64_t random[] = {0x67452301, 0xEFCDAB89, 0x98BADCFE, 0x10325476,
+                              0xC3D2E1F0};
+  static uint32_t randomOdd[] = {0xB4663807, 0xCC322BF5, 0xD4F91BBD, 0xA7BEA11D,
+                                 0x8F462907};
+
+  uint64_t hashes[] = {0, 0, 0, 0, 0};
+  uint64_t zi[] = {1, 1, 1, 1, 1};
+
+  const size_t hashesSize = arraysize(hashes);
+
+  size_t current = 0;
+  const uint32_t* data = nullptr;
+  size_t sizeInBytes = sizeof(UChar) * str.length();
+  data = reinterpret_cast<const uint32_t*>(str.characters16());
+  for (size_t i = 0; i < sizeInBytes / 4; i += 4) {
+    uint32_t v = data[i];
+    uint64_t xi = v * randomOdd[current] & 0x7FFFFFFF;
+    hashes[current] = (hashes[current] + zi[current] * xi) % prime[current];
+    zi[current] = (zi[current] * random[current]) % prime[current];
+    current = current == hashesSize - 1 ? 0 : current + 1;
+  }
+  if (sizeInBytes % 4) {
+    uint32_t v = 0;
+    for (size_t i = sizeInBytes - sizeInBytes % 4; i < sizeInBytes; ++i) {
+      v <<= 8;
+      v |= reinterpret_cast<const uint8_t*>(data)[i];
+    }
+    uint64_t xi = v * randomOdd[current] & 0x7FFFFFFF;
+    hashes[current] = (hashes[current] + zi[current] * xi) % prime[current];
+    zi[current] = (zi[current] * random[current]) % prime[current];
+    current = current == hashesSize - 1 ? 0 : current + 1;
+  }
+
+  for (size_t i = 0; i < hashesSize; ++i)
+    hashes[i] = (hashes[i] + zi[i] * (prime[i] - 1)) % prime[i];
+
+  String16Builder hash;
+  for (size_t i = 0; i < hashesSize; ++i) appendUnsignedAsHex(hashes[i], &hash);
+  return hash.toString();
+}
+
+static v8::Local<v8::Value> GetChecked(v8::Local<v8::Context> context,
+                                       v8::Local<v8::Object> object,
+                                       const char* name) {
+  return object
+      ->Get(context, toV8StringInternalized(context->GetIsolate(), name))
+      .ToLocalChecked();
+}
+
+static int GetCheckedInt(v8::Local<v8::Context> context,
+                         v8::Local<v8::Object> object, const char* name) {
+  return static_cast<int>(GetChecked(context, object, name)
+                              ->ToInteger(context)
+                              .ToLocalChecked()
+                              ->Value());
+}
+
+V8DebuggerScript::V8DebuggerScript(v8::Local<v8::Context> context,
+                                   v8::Local<v8::Object> object,
+                                   bool isLiveEdit) {
+  v8::Isolate* isolate = context->GetIsolate();
+  v8::Local<v8::Value> idValue = GetChecked(context, object, "id");
+  DCHECK(!idValue.IsEmpty() && idValue->IsInt32());
+  m_id = String16::fromInteger(idValue->Int32Value(context).FromJust());
+
+  m_url = toProtocolStringWithTypeCheck(GetChecked(context, object, "name"));
+  m_sourceURL =
+      toProtocolStringWithTypeCheck(GetChecked(context, object, "sourceURL"));
+  m_sourceMappingURL = toProtocolStringWithTypeCheck(
+      GetChecked(context, object, "sourceMappingURL"));
+  m_startLine = GetCheckedInt(context, object, "startLine");
+  m_startColumn = GetCheckedInt(context, object, "startColumn");
+  m_endLine = GetCheckedInt(context, object, "endLine");
+  m_endColumn = GetCheckedInt(context, object, "endColumn");
+  m_executionContextAuxData = toProtocolStringWithTypeCheck(
+      GetChecked(context, object, "executionContextAuxData"));
+  m_executionContextId = GetCheckedInt(context, object, "executionContextId");
+  m_isLiveEdit = isLiveEdit;
+
+  v8::Local<v8::Value> sourceValue;
+  if (!object->Get(context, toV8StringInternalized(isolate, "source"))
+           .ToLocal(&sourceValue) ||
+      !sourceValue->IsString())
+    return;
+  setSource(isolate, sourceValue.As<v8::String>());
+}
+
+V8DebuggerScript::~V8DebuggerScript() {}
+
+const String16& V8DebuggerScript::sourceURL() const {
+  return m_sourceURL.isEmpty() ? m_url : m_sourceURL;
+}
+
+v8::Local<v8::String> V8DebuggerScript::source(v8::Isolate* isolate) const {
+  return m_source.Get(isolate);
+}
+
+void V8DebuggerScript::setSourceURL(const String16& sourceURL) {
+  m_sourceURL = sourceURL;
+}
+
+void V8DebuggerScript::setSourceMappingURL(const String16& sourceMappingURL) {
+  m_sourceMappingURL = sourceMappingURL;
+}
+
+void V8DebuggerScript::setSource(v8::Isolate* isolate,
+                                 v8::Local<v8::String> source) {
+  m_source.Reset(isolate, source);
+  m_hash = calculateHash(toProtocolString(source));
+}
+
+}  // namespace v8_inspector
diff --git a/src/inspector/v8-debugger-script.h b/src/inspector/v8-debugger-script.h
new file mode 100644
index 0000000..78c44b5
--- /dev/null
+++ b/src/inspector/v8-debugger-script.h
@@ -0,0 +1,87 @@
+/*
+ * Copyright (C) 2008 Apple Inc. All rights reserved.
+ * Copyright (C) 2010 Google Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1.  Redistributions of source code must retain the above copyright
+ *     notice, this list of conditions and the following disclaimer.
+ * 2.  Redistributions in binary form must reproduce the above copyright
+ *     notice, this list of conditions and the following disclaimer in the
+ *     documentation and/or other materials provided with the distribution.
+ * 3.  Neither the name of Apple Computer, Inc. ("Apple") nor the names of
+ *     its contributors may be used to endorse or promote products derived
+ *     from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef V8_INSPECTOR_V8DEBUGGERSCRIPT_H_
+#define V8_INSPECTOR_V8DEBUGGERSCRIPT_H_
+
+#include "src/base/macros.h"
+#include "src/inspector/string-16.h"
+
+#include "include/v8.h"
+
+namespace v8_inspector {
+
+class V8DebuggerScript {
+ public:
+  V8DebuggerScript(v8::Local<v8::Context>, v8::Local<v8::Object>,
+                   bool isLiveEdit);
+  ~V8DebuggerScript();
+
+  const String16& scriptId() const { return m_id; }
+  const String16& url() const { return m_url; }
+  bool hasSourceURL() const { return !m_sourceURL.isEmpty(); }
+  const String16& sourceURL() const;
+  const String16& sourceMappingURL() const { return m_sourceMappingURL; }
+  v8::Local<v8::String> source(v8::Isolate*) const;
+  const String16& hash() const { return m_hash; }
+  int startLine() const { return m_startLine; }
+  int startColumn() const { return m_startColumn; }
+  int endLine() const { return m_endLine; }
+  int endColumn() const { return m_endColumn; }
+  int executionContextId() const { return m_executionContextId; }
+  const String16& executionContextAuxData() const {
+    return m_executionContextAuxData;
+  }
+  bool isLiveEdit() const { return m_isLiveEdit; }
+
+  void setSourceURL(const String16&);
+  void setSourceMappingURL(const String16&);
+  void setSource(v8::Isolate*, v8::Local<v8::String>);
+
+ private:
+  String16 m_id;
+  String16 m_url;
+  String16 m_sourceURL;
+  String16 m_sourceMappingURL;
+  v8::Global<v8::String> m_source;
+  String16 m_hash;
+  int m_startLine;
+  int m_startColumn;
+  int m_endLine;
+  int m_endColumn;
+  int m_executionContextId;
+  String16 m_executionContextAuxData;
+  bool m_isLiveEdit;
+
+  DISALLOW_COPY_AND_ASSIGN(V8DebuggerScript);
+};
+
+}  // namespace v8_inspector
+
+#endif  // V8_INSPECTOR_V8DEBUGGERSCRIPT_H_
diff --git a/src/inspector/v8-debugger.cc b/src/inspector/v8-debugger.cc
new file mode 100644
index 0000000..d393f81
--- /dev/null
+++ b/src/inspector/v8-debugger.cc
@@ -0,0 +1,1002 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/inspector/v8-debugger.h"
+
+#include "src/inspector/debugger-script.h"
+#include "src/inspector/protocol/Protocol.h"
+#include "src/inspector/script-breakpoint.h"
+#include "src/inspector/string-util.h"
+#include "src/inspector/v8-debugger-agent-impl.h"
+#include "src/inspector/v8-inspector-impl.h"
+#include "src/inspector/v8-internal-value-type.h"
+#include "src/inspector/v8-stack-trace-impl.h"
+#include "src/inspector/v8-value-copier.h"
+
+namespace v8_inspector {
+
+namespace {
+const char stepIntoV8MethodName[] = "stepIntoStatement";
+const char stepOutV8MethodName[] = "stepOutOfFunction";
+static const char v8AsyncTaskEventEnqueue[] = "enqueue";
+static const char v8AsyncTaskEventEnqueueRecurring[] = "enqueueRecurring";
+static const char v8AsyncTaskEventWillHandle[] = "willHandle";
+static const char v8AsyncTaskEventDidHandle[] = "didHandle";
+static const char v8AsyncTaskEventCancel[] = "cancel";
+
+inline v8::Local<v8::Boolean> v8Boolean(bool value, v8::Isolate* isolate) {
+  return value ? v8::True(isolate) : v8::False(isolate);
+}
+
+}  // namespace
+
+static bool inLiveEditScope = false;
+
+v8::MaybeLocal<v8::Value> V8Debugger::callDebuggerMethod(
+    const char* functionName, int argc, v8::Local<v8::Value> argv[]) {
+  v8::MicrotasksScope microtasks(m_isolate,
+                                 v8::MicrotasksScope::kDoNotRunMicrotasks);
+  DCHECK(m_isolate->InContext());
+  v8::Local<v8::Context> context = m_isolate->GetCurrentContext();
+  v8::Local<v8::Object> debuggerScript = m_debuggerScript.Get(m_isolate);
+  v8::Local<v8::Function> function = v8::Local<v8::Function>::Cast(
+      debuggerScript
+          ->Get(context, toV8StringInternalized(m_isolate, functionName))
+          .ToLocalChecked());
+  return function->Call(context, debuggerScript, argc, argv);
+}
+
+V8Debugger::V8Debugger(v8::Isolate* isolate, V8InspectorImpl* inspector)
+    : m_isolate(isolate),
+      m_inspector(inspector),
+      m_lastContextId(0),
+      m_enableCount(0),
+      m_breakpointsActivated(true),
+      m_runningNestedMessageLoop(false),
+      m_ignoreScriptParsedEventsCounter(0),
+      m_maxAsyncCallStackDepth(0) {}
+
+V8Debugger::~V8Debugger() {}
+
+void V8Debugger::enable() {
+  if (m_enableCount++) return;
+  DCHECK(!enabled());
+  v8::HandleScope scope(m_isolate);
+  v8::Debug::SetDebugEventListener(m_isolate, &V8Debugger::v8DebugEventCallback,
+                                   v8::External::New(m_isolate, this));
+  m_debuggerContext.Reset(m_isolate, v8::Debug::GetDebugContext(m_isolate));
+  compileDebuggerScript();
+}
+
+void V8Debugger::disable() {
+  if (--m_enableCount) return;
+  DCHECK(enabled());
+  clearBreakpoints();
+  m_debuggerScript.Reset();
+  m_debuggerContext.Reset();
+  allAsyncTasksCanceled();
+  v8::Debug::SetDebugEventListener(m_isolate, nullptr);
+}
+
+bool V8Debugger::enabled() const { return !m_debuggerScript.IsEmpty(); }
+
+// static
+int V8Debugger::contextId(v8::Local<v8::Context> context) {
+  v8::Local<v8::Value> data =
+      context->GetEmbedderData(static_cast<int>(v8::Context::kDebugIdIndex));
+  if (data.IsEmpty() || !data->IsString()) return 0;
+  String16 dataString = toProtocolString(data.As<v8::String>());
+  if (dataString.isEmpty()) return 0;
+  size_t commaPos = dataString.find(",");
+  if (commaPos == String16::kNotFound) return 0;
+  size_t commaPos2 = dataString.find(",", commaPos + 1);
+  if (commaPos2 == String16::kNotFound) return 0;
+  return dataString.substring(commaPos + 1, commaPos2 - commaPos - 1)
+      .toInteger();
+}
+
+// static
+int V8Debugger::getGroupId(v8::Local<v8::Context> context) {
+  v8::Local<v8::Value> data =
+      context->GetEmbedderData(static_cast<int>(v8::Context::kDebugIdIndex));
+  if (data.IsEmpty() || !data->IsString()) return 0;
+  String16 dataString = toProtocolString(data.As<v8::String>());
+  if (dataString.isEmpty()) return 0;
+  size_t commaPos = dataString.find(",");
+  if (commaPos == String16::kNotFound) return 0;
+  return dataString.substring(0, commaPos).toInteger();
+}
+
+void V8Debugger::getCompiledScripts(
+    int contextGroupId,
+    std::vector<std::unique_ptr<V8DebuggerScript>>& result) {
+  v8::HandleScope scope(m_isolate);
+  v8::MicrotasksScope microtasks(m_isolate,
+                                 v8::MicrotasksScope::kDoNotRunMicrotasks);
+  v8::Local<v8::Context> context = debuggerContext();
+  v8::Local<v8::Object> debuggerScript = m_debuggerScript.Get(m_isolate);
+  DCHECK(!debuggerScript->IsUndefined());
+  v8::Local<v8::Function> getScriptsFunction = v8::Local<v8::Function>::Cast(
+      debuggerScript
+          ->Get(context, toV8StringInternalized(m_isolate, "getScripts"))
+          .ToLocalChecked());
+  v8::Local<v8::Value> argv[] = {v8::Integer::New(m_isolate, contextGroupId)};
+  v8::Local<v8::Value> value;
+  if (!getScriptsFunction->Call(context, debuggerScript, arraysize(argv), argv)
+           .ToLocal(&value))
+    return;
+  DCHECK(value->IsArray());
+  v8::Local<v8::Array> scriptsArray = v8::Local<v8::Array>::Cast(value);
+  result.reserve(scriptsArray->Length());
+  for (unsigned i = 0; i < scriptsArray->Length(); ++i) {
+    v8::Local<v8::Object> scriptObject = v8::Local<v8::Object>::Cast(
+        scriptsArray->Get(context, v8::Integer::New(m_isolate, i))
+            .ToLocalChecked());
+    result.push_back(wrapUnique(
+        new V8DebuggerScript(context, scriptObject, inLiveEditScope)));
+  }
+}
+
+String16 V8Debugger::setBreakpoint(const String16& sourceID,
+                                   const ScriptBreakpoint& scriptBreakpoint,
+                                   int* actualLineNumber,
+                                   int* actualColumnNumber) {
+  v8::HandleScope scope(m_isolate);
+  v8::Local<v8::Context> context = debuggerContext();
+  v8::Context::Scope contextScope(context);
+
+  v8::Local<v8::Object> info = v8::Object::New(m_isolate);
+  bool success = false;
+  success = info->Set(context, toV8StringInternalized(m_isolate, "sourceID"),
+                      toV8String(m_isolate, sourceID))
+                .FromMaybe(false);
+  DCHECK(success);
+  success = info->Set(context, toV8StringInternalized(m_isolate, "lineNumber"),
+                      v8::Integer::New(m_isolate, scriptBreakpoint.lineNumber))
+                .FromMaybe(false);
+  DCHECK(success);
+  success =
+      info->Set(context, toV8StringInternalized(m_isolate, "columnNumber"),
+                v8::Integer::New(m_isolate, scriptBreakpoint.columnNumber))
+          .FromMaybe(false);
+  DCHECK(success);
+  success = info->Set(context, toV8StringInternalized(m_isolate, "condition"),
+                      toV8String(m_isolate, scriptBreakpoint.condition))
+                .FromMaybe(false);
+  DCHECK(success);
+
+  v8::Local<v8::Function> setBreakpointFunction = v8::Local<v8::Function>::Cast(
+      m_debuggerScript.Get(m_isolate)
+          ->Get(context, toV8StringInternalized(m_isolate, "setBreakpoint"))
+          .ToLocalChecked());
+  v8::Local<v8::Value> breakpointId =
+      v8::Debug::Call(debuggerContext(), setBreakpointFunction, info)
+          .ToLocalChecked();
+  if (!breakpointId->IsString()) return "";
+  *actualLineNumber =
+      info->Get(context, toV8StringInternalized(m_isolate, "lineNumber"))
+          .ToLocalChecked()
+          ->Int32Value(context)
+          .FromJust();
+  *actualColumnNumber =
+      info->Get(context, toV8StringInternalized(m_isolate, "columnNumber"))
+          .ToLocalChecked()
+          ->Int32Value(context)
+          .FromJust();
+  return toProtocolString(breakpointId.As<v8::String>());
+}
+
+void V8Debugger::removeBreakpoint(const String16& breakpointId) {
+  v8::HandleScope scope(m_isolate);
+  v8::Local<v8::Context> context = debuggerContext();
+  v8::Context::Scope contextScope(context);
+
+  v8::Local<v8::Object> info = v8::Object::New(m_isolate);
+  bool success = false;
+  success =
+      info->Set(context, toV8StringInternalized(m_isolate, "breakpointId"),
+                toV8String(m_isolate, breakpointId))
+          .FromMaybe(false);
+  DCHECK(success);
+
+  v8::Local<v8::Function> removeBreakpointFunction =
+      v8::Local<v8::Function>::Cast(
+          m_debuggerScript.Get(m_isolate)
+              ->Get(context,
+                    toV8StringInternalized(m_isolate, "removeBreakpoint"))
+              .ToLocalChecked());
+  v8::Debug::Call(debuggerContext(), removeBreakpointFunction, info)
+      .ToLocalChecked();
+}
+
+void V8Debugger::clearBreakpoints() {
+  v8::HandleScope scope(m_isolate);
+  v8::Local<v8::Context> context = debuggerContext();
+  v8::Context::Scope contextScope(context);
+
+  v8::Local<v8::Function> clearBreakpoints = v8::Local<v8::Function>::Cast(
+      m_debuggerScript.Get(m_isolate)
+          ->Get(context, toV8StringInternalized(m_isolate, "clearBreakpoints"))
+          .ToLocalChecked());
+  v8::Debug::Call(debuggerContext(), clearBreakpoints).ToLocalChecked();
+}
+
+void V8Debugger::setBreakpointsActivated(bool activated) {
+  if (!enabled()) {
+    UNREACHABLE();
+    return;
+  }
+  v8::HandleScope scope(m_isolate);
+  v8::Local<v8::Context> context = debuggerContext();
+  v8::Context::Scope contextScope(context);
+
+  v8::Local<v8::Object> info = v8::Object::New(m_isolate);
+  bool success = false;
+  success = info->Set(context, toV8StringInternalized(m_isolate, "enabled"),
+                      v8::Boolean::New(m_isolate, activated))
+                .FromMaybe(false);
+  DCHECK(success);
+  v8::Local<v8::Function> setBreakpointsActivated =
+      v8::Local<v8::Function>::Cast(
+          m_debuggerScript.Get(m_isolate)
+              ->Get(context, toV8StringInternalized(m_isolate,
+                                                    "setBreakpointsActivated"))
+              .ToLocalChecked());
+  v8::Debug::Call(debuggerContext(), setBreakpointsActivated, info)
+      .ToLocalChecked();
+
+  m_breakpointsActivated = activated;
+}
+
+V8Debugger::PauseOnExceptionsState V8Debugger::getPauseOnExceptionsState() {
+  DCHECK(enabled());
+  v8::HandleScope scope(m_isolate);
+  v8::Local<v8::Context> context = debuggerContext();
+  v8::Context::Scope contextScope(context);
+
+  v8::Local<v8::Value> argv[] = {v8::Undefined(m_isolate)};
+  v8::Local<v8::Value> result =
+      callDebuggerMethod("pauseOnExceptionsState", 0, argv).ToLocalChecked();
+  return static_cast<V8Debugger::PauseOnExceptionsState>(
+      result->Int32Value(context).FromJust());
+}
+
+void V8Debugger::setPauseOnExceptionsState(
+    PauseOnExceptionsState pauseOnExceptionsState) {
+  DCHECK(enabled());
+  v8::HandleScope scope(m_isolate);
+  v8::Context::Scope contextScope(debuggerContext());
+
+  v8::Local<v8::Value> argv[] = {
+      v8::Int32::New(m_isolate, pauseOnExceptionsState)};
+  callDebuggerMethod("setPauseOnExceptionsState", 1, argv);
+}
+
+void V8Debugger::setPauseOnNextStatement(bool pause) {
+  if (m_runningNestedMessageLoop) return;
+  if (pause)
+    v8::Debug::DebugBreak(m_isolate);
+  else
+    v8::Debug::CancelDebugBreak(m_isolate);
+}
+
+bool V8Debugger::canBreakProgram() {
+  if (!m_breakpointsActivated) return false;
+  return m_isolate->InContext();
+}
+
+void V8Debugger::breakProgram() {
+  if (isPaused()) {
+    DCHECK(!m_runningNestedMessageLoop);
+    v8::Local<v8::Value> exception;
+    v8::Local<v8::Array> hitBreakpoints;
+    handleProgramBreak(m_pausedContext, m_executionState, exception,
+                       hitBreakpoints);
+    return;
+  }
+
+  if (!canBreakProgram()) return;
+
+  v8::HandleScope scope(m_isolate);
+  v8::Local<v8::Function> breakFunction;
+  if (!v8::Function::New(m_isolate->GetCurrentContext(),
+                         &V8Debugger::breakProgramCallback,
+                         v8::External::New(m_isolate, this), 0,
+                         v8::ConstructorBehavior::kThrow)
+           .ToLocal(&breakFunction))
+    return;
+  v8::Debug::Call(debuggerContext(), breakFunction).ToLocalChecked();
+}
+
+void V8Debugger::continueProgram() {
+  if (isPaused()) m_inspector->client()->quitMessageLoopOnPause();
+  m_pausedContext.Clear();
+  m_executionState.Clear();
+}
+
+void V8Debugger::stepIntoStatement() {
+  DCHECK(isPaused());
+  DCHECK(!m_executionState.IsEmpty());
+  v8::HandleScope handleScope(m_isolate);
+  v8::Local<v8::Value> argv[] = {m_executionState};
+  callDebuggerMethod(stepIntoV8MethodName, 1, argv);
+  continueProgram();
+}
+
+void V8Debugger::stepOverStatement() {
+  DCHECK(isPaused());
+  DCHECK(!m_executionState.IsEmpty());
+  v8::HandleScope handleScope(m_isolate);
+  v8::Local<v8::Value> argv[] = {m_executionState};
+  callDebuggerMethod("stepOverStatement", 1, argv);
+  continueProgram();
+}
+
+void V8Debugger::stepOutOfFunction() {
+  DCHECK(isPaused());
+  DCHECK(!m_executionState.IsEmpty());
+  v8::HandleScope handleScope(m_isolate);
+  v8::Local<v8::Value> argv[] = {m_executionState};
+  callDebuggerMethod(stepOutV8MethodName, 1, argv);
+  continueProgram();
+}
+
+void V8Debugger::clearStepping() {
+  DCHECK(enabled());
+  v8::HandleScope scope(m_isolate);
+  v8::Context::Scope contextScope(debuggerContext());
+
+  v8::Local<v8::Value> argv[] = {v8::Undefined(m_isolate)};
+  callDebuggerMethod("clearStepping", 0, argv);
+}
+
+bool V8Debugger::setScriptSource(
+    const String16& sourceID, v8::Local<v8::String> newSource, bool dryRun,
+    ErrorString* error,
+    Maybe<protocol::Runtime::ExceptionDetails>* exceptionDetails,
+    JavaScriptCallFrames* newCallFrames, Maybe<bool>* stackChanged) {
+  class EnableLiveEditScope {
+   public:
+    explicit EnableLiveEditScope(v8::Isolate* isolate) : m_isolate(isolate) {
+      v8::Debug::SetLiveEditEnabled(m_isolate, true);
+      inLiveEditScope = true;
+    }
+    ~EnableLiveEditScope() {
+      v8::Debug::SetLiveEditEnabled(m_isolate, false);
+      inLiveEditScope = false;
+    }
+
+   private:
+    v8::Isolate* m_isolate;
+  };
+
+  DCHECK(enabled());
+  v8::HandleScope scope(m_isolate);
+
+  std::unique_ptr<v8::Context::Scope> contextScope;
+  if (!isPaused())
+    contextScope = wrapUnique(new v8::Context::Scope(debuggerContext()));
+
+  v8::Local<v8::Value> argv[] = {toV8String(m_isolate, sourceID), newSource,
+                                 v8Boolean(dryRun, m_isolate)};
+
+  v8::Local<v8::Value> v8result;
+  {
+    EnableLiveEditScope enableLiveEditScope(m_isolate);
+    v8::TryCatch tryCatch(m_isolate);
+    tryCatch.SetVerbose(false);
+    v8::MaybeLocal<v8::Value> maybeResult =
+        callDebuggerMethod("liveEditScriptSource", 3, argv);
+    if (tryCatch.HasCaught()) {
+      v8::Local<v8::Message> message = tryCatch.Message();
+      if (!message.IsEmpty())
+        *error = toProtocolStringWithTypeCheck(message->Get());
+      else
+        *error = "Unknown error.";
+      return false;
+    }
+    v8result = maybeResult.ToLocalChecked();
+  }
+  DCHECK(!v8result.IsEmpty());
+  v8::Local<v8::Context> context = m_isolate->GetCurrentContext();
+  v8::Local<v8::Object> resultTuple =
+      v8result->ToObject(context).ToLocalChecked();
+  int code = static_cast<int>(resultTuple->Get(context, 0)
+                                  .ToLocalChecked()
+                                  ->ToInteger(context)
+                                  .ToLocalChecked()
+                                  ->Value());
+  switch (code) {
+    case 0: {
+      *stackChanged = resultTuple->Get(context, 1)
+                          .ToLocalChecked()
+                          ->BooleanValue(context)
+                          .FromJust();
+      // Call stack may have changed if the edited function was on the
+      // stack.
+      if (!dryRun && isPaused()) {
+        JavaScriptCallFrames frames = currentCallFrames();
+        newCallFrames->swap(frames);
+      }
+      return true;
+    }
+    // Compile error.
+    case 1: {
+      *exceptionDetails =
+          protocol::Runtime::ExceptionDetails::create()
+              .setExceptionId(m_inspector->nextExceptionId())
+              .setText(toProtocolStringWithTypeCheck(
+                  resultTuple->Get(context, 2).ToLocalChecked()))
+              .setLineNumber(static_cast<int>(resultTuple->Get(context, 3)
+                                                  .ToLocalChecked()
+                                                  ->ToInteger(context)
+                                                  .ToLocalChecked()
+                                                  ->Value()) -
+                             1)
+              .setColumnNumber(static_cast<int>(resultTuple->Get(context, 4)
+                                                    .ToLocalChecked()
+                                                    ->ToInteger(context)
+                                                    .ToLocalChecked()
+                                                    ->Value()) -
+                               1)
+              .build();
+      return false;
+    }
+  }
+  *error = "Unknown error.";
+  return false;
+}
+
+JavaScriptCallFrames V8Debugger::currentCallFrames(int limit) {
+  if (!m_isolate->InContext()) return JavaScriptCallFrames();
+  v8::Local<v8::Value> currentCallFramesV8;
+  if (m_executionState.IsEmpty()) {
+    v8::Local<v8::Function> currentCallFramesFunction =
+        v8::Local<v8::Function>::Cast(
+            m_debuggerScript.Get(m_isolate)
+                ->Get(debuggerContext(),
+                      toV8StringInternalized(m_isolate, "currentCallFrames"))
+                .ToLocalChecked());
+    currentCallFramesV8 =
+        v8::Debug::Call(debuggerContext(), currentCallFramesFunction,
+                        v8::Integer::New(m_isolate, limit))
+            .ToLocalChecked();
+  } else {
+    v8::Local<v8::Value> argv[] = {m_executionState,
+                                   v8::Integer::New(m_isolate, limit)};
+    currentCallFramesV8 =
+        callDebuggerMethod("currentCallFrames", arraysize(argv), argv)
+            .ToLocalChecked();
+  }
+  DCHECK(!currentCallFramesV8.IsEmpty());
+  if (!currentCallFramesV8->IsArray()) return JavaScriptCallFrames();
+  v8::Local<v8::Array> callFramesArray = currentCallFramesV8.As<v8::Array>();
+  JavaScriptCallFrames callFrames;
+  for (uint32_t i = 0; i < callFramesArray->Length(); ++i) {
+    v8::Local<v8::Value> callFrameValue;
+    if (!callFramesArray->Get(debuggerContext(), i).ToLocal(&callFrameValue))
+      return JavaScriptCallFrames();
+    if (!callFrameValue->IsObject()) return JavaScriptCallFrames();
+    v8::Local<v8::Object> callFrameObject = callFrameValue.As<v8::Object>();
+    callFrames.push_back(JavaScriptCallFrame::create(
+        debuggerContext(), v8::Local<v8::Object>::Cast(callFrameObject)));
+  }
+  return callFrames;
+}
+
+static V8Debugger* toV8Debugger(v8::Local<v8::Value> data) {
+  void* p = v8::Local<v8::External>::Cast(data)->Value();
+  return static_cast<V8Debugger*>(p);
+}
+
+void V8Debugger::breakProgramCallback(
+    const v8::FunctionCallbackInfo<v8::Value>& info) {
+  DCHECK_EQ(info.Length(), 2);
+  V8Debugger* thisPtr = toV8Debugger(info.Data());
+  if (!thisPtr->enabled()) return;
+  v8::Local<v8::Context> pausedContext =
+      thisPtr->m_isolate->GetCurrentContext();
+  v8::Local<v8::Value> exception;
+  v8::Local<v8::Array> hitBreakpoints;
+  thisPtr->handleProgramBreak(pausedContext,
+                              v8::Local<v8::Object>::Cast(info[0]), exception,
+                              hitBreakpoints);
+}
+
+void V8Debugger::handleProgramBreak(v8::Local<v8::Context> pausedContext,
+                                    v8::Local<v8::Object> executionState,
+                                    v8::Local<v8::Value> exception,
+                                    v8::Local<v8::Array> hitBreakpointNumbers,
+                                    bool isPromiseRejection) {
+  // Don't allow nested breaks.
+  if (m_runningNestedMessageLoop) return;
+
+  V8DebuggerAgentImpl* agent =
+      m_inspector->enabledDebuggerAgentForGroup(getGroupId(pausedContext));
+  if (!agent) return;
+
+  std::vector<String16> breakpointIds;
+  if (!hitBreakpointNumbers.IsEmpty()) {
+    breakpointIds.reserve(hitBreakpointNumbers->Length());
+    for (uint32_t i = 0; i < hitBreakpointNumbers->Length(); i++) {
+      v8::Local<v8::Value> hitBreakpointNumber =
+          hitBreakpointNumbers->Get(debuggerContext(), i).ToLocalChecked();
+      DCHECK(hitBreakpointNumber->IsInt32());
+      breakpointIds.push_back(String16::fromInteger(
+          hitBreakpointNumber->Int32Value(debuggerContext()).FromJust()));
+    }
+  }
+
+  m_pausedContext = pausedContext;
+  m_executionState = executionState;
+  V8DebuggerAgentImpl::SkipPauseRequest result = agent->didPause(
+      pausedContext, exception, breakpointIds, isPromiseRejection);
+  if (result == V8DebuggerAgentImpl::RequestNoSkip) {
+    m_runningNestedMessageLoop = true;
+    int groupId = getGroupId(pausedContext);
+    DCHECK(groupId);
+    m_inspector->client()->runMessageLoopOnPause(groupId);
+    // The agent may have been removed in the nested loop.
+    agent =
+        m_inspector->enabledDebuggerAgentForGroup(getGroupId(pausedContext));
+    if (agent) agent->didContinue();
+    m_runningNestedMessageLoop = false;
+  }
+  m_pausedContext.Clear();
+  m_executionState.Clear();
+
+  if (result == V8DebuggerAgentImpl::RequestStepFrame) {
+    v8::Local<v8::Value> argv[] = {executionState};
+    callDebuggerMethod("stepFrameStatement", 1, argv);
+  } else if (result == V8DebuggerAgentImpl::RequestStepInto) {
+    v8::Local<v8::Value> argv[] = {executionState};
+    callDebuggerMethod(stepIntoV8MethodName, 1, argv);
+  } else if (result == V8DebuggerAgentImpl::RequestStepOut) {
+    v8::Local<v8::Value> argv[] = {executionState};
+    callDebuggerMethod(stepOutV8MethodName, 1, argv);
+  }
+}
+
+void V8Debugger::v8DebugEventCallback(
+    const v8::Debug::EventDetails& eventDetails) {
+  V8Debugger* thisPtr = toV8Debugger(eventDetails.GetCallbackData());
+  thisPtr->handleV8DebugEvent(eventDetails);
+}
+
+v8::Local<v8::Value> V8Debugger::callInternalGetterFunction(
+    v8::Local<v8::Object> object, const char* functionName) {
+  v8::MicrotasksScope microtasks(m_isolate,
+                                 v8::MicrotasksScope::kDoNotRunMicrotasks);
+  v8::Local<v8::Value> getterValue =
+      object
+          ->Get(m_isolate->GetCurrentContext(),
+                toV8StringInternalized(m_isolate, functionName))
+          .ToLocalChecked();
+  DCHECK(!getterValue.IsEmpty() && getterValue->IsFunction());
+  return v8::Local<v8::Function>::Cast(getterValue)
+      ->Call(m_isolate->GetCurrentContext(), object, 0, 0)
+      .ToLocalChecked();
+}
+
+void V8Debugger::handleV8DebugEvent(
+    const v8::Debug::EventDetails& eventDetails) {
+  if (!enabled()) return;
+  v8::DebugEvent event = eventDetails.GetEvent();
+  if (event != v8::AsyncTaskEvent && event != v8::Break &&
+      event != v8::Exception && event != v8::AfterCompile &&
+      event != v8::BeforeCompile && event != v8::CompileError)
+    return;
+
+  v8::Local<v8::Context> eventContext = eventDetails.GetEventContext();
+  DCHECK(!eventContext.IsEmpty());
+
+  if (event == v8::AsyncTaskEvent) {
+    v8::HandleScope scope(m_isolate);
+    handleV8AsyncTaskEvent(eventContext, eventDetails.GetExecutionState(),
+                           eventDetails.GetEventData());
+    return;
+  }
+
+  V8DebuggerAgentImpl* agent =
+      m_inspector->enabledDebuggerAgentForGroup(getGroupId(eventContext));
+  if (agent) {
+    v8::HandleScope scope(m_isolate);
+    if (m_ignoreScriptParsedEventsCounter == 0 &&
+        (event == v8::AfterCompile || event == v8::CompileError)) {
+      v8::Context::Scope contextScope(debuggerContext());
+      v8::Local<v8::Value> argv[] = {eventDetails.GetEventData()};
+      v8::Local<v8::Value> value =
+          callDebuggerMethod("getAfterCompileScript", 1, argv).ToLocalChecked();
+      if (value->IsNull()) return;
+      DCHECK(value->IsObject());
+      v8::Local<v8::Object> scriptObject = v8::Local<v8::Object>::Cast(value);
+      agent->didParseSource(
+          wrapUnique(new V8DebuggerScript(debuggerContext(), scriptObject,
+                                          inLiveEditScope)),
+          event == v8::AfterCompile);
+    } else if (event == v8::Exception) {
+      v8::Local<v8::Object> eventData = eventDetails.GetEventData();
+      v8::Local<v8::Value> exception =
+          callInternalGetterFunction(eventData, "exception");
+      v8::Local<v8::Value> promise =
+          callInternalGetterFunction(eventData, "promise");
+      bool isPromiseRejection = !promise.IsEmpty() && promise->IsObject();
+      handleProgramBreak(eventContext, eventDetails.GetExecutionState(),
+                         exception, v8::Local<v8::Array>(), isPromiseRejection);
+    } else if (event == v8::Break) {
+      v8::Local<v8::Value> argv[] = {eventDetails.GetEventData()};
+      v8::Local<v8::Value> hitBreakpoints =
+          callDebuggerMethod("getBreakpointNumbers", 1, argv).ToLocalChecked();
+      DCHECK(hitBreakpoints->IsArray());
+      handleProgramBreak(eventContext, eventDetails.GetExecutionState(),
+                         v8::Local<v8::Value>(),
+                         hitBreakpoints.As<v8::Array>());
+    }
+  }
+}
+
+void V8Debugger::handleV8AsyncTaskEvent(v8::Local<v8::Context> context,
+                                        v8::Local<v8::Object> executionState,
+                                        v8::Local<v8::Object> eventData) {
+  if (!m_maxAsyncCallStackDepth) return;
+
+  String16 type = toProtocolStringWithTypeCheck(
+      callInternalGetterFunction(eventData, "type"));
+  String16 name = toProtocolStringWithTypeCheck(
+      callInternalGetterFunction(eventData, "name"));
+  int id = static_cast<int>(callInternalGetterFunction(eventData, "id")
+                                ->ToInteger(context)
+                                .ToLocalChecked()
+                                ->Value());
+  // Async task events from Promises are given misaligned pointers to prevent
+  // them from overlapping with other Blink task identifiers. There is a
+  // single namespace of such ids, managed by src/js/promise.js.
+  void* ptr = reinterpret_cast<void*>(id * 2 + 1);
+  if (type == v8AsyncTaskEventEnqueue)
+    asyncTaskScheduled(name, ptr, false);
+  else if (type == v8AsyncTaskEventEnqueueRecurring)
+    asyncTaskScheduled(name, ptr, true);
+  else if (type == v8AsyncTaskEventWillHandle)
+    asyncTaskStarted(ptr);
+  else if (type == v8AsyncTaskEventDidHandle)
+    asyncTaskFinished(ptr);
+  else if (type == v8AsyncTaskEventCancel)
+    asyncTaskCanceled(ptr);
+  else
+    UNREACHABLE();
+}
+
+V8StackTraceImpl* V8Debugger::currentAsyncCallChain() {
+  if (!m_currentStacks.size()) return nullptr;
+  return m_currentStacks.back().get();
+}
+
+void V8Debugger::compileDebuggerScript() {
+  if (!m_debuggerScript.IsEmpty()) {
+    UNREACHABLE();
+    return;
+  }
+
+  v8::HandleScope scope(m_isolate);
+  v8::Context::Scope contextScope(debuggerContext());
+
+  v8::Local<v8::String> scriptValue =
+      v8::String::NewFromUtf8(m_isolate, DebuggerScript_js,
+                              v8::NewStringType::kInternalized,
+                              sizeof(DebuggerScript_js))
+          .ToLocalChecked();
+  v8::Local<v8::Value> value;
+  if (!m_inspector->compileAndRunInternalScript(debuggerContext(), scriptValue)
+           .ToLocal(&value)) {
+    UNREACHABLE();
+    return;
+  }
+  DCHECK(value->IsObject());
+  m_debuggerScript.Reset(m_isolate, value.As<v8::Object>());
+}
+
+v8::Local<v8::Context> V8Debugger::debuggerContext() const {
+  DCHECK(!m_debuggerContext.IsEmpty());
+  return m_debuggerContext.Get(m_isolate);
+}
+
+v8::MaybeLocal<v8::Value> V8Debugger::functionScopes(
+    v8::Local<v8::Context> context, v8::Local<v8::Function> function) {
+  if (!enabled()) {
+    UNREACHABLE();
+    return v8::Local<v8::Value>::New(m_isolate, v8::Undefined(m_isolate));
+  }
+  v8::Local<v8::Value> argv[] = {function};
+  v8::Local<v8::Value> scopesValue;
+  if (!callDebuggerMethod("getFunctionScopes", 1, argv).ToLocal(&scopesValue))
+    return v8::MaybeLocal<v8::Value>();
+  v8::Local<v8::Value> copied;
+  if (!copyValueFromDebuggerContext(m_isolate, debuggerContext(), context,
+                                    scopesValue)
+           .ToLocal(&copied) ||
+      !copied->IsArray())
+    return v8::MaybeLocal<v8::Value>();
+  if (!markAsInternal(context, v8::Local<v8::Array>::Cast(copied),
+                      V8InternalValueType::kScopeList))
+    return v8::MaybeLocal<v8::Value>();
+  if (!markArrayEntriesAsInternal(context, v8::Local<v8::Array>::Cast(copied),
+                                  V8InternalValueType::kScope))
+    return v8::MaybeLocal<v8::Value>();
+  return copied;
+}
+
+v8::MaybeLocal<v8::Array> V8Debugger::internalProperties(
+    v8::Local<v8::Context> context, v8::Local<v8::Value> value) {
+  v8::Local<v8::Array> properties;
+  if (!v8::Debug::GetInternalProperties(m_isolate, value).ToLocal(&properties))
+    return v8::MaybeLocal<v8::Array>();
+  if (value->IsFunction()) {
+    v8::Local<v8::Function> function = value.As<v8::Function>();
+    v8::Local<v8::Value> location = functionLocation(context, function);
+    if (location->IsObject()) {
+      createDataProperty(
+          context, properties, properties->Length(),
+          toV8StringInternalized(m_isolate, "[[FunctionLocation]]"));
+      createDataProperty(context, properties, properties->Length(), location);
+    }
+    if (function->IsGeneratorFunction()) {
+      createDataProperty(context, properties, properties->Length(),
+                         toV8StringInternalized(m_isolate, "[[IsGenerator]]"));
+      createDataProperty(context, properties, properties->Length(),
+                         v8::True(m_isolate));
+    }
+  }
+  if (!enabled()) return properties;
+  if (value->IsMap() || value->IsWeakMap() || value->IsSet() ||
+      value->IsWeakSet() || value->IsSetIterator() || value->IsMapIterator()) {
+    v8::Local<v8::Value> entries =
+        collectionEntries(context, v8::Local<v8::Object>::Cast(value));
+    if (entries->IsArray()) {
+      createDataProperty(context, properties, properties->Length(),
+                         toV8StringInternalized(m_isolate, "[[Entries]]"));
+      createDataProperty(context, properties, properties->Length(), entries);
+    }
+  }
+  if (value->IsGeneratorObject()) {
+    v8::Local<v8::Value> location =
+        generatorObjectLocation(context, v8::Local<v8::Object>::Cast(value));
+    if (location->IsObject()) {
+      createDataProperty(
+          context, properties, properties->Length(),
+          toV8StringInternalized(m_isolate, "[[GeneratorLocation]]"));
+      createDataProperty(context, properties, properties->Length(), location);
+    }
+  }
+  if (value->IsFunction()) {
+    v8::Local<v8::Function> function = value.As<v8::Function>();
+    v8::Local<v8::Value> boundFunction = function->GetBoundFunction();
+    v8::Local<v8::Value> scopes;
+    if (boundFunction->IsUndefined() &&
+        functionScopes(context, function).ToLocal(&scopes)) {
+      createDataProperty(context, properties, properties->Length(),
+                         toV8StringInternalized(m_isolate, "[[Scopes]]"));
+      createDataProperty(context, properties, properties->Length(), scopes);
+    }
+  }
+  return properties;
+}
+
+v8::Local<v8::Value> V8Debugger::collectionEntries(
+    v8::Local<v8::Context> context, v8::Local<v8::Object> object) {
+  if (!enabled()) {
+    UNREACHABLE();
+    return v8::Undefined(m_isolate);
+  }
+  v8::Local<v8::Value> argv[] = {object};
+  v8::Local<v8::Value> entriesValue =
+      callDebuggerMethod("getCollectionEntries", 1, argv).ToLocalChecked();
+  if (!entriesValue->IsArray()) return v8::Undefined(m_isolate);
+
+  v8::Local<v8::Array> entries = entriesValue.As<v8::Array>();
+  v8::Local<v8::Array> copiedArray =
+      v8::Array::New(m_isolate, entries->Length());
+  if (!copiedArray->SetPrototype(context, v8::Null(m_isolate)).FromMaybe(false))
+    return v8::Undefined(m_isolate);
+  for (uint32_t i = 0; i < entries->Length(); ++i) {
+    v8::Local<v8::Value> item;
+    if (!entries->Get(debuggerContext(), i).ToLocal(&item))
+      return v8::Undefined(m_isolate);
+    v8::Local<v8::Value> copied;
+    if (!copyValueFromDebuggerContext(m_isolate, debuggerContext(), context,
+                                      item)
+             .ToLocal(&copied))
+      return v8::Undefined(m_isolate);
+    if (!createDataProperty(context, copiedArray, i, copied).FromMaybe(false))
+      return v8::Undefined(m_isolate);
+  }
+  if (!markArrayEntriesAsInternal(context,
+                                  v8::Local<v8::Array>::Cast(copiedArray),
+                                  V8InternalValueType::kEntry))
+    return v8::Undefined(m_isolate);
+  return copiedArray;
+}
+
+v8::Local<v8::Value> V8Debugger::generatorObjectLocation(
+    v8::Local<v8::Context> context, v8::Local<v8::Object> object) {
+  if (!enabled()) {
+    UNREACHABLE();
+    return v8::Null(m_isolate);
+  }
+  v8::Local<v8::Value> argv[] = {object};
+  v8::Local<v8::Value> location =
+      callDebuggerMethod("getGeneratorObjectLocation", 1, argv)
+          .ToLocalChecked();
+  v8::Local<v8::Value> copied;
+  if (!copyValueFromDebuggerContext(m_isolate, debuggerContext(), context,
+                                    location)
+           .ToLocal(&copied) ||
+      !copied->IsObject())
+    return v8::Null(m_isolate);
+  if (!markAsInternal(context, v8::Local<v8::Object>::Cast(copied),
+                      V8InternalValueType::kLocation))
+    return v8::Null(m_isolate);
+  return copied;
+}
+
+v8::Local<v8::Value> V8Debugger::functionLocation(
+    v8::Local<v8::Context> context, v8::Local<v8::Function> function) {
+  int scriptId = function->ScriptId();
+  if (scriptId == v8::UnboundScript::kNoScriptId) return v8::Null(m_isolate);
+  int lineNumber = function->GetScriptLineNumber();
+  int columnNumber = function->GetScriptColumnNumber();
+  if (lineNumber == v8::Function::kLineOffsetNotFound ||
+      columnNumber == v8::Function::kLineOffsetNotFound)
+    return v8::Null(m_isolate);
+  v8::Local<v8::Object> location = v8::Object::New(m_isolate);
+  if (!location->SetPrototype(context, v8::Null(m_isolate)).FromMaybe(false))
+    return v8::Null(m_isolate);
+  if (!createDataProperty(
+           context, location, toV8StringInternalized(m_isolate, "scriptId"),
+           toV8String(m_isolate, String16::fromInteger(scriptId)))
+           .FromMaybe(false))
+    return v8::Null(m_isolate);
+  if (!createDataProperty(context, location,
+                          toV8StringInternalized(m_isolate, "lineNumber"),
+                          v8::Integer::New(m_isolate, lineNumber))
+           .FromMaybe(false))
+    return v8::Null(m_isolate);
+  if (!createDataProperty(context, location,
+                          toV8StringInternalized(m_isolate, "columnNumber"),
+                          v8::Integer::New(m_isolate, columnNumber))
+           .FromMaybe(false))
+    return v8::Null(m_isolate);
+  if (!markAsInternal(context, location, V8InternalValueType::kLocation))
+    return v8::Null(m_isolate);
+  return location;
+}
+
+bool V8Debugger::isPaused() { return !m_pausedContext.IsEmpty(); }
+
+std::unique_ptr<V8StackTraceImpl> V8Debugger::createStackTrace(
+    v8::Local<v8::StackTrace> stackTrace) {
+  int contextGroupId =
+      m_isolate->InContext() ? getGroupId(m_isolate->GetCurrentContext()) : 0;
+  return V8StackTraceImpl::create(this, contextGroupId, stackTrace,
+                                  V8StackTraceImpl::maxCallStackSizeToCapture);
+}
+
+int V8Debugger::markContext(const V8ContextInfo& info) {
+  DCHECK(info.context->GetIsolate() == m_isolate);
+  int contextId = ++m_lastContextId;
+  String16 debugData = String16::fromInteger(info.contextGroupId) + "," +
+                       String16::fromInteger(contextId) + "," +
+                       toString16(info.auxData);
+  v8::Context::Scope contextScope(info.context);
+  info.context->SetEmbedderData(static_cast<int>(v8::Context::kDebugIdIndex),
+                                toV8String(m_isolate, debugData));
+  return contextId;
+}
+
+void V8Debugger::setAsyncCallStackDepth(V8DebuggerAgentImpl* agent, int depth) {
+  if (depth <= 0)
+    m_maxAsyncCallStackDepthMap.erase(agent);
+  else
+    m_maxAsyncCallStackDepthMap[agent] = depth;
+
+  int maxAsyncCallStackDepth = 0;
+  for (const auto& pair : m_maxAsyncCallStackDepthMap) {
+    if (pair.second > maxAsyncCallStackDepth)
+      maxAsyncCallStackDepth = pair.second;
+  }
+
+  if (m_maxAsyncCallStackDepth == maxAsyncCallStackDepth) return;
+  m_maxAsyncCallStackDepth = maxAsyncCallStackDepth;
+  if (!maxAsyncCallStackDepth) allAsyncTasksCanceled();
+}
+
+void V8Debugger::asyncTaskScheduled(const StringView& taskName, void* task,
+                                    bool recurring) {
+  if (!m_maxAsyncCallStackDepth) return;
+  asyncTaskScheduled(toString16(taskName), task, recurring);
+}
+
+void V8Debugger::asyncTaskScheduled(const String16& taskName, void* task,
+                                    bool recurring) {
+  if (!m_maxAsyncCallStackDepth) return;
+  v8::HandleScope scope(m_isolate);
+  int contextGroupId =
+      m_isolate->InContext() ? getGroupId(m_isolate->GetCurrentContext()) : 0;
+  std::unique_ptr<V8StackTraceImpl> chain = V8StackTraceImpl::capture(
+      this, contextGroupId, V8StackTraceImpl::maxCallStackSizeToCapture,
+      taskName);
+  if (chain) {
+    m_asyncTaskStacks[task] = std::move(chain);
+    if (recurring) m_recurringTasks.insert(task);
+  }
+}
+
+void V8Debugger::asyncTaskCanceled(void* task) {
+  if (!m_maxAsyncCallStackDepth) return;
+  m_asyncTaskStacks.erase(task);
+  m_recurringTasks.erase(task);
+}
+
+void V8Debugger::asyncTaskStarted(void* task) {
+  if (!m_maxAsyncCallStackDepth) return;
+  m_currentTasks.push_back(task);
+  AsyncTaskToStackTrace::iterator stackIt = m_asyncTaskStacks.find(task);
+  // Needs to support the following order of events:
+  // - asyncTaskScheduled
+  //   <-- attached here -->
+  // - asyncTaskStarted
+  // - asyncTaskCanceled <-- canceled before finished
+  //   <-- async stack requested here -->
+  // - asyncTaskFinished
+  std::unique_ptr<V8StackTraceImpl> stack;
+  if (stackIt != m_asyncTaskStacks.end() && stackIt->second)
+    stack = stackIt->second->cloneImpl();
+  m_currentStacks.push_back(std::move(stack));
+}
+
+void V8Debugger::asyncTaskFinished(void* task) {
+  if (!m_maxAsyncCallStackDepth) return;
+  // We could have started instrumenting halfway through, so the stack may be empty.
+  if (!m_currentStacks.size()) return;
+
+  DCHECK(m_currentTasks.back() == task);
+  m_currentTasks.pop_back();
+
+  m_currentStacks.pop_back();
+  if (m_recurringTasks.find(task) == m_recurringTasks.end())
+    m_asyncTaskStacks.erase(task);
+}
+
+void V8Debugger::allAsyncTasksCanceled() {
+  m_asyncTaskStacks.clear();
+  m_recurringTasks.clear();
+  m_currentStacks.clear();
+  m_currentTasks.clear();
+}
+
+void V8Debugger::muteScriptParsedEvents() {
+  ++m_ignoreScriptParsedEventsCounter;
+}
+
+void V8Debugger::unmuteScriptParsedEvents() {
+  --m_ignoreScriptParsedEventsCounter;
+  DCHECK_GE(m_ignoreScriptParsedEventsCounter, 0);
+}
+
+std::unique_ptr<V8StackTraceImpl> V8Debugger::captureStackTrace(
+    bool fullStack) {
+  if (!m_isolate->InContext()) return nullptr;
+
+  v8::HandleScope handles(m_isolate);
+  int contextGroupId = getGroupId(m_isolate->GetCurrentContext());
+  if (!contextGroupId) return nullptr;
+
+  size_t stackSize =
+      fullStack ? V8StackTraceImpl::maxCallStackSizeToCapture : 1;
+  if (m_inspector->enabledRuntimeAgentForGroup(contextGroupId))
+    stackSize = V8StackTraceImpl::maxCallStackSizeToCapture;
+
+  return V8StackTraceImpl::capture(this, contextGroupId, stackSize);
+}
+
+}  // namespace v8_inspector
diff --git a/src/inspector/v8-debugger.h b/src/inspector/v8-debugger.h
new file mode 100644
index 0000000..83c1b21
--- /dev/null
+++ b/src/inspector/v8-debugger.h
@@ -0,0 +1,160 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_INSPECTOR_V8DEBUGGER_H_
+#define V8_INSPECTOR_V8DEBUGGER_H_
+
+#include <vector>
+
+#include "src/base/macros.h"
+#include "src/inspector/java-script-call-frame.h"
+#include "src/inspector/protocol/Forward.h"
+#include "src/inspector/protocol/Runtime.h"
+#include "src/inspector/v8-debugger-script.h"
+
+#include "include/v8-debug.h"
+#include "include/v8-inspector.h"
+
+namespace v8_inspector {
+
+struct ScriptBreakpoint;
+class V8DebuggerAgentImpl;
+class V8InspectorImpl;
+class V8StackTraceImpl;
+
+using protocol::ErrorString;
+
+class V8Debugger {
+ public:
+  V8Debugger(v8::Isolate*, V8InspectorImpl*);
+  ~V8Debugger();
+
+  static int contextId(v8::Local<v8::Context>);
+  static int getGroupId(v8::Local<v8::Context>);
+  int markContext(const V8ContextInfo&);
+
+  bool enabled() const;
+
+  String16 setBreakpoint(const String16& sourceID, const ScriptBreakpoint&,
+                         int* actualLineNumber, int* actualColumnNumber);
+  void removeBreakpoint(const String16& breakpointId);
+  void setBreakpointsActivated(bool);
+  bool breakpointsActivated() const { return m_breakpointsActivated; }
+
+  enum PauseOnExceptionsState {
+    DontPauseOnExceptions,
+    PauseOnAllExceptions,
+    PauseOnUncaughtExceptions
+  };
+  PauseOnExceptionsState getPauseOnExceptionsState();
+  void setPauseOnExceptionsState(PauseOnExceptionsState);
+  void setPauseOnNextStatement(bool);
+  bool canBreakProgram();
+  void breakProgram();
+  void continueProgram();
+  void stepIntoStatement();
+  void stepOverStatement();
+  void stepOutOfFunction();
+  void clearStepping();
+
+  bool setScriptSource(const String16& sourceID,
+                       v8::Local<v8::String> newSource, bool dryRun,
+                       ErrorString*,
+                       protocol::Maybe<protocol::Runtime::ExceptionDetails>*,
+                       JavaScriptCallFrames* newCallFrames,
+                       protocol::Maybe<bool>* stackChanged);
+  JavaScriptCallFrames currentCallFrames(int limit = 0);
+
+  // Each script inherits debug data from v8::Context where it has been
+  // compiled.
+  // Only scripts whose debug data matches |contextGroupId| will be reported.
+  // Passing 0 will result in reporting all scripts.
+  void getCompiledScripts(int contextGroupId,
+                          std::vector<std::unique_ptr<V8DebuggerScript>>&);
+  void enable();
+  void disable();
+
+  bool isPaused();
+  v8::Local<v8::Context> pausedContext() { return m_pausedContext; }
+
+  int maxAsyncCallChainDepth() { return m_maxAsyncCallStackDepth; }
+  V8StackTraceImpl* currentAsyncCallChain();
+  void setAsyncCallStackDepth(V8DebuggerAgentImpl*, int);
+  std::unique_ptr<V8StackTraceImpl> createStackTrace(v8::Local<v8::StackTrace>);
+  std::unique_ptr<V8StackTraceImpl> captureStackTrace(bool fullStack);
+
+  v8::MaybeLocal<v8::Array> internalProperties(v8::Local<v8::Context>,
+                                               v8::Local<v8::Value>);
+
+  void asyncTaskScheduled(const StringView& taskName, void* task,
+                          bool recurring);
+  void asyncTaskScheduled(const String16& taskName, void* task, bool recurring);
+  void asyncTaskCanceled(void* task);
+  void asyncTaskStarted(void* task);
+  void asyncTaskFinished(void* task);
+  void allAsyncTasksCanceled();
+
+  void muteScriptParsedEvents();
+  void unmuteScriptParsedEvents();
+
+  V8InspectorImpl* inspector() { return m_inspector; }
+
+ private:
+  void compileDebuggerScript();
+  v8::MaybeLocal<v8::Value> callDebuggerMethod(const char* functionName,
+                                               int argc,
+                                               v8::Local<v8::Value> argv[]);
+  v8::Local<v8::Context> debuggerContext() const;
+  void clearBreakpoints();
+
+  static void breakProgramCallback(const v8::FunctionCallbackInfo<v8::Value>&);
+  void handleProgramBreak(v8::Local<v8::Context> pausedContext,
+                          v8::Local<v8::Object> executionState,
+                          v8::Local<v8::Value> exception,
+                          v8::Local<v8::Array> hitBreakpoints,
+                          bool isPromiseRejection = false);
+  static void v8DebugEventCallback(const v8::Debug::EventDetails&);
+  v8::Local<v8::Value> callInternalGetterFunction(v8::Local<v8::Object>,
+                                                  const char* functionName);
+  void handleV8DebugEvent(const v8::Debug::EventDetails&);
+  void handleV8AsyncTaskEvent(v8::Local<v8::Context>,
+                              v8::Local<v8::Object> executionState,
+                              v8::Local<v8::Object> eventData);
+
+  v8::Local<v8::Value> collectionEntries(v8::Local<v8::Context>,
+                                         v8::Local<v8::Object>);
+  v8::Local<v8::Value> generatorObjectLocation(v8::Local<v8::Context>,
+                                               v8::Local<v8::Object>);
+  v8::Local<v8::Value> functionLocation(v8::Local<v8::Context>,
+                                        v8::Local<v8::Function>);
+  v8::MaybeLocal<v8::Value> functionScopes(v8::Local<v8::Context>,
+                                           v8::Local<v8::Function>);
+
+  v8::Isolate* m_isolate;
+  V8InspectorImpl* m_inspector;
+  int m_lastContextId;
+  int m_enableCount;
+  bool m_breakpointsActivated;
+  v8::Global<v8::Object> m_debuggerScript;
+  v8::Global<v8::Context> m_debuggerContext;
+  v8::Local<v8::Object> m_executionState;
+  v8::Local<v8::Context> m_pausedContext;
+  bool m_runningNestedMessageLoop;
+  int m_ignoreScriptParsedEventsCounter;
+
+  using AsyncTaskToStackTrace =
+      protocol::HashMap<void*, std::unique_ptr<V8StackTraceImpl>>;
+  AsyncTaskToStackTrace m_asyncTaskStacks;
+  protocol::HashSet<void*> m_recurringTasks;
+  int m_maxAsyncCallStackDepth;
+  std::vector<void*> m_currentTasks;
+  std::vector<std::unique_ptr<V8StackTraceImpl>> m_currentStacks;
+  protocol::HashMap<V8DebuggerAgentImpl*, int> m_maxAsyncCallStackDepthMap;
+
+  DISALLOW_COPY_AND_ASSIGN(V8Debugger);
+};
+
+}  // namespace v8_inspector
+
+#endif  // V8_INSPECTOR_V8DEBUGGER_H_
diff --git a/src/inspector/v8-function-call.cc b/src/inspector/v8-function-call.cc
new file mode 100644
index 0000000..3880e31
--- /dev/null
+++ b/src/inspector/v8-function-call.cc
@@ -0,0 +1,111 @@
+/*
+ * Copyright (C) 2009 Google Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ *     * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ *     * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "src/inspector/v8-function-call.h"
+
+#include "src/inspector/string-util.h"
+#include "src/inspector/v8-debugger.h"
+#include "src/inspector/v8-inspector-impl.h"
+
+#include "include/v8-inspector.h"
+
+namespace v8_inspector {
+
+V8FunctionCall::V8FunctionCall(V8InspectorImpl* inspector,
+                               v8::Local<v8::Context> context,
+                               v8::Local<v8::Value> value, const String16& name)
+    : m_inspector(inspector),
+      m_context(context),
+      m_name(toV8String(context->GetIsolate(), name)),
+      m_value(value) {}
+
+void V8FunctionCall::appendArgument(v8::Local<v8::Value> value) {
+  m_arguments.push_back(value);
+}
+
+void V8FunctionCall::appendArgument(const String16& argument) {
+  m_arguments.push_back(toV8String(m_context->GetIsolate(), argument));
+}
+
+void V8FunctionCall::appendArgument(int argument) {
+  m_arguments.push_back(v8::Number::New(m_context->GetIsolate(), argument));
+}
+
+void V8FunctionCall::appendArgument(bool argument) {
+  m_arguments.push_back(argument ? v8::True(m_context->GetIsolate())
+                                 : v8::False(m_context->GetIsolate()));
+}
+
+v8::Local<v8::Value> V8FunctionCall::call(bool& hadException,
+                                          bool reportExceptions) {
+  v8::TryCatch tryCatch(m_context->GetIsolate());
+  tryCatch.SetVerbose(reportExceptions);
+
+  v8::Local<v8::Value> result = callWithoutExceptionHandling();
+  hadException = tryCatch.HasCaught();
+  return result;
+}
+
+v8::Local<v8::Value> V8FunctionCall::callWithoutExceptionHandling() {
+  v8::Local<v8::Object> thisObject = v8::Local<v8::Object>::Cast(m_value);
+  v8::Local<v8::Value> value;
+  if (!thisObject->Get(m_context, m_name).ToLocal(&value))
+    return v8::Local<v8::Value>();
+
+  DCHECK(value->IsFunction());
+
+  v8::Local<v8::Function> function = v8::Local<v8::Function>::Cast(value);
+  std::unique_ptr<v8::Local<v8::Value>[]> info(
+      new v8::Local<v8::Value>[m_arguments.size()]);
+  for (size_t i = 0; i < m_arguments.size(); ++i) {
+    info[i] = m_arguments[i];
+    DCHECK(!info[i].IsEmpty());
+  }
+
+  int contextGroupId = V8Debugger::getGroupId(m_context);
+  if (contextGroupId) {
+    m_inspector->client()->muteMetrics(contextGroupId);
+    m_inspector->muteExceptions(contextGroupId);
+  }
+  v8::MicrotasksScope microtasksScope(m_context->GetIsolate(),
+                                      v8::MicrotasksScope::kDoNotRunMicrotasks);
+  v8::MaybeLocal<v8::Value> maybeResult = function->Call(
+      m_context, thisObject, static_cast<int>(m_arguments.size()), info.get());
+  if (contextGroupId) {
+    m_inspector->client()->unmuteMetrics(contextGroupId);
+    m_inspector->unmuteExceptions(contextGroupId);
+  }
+
+  v8::Local<v8::Value> result;
+  if (!maybeResult.ToLocal(&result)) return v8::Local<v8::Value>();
+  return result;
+}
+
+}  // namespace v8_inspector
diff --git a/src/inspector/v8-function-call.h b/src/inspector/v8-function-call.h
new file mode 100644
index 0000000..0337caa
--- /dev/null
+++ b/src/inspector/v8-function-call.h
@@ -0,0 +1,65 @@
+/*
+ * Copyright (C) 2009 Google Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ *     * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ *     * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef V8_INSPECTOR_V8FUNCTIONCALL_H_
+#define V8_INSPECTOR_V8FUNCTIONCALL_H_
+
+#include "src/inspector/string-16.h"
+
+#include "include/v8.h"
+
+namespace v8_inspector {
+
+class V8InspectorImpl;
+
+class V8FunctionCall {
+ public:
+  V8FunctionCall(V8InspectorImpl*, v8::Local<v8::Context>, v8::Local<v8::Value>,
+                 const String16& name);
+
+  void appendArgument(v8::Local<v8::Value>);
+  void appendArgument(const String16&);
+  void appendArgument(int);
+  void appendArgument(bool);
+
+  v8::Local<v8::Value> call(bool& hadException, bool reportExceptions = true);
+  v8::Local<v8::Value> callWithoutExceptionHandling();
+
+ protected:
+  V8InspectorImpl* m_inspector;
+  v8::Local<v8::Context> m_context;
+  std::vector<v8::Local<v8::Value>> m_arguments;
+  v8::Local<v8::String> m_name;
+  v8::Local<v8::Value> m_value;
+};
+
+}  // namespace v8_inspector
+
+#endif  // V8_INSPECTOR_V8FUNCTIONCALL_H_
diff --git a/src/inspector/v8-heap-profiler-agent-impl.cc b/src/inspector/v8-heap-profiler-agent-impl.cc
new file mode 100644
index 0000000..84c890b
--- /dev/null
+++ b/src/inspector/v8-heap-profiler-agent-impl.cc
@@ -0,0 +1,407 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/inspector/v8-heap-profiler-agent-impl.h"
+
+#include "src/inspector/injected-script.h"
+#include "src/inspector/protocol/Protocol.h"
+#include "src/inspector/string-util.h"
+#include "src/inspector/v8-debugger.h"
+#include "src/inspector/v8-inspector-impl.h"
+#include "src/inspector/v8-inspector-session-impl.h"
+
+#include "include/v8-inspector.h"
+#include "include/v8-profiler.h"
+#include "include/v8-version.h"
+
+namespace v8_inspector {
+
namespace {

// Keys under which this agent persists its state in the per-session
// DictionaryValue, so restore() can resume after a frontend reconnect.
namespace HeapProfilerAgentState {
static const char heapProfilerEnabled[] = "heapProfilerEnabled";
static const char heapObjectsTrackingEnabled[] = "heapObjectsTrackingEnabled";
static const char allocationTrackingEnabled[] = "allocationTrackingEnabled";
static const char samplingHeapProfilerEnabled[] = "samplingHeapProfilerEnabled";
static const char samplingHeapProfilerInterval[] =
    "samplingHeapProfilerInterval";
}

// Forwards V8's snapshot-progress callbacks to the protocol frontend as
// HeapProfiler.reportHeapSnapshotProgress events.
class HeapSnapshotProgress final : public v8::ActivityControl {
 public:
  explicit HeapSnapshotProgress(protocol::HeapProfiler::Frontend* frontend)
      : m_frontend(frontend) {}
  ControlOption ReportProgressValue(int done, int total) override {
    m_frontend->reportHeapSnapshotProgress(done, total,
                                           protocol::Maybe<bool>());
    if (done >= total) {
      // Emit a final event with finished=true so the frontend can close
      // its progress indicator.
      m_frontend->reportHeapSnapshotProgress(total, total, true);
    }
    m_frontend->flush();
    return kContinue;  // this implementation never cancels snapshotting
  }

 private:
  protocol::HeapProfiler::Frontend* m_frontend;
};

// Labels global objects in a snapshot with the origin of their inspected
// context. Returned C strings are carved out of a fixed 10000-byte arena
// that must outlive snapshot serialization.
class GlobalObjectNameResolver final
    : public v8::HeapProfiler::ObjectNameResolver {
 public:
  explicit GlobalObjectNameResolver(V8InspectorSessionImpl* session)
      : m_offset(0), m_strings(10000), m_session(session) {}

  const char* GetName(v8::Local<v8::Object> object) override {
    InspectedContext* context = m_session->inspector()->getContext(
        m_session->contextGroupId(),
        V8Debugger::contextId(object->CreationContext()));
    if (!context) return "";
    String16 name = context->origin();
    size_t length = name.length();
    // Fall back to "" when the arena cannot hold the name plus its NUL.
    if (m_offset + length + 1 >= m_strings.size()) return "";
    for (size_t i = 0; i < length; ++i) {
      UChar ch = name[i];
      // Snapshot strings are 8-bit; non-latin1 characters degrade to '?'.
      m_strings[m_offset + i] = ch > 0xff ? '?' : static_cast<char>(ch);
    }
    m_strings[m_offset + length] = '\0';
    char* result = &*m_strings.begin() + m_offset;
    m_offset += length + 1;
    return result;
  }

 private:
  size_t m_offset;             // next free byte in the arena
  std::vector<char> m_strings; // arena backing all returned names
  V8InspectorSessionImpl* m_session;
};

// Streams the serialized heap snapshot to the frontend in 100KB chunks via
// HeapProfiler.addHeapSnapshotChunk events.
class HeapSnapshotOutputStream final : public v8::OutputStream {
 public:
  explicit HeapSnapshotOutputStream(protocol::HeapProfiler::Frontend* frontend)
      : m_frontend(frontend) {}
  void EndOfStream() override {}
  int GetChunkSize() override { return 102400; }
  WriteResult WriteAsciiChunk(char* data, int size) override {
    m_frontend->addHeapSnapshotChunk(String16(data, size));
    m_frontend->flush();
    return kContinue;
  }

 private:
  protocol::HeapProfiler::Frontend* m_frontend;
};

// Resolves a heap-snapshot object id back to the live object, or an empty
// handle if the id is unknown or does not refer to an object.
v8::Local<v8::Object> objectByHeapObjectId(v8::Isolate* isolate, int id) {
  v8::HeapProfiler* profiler = isolate->GetHeapProfiler();
  v8::Local<v8::Value> value = profiler->FindObjectById(id);
  if (value.IsEmpty() || !value->IsObject()) return v8::Local<v8::Object>();
  return value.As<v8::Object>();
}

// Session "inspected object" that stores only the snapshot id and
// re-resolves the live object lazily on each get().
class InspectableHeapObject final : public V8InspectorSession::Inspectable {
 public:
  explicit InspectableHeapObject(int heapObjectId)
      : m_heapObjectId(heapObjectId) {}
  v8::Local<v8::Value> get(v8::Local<v8::Context> context) override {
    return objectByHeapObjectId(context->GetIsolate(), m_heapObjectId);
  }

 private:
  int m_heapObjectId;
};

// Translates v8::HeapStatsUpdate triples into the flat int array expected by
// the HeapProfiler.heapStatsUpdate protocol event.
class HeapStatsStream final : public v8::OutputStream {
 public:
  explicit HeapStatsStream(protocol::HeapProfiler::Frontend* frontend)
      : m_frontend(frontend) {}

  void EndOfStream() override {}

  WriteResult WriteAsciiChunk(char* data, int size) override {
    // GetHeapStats() only produces stats chunks; ASCII output is unexpected.
    DCHECK(false);
    return kAbort;
  }

  WriteResult WriteHeapStatsChunk(v8::HeapStatsUpdate* updateData,
                                  int count) override {
    DCHECK_GT(count, 0);
    // Each update is flattened to three consecutive ints:
    // [index, count, size].
    std::unique_ptr<protocol::Array<int>> statsDiff =
        protocol::Array<int>::create();
    for (int i = 0; i < count; ++i) {
      statsDiff->addItem(updateData[i].index);
      statsDiff->addItem(updateData[i].count);
      statsDiff->addItem(updateData[i].size);
    }
    m_frontend->heapStatsUpdate(std::move(statsDiff));
    return kContinue;
  }

 private:
  protocol::HeapProfiler::Frontend* m_frontend;
};

}  // namespace
+
// The agent caches the session's isolate; `state` is owned by the session
// and persists protocol settings across reconnects (see restore()).
V8HeapProfilerAgentImpl::V8HeapProfilerAgentImpl(
    V8InspectorSessionImpl* session, protocol::FrontendChannel* frontendChannel,
    protocol::DictionaryValue* state)
    : m_session(session),
      m_isolate(session->inspector()->isolate()),
      m_frontend(frontendChannel),
      m_state(state),
      m_hasTimer(false) {}

V8HeapProfilerAgentImpl::~V8HeapProfilerAgentImpl() {}
+
// Re-arms the agent from persisted state after a frontend reconnect:
// resets frontend profiles, resumes heap-object tracking, and restarts the
// sampling profiler with the previously stored interval.
void V8HeapProfilerAgentImpl::restore() {
  if (m_state->booleanProperty(HeapProfilerAgentState::heapProfilerEnabled,
                               false))
    m_frontend.resetProfiles();
  if (m_state->booleanProperty(
          HeapProfilerAgentState::heapObjectsTrackingEnabled, false))
    startTrackingHeapObjectsInternal(m_state->booleanProperty(
        HeapProfilerAgentState::allocationTrackingEnabled, false));
  if (m_state->booleanProperty(
          HeapProfilerAgentState::samplingHeapProfilerEnabled, false)) {
    ErrorString error;
    // startSampling() always persists a non-negative interval, so -1 here
    // would indicate corrupted state.
    double samplingInterval = m_state->doubleProperty(
        HeapProfilerAgentState::samplingHeapProfilerInterval, -1);
    DCHECK_GE(samplingInterval, 0);
    startSampling(&error, Maybe<double>(samplingInterval));
  }
}
+
// HeapProfiler.collectGarbage: asks V8 for its most aggressive collection
// via the low-memory notification.
void V8HeapProfilerAgentImpl::collectGarbage(ErrorString*) {
  m_isolate->LowMemoryNotification();
}
+
+void V8HeapProfilerAgentImpl::startTrackingHeapObjects(
+    ErrorString*, const protocol::Maybe<bool>& trackAllocations) {
+  m_state->setBoolean(HeapProfilerAgentState::heapObjectsTrackingEnabled, true);
+  bool allocationTrackingEnabled = trackAllocations.fromMaybe(false);
+  m_state->setBoolean(HeapProfilerAgentState::allocationTrackingEnabled,
+                      allocationTrackingEnabled);
+  startTrackingHeapObjectsInternal(allocationTrackingEnabled);
+}
+
// HeapProfiler.stopTrackingHeapObjects: pushes one last stats update and a
// final snapshot before tearing down tracking — order matters here.
void V8HeapProfilerAgentImpl::stopTrackingHeapObjects(
    ErrorString* error, const protocol::Maybe<bool>& reportProgress) {
  requestHeapStatsUpdate();
  takeHeapSnapshot(error, reportProgress);
  stopTrackingHeapObjectsInternal();
}
+
// HeapProfiler.enable: only records the flag; actual work starts with the
// tracking/sampling/snapshot commands.
void V8HeapProfilerAgentImpl::enable(ErrorString*) {
  m_state->setBoolean(HeapProfilerAgentState::heapProfilerEnabled, true);
}
+
+void V8HeapProfilerAgentImpl::disable(ErrorString* error) {
+  stopTrackingHeapObjectsInternal();
+  if (m_state->booleanProperty(
+          HeapProfilerAgentState::samplingHeapProfilerEnabled, false)) {
+    v8::HeapProfiler* profiler = m_isolate->GetHeapProfiler();
+    if (profiler) profiler->StopSamplingHeapProfiler();
+  }
+  m_isolate->GetHeapProfiler()->ClearObjectIds();
+  m_state->setBoolean(HeapProfilerAgentState::heapProfilerEnabled, false);
+}
+
// HeapProfiler.takeHeapSnapshot: captures a full heap snapshot, optionally
// reporting progress events, streams it to the frontend in chunks, then
// frees the snapshot.
void V8HeapProfilerAgentImpl::takeHeapSnapshot(
    ErrorString* errorString, const protocol::Maybe<bool>& reportProgress) {
  v8::HeapProfiler* profiler = m_isolate->GetHeapProfiler();
  if (!profiler) {
    *errorString = "Cannot access v8 heap profiler";
    return;
  }
  std::unique_ptr<HeapSnapshotProgress> progress;
  if (reportProgress.fromMaybe(false))
    progress = wrapUnique(new HeapSnapshotProgress(&m_frontend));

  // The resolver labels global objects with their context origin; it must
  // stay alive until Serialize() below is done with the snapshot strings.
  GlobalObjectNameResolver resolver(m_session);
  const v8::HeapSnapshot* snapshot =
      profiler->TakeHeapSnapshot(progress.get(), &resolver);
  if (!snapshot) {
    *errorString = "Failed to take heap snapshot";
    return;
  }
  HeapSnapshotOutputStream stream(&m_frontend);
  snapshot->Serialize(&stream);
  // Delete() is non-const; the snapshot is owned by the profiler until then.
  const_cast<v8::HeapSnapshot*>(snapshot)->Delete();
}
+
+void V8HeapProfilerAgentImpl::getObjectByHeapObjectId(
+    ErrorString* error, const String16& heapSnapshotObjectId,
+    const protocol::Maybe<String16>& objectGroup,
+    std::unique_ptr<protocol::Runtime::RemoteObject>* result) {
+  bool ok;
+  int id = heapSnapshotObjectId.toInteger(&ok);
+  if (!ok) {
+    *error = "Invalid heap snapshot object id";
+    return;
+  }
+
+  v8::HandleScope handles(m_isolate);
+  v8::Local<v8::Object> heapObject = objectByHeapObjectId(m_isolate, id);
+  if (heapObject.IsEmpty()) {
+    *error = "Object is not available";
+    return;
+  }
+
+  if (!m_session->inspector()->client()->isInspectableHeapObject(heapObject)) {
+    *error = "Object is not available";
+    return;
+  }
+
+  *result = m_session->wrapObject(heapObject->CreationContext(), heapObject,
+                                  objectGroup.fromMaybe(""), false);
+  if (!result) *error = "Object is not available";
+}
+
+void V8HeapProfilerAgentImpl::addInspectedHeapObject(
+    ErrorString* errorString, const String16& inspectedHeapObjectId) {
+  bool ok;
+  int id = inspectedHeapObjectId.toInteger(&ok);
+  if (!ok) {
+    *errorString = "Invalid heap snapshot object id";
+    return;
+  }
+
+  v8::HandleScope handles(m_isolate);
+  v8::Local<v8::Object> heapObject = objectByHeapObjectId(m_isolate, id);
+  if (heapObject.IsEmpty()) {
+    *errorString = "Object is not available";
+    return;
+  }
+
+  if (!m_session->inspector()->client()->isInspectableHeapObject(heapObject)) {
+    *errorString = "Object is not available";
+    return;
+  }
+
+  m_session->addInspectedObject(wrapUnique(new InspectableHeapObject(id)));
+}
+
// HeapProfiler.getHeapObjectId: maps a Runtime object id to the profiler's
// snapshot object id (rendered as a decimal string).
void V8HeapProfilerAgentImpl::getHeapObjectId(ErrorString* errorString,
                                              const String16& objectId,
                                              String16* heapSnapshotObjectId) {
  v8::HandleScope handles(m_isolate);
  v8::Local<v8::Value> value;
  v8::Local<v8::Context> context;
  // unwrapObject() sets *errorString on failure; undefined values get no id.
  if (!m_session->unwrapObject(errorString, objectId, &value, &context,
                               nullptr) ||
      value->IsUndefined())
    return;

  // SnapshotObjectId is unsigned; go through size_t to format it as such.
  v8::SnapshotObjectId id = m_isolate->GetHeapProfiler()->GetObjectId(value);
  *heapSnapshotObjectId = String16::fromInteger(static_cast<size_t>(id));
}
+
// Pushes incremental heap statistics to the frontend (heapStatsUpdate
// events via HeapStatsStream) followed by a lastSeenObjectId marker.
void V8HeapProfilerAgentImpl::requestHeapStatsUpdate() {
  HeapStatsStream stream(&m_frontend);
  v8::SnapshotObjectId lastSeenObjectId =
      m_isolate->GetHeapProfiler()->GetHeapStats(&stream);
  m_frontend.lastSeenObjectId(
      lastSeenObjectId, m_session->inspector()->client()->currentTimeMS());
}
+
+// static
+void V8HeapProfilerAgentImpl::onTimer(void* data) {
+  reinterpret_cast<V8HeapProfilerAgentImpl*>(data)->requestHeapStatsUpdate();
+}
+
// Starts heap-object tracking in V8 and arms a 50ms repeating timer that
// streams stats updates to the frontend (see onTimer).
void V8HeapProfilerAgentImpl::startTrackingHeapObjectsInternal(
    bool trackAllocations) {
  m_isolate->GetHeapProfiler()->StartTrackingHeapObjects(trackAllocations);
  if (!m_hasTimer) {
    m_hasTimer = true;
    m_session->inspector()->client()->startRepeatingTimer(
        0.05, &V8HeapProfilerAgentImpl::onTimer, reinterpret_cast<void*>(this));
  }
}
+
// Cancels the stats-update timer (if armed), stops tracking in V8, and
// clears the persisted tracking flags. Safe to call when not tracking.
void V8HeapProfilerAgentImpl::stopTrackingHeapObjectsInternal() {
  if (m_hasTimer) {
    m_session->inspector()->client()->cancelTimer(
        reinterpret_cast<void*>(this));
    m_hasTimer = false;
  }
  m_isolate->GetHeapProfiler()->StopTrackingHeapObjects();
  m_state->setBoolean(HeapProfilerAgentState::heapObjectsTrackingEnabled,
                      false);
  m_state->setBoolean(HeapProfilerAgentState::allocationTrackingEnabled, false);
}
+
// HeapProfiler.startSampling: starts V8's sampling heap profiler. The
// interval (default 32KiB between samples) is persisted so restore() can
// restart sampling with the same value after a reconnect.
void V8HeapProfilerAgentImpl::startSampling(
    ErrorString* errorString, const Maybe<double>& samplingInterval) {
  v8::HeapProfiler* profiler = m_isolate->GetHeapProfiler();
  if (!profiler) {
    *errorString = "Cannot access v8 heap profiler";
    return;
  }
  const unsigned defaultSamplingInterval = 1 << 15;
  double samplingIntervalValue =
      samplingInterval.fromMaybe(defaultSamplingInterval);
  m_state->setDouble(HeapProfilerAgentState::samplingHeapProfilerInterval,
                     samplingIntervalValue);
  m_state->setBoolean(HeapProfilerAgentState::samplingHeapProfilerEnabled,
                      true);
  // 128 = max stack depth recorded per sample; kSamplingForceGC makes the
  // profiler GC before reporting so only live objects are counted.
  profiler->StartSamplingHeapProfiler(
      static_cast<uint64_t>(samplingIntervalValue), 128,
      v8::HeapProfiler::kSamplingForceGC);
}
+
namespace {
// Recursively converts a v8::AllocationProfile node tree into the protocol's
// SamplingHeapProfileNode tree. Self size is the sum of size*count over the
// node's own allocations (children are converted separately).
// NOTE(review): "Samping" is a typo for "Sampling", kept because callers in
// this file use this exact name.
std::unique_ptr<protocol::HeapProfiler::SamplingHeapProfileNode>
buildSampingHeapProfileNode(const v8::AllocationProfile::Node* node) {
  auto children = protocol::Array<
      protocol::HeapProfiler::SamplingHeapProfileNode>::create();
  for (const auto* child : node->children)
    children->addItem(buildSampingHeapProfileNode(child));
  size_t selfSize = 0;
  for (const auto& allocation : node->allocations)
    selfSize += allocation.size * allocation.count;
  // V8 line/column numbers are 1-based; the protocol expects 0-based.
  std::unique_ptr<protocol::Runtime::CallFrame> callFrame =
      protocol::Runtime::CallFrame::create()
          .setFunctionName(toProtocolString(node->name))
          .setScriptId(String16::fromInteger(node->script_id))
          .setUrl(toProtocolString(node->script_name))
          .setLineNumber(node->line_number - 1)
          .setColumnNumber(node->column_number - 1)
          .build();
  std::unique_ptr<protocol::HeapProfiler::SamplingHeapProfileNode> result =
      protocol::HeapProfiler::SamplingHeapProfileNode::create()
          .setCallFrame(std::move(callFrame))
          .setSelfSize(selfSize)
          .setChildren(std::move(children))
          .build();
  return result;
}
}  // namespace
+
// HeapProfiler.stopSampling: grabs the accumulated allocation profile,
// stops the sampling profiler, and converts the profile tree to protocol
// form. The profile must be fetched before StopSamplingHeapProfiler().
void V8HeapProfilerAgentImpl::stopSampling(
    ErrorString* errorString,
    std::unique_ptr<protocol::HeapProfiler::SamplingHeapProfile>* profile) {
  v8::HeapProfiler* profiler = m_isolate->GetHeapProfiler();
  if (!profiler) {
    *errorString = "Cannot access v8 heap profiler";
    return;
  }
  v8::HandleScope scope(
      m_isolate);  // Allocation profile contains Local handles.
  std::unique_ptr<v8::AllocationProfile> v8Profile(
      profiler->GetAllocationProfile());
  profiler->StopSamplingHeapProfiler();
  // Sampling is off regardless of whether a profile was produced.
  m_state->setBoolean(HeapProfilerAgentState::samplingHeapProfilerEnabled,
                      false);
  if (!v8Profile) {
    *errorString = "Cannot access v8 sampled heap profile.";
    return;
  }
  v8::AllocationProfile::Node* root = v8Profile->GetRootNode();
  *profile = protocol::HeapProfiler::SamplingHeapProfile::create()
                 .setHead(buildSampingHeapProfileNode(root))
                 .build();
}
+
+}  // namespace v8_inspector
diff --git a/src/inspector/v8-heap-profiler-agent-impl.h b/src/inspector/v8-heap-profiler-agent-impl.h
new file mode 100644
index 0000000..caa9698
--- /dev/null
+++ b/src/inspector/v8-heap-profiler-agent-impl.h
@@ -0,0 +1,73 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_INSPECTOR_V8HEAPPROFILERAGENTIMPL_H_
+#define V8_INSPECTOR_V8HEAPPROFILERAGENTIMPL_H_
+
+#include "src/base/macros.h"
+#include "src/inspector/protocol/Forward.h"
+#include "src/inspector/protocol/HeapProfiler.h"
+
+#include "include/v8.h"
+
+namespace v8_inspector {
+
+class V8InspectorSessionImpl;
+
+using protocol::ErrorString;
+using protocol::Maybe;
+
+class V8HeapProfilerAgentImpl : public protocol::HeapProfiler::Backend {
+ public:
+  V8HeapProfilerAgentImpl(V8InspectorSessionImpl*, protocol::FrontendChannel*,
+                          protocol::DictionaryValue* state);
+  ~V8HeapProfilerAgentImpl() override;
+  void restore();
+
+  void collectGarbage(ErrorString*) override;
+
+  void enable(ErrorString*) override;
+  void startTrackingHeapObjects(ErrorString*,
+                                const Maybe<bool>& trackAllocations) override;
+  void stopTrackingHeapObjects(ErrorString*,
+                               const Maybe<bool>& reportProgress) override;
+
+  void disable(ErrorString*) override;
+
+  void takeHeapSnapshot(ErrorString*,
+                        const Maybe<bool>& reportProgress) override;
+
+  void getObjectByHeapObjectId(
+      ErrorString*, const String16& heapSnapshotObjectId,
+      const Maybe<String16>& objectGroup,
+      std::unique_ptr<protocol::Runtime::RemoteObject>* result) override;
+  void addInspectedHeapObject(ErrorString*,
+                              const String16& inspectedHeapObjectId) override;
+  void getHeapObjectId(ErrorString*, const String16& objectId,
+                       String16* heapSnapshotObjectId) override;
+
+  void startSampling(ErrorString*,
+                     const Maybe<double>& samplingInterval) override;
+  void stopSampling(
+      ErrorString*,
+      std::unique_ptr<protocol::HeapProfiler::SamplingHeapProfile>*) override;
+
+ private:
+  void startTrackingHeapObjectsInternal(bool trackAllocations);
+  void stopTrackingHeapObjectsInternal();
+  void requestHeapStatsUpdate();
+  static void onTimer(void*);
+
+  V8InspectorSessionImpl* m_session;
+  v8::Isolate* m_isolate;
+  protocol::HeapProfiler::Frontend m_frontend;
+  protocol::DictionaryValue* m_state;
+  bool m_hasTimer;
+
+  DISALLOW_COPY_AND_ASSIGN(V8HeapProfilerAgentImpl);
+};
+
+}  // namespace v8_inspector
+
+#endif  // V8_INSPECTOR_V8HEAPPROFILERAGENTIMPL_H_
diff --git a/src/inspector/v8-injected-script-host.cc b/src/inspector/v8-injected-script-host.cc
new file mode 100644
index 0000000..dc41ef8
--- /dev/null
+++ b/src/inspector/v8-injected-script-host.cc
@@ -0,0 +1,216 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/inspector/v8-injected-script-host.h"
+
+#include "src/base/macros.h"
+#include "src/inspector/injected-script-native.h"
+#include "src/inspector/string-util.h"
+#include "src/inspector/v8-debugger.h"
+#include "src/inspector/v8-inspector-impl.h"
+#include "src/inspector/v8-internal-value-type.h"
+#include "src/inspector/v8-value-copier.h"
+
+#include "include/v8-inspector.h"
+
+namespace v8_inspector {
+
+namespace {
+
+void setFunctionProperty(v8::Local<v8::Context> context,
+                         v8::Local<v8::Object> obj, const char* name,
+                         v8::FunctionCallback callback,
+                         v8::Local<v8::External> external) {
+  v8::Local<v8::String> funcName =
+      toV8StringInternalized(context->GetIsolate(), name);
+  v8::Local<v8::Function> func;
+  if (!v8::Function::New(context, callback, external, 0,
+                         v8::ConstructorBehavior::kThrow)
+           .ToLocal(&func))
+    return;
+  func->SetName(funcName);
+  createDataProperty(context, obj, funcName, func);
+}
+
+V8InspectorImpl* unwrapInspector(
+    const v8::FunctionCallbackInfo<v8::Value>& info) {
+  DCHECK(!info.Data().IsEmpty());
+  DCHECK(info.Data()->IsExternal());
+  V8InspectorImpl* inspector =
+      static_cast<V8InspectorImpl*>(info.Data().As<v8::External>()->Value());
+  DCHECK(inspector);
+  return inspector;
+}
+
+}  // namespace
+
// Builds the InjectedScriptHost object handed to the injected script: a
// null-prototype object (so page code cannot poison it via Object.prototype)
// carrying the native callback functions below. The inspector pointer rides
// along as each callback's External data.
v8::Local<v8::Object> V8InjectedScriptHost::create(
    v8::Local<v8::Context> context, V8InspectorImpl* inspector) {
  v8::Isolate* isolate = inspector->isolate();
  v8::Local<v8::Object> injectedScriptHost = v8::Object::New(isolate);
  bool success = injectedScriptHost->SetPrototype(context, v8::Null(isolate))
                     .FromMaybe(false);
  DCHECK(success);
  USE(success);
  v8::Local<v8::External> debuggerExternal =
      v8::External::New(isolate, inspector);
  setFunctionProperty(context, injectedScriptHost, "internalConstructorName",
                      V8InjectedScriptHost::internalConstructorNameCallback,
                      debuggerExternal);
  setFunctionProperty(
      context, injectedScriptHost, "formatAccessorsAsProperties",
      V8InjectedScriptHost::formatAccessorsAsProperties, debuggerExternal);
  setFunctionProperty(context, injectedScriptHost, "subtype",
                      V8InjectedScriptHost::subtypeCallback, debuggerExternal);
  setFunctionProperty(context, injectedScriptHost, "getInternalProperties",
                      V8InjectedScriptHost::getInternalPropertiesCallback,
                      debuggerExternal);
  setFunctionProperty(context, injectedScriptHost, "objectHasOwnProperty",
                      V8InjectedScriptHost::objectHasOwnPropertyCallback,
                      debuggerExternal);
  setFunctionProperty(context, injectedScriptHost, "bind",
                      V8InjectedScriptHost::bindCallback, debuggerExternal);
  setFunctionProperty(context, injectedScriptHost, "proxyTargetValue",
                      V8InjectedScriptHost::proxyTargetValueCallback,
                      debuggerExternal);
  return injectedScriptHost;
}
+
+void V8InjectedScriptHost::internalConstructorNameCallback(
+    const v8::FunctionCallbackInfo<v8::Value>& info) {
+  if (info.Length() < 1 || !info[0]->IsObject()) return;
+
+  v8::Local<v8::Object> object = info[0].As<v8::Object>();
+  info.GetReturnValue().Set(object->GetConstructorName());
+}
+
// Args: (value, getterFunction). Answers whether the accessor should be
// presented as a plain property. Only native accessors (no script id) are
// forwarded to the embedder; user-defined getters always get `false`.
void V8InjectedScriptHost::formatAccessorsAsProperties(
    const v8::FunctionCallbackInfo<v8::Value>& info) {
  DCHECK_EQ(info.Length(), 2);
  info.GetReturnValue().Set(false);
  if (!info[1]->IsFunction()) return;
  // Check that function is user-defined.
  if (info[1].As<v8::Function>()->ScriptId() != v8::UnboundScript::kNoScriptId)
    return;
  info.GetReturnValue().Set(
      unwrapInspector(info)->client()->formatAccessorsAsProperties(info[0]));
}
+
+void V8InjectedScriptHost::subtypeCallback(
+    const v8::FunctionCallbackInfo<v8::Value>& info) {
+  if (info.Length() < 1) return;
+
+  v8::Isolate* isolate = info.GetIsolate();
+  v8::Local<v8::Value> value = info[0];
+  if (value->IsObject()) {
+    v8::Local<v8::Value> internalType = v8InternalValueTypeFrom(
+        isolate->GetCurrentContext(), v8::Local<v8::Object>::Cast(value));
+    if (internalType->IsString()) {
+      info.GetReturnValue().Set(internalType);
+      return;
+    }
+  }
+  if (value->IsArray() || value->IsArgumentsObject()) {
+    info.GetReturnValue().Set(toV8StringInternalized(isolate, "array"));
+    return;
+  }
+  if (value->IsTypedArray()) {
+    info.GetReturnValue().Set(toV8StringInternalized(isolate, "typedarray"));
+    return;
+  }
+  if (value->IsDate()) {
+    info.GetReturnValue().Set(toV8StringInternalized(isolate, "date"));
+    return;
+  }
+  if (value->IsRegExp()) {
+    info.GetReturnValue().Set(toV8StringInternalized(isolate, "regexp"));
+    return;
+  }
+  if (value->IsMap() || value->IsWeakMap()) {
+    info.GetReturnValue().Set(toV8StringInternalized(isolate, "map"));
+    return;
+  }
+  if (value->IsSet() || value->IsWeakSet()) {
+    info.GetReturnValue().Set(toV8StringInternalized(isolate, "set"));
+    return;
+  }
+  if (value->IsMapIterator() || value->IsSetIterator()) {
+    info.GetReturnValue().Set(toV8StringInternalized(isolate, "iterator"));
+    return;
+  }
+  if (value->IsGeneratorObject()) {
+    info.GetReturnValue().Set(toV8StringInternalized(isolate, "generator"));
+    return;
+  }
+  if (value->IsNativeError()) {
+    info.GetReturnValue().Set(toV8StringInternalized(isolate, "error"));
+    return;
+  }
+  if (value->IsProxy()) {
+    info.GetReturnValue().Set(toV8StringInternalized(isolate, "proxy"));
+    return;
+  }
+  if (value->IsPromise()) {
+    info.GetReturnValue().Set(toV8StringInternalized(isolate, "promise"));
+    return;
+  }
+  std::unique_ptr<StringBuffer> subtype =
+      unwrapInspector(info)->client()->valueSubtype(value);
+  if (subtype) {
+    info.GetReturnValue().Set(toV8String(isolate, subtype->string()));
+    return;
+  }
+}
+
// Returns the debugger's internal-property pairs for the first argument as
// an array, or nothing when the argument is missing or the lookup fails.
void V8InjectedScriptHost::getInternalPropertiesCallback(
    const v8::FunctionCallbackInfo<v8::Value>& info) {
  if (info.Length() < 1) return;
  v8::Local<v8::Array> properties;
  if (unwrapInspector(info)
          ->debugger()
          ->internalProperties(info.GetIsolate()->GetCurrentContext(), info[0])
          .ToLocal(&properties))
    info.GetReturnValue().Set(properties);
}
+
+void V8InjectedScriptHost::objectHasOwnPropertyCallback(
+    const v8::FunctionCallbackInfo<v8::Value>& info) {
+  if (info.Length() < 2 || !info[0]->IsObject() || !info[1]->IsString()) return;
+  bool result = info[0]
+                    .As<v8::Object>()
+                    ->HasOwnProperty(info.GetIsolate()->GetCurrentContext(),
+                                     v8::Local<v8::String>::Cast(info[1]))
+                    .FromMaybe(false);
+  info.GetReturnValue().Set(v8::Boolean::New(info.GetIsolate(), result));
+}
+
// Args: (value, groupName). Registers `value` with the injected script's
// native bindings under `groupName` and returns the numeric binding id.
void V8InjectedScriptHost::bindCallback(
    const v8::FunctionCallbackInfo<v8::Value>& info) {
  if (info.Length() < 2 || !info[1]->IsString()) return;
  InjectedScriptNative* injectedScriptNative =
      InjectedScriptNative::fromInjectedScriptHost(info.GetIsolate(),
                                                   info.Holder());
  if (!injectedScriptNative) return;

  v8::Local<v8::Context> context = info.GetIsolate()->GetCurrentContext();
  v8::Local<v8::String> v8groupName =
      info[1]->ToString(context).ToLocalChecked();
  String16 groupName = toProtocolStringWithTypeCheck(v8groupName);
  int id = injectedScriptNative->bind(info[0], groupName);
  info.GetReturnValue().Set(id);
}
+
// Args: (proxy). Unwraps nested proxies and returns the innermost
// non-proxy target. Callers must only pass a Proxy (injected-script code
// guarantees this); anything else trips UNREACHABLE in debug builds.
void V8InjectedScriptHost::proxyTargetValueCallback(
    const v8::FunctionCallbackInfo<v8::Value>& info) {
  if (info.Length() != 1 || !info[0]->IsProxy()) {
    UNREACHABLE();
    return;
  }
  v8::Local<v8::Object> target = info[0].As<v8::Proxy>();
  while (target->IsProxy())
    target = v8::Local<v8::Proxy>::Cast(target)->GetTarget();
  info.GetReturnValue().Set(target);
}
+
+}  // namespace v8_inspector
diff --git a/src/inspector/v8-injected-script-host.h b/src/inspector/v8-injected-script-host.h
new file mode 100644
index 0000000..7d293af
--- /dev/null
+++ b/src/inspector/v8-injected-script-host.h
@@ -0,0 +1,46 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_INSPECTOR_V8INJECTEDSCRIPTHOST_H_
+#define V8_INSPECTOR_V8INJECTEDSCRIPTHOST_H_
+
+#include "include/v8.h"
+
+namespace v8_inspector {
+
+class V8InspectorImpl;
+
// SECURITY NOTE: Although the InjectedScriptHost is intended for use solely
// by the inspector, a reference to it may be leaked to the page being
// inspected. Thus, the InjectedScriptHost must never implement methods that
// have more power over the page than the page already has itself
// (e.g. origin restriction bypasses).
+
// Static factory + callbacks for the native host object exposed to the
// injected script (see the security note above the class in this header).
class V8InjectedScriptHost {
 public:
  // We expect that debugger outlives any JS context and thus
  // V8InjectedScriptHost (owned by JS)
  // is destroyed before inspector.
  static v8::Local<v8::Object> create(v8::Local<v8::Context>, V8InspectorImpl*);

 private:
  // v8::FunctionCallback implementations installed by create(); each
  // receives the V8InspectorImpl via its External callback data.
  static void internalConstructorNameCallback(
      const v8::FunctionCallbackInfo<v8::Value>&);
  static void formatAccessorsAsProperties(
      const v8::FunctionCallbackInfo<v8::Value>&);
  static void subtypeCallback(const v8::FunctionCallbackInfo<v8::Value>&);
  static void getInternalPropertiesCallback(
      const v8::FunctionCallbackInfo<v8::Value>&);
  static void objectHasOwnPropertyCallback(
      const v8::FunctionCallbackInfo<v8::Value>&);
  static void bindCallback(const v8::FunctionCallbackInfo<v8::Value>&);
  static void proxyTargetValueCallback(
      const v8::FunctionCallbackInfo<v8::Value>&);
};
+
+}  // namespace v8_inspector
+
+#endif  // V8_INSPECTOR_V8INJECTEDSCRIPTHOST_H_
diff --git a/src/inspector/v8-inspector-impl.cc b/src/inspector/v8-inspector-impl.cc
new file mode 100644
index 0000000..bd68548
--- /dev/null
+++ b/src/inspector/v8-inspector-impl.cc
@@ -0,0 +1,376 @@
+/*
+ * Copyright (c) 2010-2011 Google Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ *     * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ *     * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "src/inspector/v8-inspector-impl.h"
+
+#include "src/inspector/inspected-context.h"
+#include "src/inspector/string-util.h"
+#include "src/inspector/v8-console-agent-impl.h"
+#include "src/inspector/v8-console-message.h"
+#include "src/inspector/v8-debugger-agent-impl.h"
+#include "src/inspector/v8-debugger.h"
+#include "src/inspector/v8-inspector-session-impl.h"
+#include "src/inspector/v8-profiler-agent-impl.h"
+#include "src/inspector/v8-runtime-agent-impl.h"
+#include "src/inspector/v8-stack-trace-impl.h"
+
+namespace v8_inspector {
+
+std::unique_ptr<V8Inspector> V8Inspector::create(v8::Isolate* isolate,
+                                                 V8InspectorClient* client) {
+  return wrapUnique(new V8InspectorImpl(isolate, client));
+}
+
+V8InspectorImpl::V8InspectorImpl(v8::Isolate* isolate,
+                                 V8InspectorClient* client)
+    : m_isolate(isolate),
+      m_client(client),
+      m_debugger(new V8Debugger(isolate, this)),
+      m_capturingStackTracesCount(0),
+      m_lastExceptionId(0) {}
+
+V8InspectorImpl::~V8InspectorImpl() {}
+
+V8DebuggerAgentImpl* V8InspectorImpl::enabledDebuggerAgentForGroup(
+    int contextGroupId) {
+  V8InspectorSessionImpl* session = sessionForContextGroup(contextGroupId);
+  V8DebuggerAgentImpl* agent = session ? session->debuggerAgent() : nullptr;
+  return agent && agent->enabled() ? agent : nullptr;
+}
+
+V8RuntimeAgentImpl* V8InspectorImpl::enabledRuntimeAgentForGroup(
+    int contextGroupId) {
+  V8InspectorSessionImpl* session = sessionForContextGroup(contextGroupId);
+  V8RuntimeAgentImpl* agent = session ? session->runtimeAgent() : nullptr;
+  return agent && agent->enabled() ? agent : nullptr;
+}
+
+V8ProfilerAgentImpl* V8InspectorImpl::enabledProfilerAgentForGroup(
+    int contextGroupId) {
+  V8InspectorSessionImpl* session = sessionForContextGroup(contextGroupId);
+  V8ProfilerAgentImpl* agent = session ? session->profilerAgent() : nullptr;
+  return agent && agent->enabled() ? agent : nullptr;
+}
+
+v8::MaybeLocal<v8::Value> V8InspectorImpl::runCompiledScript(
+    v8::Local<v8::Context> context, v8::Local<v8::Script> script) {
+  v8::MicrotasksScope microtasksScope(m_isolate,
+                                      v8::MicrotasksScope::kRunMicrotasks);
+  int groupId = V8Debugger::getGroupId(context);
+  if (V8DebuggerAgentImpl* agent = enabledDebuggerAgentForGroup(groupId))
+    agent->willExecuteScript(script->GetUnboundScript()->GetId());
+  v8::MaybeLocal<v8::Value> result = script->Run(context);
+  // Get agent from the map again, since it could have detached during script
+  // execution.
+  if (V8DebuggerAgentImpl* agent = enabledDebuggerAgentForGroup(groupId))
+    agent->didExecuteScript();
+  return result;
+}
+
+v8::MaybeLocal<v8::Value> V8InspectorImpl::callFunction(
+    v8::Local<v8::Function> function, v8::Local<v8::Context> context,
+    v8::Local<v8::Value> receiver, int argc, v8::Local<v8::Value> info[]) {
+  v8::MicrotasksScope microtasksScope(m_isolate,
+                                      v8::MicrotasksScope::kRunMicrotasks);
+  int groupId = V8Debugger::getGroupId(context);
+  if (V8DebuggerAgentImpl* agent = enabledDebuggerAgentForGroup(groupId))
+    agent->willExecuteScript(function->ScriptId());
+  v8::MaybeLocal<v8::Value> result =
+      function->Call(context, receiver, argc, info);
+  // Get agent from the map again, since it could have detached during script
+  // execution.
+  if (V8DebuggerAgentImpl* agent = enabledDebuggerAgentForGroup(groupId))
+    agent->didExecuteScript();
+  return result;
+}
+
+v8::MaybeLocal<v8::Value> V8InspectorImpl::compileAndRunInternalScript(
+    v8::Local<v8::Context> context, v8::Local<v8::String> source) {
+  v8::Local<v8::Script> script =
+      compileScript(context, source, String16(), true);
+  if (script.IsEmpty()) return v8::MaybeLocal<v8::Value>();
+  v8::MicrotasksScope microtasksScope(m_isolate,
+                                      v8::MicrotasksScope::kDoNotRunMicrotasks);
+  return script->Run(context);
+}
+
+v8::Local<v8::Script> V8InspectorImpl::compileScript(
+    v8::Local<v8::Context> context, v8::Local<v8::String> code,
+    const String16& fileName, bool markAsInternal) {
+  v8::ScriptOrigin origin(
+      toV8String(m_isolate, fileName), v8::Integer::New(m_isolate, 0),
+      v8::Integer::New(m_isolate, 0),
+      v8::False(m_isolate),  // sharable
+      v8::Local<v8::Integer>(),
+      v8::Boolean::New(m_isolate, markAsInternal),  // internal
+      toV8String(m_isolate, String16()),            // sourceMap
+      v8::True(m_isolate));                         // opaque resource
+  v8::ScriptCompiler::Source source(code, origin);
+  v8::Local<v8::Script> script;
+  if (!v8::ScriptCompiler::Compile(context, &source,
+                                   v8::ScriptCompiler::kNoCompileOptions)
+           .ToLocal(&script))
+    return v8::Local<v8::Script>();
+  return script;
+}
+
+void V8InspectorImpl::enableStackCapturingIfNeeded() {
+  if (!m_capturingStackTracesCount)
+    V8StackTraceImpl::setCaptureStackTraceForUncaughtExceptions(m_isolate,
+                                                                true);
+  ++m_capturingStackTracesCount;
+}
+
+void V8InspectorImpl::disableStackCapturingIfNeeded() {
+  if (!(--m_capturingStackTracesCount))
+    V8StackTraceImpl::setCaptureStackTraceForUncaughtExceptions(m_isolate,
+                                                                false);
+}
+
+void V8InspectorImpl::muteExceptions(int contextGroupId) {
+  m_muteExceptionsMap[contextGroupId]++;
+}
+
+void V8InspectorImpl::unmuteExceptions(int contextGroupId) {
+  m_muteExceptionsMap[contextGroupId]--;
+}
+
+V8ConsoleMessageStorage* V8InspectorImpl::ensureConsoleMessageStorage(
+    int contextGroupId) {
+  ConsoleStorageMap::iterator storageIt =
+      m_consoleStorageMap.find(contextGroupId);
+  if (storageIt == m_consoleStorageMap.end())
+    storageIt =
+        m_consoleStorageMap
+            .insert(std::make_pair(
+                contextGroupId,
+                wrapUnique(new V8ConsoleMessageStorage(this, contextGroupId))))
+            .first;
+  return storageIt->second.get();
+}
+
+bool V8InspectorImpl::hasConsoleMessageStorage(int contextGroupId) {
+  ConsoleStorageMap::iterator storageIt =
+      m_consoleStorageMap.find(contextGroupId);
+  return storageIt != m_consoleStorageMap.end();
+}
+
+std::unique_ptr<V8StackTrace> V8InspectorImpl::createStackTrace(
+    v8::Local<v8::StackTrace> stackTrace) {
+  return m_debugger->createStackTrace(stackTrace);
+}
+
+std::unique_ptr<V8InspectorSession> V8InspectorImpl::connect(
+    int contextGroupId, V8Inspector::Channel* channel,
+    const StringView& state) {
+  DCHECK(m_sessions.find(contextGroupId) == m_sessions.cend());
+  std::unique_ptr<V8InspectorSessionImpl> session =
+      V8InspectorSessionImpl::create(this, contextGroupId, channel, state);
+  m_sessions[contextGroupId] = session.get();
+  return std::move(session);
+}
+
+void V8InspectorImpl::disconnect(V8InspectorSessionImpl* session) {
+  DCHECK(m_sessions.find(session->contextGroupId()) != m_sessions.end());
+  m_sessions.erase(session->contextGroupId());
+}
+
+InspectedContext* V8InspectorImpl::getContext(int groupId,
+                                              int contextId) const {
+  if (!groupId || !contextId) return nullptr;
+
+  ContextsByGroupMap::const_iterator contextGroupIt = m_contexts.find(groupId);
+  if (contextGroupIt == m_contexts.end()) return nullptr;
+
+  ContextByIdMap::iterator contextIt = contextGroupIt->second->find(contextId);
+  if (contextIt == contextGroupIt->second->end()) return nullptr;
+
+  return contextIt->second.get();
+}
+
+void V8InspectorImpl::contextCreated(const V8ContextInfo& info) {
+  int contextId = m_debugger->markContext(info);
+
+  ContextsByGroupMap::iterator contextIt = m_contexts.find(info.contextGroupId);
+  if (contextIt == m_contexts.end())
+    contextIt = m_contexts
+                    .insert(std::make_pair(info.contextGroupId,
+                                           wrapUnique(new ContextByIdMap())))
+                    .first;
+
+  const auto& contextById = contextIt->second;
+
+  DCHECK(contextById->find(contextId) == contextById->cend());
+  InspectedContext* context = new InspectedContext(this, info, contextId);
+  (*contextById)[contextId] = wrapUnique(context);
+  SessionMap::iterator sessionIt = m_sessions.find(info.contextGroupId);
+  if (sessionIt != m_sessions.end())
+    sessionIt->second->runtimeAgent()->reportExecutionContextCreated(context);
+}
+
+void V8InspectorImpl::contextDestroyed(v8::Local<v8::Context> context) {
+  int contextId = V8Debugger::contextId(context);
+  int contextGroupId = V8Debugger::getGroupId(context);
+
+  ConsoleStorageMap::iterator storageIt =
+      m_consoleStorageMap.find(contextGroupId);
+  if (storageIt != m_consoleStorageMap.end())
+    storageIt->second->contextDestroyed(contextId);
+
+  InspectedContext* inspectedContext = getContext(contextGroupId, contextId);
+  if (!inspectedContext) return;
+
+  SessionMap::iterator iter = m_sessions.find(contextGroupId);
+  if (iter != m_sessions.end())
+    iter->second->runtimeAgent()->reportExecutionContextDestroyed(
+        inspectedContext);
+  discardInspectedContext(contextGroupId, contextId);
+}
+
+void V8InspectorImpl::resetContextGroup(int contextGroupId) {
+  m_consoleStorageMap.erase(contextGroupId);
+  m_muteExceptionsMap.erase(contextGroupId);
+  SessionMap::iterator session = m_sessions.find(contextGroupId);
+  if (session != m_sessions.end()) session->second->reset();
+  m_contexts.erase(contextGroupId);
+}
+
+void V8InspectorImpl::willExecuteScript(v8::Local<v8::Context> context,
+                                        int scriptId) {
+  if (V8DebuggerAgentImpl* agent =
+          enabledDebuggerAgentForGroup(V8Debugger::getGroupId(context)))
+    agent->willExecuteScript(scriptId);
+}
+
+void V8InspectorImpl::didExecuteScript(v8::Local<v8::Context> context) {
+  if (V8DebuggerAgentImpl* agent =
+          enabledDebuggerAgentForGroup(V8Debugger::getGroupId(context)))
+    agent->didExecuteScript();
+}
+
+void V8InspectorImpl::idleStarted() {
+  for (auto it = m_sessions.begin(); it != m_sessions.end(); ++it) {
+    if (it->second->profilerAgent()->idleStarted()) return;
+  }
+}
+
+void V8InspectorImpl::idleFinished() {
+  for (auto it = m_sessions.begin(); it != m_sessions.end(); ++it) {
+    if (it->second->profilerAgent()->idleFinished()) return;
+  }
+}
+
+unsigned V8InspectorImpl::exceptionThrown(
+    v8::Local<v8::Context> context, const StringView& message,
+    v8::Local<v8::Value> exception, const StringView& detailedMessage,
+    const StringView& url, unsigned lineNumber, unsigned columnNumber,
+    std::unique_ptr<V8StackTrace> stackTrace, int scriptId) {
+  int contextGroupId = V8Debugger::getGroupId(context);
+  if (!contextGroupId || m_muteExceptionsMap[contextGroupId]) return 0;
+  std::unique_ptr<V8StackTraceImpl> stackTraceImpl =
+      wrapUnique(static_cast<V8StackTraceImpl*>(stackTrace.release()));
+  unsigned exceptionId = nextExceptionId();
+  std::unique_ptr<V8ConsoleMessage> consoleMessage =
+      V8ConsoleMessage::createForException(
+          m_client->currentTimeMS(), toString16(detailedMessage),
+          toString16(url), lineNumber, columnNumber, std::move(stackTraceImpl),
+          scriptId, m_isolate, toString16(message),
+          V8Debugger::contextId(context), exception, exceptionId);
+  ensureConsoleMessageStorage(contextGroupId)
+      ->addMessage(std::move(consoleMessage));
+  return exceptionId;
+}
+
+void V8InspectorImpl::exceptionRevoked(v8::Local<v8::Context> context,
+                                       unsigned exceptionId,
+                                       const StringView& message) {
+  int contextGroupId = V8Debugger::getGroupId(context);
+  if (!contextGroupId) return;
+
+  std::unique_ptr<V8ConsoleMessage> consoleMessage =
+      V8ConsoleMessage::createForRevokedException(
+          m_client->currentTimeMS(), toString16(message), exceptionId);
+  ensureConsoleMessageStorage(contextGroupId)
+      ->addMessage(std::move(consoleMessage));
+}
+
+std::unique_ptr<V8StackTrace> V8InspectorImpl::captureStackTrace(
+    bool fullStack) {
+  return m_debugger->captureStackTrace(fullStack);
+}
+
+void V8InspectorImpl::asyncTaskScheduled(const StringView& taskName, void* task,
+                                         bool recurring) {
+  m_debugger->asyncTaskScheduled(taskName, task, recurring);
+}
+
+void V8InspectorImpl::asyncTaskCanceled(void* task) {
+  m_debugger->asyncTaskCanceled(task);
+}
+
+void V8InspectorImpl::asyncTaskStarted(void* task) {
+  m_debugger->asyncTaskStarted(task);
+}
+
+void V8InspectorImpl::asyncTaskFinished(void* task) {
+  m_debugger->asyncTaskFinished(task);
+}
+
+void V8InspectorImpl::allAsyncTasksCanceled() {
+  m_debugger->allAsyncTasksCanceled();
+}
+
+v8::Local<v8::Context> V8InspectorImpl::regexContext() {
+  if (m_regexContext.IsEmpty())
+    m_regexContext.Reset(m_isolate, v8::Context::New(m_isolate));
+  return m_regexContext.Get(m_isolate);
+}
+
+void V8InspectorImpl::discardInspectedContext(int contextGroupId,
+                                              int contextId) {
+  if (!getContext(contextGroupId, contextId)) return;
+  m_contexts[contextGroupId]->erase(contextId);
+  if (m_contexts[contextGroupId]->empty()) m_contexts.erase(contextGroupId);
+}
+
+const V8InspectorImpl::ContextByIdMap* V8InspectorImpl::contextGroup(
+    int contextGroupId) {
+  ContextsByGroupMap::iterator iter = m_contexts.find(contextGroupId);
+  return iter == m_contexts.end() ? nullptr : iter->second.get();
+}
+
+V8InspectorSessionImpl* V8InspectorImpl::sessionForContextGroup(
+    int contextGroupId) {
+  if (!contextGroupId) return nullptr;
+  SessionMap::iterator iter = m_sessions.find(contextGroupId);
+  return iter == m_sessions.end() ? nullptr : iter->second;
+}
+
+}  // namespace v8_inspector
diff --git a/src/inspector/v8-inspector-impl.h b/src/inspector/v8-inspector-impl.h
new file mode 100644
index 0000000..0ca1a6a
--- /dev/null
+++ b/src/inspector/v8-inspector-impl.h
@@ -0,0 +1,150 @@
+/*
+ * Copyright (c) 2010, Google Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ *     * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ *     * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef V8_INSPECTOR_V8INSPECTORIMPL_H_
+#define V8_INSPECTOR_V8INSPECTORIMPL_H_
+
+#include <vector>
+
+#include "src/base/macros.h"
+#include "src/inspector/protocol/Protocol.h"
+
+#include "include/v8-debug.h"
+#include "include/v8-inspector.h"
+
+namespace v8_inspector {
+
+class InspectedContext;
+class V8ConsoleMessageStorage;
+class V8Debugger;
+class V8DebuggerAgentImpl;
+class V8InspectorSessionImpl;
+class V8ProfilerAgentImpl;
+class V8RuntimeAgentImpl;
+class V8StackTraceImpl;
+
+class V8InspectorImpl : public V8Inspector {
+ public:
+  V8InspectorImpl(v8::Isolate*, V8InspectorClient*);
+  ~V8InspectorImpl() override;
+
+  v8::Isolate* isolate() const { return m_isolate; }
+  V8InspectorClient* client() { return m_client; }
+  V8Debugger* debugger() { return m_debugger.get(); }
+
+  v8::MaybeLocal<v8::Value> runCompiledScript(v8::Local<v8::Context>,
+                                              v8::Local<v8::Script>);
+  v8::MaybeLocal<v8::Value> callFunction(v8::Local<v8::Function>,
+                                         v8::Local<v8::Context>,
+                                         v8::Local<v8::Value> receiver,
+                                         int argc, v8::Local<v8::Value> info[]);
+  v8::MaybeLocal<v8::Value> compileAndRunInternalScript(v8::Local<v8::Context>,
+                                                        v8::Local<v8::String>);
+  v8::Local<v8::Script> compileScript(v8::Local<v8::Context>,
+                                      v8::Local<v8::String>,
+                                      const String16& fileName,
+                                      bool markAsInternal);
+  v8::Local<v8::Context> regexContext();
+
+  // V8Inspector implementation.
+  std::unique_ptr<V8InspectorSession> connect(int contextGroupId,
+                                              V8Inspector::Channel*,
+                                              const StringView& state) override;
+  void contextCreated(const V8ContextInfo&) override;
+  void contextDestroyed(v8::Local<v8::Context>) override;
+  void resetContextGroup(int contextGroupId) override;
+  void willExecuteScript(v8::Local<v8::Context>, int scriptId) override;
+  void didExecuteScript(v8::Local<v8::Context>) override;
+  void idleStarted() override;
+  void idleFinished() override;
+  unsigned exceptionThrown(v8::Local<v8::Context>, const StringView& message,
+                           v8::Local<v8::Value> exception,
+                           const StringView& detailedMessage,
+                           const StringView& url, unsigned lineNumber,
+                           unsigned columnNumber, std::unique_ptr<V8StackTrace>,
+                           int scriptId) override;
+  void exceptionRevoked(v8::Local<v8::Context>, unsigned exceptionId,
+                        const StringView& message) override;
+  std::unique_ptr<V8StackTrace> createStackTrace(
+      v8::Local<v8::StackTrace>) override;
+  std::unique_ptr<V8StackTrace> captureStackTrace(bool fullStack) override;
+  void asyncTaskScheduled(const StringView& taskName, void* task,
+                          bool recurring) override;
+  void asyncTaskCanceled(void* task) override;
+  void asyncTaskStarted(void* task) override;
+  void asyncTaskFinished(void* task) override;
+  void allAsyncTasksCanceled() override;
+
+  unsigned nextExceptionId() { return ++m_lastExceptionId; }
+  void enableStackCapturingIfNeeded();
+  void disableStackCapturingIfNeeded();
+  void muteExceptions(int contextGroupId);
+  void unmuteExceptions(int contextGroupId);
+  V8ConsoleMessageStorage* ensureConsoleMessageStorage(int contextGroupId);
+  bool hasConsoleMessageStorage(int contextGroupId);
+  using ContextByIdMap =
+      protocol::HashMap<int, std::unique_ptr<InspectedContext>>;
+  void discardInspectedContext(int contextGroupId, int contextId);
+  const ContextByIdMap* contextGroup(int contextGroupId);
+  void disconnect(V8InspectorSessionImpl*);
+  V8InspectorSessionImpl* sessionForContextGroup(int contextGroupId);
+  InspectedContext* getContext(int groupId, int contextId) const;
+  V8DebuggerAgentImpl* enabledDebuggerAgentForGroup(int contextGroupId);
+  V8RuntimeAgentImpl* enabledRuntimeAgentForGroup(int contextGroupId);
+  V8ProfilerAgentImpl* enabledProfilerAgentForGroup(int contextGroupId);
+
+ private:
+  v8::Isolate* m_isolate;
+  V8InspectorClient* m_client;
+  std::unique_ptr<V8Debugger> m_debugger;
+  v8::Global<v8::Context> m_regexContext;
+  int m_capturingStackTracesCount;
+  unsigned m_lastExceptionId;
+
+  using MuteExceptionsMap = protocol::HashMap<int, int>;
+  MuteExceptionsMap m_muteExceptionsMap;
+
+  using ContextsByGroupMap =
+      protocol::HashMap<int, std::unique_ptr<ContextByIdMap>>;
+  ContextsByGroupMap m_contexts;
+
+  using SessionMap = protocol::HashMap<int, V8InspectorSessionImpl*>;
+  SessionMap m_sessions;
+
+  using ConsoleStorageMap =
+      protocol::HashMap<int, std::unique_ptr<V8ConsoleMessageStorage>>;
+  ConsoleStorageMap m_consoleStorageMap;
+
+  DISALLOW_COPY_AND_ASSIGN(V8InspectorImpl);
+};
+
+}  // namespace v8_inspector
+
+#endif  // V8_INSPECTOR_V8INSPECTORIMPL_H_
diff --git a/src/inspector/v8-inspector-session-impl.cc b/src/inspector/v8-inspector-session-impl.cc
new file mode 100644
index 0000000..c3d3f48
--- /dev/null
+++ b/src/inspector/v8-inspector-session-impl.cc
@@ -0,0 +1,417 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/inspector/v8-inspector-session-impl.h"
+
+#include "src/inspector/injected-script.h"
+#include "src/inspector/inspected-context.h"
+#include "src/inspector/protocol/Protocol.h"
+#include "src/inspector/remote-object-id.h"
+#include "src/inspector/search-util.h"
+#include "src/inspector/string-util.h"
+#include "src/inspector/v8-console-agent-impl.h"
+#include "src/inspector/v8-debugger-agent-impl.h"
+#include "src/inspector/v8-debugger.h"
+#include "src/inspector/v8-heap-profiler-agent-impl.h"
+#include "src/inspector/v8-inspector-impl.h"
+#include "src/inspector/v8-profiler-agent-impl.h"
+#include "src/inspector/v8-runtime-agent-impl.h"
+#include "src/inspector/v8-schema-agent-impl.h"
+
+namespace v8_inspector {
+
+// static
+bool V8InspectorSession::canDispatchMethod(const StringView& method) {
+  return stringViewStartsWith(method,
+                              protocol::Runtime::Metainfo::commandPrefix) ||
+         stringViewStartsWith(method,
+                              protocol::Debugger::Metainfo::commandPrefix) ||
+         stringViewStartsWith(method,
+                              protocol::Profiler::Metainfo::commandPrefix) ||
+         stringViewStartsWith(
+             method, protocol::HeapProfiler::Metainfo::commandPrefix) ||
+         stringViewStartsWith(method,
+                              protocol::Console::Metainfo::commandPrefix) ||
+         stringViewStartsWith(method,
+                              protocol::Schema::Metainfo::commandPrefix);
+}
+
+std::unique_ptr<V8InspectorSessionImpl> V8InspectorSessionImpl::create(
+    V8InspectorImpl* inspector, int contextGroupId,
+    V8Inspector::Channel* channel, const StringView& state) {
+  return wrapUnique(
+      new V8InspectorSessionImpl(inspector, contextGroupId, channel, state));
+}
+
+V8InspectorSessionImpl::V8InspectorSessionImpl(V8InspectorImpl* inspector,
+                                               int contextGroupId,
+                                               V8Inspector::Channel* channel,
+                                               const StringView& savedState)
+    : m_contextGroupId(contextGroupId),
+      m_inspector(inspector),
+      m_channel(channel),
+      m_customObjectFormatterEnabled(false),
+      m_dispatcher(this),
+      m_state(nullptr),
+      m_runtimeAgent(nullptr),
+      m_debuggerAgent(nullptr),
+      m_heapProfilerAgent(nullptr),
+      m_profilerAgent(nullptr),
+      m_consoleAgent(nullptr),
+      m_schemaAgent(nullptr) {
+  if (savedState.length()) {
+    std::unique_ptr<protocol::Value> state =
+        protocol::parseJSON(toString16(savedState));
+    if (state) m_state = protocol::DictionaryValue::cast(std::move(state));
+    if (!m_state) m_state = protocol::DictionaryValue::create();
+  } else {
+    m_state = protocol::DictionaryValue::create();
+  }
+
+  m_runtimeAgent = wrapUnique(new V8RuntimeAgentImpl(
+      this, this, agentState(protocol::Runtime::Metainfo::domainName)));
+  protocol::Runtime::Dispatcher::wire(&m_dispatcher, m_runtimeAgent.get());
+
+  m_debuggerAgent = wrapUnique(new V8DebuggerAgentImpl(
+      this, this, agentState(protocol::Debugger::Metainfo::domainName)));
+  protocol::Debugger::Dispatcher::wire(&m_dispatcher, m_debuggerAgent.get());
+
+  m_profilerAgent = wrapUnique(new V8ProfilerAgentImpl(
+      this, this, agentState(protocol::Profiler::Metainfo::domainName)));
+  protocol::Profiler::Dispatcher::wire(&m_dispatcher, m_profilerAgent.get());
+
+  m_heapProfilerAgent = wrapUnique(new V8HeapProfilerAgentImpl(
+      this, this, agentState(protocol::HeapProfiler::Metainfo::domainName)));
+  protocol::HeapProfiler::Dispatcher::wire(&m_dispatcher,
+                                           m_heapProfilerAgent.get());
+
+  m_consoleAgent = wrapUnique(new V8ConsoleAgentImpl(
+      this, this, agentState(protocol::Console::Metainfo::domainName)));
+  protocol::Console::Dispatcher::wire(&m_dispatcher, m_consoleAgent.get());
+
+  m_schemaAgent = wrapUnique(new V8SchemaAgentImpl(
+      this, this, agentState(protocol::Schema::Metainfo::domainName)));
+  protocol::Schema::Dispatcher::wire(&m_dispatcher, m_schemaAgent.get());
+
+  if (savedState.length()) {
+    m_runtimeAgent->restore();
+    m_debuggerAgent->restore();
+    m_heapProfilerAgent->restore();
+    m_profilerAgent->restore();
+    m_consoleAgent->restore();
+  }
+}
+
+V8InspectorSessionImpl::~V8InspectorSessionImpl() {
+  ErrorString errorString;
+  m_consoleAgent->disable(&errorString);
+  m_profilerAgent->disable(&errorString);
+  m_heapProfilerAgent->disable(&errorString);
+  m_debuggerAgent->disable(&errorString);
+  m_runtimeAgent->disable(&errorString);
+
+  discardInjectedScripts();
+  m_inspector->disconnect(this);
+}
+
+protocol::DictionaryValue* V8InspectorSessionImpl::agentState(
+    const String16& name) {
+  protocol::DictionaryValue* state = m_state->getObject(name);
+  if (!state) {
+    std::unique_ptr<protocol::DictionaryValue> newState =
+        protocol::DictionaryValue::create();
+    state = newState.get();
+    m_state->setObject(name, std::move(newState));
+  }
+  return state;
+}
+
+void V8InspectorSessionImpl::sendProtocolResponse(int callId,
+                                                  const String16& message) {
+  m_channel->sendProtocolResponse(callId, toStringView(message));
+}
+
+void V8InspectorSessionImpl::sendProtocolNotification(const String16& message) {
+  m_channel->sendProtocolNotification(toStringView(message));
+}
+
+void V8InspectorSessionImpl::flushProtocolNotifications() {
+  m_channel->flushProtocolNotifications();
+}
+
+void V8InspectorSessionImpl::reset() {
+  m_debuggerAgent->reset();
+  m_runtimeAgent->reset();
+  discardInjectedScripts();
+}
+
+void V8InspectorSessionImpl::discardInjectedScripts() {
+  m_inspectedObjects.clear();
+  const V8InspectorImpl::ContextByIdMap* contexts =
+      m_inspector->contextGroup(m_contextGroupId);
+  if (!contexts) return;
+
+  std::vector<int> keys;
+  keys.reserve(contexts->size());
+  for (auto& idContext : *contexts) keys.push_back(idContext.first);
+  for (auto& key : keys) {
+    contexts = m_inspector->contextGroup(m_contextGroupId);
+    if (!contexts) continue;
+    auto contextIt = contexts->find(key);
+    if (contextIt != contexts->end())
+      contextIt->second
+          ->discardInjectedScript();  // This may destroy some contexts.
+  }
+}
+
+InjectedScript* V8InspectorSessionImpl::findInjectedScript(
+    ErrorString* errorString, int contextId) {
+  if (!contextId) {
+    *errorString = "Cannot find context with specified id";
+    return nullptr;
+  }
+
+  const V8InspectorImpl::ContextByIdMap* contexts =
+      m_inspector->contextGroup(m_contextGroupId);
+  if (!contexts) {
+    *errorString = "Cannot find context with specified id";
+    return nullptr;
+  }
+
+  auto contextsIt = contexts->find(contextId);
+  if (contextsIt == contexts->end()) {
+    *errorString = "Cannot find context with specified id";
+    return nullptr;
+  }
+
+  const std::unique_ptr<InspectedContext>& context = contextsIt->second;
+  if (!context->getInjectedScript()) {
+    if (!context->createInjectedScript()) {
+      *errorString = "Cannot access specified execution context";
+      return nullptr;
+    }
+    if (m_customObjectFormatterEnabled)
+      context->getInjectedScript()->setCustomObjectFormatterEnabled(true);
+  }
+  return context->getInjectedScript();
+}
+
+InjectedScript* V8InspectorSessionImpl::findInjectedScript(
+    ErrorString* errorString, RemoteObjectIdBase* objectId) {
+  return objectId ? findInjectedScript(errorString, objectId->contextId())
+                  : nullptr;
+}
+
+void V8InspectorSessionImpl::releaseObjectGroup(const StringView& objectGroup) {
+  releaseObjectGroup(toString16(objectGroup));
+}
+
+void V8InspectorSessionImpl::releaseObjectGroup(const String16& objectGroup) {
+  const V8InspectorImpl::ContextByIdMap* contexts =
+      m_inspector->contextGroup(m_contextGroupId);
+  if (!contexts) return;
+
+  std::vector<int> keys;
+  for (auto& idContext : *contexts) keys.push_back(idContext.first);
+  for (auto& key : keys) {
+    contexts = m_inspector->contextGroup(m_contextGroupId);
+    if (!contexts) continue;
+    auto contextsIt = contexts->find(key);
+    if (contextsIt == contexts->end()) continue;
+    InjectedScript* injectedScript = contextsIt->second->getInjectedScript();
+    if (injectedScript)
+      injectedScript->releaseObjectGroup(
+          objectGroup);  // This may destroy some contexts.
+  }
+}
+
+bool V8InspectorSessionImpl::unwrapObject(
+    std::unique_ptr<StringBuffer>* error, const StringView& objectId,
+    v8::Local<v8::Value>* object, v8::Local<v8::Context>* context,
+    std::unique_ptr<StringBuffer>* objectGroup) {
+  ErrorString errorString;
+  String16 objectGroupString;
+  bool result =
+      unwrapObject(&errorString, toString16(objectId), object, context,
+                   objectGroup ? &objectGroupString : nullptr);
+  if (error) *error = StringBufferImpl::adopt(errorString);
+  if (objectGroup) *objectGroup = StringBufferImpl::adopt(objectGroupString);
+  return result;
+}
+
+bool V8InspectorSessionImpl::unwrapObject(ErrorString* errorString,
+                                          const String16& objectId,
+                                          v8::Local<v8::Value>* object,
+                                          v8::Local<v8::Context>* context,
+                                          String16* objectGroup) {
+  std::unique_ptr<RemoteObjectId> remoteId =
+      RemoteObjectId::parse(errorString, objectId);
+  if (!remoteId) return false;
+  InjectedScript* injectedScript =
+      findInjectedScript(errorString, remoteId.get());
+  if (!injectedScript) return false;
+  if (!injectedScript->findObject(errorString, *remoteId, object)) return false;
+  *context = injectedScript->context()->context();
+  if (objectGroup) *objectGroup = injectedScript->objectGroupName(*remoteId);
+  return true;
+}
+
+std::unique_ptr<protocol::Runtime::API::RemoteObject>
+V8InspectorSessionImpl::wrapObject(v8::Local<v8::Context> context,
+                                   v8::Local<v8::Value> value,
+                                   const StringView& groupName) {
+  return wrapObject(context, value, toString16(groupName), false);
+}
+
+std::unique_ptr<protocol::Runtime::RemoteObject>
+V8InspectorSessionImpl::wrapObject(v8::Local<v8::Context> context,
+                                   v8::Local<v8::Value> value,
+                                   const String16& groupName,
+                                   bool generatePreview) {
+  ErrorString errorString;
+  InjectedScript* injectedScript =
+      findInjectedScript(&errorString, V8Debugger::contextId(context));
+  if (!injectedScript) return nullptr;
+  return injectedScript->wrapObject(&errorString, value, groupName, false,
+                                    generatePreview);
+}
+
+std::unique_ptr<protocol::Runtime::RemoteObject>
+V8InspectorSessionImpl::wrapTable(v8::Local<v8::Context> context,
+                                  v8::Local<v8::Value> table,
+                                  v8::Local<v8::Value> columns) {
+  ErrorString errorString;
+  InjectedScript* injectedScript =
+      findInjectedScript(&errorString, V8Debugger::contextId(context));
+  if (!injectedScript) return nullptr;
+  return injectedScript->wrapTable(table, columns);
+}
+
+void V8InspectorSessionImpl::setCustomObjectFormatterEnabled(bool enabled) {
+  m_customObjectFormatterEnabled = enabled;
+  const V8InspectorImpl::ContextByIdMap* contexts =
+      m_inspector->contextGroup(m_contextGroupId);
+  if (!contexts) return;
+  for (auto& idContext : *contexts) {
+    InjectedScript* injectedScript = idContext.second->getInjectedScript();
+    if (injectedScript)
+      injectedScript->setCustomObjectFormatterEnabled(enabled);
+  }
+}
+
+void V8InspectorSessionImpl::reportAllContexts(V8RuntimeAgentImpl* agent) {
+  const V8InspectorImpl::ContextByIdMap* contexts =
+      m_inspector->contextGroup(m_contextGroupId);
+  if (!contexts) return;
+  for (auto& idContext : *contexts)
+    agent->reportExecutionContextCreated(idContext.second.get());
+}
+
+void V8InspectorSessionImpl::dispatchProtocolMessage(
+    const StringView& message) {
+  m_dispatcher.dispatch(protocol::parseJSON(message));
+}
+
+std::unique_ptr<StringBuffer> V8InspectorSessionImpl::stateJSON() {
+  String16 json = m_state->toJSONString();
+  return StringBufferImpl::adopt(json);
+}
+
+std::vector<std::unique_ptr<protocol::Schema::API::Domain>>
+V8InspectorSessionImpl::supportedDomains() {
+  std::vector<std::unique_ptr<protocol::Schema::Domain>> domains =
+      supportedDomainsImpl();
+  std::vector<std::unique_ptr<protocol::Schema::API::Domain>> result;
+  for (size_t i = 0; i < domains.size(); ++i)
+    result.push_back(std::move(domains[i]));
+  return result;
+}
+
+std::vector<std::unique_ptr<protocol::Schema::Domain>>
+V8InspectorSessionImpl::supportedDomainsImpl() {
+  std::vector<std::unique_ptr<protocol::Schema::Domain>> result;
+  result.push_back(protocol::Schema::Domain::create()
+                       .setName(protocol::Runtime::Metainfo::domainName)
+                       .setVersion(protocol::Runtime::Metainfo::version)
+                       .build());
+  result.push_back(protocol::Schema::Domain::create()
+                       .setName(protocol::Debugger::Metainfo::domainName)
+                       .setVersion(protocol::Debugger::Metainfo::version)
+                       .build());
+  result.push_back(protocol::Schema::Domain::create()
+                       .setName(protocol::Profiler::Metainfo::domainName)
+                       .setVersion(protocol::Profiler::Metainfo::version)
+                       .build());
+  result.push_back(protocol::Schema::Domain::create()
+                       .setName(protocol::HeapProfiler::Metainfo::domainName)
+                       .setVersion(protocol::HeapProfiler::Metainfo::version)
+                       .build());
+  result.push_back(protocol::Schema::Domain::create()
+                       .setName(protocol::Schema::Metainfo::domainName)
+                       .setVersion(protocol::Schema::Metainfo::version)
+                       .build());
+  return result;
+}
+
+void V8InspectorSessionImpl::addInspectedObject(
+    std::unique_ptr<V8InspectorSession::Inspectable> inspectable) {
+  m_inspectedObjects.insert(m_inspectedObjects.begin(), std::move(inspectable));
+  if (m_inspectedObjects.size() > kInspectedObjectBufferSize)
+    m_inspectedObjects.resize(kInspectedObjectBufferSize);
+}
+
+V8InspectorSession::Inspectable* V8InspectorSessionImpl::inspectedObject(
+    unsigned num) {
+  if (num >= m_inspectedObjects.size()) return nullptr;
+  return m_inspectedObjects[num].get();
+}
+
+void V8InspectorSessionImpl::schedulePauseOnNextStatement(
+    const StringView& breakReason, const StringView& breakDetails) {
+  m_debuggerAgent->schedulePauseOnNextStatement(
+      toString16(breakReason),
+      protocol::DictionaryValue::cast(protocol::parseJSON(breakDetails)));
+}
+
+void V8InspectorSessionImpl::cancelPauseOnNextStatement() {
+  m_debuggerAgent->cancelPauseOnNextStatement();
+}
+
+void V8InspectorSessionImpl::breakProgram(const StringView& breakReason,
+                                          const StringView& breakDetails) {
+  m_debuggerAgent->breakProgram(
+      toString16(breakReason),
+      protocol::DictionaryValue::cast(protocol::parseJSON(breakDetails)));
+}
+
+void V8InspectorSessionImpl::setSkipAllPauses(bool skip) {
+  ErrorString errorString;
+  m_debuggerAgent->setSkipAllPauses(&errorString, skip);
+}
+
+void V8InspectorSessionImpl::resume() {
+  ErrorString errorString;
+  m_debuggerAgent->resume(&errorString);
+}
+
+void V8InspectorSessionImpl::stepOver() {
+  ErrorString errorString;
+  m_debuggerAgent->stepOver(&errorString);
+}
+
+std::vector<std::unique_ptr<protocol::Debugger::API::SearchMatch>>
+V8InspectorSessionImpl::searchInTextByLines(const StringView& text,
+                                            const StringView& query,
+                                            bool caseSensitive, bool isRegex) {
+  // TODO(dgozman): search may operate on StringView and avoid copying |text|.
+  std::vector<std::unique_ptr<protocol::Debugger::SearchMatch>> matches =
+      searchInTextByLinesImpl(this, toString16(text), toString16(query),
+                              caseSensitive, isRegex);
+  std::vector<std::unique_ptr<protocol::Debugger::API::SearchMatch>> result;
+  for (size_t i = 0; i < matches.size(); ++i)
+    result.push_back(std::move(matches[i]));
+  return result;
+}
+
+}  // namespace v8_inspector
diff --git a/src/inspector/v8-inspector-session-impl.h b/src/inspector/v8-inspector-session-impl.h
new file mode 100644
index 0000000..e84e8c9
--- /dev/null
+++ b/src/inspector/v8-inspector-session-impl.h
@@ -0,0 +1,126 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_INSPECTOR_V8INSPECTORSESSIONIMPL_H_
+#define V8_INSPECTOR_V8INSPECTORSESSIONIMPL_H_
+
+#include <vector>
+
+#include "src/base/macros.h"
+#include "src/inspector/protocol/Forward.h"
+#include "src/inspector/protocol/Runtime.h"
+#include "src/inspector/protocol/Schema.h"
+
+#include "include/v8-inspector.h"
+
+namespace v8_inspector {
+
+class InjectedScript;
+class RemoteObjectIdBase;
+class V8ConsoleAgentImpl;
+class V8DebuggerAgentImpl;
+class V8InspectorImpl;
+class V8HeapProfilerAgentImpl;
+class V8ProfilerAgentImpl;
+class V8RuntimeAgentImpl;
+class V8SchemaAgentImpl;
+
+using protocol::ErrorString;
+
+class V8InspectorSessionImpl : public V8InspectorSession,
+                               public protocol::FrontendChannel {
+ public:
+  static std::unique_ptr<V8InspectorSessionImpl> create(
+      V8InspectorImpl*, int contextGroupId, V8Inspector::Channel*,
+      const StringView& state);
+  ~V8InspectorSessionImpl();
+
+  V8InspectorImpl* inspector() const { return m_inspector; }
+  V8ConsoleAgentImpl* consoleAgent() { return m_consoleAgent.get(); }
+  V8DebuggerAgentImpl* debuggerAgent() { return m_debuggerAgent.get(); }
+  V8SchemaAgentImpl* schemaAgent() { return m_schemaAgent.get(); }
+  V8ProfilerAgentImpl* profilerAgent() { return m_profilerAgent.get(); }
+  V8RuntimeAgentImpl* runtimeAgent() { return m_runtimeAgent.get(); }
+  int contextGroupId() const { return m_contextGroupId; }
+
+  InjectedScript* findInjectedScript(ErrorString*, int contextId);
+  InjectedScript* findInjectedScript(ErrorString*, RemoteObjectIdBase*);
+  void reset();
+  void discardInjectedScripts();
+  void reportAllContexts(V8RuntimeAgentImpl*);
+  void setCustomObjectFormatterEnabled(bool);
+  std::unique_ptr<protocol::Runtime::RemoteObject> wrapObject(
+      v8::Local<v8::Context>, v8::Local<v8::Value>, const String16& groupName,
+      bool generatePreview);
+  std::unique_ptr<protocol::Runtime::RemoteObject> wrapTable(
+      v8::Local<v8::Context>, v8::Local<v8::Value> table,
+      v8::Local<v8::Value> columns);
+  std::vector<std::unique_ptr<protocol::Schema::Domain>> supportedDomainsImpl();
+  bool unwrapObject(ErrorString*, const String16& objectId,
+                    v8::Local<v8::Value>*, v8::Local<v8::Context>*,
+                    String16* objectGroup);
+  void releaseObjectGroup(const String16& objectGroup);
+
+  // V8InspectorSession implementation.
+  void dispatchProtocolMessage(const StringView& message) override;
+  std::unique_ptr<StringBuffer> stateJSON() override;
+  std::vector<std::unique_ptr<protocol::Schema::API::Domain>> supportedDomains()
+      override;
+  void addInspectedObject(
+      std::unique_ptr<V8InspectorSession::Inspectable>) override;
+  void schedulePauseOnNextStatement(const StringView& breakReason,
+                                    const StringView& breakDetails) override;
+  void cancelPauseOnNextStatement() override;
+  void breakProgram(const StringView& breakReason,
+                    const StringView& breakDetails) override;
+  void setSkipAllPauses(bool) override;
+  void resume() override;
+  void stepOver() override;
+  std::vector<std::unique_ptr<protocol::Debugger::API::SearchMatch>>
+  searchInTextByLines(const StringView& text, const StringView& query,
+                      bool caseSensitive, bool isRegex) override;
+  void releaseObjectGroup(const StringView& objectGroup) override;
+  bool unwrapObject(std::unique_ptr<StringBuffer>*, const StringView& objectId,
+                    v8::Local<v8::Value>*, v8::Local<v8::Context>*,
+                    std::unique_ptr<StringBuffer>* objectGroup) override;
+  std::unique_ptr<protocol::Runtime::API::RemoteObject> wrapObject(
+      v8::Local<v8::Context>, v8::Local<v8::Value>,
+      const StringView& groupName) override;
+
+  V8InspectorSession::Inspectable* inspectedObject(unsigned num);
+  static const unsigned kInspectedObjectBufferSize = 5;
+
+ private:
+  V8InspectorSessionImpl(V8InspectorImpl*, int contextGroupId,
+                         V8Inspector::Channel*, const StringView& state);
+  protocol::DictionaryValue* agentState(const String16& name);
+
+  // protocol::FrontendChannel implementation.
+  void sendProtocolResponse(int callId, const String16& message) override;
+  void sendProtocolNotification(const String16& message) override;
+  void flushProtocolNotifications() override;
+
+  int m_contextGroupId;
+  V8InspectorImpl* m_inspector;
+  V8Inspector::Channel* m_channel;
+  bool m_customObjectFormatterEnabled;
+
+  protocol::UberDispatcher m_dispatcher;
+  std::unique_ptr<protocol::DictionaryValue> m_state;
+
+  std::unique_ptr<V8RuntimeAgentImpl> m_runtimeAgent;
+  std::unique_ptr<V8DebuggerAgentImpl> m_debuggerAgent;
+  std::unique_ptr<V8HeapProfilerAgentImpl> m_heapProfilerAgent;
+  std::unique_ptr<V8ProfilerAgentImpl> m_profilerAgent;
+  std::unique_ptr<V8ConsoleAgentImpl> m_consoleAgent;
+  std::unique_ptr<V8SchemaAgentImpl> m_schemaAgent;
+  std::vector<std::unique_ptr<V8InspectorSession::Inspectable>>
+      m_inspectedObjects;
+
+  DISALLOW_COPY_AND_ASSIGN(V8InspectorSessionImpl);
+};
+
+}  // namespace v8_inspector
+
+#endif  // V8_INSPECTOR_V8INSPECTORSESSIONIMPL_H_
diff --git a/src/inspector/v8-internal-value-type.cc b/src/inspector/v8-internal-value-type.cc
new file mode 100644
index 0000000..cde8bc9
--- /dev/null
+++ b/src/inspector/v8-internal-value-type.cc
@@ -0,0 +1,77 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/inspector/v8-internal-value-type.h"
+
+#include "src/inspector/protocol-platform.h"
+#include "src/inspector/string-util.h"
+
+namespace v8_inspector {
+
+namespace {
+
+v8::Local<v8::Private> internalSubtypePrivate(v8::Isolate* isolate) {
+  return v8::Private::ForApi(
+      isolate,
+      toV8StringInternalized(isolate, "V8InternalType#internalSubtype"));
+}
+
+v8::Local<v8::String> subtypeForInternalType(v8::Isolate* isolate,
+                                             V8InternalValueType type) {
+  switch (type) {
+    case V8InternalValueType::kEntry:
+      return toV8StringInternalized(isolate, "internal#entry");
+    case V8InternalValueType::kLocation:
+      return toV8StringInternalized(isolate, "internal#location");
+    case V8InternalValueType::kScope:
+      return toV8StringInternalized(isolate, "internal#scope");
+    case V8InternalValueType::kScopeList:
+      return toV8StringInternalized(isolate, "internal#scopeList");
+  }
+  UNREACHABLE();
+  return v8::Local<v8::String>();
+}
+
+}  // namespace
+
+bool markAsInternal(v8::Local<v8::Context> context,
+                    v8::Local<v8::Object> object, V8InternalValueType type) {
+  v8::Isolate* isolate = context->GetIsolate();
+  v8::Local<v8::Private> privateValue = internalSubtypePrivate(isolate);
+  v8::Local<v8::String> subtype = subtypeForInternalType(isolate, type);
+  return object->SetPrivate(context, privateValue, subtype).FromMaybe(false);
+}
+
+bool markArrayEntriesAsInternal(v8::Local<v8::Context> context,
+                                v8::Local<v8::Array> array,
+                                V8InternalValueType type) {
+  v8::Isolate* isolate = context->GetIsolate();
+  v8::Local<v8::Private> privateValue = internalSubtypePrivate(isolate);
+  v8::Local<v8::String> subtype = subtypeForInternalType(isolate, type);
+  for (uint32_t i = 0; i < array->Length(); ++i) {
+    v8::Local<v8::Value> entry;
+    if (!array->Get(context, i).ToLocal(&entry) || !entry->IsObject())
+      return false;
+    if (!entry.As<v8::Object>()
+             ->SetPrivate(context, privateValue, subtype)
+             .FromMaybe(false))
+      return false;
+  }
+  return true;
+}
+
+v8::Local<v8::Value> v8InternalValueTypeFrom(v8::Local<v8::Context> context,
+                                             v8::Local<v8::Object> object) {
+  v8::Isolate* isolate = context->GetIsolate();
+  v8::Local<v8::Private> privateValue = internalSubtypePrivate(isolate);
+  if (!object->HasPrivate(context, privateValue).FromMaybe(false))
+    return v8::Null(isolate);
+  v8::Local<v8::Value> subtypeValue;
+  if (!object->GetPrivate(context, privateValue).ToLocal(&subtypeValue) ||
+      !subtypeValue->IsString())
+    return v8::Null(isolate);
+  return subtypeValue;
+}
+
+}  // namespace v8_inspector
diff --git a/src/inspector/v8-internal-value-type.h b/src/inspector/v8-internal-value-type.h
new file mode 100644
index 0000000..e648a0d
--- /dev/null
+++ b/src/inspector/v8-internal-value-type.h
@@ -0,0 +1,23 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_INSPECTOR_V8INTERNALVALUETYPE_H_
+#define V8_INSPECTOR_V8INTERNALVALUETYPE_H_
+
+#include "include/v8.h"
+
+namespace v8_inspector {
+
+enum class V8InternalValueType { kEntry, kLocation, kScope, kScopeList };
+
+bool markAsInternal(v8::Local<v8::Context>, v8::Local<v8::Object>,
+                    V8InternalValueType);
+bool markArrayEntriesAsInternal(v8::Local<v8::Context>, v8::Local<v8::Array>,
+                                V8InternalValueType);
+v8::Local<v8::Value> v8InternalValueTypeFrom(v8::Local<v8::Context>,
+                                             v8::Local<v8::Object>);
+
+}  // namespace v8_inspector
+
+#endif  // V8_INSPECTOR_V8INTERNALVALUETYPE_H_
diff --git a/src/inspector/v8-profiler-agent-impl.cc b/src/inspector/v8-profiler-agent-impl.cc
new file mode 100644
index 0000000..0511ca3
--- /dev/null
+++ b/src/inspector/v8-profiler-agent-impl.cc
@@ -0,0 +1,321 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/inspector/v8-profiler-agent-impl.h"
+
+#include <vector>
+
+#include "src/base/atomicops.h"
+#include "src/inspector/protocol/Protocol.h"
+#include "src/inspector/string-util.h"
+#include "src/inspector/v8-debugger.h"
+#include "src/inspector/v8-inspector-impl.h"
+#include "src/inspector/v8-inspector-session-impl.h"
+#include "src/inspector/v8-stack-trace-impl.h"
+
+#include "include/v8-profiler.h"
+
+namespace v8_inspector {
+
+namespace ProfilerAgentState {
+static const char samplingInterval[] = "samplingInterval";
+static const char userInitiatedProfiling[] = "userInitiatedProfiling";
+static const char profilerEnabled[] = "profilerEnabled";
+}
+
+namespace {
+
+std::unique_ptr<protocol::Array<protocol::Profiler::PositionTickInfo>>
+buildInspectorObjectForPositionTicks(const v8::CpuProfileNode* node) {
+  unsigned lineCount = node->GetHitLineCount();
+  if (!lineCount) return nullptr;
+  auto array = protocol::Array<protocol::Profiler::PositionTickInfo>::create();
+  std::vector<v8::CpuProfileNode::LineTick> entries(lineCount);
+  if (node->GetLineTicks(&entries[0], lineCount)) {
+    for (unsigned i = 0; i < lineCount; i++) {
+      std::unique_ptr<protocol::Profiler::PositionTickInfo> line =
+          protocol::Profiler::PositionTickInfo::create()
+              .setLine(entries[i].line)
+              .setTicks(entries[i].hit_count)
+              .build();
+      array->addItem(std::move(line));
+    }
+  }
+  return array;
+}
+
+std::unique_ptr<protocol::Profiler::ProfileNode> buildInspectorObjectFor(
+    v8::Isolate* isolate, const v8::CpuProfileNode* node) {
+  v8::HandleScope handleScope(isolate);
+  auto callFrame =
+      protocol::Runtime::CallFrame::create()
+          .setFunctionName(toProtocolString(node->GetFunctionName()))
+          .setScriptId(String16::fromInteger(node->GetScriptId()))
+          .setUrl(toProtocolString(node->GetScriptResourceName()))
+          .setLineNumber(node->GetLineNumber() - 1)
+          .setColumnNumber(node->GetColumnNumber() - 1)
+          .build();
+  auto result = protocol::Profiler::ProfileNode::create()
+                    .setCallFrame(std::move(callFrame))
+                    .setHitCount(node->GetHitCount())
+                    .setId(node->GetNodeId())
+                    .build();
+
+  const int childrenCount = node->GetChildrenCount();
+  if (childrenCount) {
+    auto children = protocol::Array<int>::create();
+    for (int i = 0; i < childrenCount; i++)
+      children->addItem(node->GetChild(i)->GetNodeId());
+    result->setChildren(std::move(children));
+  }
+
+  const char* deoptReason = node->GetBailoutReason();
+  if (deoptReason && deoptReason[0] && strcmp(deoptReason, "no reason"))
+    result->setDeoptReason(deoptReason);
+
+  auto positionTicks = buildInspectorObjectForPositionTicks(node);
+  if (positionTicks) result->setPositionTicks(std::move(positionTicks));
+
+  return result;
+}
+
+std::unique_ptr<protocol::Array<int>> buildInspectorObjectForSamples(
+    v8::CpuProfile* v8profile) {
+  auto array = protocol::Array<int>::create();
+  int count = v8profile->GetSamplesCount();
+  for (int i = 0; i < count; i++)
+    array->addItem(v8profile->GetSample(i)->GetNodeId());
+  return array;
+}
+
+std::unique_ptr<protocol::Array<int>> buildInspectorObjectForTimestamps(
+    v8::CpuProfile* v8profile) {
+  auto array = protocol::Array<int>::create();
+  int count = v8profile->GetSamplesCount();
+  uint64_t lastTime = v8profile->GetStartTime();
+  for (int i = 0; i < count; i++) {
+    uint64_t ts = v8profile->GetSampleTimestamp(i);
+    array->addItem(static_cast<int>(ts - lastTime));
+    lastTime = ts;
+  }
+  return array;
+}
+
+void flattenNodesTree(v8::Isolate* isolate, const v8::CpuProfileNode* node,
+                      protocol::Array<protocol::Profiler::ProfileNode>* list) {
+  list->addItem(buildInspectorObjectFor(isolate, node));
+  const int childrenCount = node->GetChildrenCount();
+  for (int i = 0; i < childrenCount; i++)
+    flattenNodesTree(isolate, node->GetChild(i), list);
+}
+
+std::unique_ptr<protocol::Profiler::Profile> createCPUProfile(
+    v8::Isolate* isolate, v8::CpuProfile* v8profile) {
+  auto nodes = protocol::Array<protocol::Profiler::ProfileNode>::create();
+  flattenNodesTree(isolate, v8profile->GetTopDownRoot(), nodes.get());
+  return protocol::Profiler::Profile::create()
+      .setNodes(std::move(nodes))
+      .setStartTime(static_cast<double>(v8profile->GetStartTime()))
+      .setEndTime(static_cast<double>(v8profile->GetEndTime()))
+      .setSamples(buildInspectorObjectForSamples(v8profile))
+      .setTimeDeltas(buildInspectorObjectForTimestamps(v8profile))
+      .build();
+}
+
+std::unique_ptr<protocol::Debugger::Location> currentDebugLocation(
+    V8InspectorImpl* inspector) {
+  std::unique_ptr<V8StackTraceImpl> callStack =
+      inspector->debugger()->captureStackTrace(false /* fullStack */);
+  auto location = protocol::Debugger::Location::create()
+                      .setScriptId(toString16(callStack->topScriptId()))
+                      .setLineNumber(callStack->topLineNumber())
+                      .build();
+  location->setColumnNumber(callStack->topColumnNumber());
+  return location;
+}
+
+volatile int s_lastProfileId = 0;
+
+}  // namespace
+
+class V8ProfilerAgentImpl::ProfileDescriptor {
+ public:
+  ProfileDescriptor(const String16& id, const String16& title)
+      : m_id(id), m_title(title) {}
+  String16 m_id;
+  String16 m_title;
+};
+
+V8ProfilerAgentImpl::V8ProfilerAgentImpl(
+    V8InspectorSessionImpl* session, protocol::FrontendChannel* frontendChannel,
+    protocol::DictionaryValue* state)
+    : m_session(session),
+      m_isolate(m_session->inspector()->isolate()),
+      m_profiler(nullptr),
+      m_state(state),
+      m_frontend(frontendChannel),
+      m_enabled(false),
+      m_recordingCPUProfile(false) {}
+
+V8ProfilerAgentImpl::~V8ProfilerAgentImpl() {
+  if (m_profiler) m_profiler->Dispose();
+}
+
+void V8ProfilerAgentImpl::consoleProfile(const String16& title) {
+  if (!m_enabled) return;
+  String16 id = nextProfileId();
+  m_startedProfiles.push_back(ProfileDescriptor(id, title));
+  startProfiling(id);
+  m_frontend.consoleProfileStarted(
+      id, currentDebugLocation(m_session->inspector()), title);
+}
+
+void V8ProfilerAgentImpl::consoleProfileEnd(const String16& title) {
+  if (!m_enabled) return;
+  String16 id;
+  String16 resolvedTitle;
+  // Take last started profile if no title was passed.
+  if (title.isEmpty()) {
+    if (m_startedProfiles.empty()) return;
+    id = m_startedProfiles.back().m_id;
+    resolvedTitle = m_startedProfiles.back().m_title;
+    m_startedProfiles.pop_back();
+  } else {
+    for (size_t i = 0; i < m_startedProfiles.size(); i++) {
+      if (m_startedProfiles[i].m_title == title) {
+        resolvedTitle = title;
+        id = m_startedProfiles[i].m_id;
+        m_startedProfiles.erase(m_startedProfiles.begin() + i);
+        break;
+      }
+    }
+    if (id.isEmpty()) return;
+  }
+  std::unique_ptr<protocol::Profiler::Profile> profile =
+      stopProfiling(id, true);
+  if (!profile) return;
+  std::unique_ptr<protocol::Debugger::Location> location =
+      currentDebugLocation(m_session->inspector());
+  m_frontend.consoleProfileFinished(id, std::move(location), std::move(profile),
+                                    resolvedTitle);
+}
+
+void V8ProfilerAgentImpl::enable(ErrorString*) {
+  if (m_enabled) return;
+  m_enabled = true;
+  DCHECK(!m_profiler);
+  m_profiler = v8::CpuProfiler::New(m_isolate);
+  m_state->setBoolean(ProfilerAgentState::profilerEnabled, true);
+}
+
+void V8ProfilerAgentImpl::disable(ErrorString* errorString) {
+  if (!m_enabled) return;
+  for (size_t i = m_startedProfiles.size(); i > 0; --i)
+    stopProfiling(m_startedProfiles[i - 1].m_id, false);
+  m_startedProfiles.clear();
+  stop(nullptr, nullptr);
+  m_profiler->Dispose();
+  m_profiler = nullptr;
+  m_enabled = false;
+  m_state->setBoolean(ProfilerAgentState::profilerEnabled, false);
+}
+
+void V8ProfilerAgentImpl::setSamplingInterval(ErrorString* error,
+                                              int interval) {
+  if (m_recordingCPUProfile) {
+    *error = "Cannot change sampling interval when profiling.";
+    return;
+  }
+  m_state->setInteger(ProfilerAgentState::samplingInterval, interval);
+  m_profiler->SetSamplingInterval(interval);
+}
+
+void V8ProfilerAgentImpl::restore() {
+  DCHECK(!m_enabled);
+  if (!m_state->booleanProperty(ProfilerAgentState::profilerEnabled, false))
+    return;
+  m_enabled = true;
+  DCHECK(!m_profiler);
+  m_profiler = v8::CpuProfiler::New(m_isolate);
+  int interval = 0;
+  m_state->getInteger(ProfilerAgentState::samplingInterval, &interval);
+  if (interval) m_profiler->SetSamplingInterval(interval);
+  if (m_state->booleanProperty(ProfilerAgentState::userInitiatedProfiling,
+                               false)) {
+    ErrorString error;
+    start(&error);
+  }
+}
+
+void V8ProfilerAgentImpl::start(ErrorString* error) {
+  if (m_recordingCPUProfile) return;
+  if (!m_enabled) {
+    *error = "Profiler is not enabled";
+    return;
+  }
+  m_recordingCPUProfile = true;
+  m_frontendInitiatedProfileId = nextProfileId();
+  startProfiling(m_frontendInitiatedProfileId);
+  m_state->setBoolean(ProfilerAgentState::userInitiatedProfiling, true);
+}
+
+void V8ProfilerAgentImpl::stop(
+    ErrorString* errorString,
+    std::unique_ptr<protocol::Profiler::Profile>* profile) {
+  if (!m_recordingCPUProfile) {
+    if (errorString) *errorString = "No recording profiles found";
+    return;
+  }
+  m_recordingCPUProfile = false;
+  std::unique_ptr<protocol::Profiler::Profile> cpuProfile =
+      stopProfiling(m_frontendInitiatedProfileId, !!profile);
+  if (profile) {
+    *profile = std::move(cpuProfile);
+    if (!profile->get() && errorString) *errorString = "Profile is not found";
+  }
+  m_frontendInitiatedProfileId = String16();
+  m_state->setBoolean(ProfilerAgentState::userInitiatedProfiling, false);
+}
+
+String16 V8ProfilerAgentImpl::nextProfileId() {
+  return String16::fromInteger(
+      v8::base::NoBarrier_AtomicIncrement(&s_lastProfileId, 1));
+}
+
+void V8ProfilerAgentImpl::startProfiling(const String16& title) {
+  v8::HandleScope handleScope(m_isolate);
+  m_profiler->StartProfiling(toV8String(m_isolate, title), true);
+}
+
+std::unique_ptr<protocol::Profiler::Profile> V8ProfilerAgentImpl::stopProfiling(
+    const String16& title, bool serialize) {
+  v8::HandleScope handleScope(m_isolate);
+  v8::CpuProfile* profile =
+      m_profiler->StopProfiling(toV8String(m_isolate, title));
+  if (!profile) return nullptr;
+  std::unique_ptr<protocol::Profiler::Profile> result;
+  if (serialize) result = createCPUProfile(m_isolate, profile);
+  profile->Delete();
+  return result;
+}
+
+bool V8ProfilerAgentImpl::isRecording() const {
+  return m_recordingCPUProfile || !m_startedProfiles.empty();
+}
+
+bool V8ProfilerAgentImpl::idleStarted() {
+  if (m_profiler) m_profiler->SetIdle(true);
+  return m_profiler;
+}
+
+bool V8ProfilerAgentImpl::idleFinished() {
+  if (m_profiler) m_profiler->SetIdle(false);
+  return m_profiler;
+}
+
+void V8ProfilerAgentImpl::collectSample() {
+  if (m_profiler) m_profiler->CollectSample();
+}
+
+}  // namespace v8_inspector
diff --git a/src/inspector/v8-profiler-agent-impl.h b/src/inspector/v8-profiler-agent-impl.h
new file mode 100644
index 0000000..ee89976
--- /dev/null
+++ b/src/inspector/v8-profiler-agent-impl.h
@@ -0,0 +1,74 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_INSPECTOR_V8PROFILERAGENTIMPL_H_
+#define V8_INSPECTOR_V8PROFILERAGENTIMPL_H_
+
+#include <vector>
+
+#include "src/base/macros.h"
+#include "src/inspector/protocol/Forward.h"
+#include "src/inspector/protocol/Profiler.h"
+
+namespace v8 {
+class CpuProfiler;
+class Isolate;
+}
+
+namespace v8_inspector {
+
+class V8InspectorSessionImpl;
+
+using protocol::ErrorString;
+
+class V8ProfilerAgentImpl : public protocol::Profiler::Backend {
+ public:
+  V8ProfilerAgentImpl(V8InspectorSessionImpl*, protocol::FrontendChannel*,
+                      protocol::DictionaryValue* state);
+  ~V8ProfilerAgentImpl() override;
+
+  bool enabled() const { return m_enabled; }
+  void restore();
+
+  void enable(ErrorString*) override;
+  void disable(ErrorString*) override;
+  void setSamplingInterval(ErrorString*, int) override;
+  void start(ErrorString*) override;
+  void stop(ErrorString*,
+            std::unique_ptr<protocol::Profiler::Profile>*) override;
+
+  void consoleProfile(const String16& title);
+  void consoleProfileEnd(const String16& title);
+
+  bool idleStarted();
+  bool idleFinished();
+
+  void collectSample();
+
+ private:
+  String16 nextProfileId();
+
+  void startProfiling(const String16& title);
+  std::unique_ptr<protocol::Profiler::Profile> stopProfiling(
+      const String16& title, bool serialize);
+
+  bool isRecording() const;
+
+  V8InspectorSessionImpl* m_session;
+  v8::Isolate* m_isolate;
+  v8::CpuProfiler* m_profiler;
+  protocol::DictionaryValue* m_state;
+  protocol::Profiler::Frontend m_frontend;
+  bool m_enabled;
+  bool m_recordingCPUProfile;
+  class ProfileDescriptor;
+  std::vector<ProfileDescriptor> m_startedProfiles;
+  String16 m_frontendInitiatedProfileId;
+
+  DISALLOW_COPY_AND_ASSIGN(V8ProfilerAgentImpl);
+};
+
+}  // namespace v8_inspector
+
+#endif  // V8_INSPECTOR_V8PROFILERAGENTIMPL_H_
diff --git a/src/inspector/v8-regex.cc b/src/inspector/v8-regex.cc
new file mode 100644
index 0000000..47af70d
--- /dev/null
+++ b/src/inspector/v8-regex.cc
@@ -0,0 +1,93 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/inspector/v8-regex.h"
+
+#include <limits.h>
+
+#include "src/inspector/string-util.h"
+#include "src/inspector/v8-inspector-impl.h"
+
+#include "include/v8-inspector.h"
+
+namespace v8_inspector {
+
+V8Regex::V8Regex(V8InspectorImpl* inspector, const String16& pattern,
+                 bool caseSensitive, bool multiline)
+    : m_inspector(inspector) {
+  v8::Isolate* isolate = m_inspector->isolate();
+  v8::HandleScope handleScope(isolate);
+  v8::Local<v8::Context> context = m_inspector->regexContext();
+  v8::Context::Scope contextScope(context);
+  v8::TryCatch tryCatch(isolate);
+
+  unsigned flags = v8::RegExp::kNone;
+  if (!caseSensitive) flags |= v8::RegExp::kIgnoreCase;
+  if (multiline) flags |= v8::RegExp::kMultiline;
+
+  v8::Local<v8::RegExp> regex;
+  if (v8::RegExp::New(context, toV8String(isolate, pattern),
+                      static_cast<v8::RegExp::Flags>(flags))
+          .ToLocal(&regex))
+    m_regex.Reset(isolate, regex);
+  else if (tryCatch.HasCaught())
+    m_errorMessage = toProtocolString(tryCatch.Message()->Get());
+  else
+    m_errorMessage = "Internal error";
+}
+
+int V8Regex::match(const String16& string, int startFrom,
+                   int* matchLength) const {
+  if (matchLength) *matchLength = 0;
+
+  if (m_regex.IsEmpty() || string.isEmpty()) return -1;
+
+  // v8 strings are limited to int.
+  if (string.length() > INT_MAX) return -1;
+
+  v8::Isolate* isolate = m_inspector->isolate();
+  v8::HandleScope handleScope(isolate);
+  v8::Local<v8::Context> context = m_inspector->regexContext();
+  v8::MicrotasksScope microtasks(isolate,
+                                 v8::MicrotasksScope::kDoNotRunMicrotasks);
+  v8::TryCatch tryCatch(isolate);
+
+  v8::Local<v8::RegExp> regex = m_regex.Get(isolate);
+  v8::Local<v8::Value> exec;
+  if (!regex->Get(context, toV8StringInternalized(isolate, "exec"))
+           .ToLocal(&exec))
+    return -1;
+  v8::Local<v8::Value> argv[] = {
+      toV8String(isolate, string.substring(startFrom))};
+  v8::Local<v8::Value> returnValue;
+  if (!exec.As<v8::Function>()
+           ->Call(context, regex, arraysize(argv), argv)
+           .ToLocal(&returnValue))
+    return -1;
+
+  // RegExp#exec returns null if there's no match, otherwise it returns an
+  // Array of strings with the first being the whole match string and others
+  // being subgroups. The Array also has some random properties tacked on like
+  // "index" which is the offset of the match.
+  //
+  // https://developer.mozilla.org/en-US/docs/JavaScript/Reference/Global_Objects/RegExp/exec
+
+  DCHECK(!returnValue.IsEmpty());
+  if (!returnValue->IsArray()) return -1;
+
+  v8::Local<v8::Array> result = returnValue.As<v8::Array>();
+  v8::Local<v8::Value> matchOffset;
+  if (!result->Get(context, toV8StringInternalized(isolate, "index"))
+           .ToLocal(&matchOffset))
+    return -1;
+  if (matchLength) {
+    v8::Local<v8::Value> match;
+    if (!result->Get(context, 0).ToLocal(&match)) return -1;
+    *matchLength = match.As<v8::String>()->Length();
+  }
+
+  return matchOffset.As<v8::Int32>()->Value() + startFrom;
+}
+
+}  // namespace v8_inspector
diff --git a/src/inspector/v8-regex.h b/src/inspector/v8-regex.h
new file mode 100644
index 0000000..b4b1f8c
--- /dev/null
+++ b/src/inspector/v8-regex.h
@@ -0,0 +1,37 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_INSPECTOR_V8REGEX_H_
+#define V8_INSPECTOR_V8REGEX_H_
+
+#include "src/base/macros.h"
+#include "src/inspector/string-16.h"
+
+#include "include/v8.h"
+
+namespace v8_inspector {
+
+class V8InspectorImpl;
+
+enum MultilineMode { MultilineDisabled, MultilineEnabled };
+
+class V8Regex {
+ public:
+  V8Regex(V8InspectorImpl*, const String16&, bool caseSensitive,
+          bool multiline = false);
+  int match(const String16&, int startFrom = 0, int* matchLength = 0) const;
+  bool isValid() const { return !m_regex.IsEmpty(); }
+  const String16& errorMessage() const { return m_errorMessage; }
+
+ private:
+  V8InspectorImpl* m_inspector;
+  v8::Global<v8::RegExp> m_regex;
+  String16 m_errorMessage;
+
+  DISALLOW_COPY_AND_ASSIGN(V8Regex);
+};
+
+}  // namespace v8_inspector
+
+#endif  // V8_INSPECTOR_V8REGEX_H_
diff --git a/src/inspector/v8-runtime-agent-impl.cc b/src/inspector/v8-runtime-agent-impl.cc
new file mode 100644
index 0000000..640ec31
--- /dev/null
+++ b/src/inspector/v8-runtime-agent-impl.cc
@@ -0,0 +1,738 @@
+/*
+ * Copyright (C) 2011 Google Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ *     * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ *     * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "src/inspector/v8-runtime-agent-impl.h"
+
+#include "src/inspector/injected-script.h"
+#include "src/inspector/inspected-context.h"
+#include "src/inspector/protocol/Protocol.h"
+#include "src/inspector/remote-object-id.h"
+#include "src/inspector/string-util.h"
+#include "src/inspector/v8-console-message.h"
+#include "src/inspector/v8-debugger-agent-impl.h"
+#include "src/inspector/v8-debugger.h"
+#include "src/inspector/v8-inspector-impl.h"
+#include "src/inspector/v8-inspector-session-impl.h"
+#include "src/inspector/v8-stack-trace-impl.h"
+
+#include "include/v8-inspector.h"
+
+namespace v8_inspector {
+
+namespace V8RuntimeAgentImplState {
+static const char customObjectFormatterEnabled[] =
+    "customObjectFormatterEnabled";
+static const char runtimeEnabled[] = "runtimeEnabled";
+};
+
+using protocol::Runtime::RemoteObject;
+
+static bool hasInternalError(ErrorString* errorString, bool hasError) {
+  if (hasError) *errorString = "Internal error";
+  return hasError;
+}
+
+namespace {
+
+template <typename Callback>
+class ProtocolPromiseHandler {
+ public:
+  static void add(V8InspectorImpl* inspector, v8::Local<v8::Context> context,
+                  v8::MaybeLocal<v8::Value> value,
+                  const String16& notPromiseError, int contextGroupId,
+                  int executionContextId, const String16& objectGroup,
+                  bool returnByValue, bool generatePreview,
+                  std::unique_ptr<Callback> callback) {
+    if (value.IsEmpty()) {
+      callback->sendFailure("Internal error");
+      return;
+    }
+    if (!value.ToLocalChecked()->IsPromise()) {
+      callback->sendFailure(notPromiseError);
+      return;
+    }
+    v8::MicrotasksScope microtasks_scope(inspector->isolate(),
+                                         v8::MicrotasksScope::kRunMicrotasks);
+    v8::Local<v8::Promise> promise =
+        v8::Local<v8::Promise>::Cast(value.ToLocalChecked());
+    Callback* rawCallback = callback.get();
+    ProtocolPromiseHandler<Callback>* handler = new ProtocolPromiseHandler(
+        inspector, contextGroupId, executionContextId, objectGroup,
+        returnByValue, generatePreview, std::move(callback));
+    v8::Local<v8::Value> wrapper = handler->m_wrapper.Get(inspector->isolate());
+
+    v8::Local<v8::Function> thenCallbackFunction =
+        v8::Function::New(context, thenCallback, wrapper, 0,
+                          v8::ConstructorBehavior::kThrow)
+            .ToLocalChecked();
+    if (promise->Then(context, thenCallbackFunction).IsEmpty()) {
+      rawCallback->sendFailure("Internal error");
+      return;
+    }
+    v8::Local<v8::Function> catchCallbackFunction =
+        v8::Function::New(context, catchCallback, wrapper, 0,
+                          v8::ConstructorBehavior::kThrow)
+            .ToLocalChecked();
+    if (promise->Catch(context, catchCallbackFunction).IsEmpty()) {
+      rawCallback->sendFailure("Internal error");
+      return;
+    }
+  }
+
+ private:
+  static void thenCallback(const v8::FunctionCallbackInfo<v8::Value>& info) {
+    ProtocolPromiseHandler<Callback>* handler =
+        static_cast<ProtocolPromiseHandler<Callback>*>(
+            info.Data().As<v8::External>()->Value());
+    DCHECK(handler);
+    v8::Local<v8::Value> value =
+        info.Length() > 0
+            ? info[0]
+            : v8::Local<v8::Value>::Cast(v8::Undefined(info.GetIsolate()));
+    std::unique_ptr<protocol::Runtime::RemoteObject> wrappedValue(
+        handler->wrapObject(value));
+    if (!wrappedValue) return;
+    handler->m_callback->sendSuccess(
+        std::move(wrappedValue), Maybe<protocol::Runtime::ExceptionDetails>());
+  }
+
+  static void catchCallback(const v8::FunctionCallbackInfo<v8::Value>& info) {
+    ProtocolPromiseHandler<Callback>* handler =
+        static_cast<ProtocolPromiseHandler<Callback>*>(
+            info.Data().As<v8::External>()->Value());
+    DCHECK(handler);
+    v8::Local<v8::Value> value =
+        info.Length() > 0
+            ? info[0]
+            : v8::Local<v8::Value>::Cast(v8::Undefined(info.GetIsolate()));
+
+    std::unique_ptr<protocol::Runtime::RemoteObject> wrappedValue(
+        handler->wrapObject(value));
+    if (!wrappedValue) return;
+
+    std::unique_ptr<V8StackTraceImpl> stack =
+        handler->m_inspector->debugger()->captureStackTrace(true);
+    std::unique_ptr<protocol::Runtime::ExceptionDetails> exceptionDetails =
+        protocol::Runtime::ExceptionDetails::create()
+            .setExceptionId(handler->m_inspector->nextExceptionId())
+            .setText("Uncaught (in promise)")
+            .setLineNumber(stack && !stack->isEmpty() ? stack->topLineNumber()
+                                                      : 0)
+            .setColumnNumber(
+                stack && !stack->isEmpty() ? stack->topColumnNumber() : 0)
+            .setException(wrappedValue->clone())
+            .build();
+    if (stack)
+      exceptionDetails->setStackTrace(stack->buildInspectorObjectImpl());
+    if (stack && !stack->isEmpty())
+      exceptionDetails->setScriptId(toString16(stack->topScriptId()));
+    handler->m_callback->sendSuccess(std::move(wrappedValue),
+                                     std::move(exceptionDetails));
+  }
+
+  ProtocolPromiseHandler(V8InspectorImpl* inspector, int contextGroupId,
+                         int executionContextId, const String16& objectGroup,
+                         bool returnByValue, bool generatePreview,
+                         std::unique_ptr<Callback> callback)
+      : m_inspector(inspector),
+        m_contextGroupId(contextGroupId),
+        m_executionContextId(executionContextId),
+        m_objectGroup(objectGroup),
+        m_returnByValue(returnByValue),
+        m_generatePreview(generatePreview),
+        m_callback(std::move(callback)),
+        m_wrapper(inspector->isolate(),
+                  v8::External::New(inspector->isolate(), this)) {
+    m_wrapper.SetWeak(this, cleanup, v8::WeakCallbackType::kParameter);
+  }
+
+  static void cleanup(
+      const v8::WeakCallbackInfo<ProtocolPromiseHandler<Callback>>& data) {
+    if (!data.GetParameter()->m_wrapper.IsEmpty()) {
+      data.GetParameter()->m_wrapper.Reset();
+      data.SetSecondPassCallback(cleanup);
+    } else {
+      data.GetParameter()->m_callback->sendFailure("Promise was collected");
+      delete data.GetParameter();
+    }
+  }
+
+  std::unique_ptr<protocol::Runtime::RemoteObject> wrapObject(
+      v8::Local<v8::Value> value) {
+    ErrorString errorString;
+    InjectedScript::ContextScope scope(&errorString, m_inspector,
+                                       m_contextGroupId, m_executionContextId);
+    if (!scope.initialize()) {
+      m_callback->sendFailure(errorString);
+      return nullptr;
+    }
+    std::unique_ptr<protocol::Runtime::RemoteObject> wrappedValue =
+        scope.injectedScript()->wrapObject(&errorString, value, m_objectGroup,
+                                           m_returnByValue, m_generatePreview);
+    if (!wrappedValue) {
+      m_callback->sendFailure(errorString);
+      return nullptr;
+    }
+    return wrappedValue;
+  }
+
+  V8InspectorImpl* m_inspector;
+  int m_contextGroupId;
+  int m_executionContextId;
+  String16 m_objectGroup;
+  bool m_returnByValue;
+  bool m_generatePreview;
+  std::unique_ptr<Callback> m_callback;
+  v8::Global<v8::External> m_wrapper;
+};
+
+template <typename Callback>
+bool wrapEvaluateResultAsync(InjectedScript* injectedScript,
+                             v8::MaybeLocal<v8::Value> maybeResultValue,
+                             const v8::TryCatch& tryCatch,
+                             const String16& objectGroup, bool returnByValue,
+                             bool generatePreview, Callback* callback) {
+  std::unique_ptr<RemoteObject> result;
+  Maybe<protocol::Runtime::ExceptionDetails> exceptionDetails;
+
+  ErrorString errorString;
+  injectedScript->wrapEvaluateResult(
+      &errorString, maybeResultValue, tryCatch, objectGroup, returnByValue,
+      generatePreview, &result, &exceptionDetails);
+  if (errorString.isEmpty()) {
+    callback->sendSuccess(std::move(result), exceptionDetails);
+    return true;
+  }
+  callback->sendFailure(errorString);
+  return false;
+}
+
+int ensureContext(ErrorString* errorString, V8InspectorImpl* inspector,
+                  int contextGroupId, const Maybe<int>& executionContextId) {
+  int contextId;
+  if (executionContextId.isJust()) {
+    contextId = executionContextId.fromJust();
+  } else {
+    v8::HandleScope handles(inspector->isolate());
+    v8::Local<v8::Context> defaultContext =
+        inspector->client()->ensureDefaultContextInGroup(contextGroupId);
+    if (defaultContext.IsEmpty()) {
+      *errorString = "Cannot find default execution context";
+      return 0;
+    }
+    contextId = V8Debugger::contextId(defaultContext);
+  }
+  return contextId;
+}
+
+}  // namespace
+
+V8RuntimeAgentImpl::V8RuntimeAgentImpl(
+    V8InspectorSessionImpl* session, protocol::FrontendChannel* FrontendChannel,
+    protocol::DictionaryValue* state)
+    : m_session(session),
+      m_state(state),
+      m_frontend(FrontendChannel),
+      m_inspector(session->inspector()),
+      m_enabled(false) {}
+
+V8RuntimeAgentImpl::~V8RuntimeAgentImpl() {}
+
+void V8RuntimeAgentImpl::evaluate(
+    const String16& expression, const Maybe<String16>& objectGroup,
+    const Maybe<bool>& includeCommandLineAPI, const Maybe<bool>& silent,
+    const Maybe<int>& executionContextId, const Maybe<bool>& returnByValue,
+    const Maybe<bool>& generatePreview, const Maybe<bool>& userGesture,
+    const Maybe<bool>& awaitPromise,
+    std::unique_ptr<EvaluateCallback> callback) {
+  ErrorString errorString;
+  int contextId =
+      ensureContext(&errorString, m_inspector, m_session->contextGroupId(),
+                    executionContextId);
+  if (!errorString.isEmpty()) {
+    callback->sendFailure(errorString);
+    return;
+  }
+
+  InjectedScript::ContextScope scope(&errorString, m_inspector,
+                                     m_session->contextGroupId(), contextId);
+  if (!scope.initialize()) {
+    callback->sendFailure(errorString);
+    return;
+  }
+
+  if (silent.fromMaybe(false)) scope.ignoreExceptionsAndMuteConsole();
+  if (userGesture.fromMaybe(false)) scope.pretendUserGesture();
+
+  if (includeCommandLineAPI.fromMaybe(false) &&
+      !scope.installCommandLineAPI()) {
+    callback->sendFailure(errorString);
+    return;
+  }
+
+  bool evalIsDisabled = !scope.context()->IsCodeGenerationFromStringsAllowed();
+  // Temporarily enable allow evals for inspector.
+  if (evalIsDisabled) scope.context()->AllowCodeGenerationFromStrings(true);
+
+  v8::MaybeLocal<v8::Value> maybeResultValue;
+  v8::Local<v8::Script> script = m_inspector->compileScript(
+      scope.context(), toV8String(m_inspector->isolate(), expression),
+      String16(), false);
+  if (!script.IsEmpty())
+    maybeResultValue = m_inspector->runCompiledScript(scope.context(), script);
+
+  if (evalIsDisabled) scope.context()->AllowCodeGenerationFromStrings(false);
+
+  // Re-initialize after running client's code, as it could have destroyed
+  // context or session.
+  if (!scope.initialize()) {
+    callback->sendFailure(errorString);
+    return;
+  }
+
+  if (!awaitPromise.fromMaybe(false) || scope.tryCatch().HasCaught()) {
+    wrapEvaluateResultAsync(scope.injectedScript(), maybeResultValue,
+                            scope.tryCatch(), objectGroup.fromMaybe(""),
+                            returnByValue.fromMaybe(false),
+                            generatePreview.fromMaybe(false), callback.get());
+    return;
+  }
+  ProtocolPromiseHandler<EvaluateCallback>::add(
+      m_inspector, scope.context(), maybeResultValue,
+      "Result of the evaluation is not a promise", m_session->contextGroupId(),
+      scope.injectedScript()->context()->contextId(), objectGroup.fromMaybe(""),
+      returnByValue.fromMaybe(false), generatePreview.fromMaybe(false),
+      std::move(callback));
+}
+
+void V8RuntimeAgentImpl::awaitPromise(
+    const String16& promiseObjectId, const Maybe<bool>& returnByValue,
+    const Maybe<bool>& generatePreview,
+    std::unique_ptr<AwaitPromiseCallback> callback) {
+  ErrorString errorString;
+  InjectedScript::ObjectScope scope(
+      &errorString, m_inspector, m_session->contextGroupId(), promiseObjectId);
+  if (!scope.initialize()) {
+    callback->sendFailure(errorString);
+    return;
+  }
+  ProtocolPromiseHandler<AwaitPromiseCallback>::add(
+      m_inspector, scope.context(), scope.object(),
+      "Could not find promise with given id", m_session->contextGroupId(),
+      scope.injectedScript()->context()->contextId(), scope.objectGroupName(),
+      returnByValue.fromMaybe(false), generatePreview.fromMaybe(false),
+      std::move(callback));
+}
+
+void V8RuntimeAgentImpl::callFunctionOn(
+    const String16& objectId, const String16& expression,
+    const Maybe<protocol::Array<protocol::Runtime::CallArgument>>&
+        optionalArguments,
+    const Maybe<bool>& silent, const Maybe<bool>& returnByValue,
+    const Maybe<bool>& generatePreview, const Maybe<bool>& userGesture,
+    const Maybe<bool>& awaitPromise,
+    std::unique_ptr<CallFunctionOnCallback> callback) {
+  ErrorString errorString;
+  InjectedScript::ObjectScope scope(&errorString, m_inspector,
+                                    m_session->contextGroupId(), objectId);
+  if (!scope.initialize()) {
+    callback->sendFailure(errorString);
+    return;
+  }
+
+  std::unique_ptr<v8::Local<v8::Value>[]> argv = nullptr;
+  int argc = 0;
+  if (optionalArguments.isJust()) {
+    protocol::Array<protocol::Runtime::CallArgument>* arguments =
+        optionalArguments.fromJust();
+    argc = static_cast<int>(arguments->length());
+    argv.reset(new v8::Local<v8::Value>[argc]);
+    for (int i = 0; i < argc; ++i) {
+      v8::Local<v8::Value> argumentValue;
+      if (!scope.injectedScript()
+               ->resolveCallArgument(&errorString, arguments->get(i))
+               .ToLocal(&argumentValue)) {
+        callback->sendFailure(errorString);
+        return;
+      }
+      argv[i] = argumentValue;
+    }
+  }
+
+  if (silent.fromMaybe(false)) scope.ignoreExceptionsAndMuteConsole();
+  if (userGesture.fromMaybe(false)) scope.pretendUserGesture();
+
+  v8::MaybeLocal<v8::Value> maybeFunctionValue =
+      m_inspector->compileAndRunInternalScript(
+          scope.context(),
+          toV8String(m_inspector->isolate(), "(" + expression + ")"));
+  // Re-initialize after running client's code, as it could have destroyed
+  // context or session.
+  if (!scope.initialize()) {
+    callback->sendFailure(errorString);
+    return;
+  }
+
+  if (scope.tryCatch().HasCaught()) {
+    wrapEvaluateResultAsync(scope.injectedScript(), maybeFunctionValue,
+                            scope.tryCatch(), scope.objectGroupName(), false,
+                            false, callback.get());
+    return;
+  }
+
+  v8::Local<v8::Value> functionValue;
+  if (!maybeFunctionValue.ToLocal(&functionValue) ||
+      !functionValue->IsFunction()) {
+    callback->sendFailure("Given expression does not evaluate to a function");
+    return;
+  }
+
+  v8::MaybeLocal<v8::Value> maybeResultValue = m_inspector->callFunction(
+      functionValue.As<v8::Function>(), scope.context(), scope.object(), argc,
+      argv.get());
+  // Re-initialize after running client's code, as it could have destroyed
+  // context or session.
+  if (!scope.initialize()) {
+    callback->sendFailure(errorString);
+    return;
+  }
+
+  if (!awaitPromise.fromMaybe(false) || scope.tryCatch().HasCaught()) {
+    wrapEvaluateResultAsync(scope.injectedScript(), maybeResultValue,
+                            scope.tryCatch(), scope.objectGroupName(),
+                            returnByValue.fromMaybe(false),
+                            generatePreview.fromMaybe(false), callback.get());
+    return;
+  }
+
+  ProtocolPromiseHandler<CallFunctionOnCallback>::add(
+      m_inspector, scope.context(), maybeResultValue,
+      "Result of the function call is not a promise",
+      m_session->contextGroupId(),
+      scope.injectedScript()->context()->contextId(), scope.objectGroupName(),
+      returnByValue.fromMaybe(false), generatePreview.fromMaybe(false),
+      std::move(callback));
+}
+
+void V8RuntimeAgentImpl::getProperties(
+    ErrorString* errorString, const String16& objectId,
+    const Maybe<bool>& ownProperties, const Maybe<bool>& accessorPropertiesOnly,
+    const Maybe<bool>& generatePreview,
+    std::unique_ptr<protocol::Array<protocol::Runtime::PropertyDescriptor>>*
+        result,
+    Maybe<protocol::Array<protocol::Runtime::InternalPropertyDescriptor>>*
+        internalProperties,
+    Maybe<protocol::Runtime::ExceptionDetails>* exceptionDetails) {
+  using protocol::Runtime::InternalPropertyDescriptor;
+
+  InjectedScript::ObjectScope scope(errorString, m_inspector,
+                                    m_session->contextGroupId(), objectId);
+  if (!scope.initialize()) return;
+
+  scope.ignoreExceptionsAndMuteConsole();
+  if (!scope.object()->IsObject()) {
+    *errorString = "Value with given id is not an object";
+    return;
+  }
+
+  v8::Local<v8::Object> object = scope.object().As<v8::Object>();
+  scope.injectedScript()->getProperties(
+      errorString, object, scope.objectGroupName(),
+      ownProperties.fromMaybe(false), accessorPropertiesOnly.fromMaybe(false),
+      generatePreview.fromMaybe(false), result, exceptionDetails);
+  if (!errorString->isEmpty() || exceptionDetails->isJust() ||
+      accessorPropertiesOnly.fromMaybe(false))
+    return;
+  v8::Local<v8::Array> propertiesArray;
+  if (hasInternalError(errorString, !m_inspector->debugger()
+                                         ->internalProperties(scope.context(),
+                                                              scope.object())
+                                         .ToLocal(&propertiesArray)))
+    return;
+  std::unique_ptr<protocol::Array<InternalPropertyDescriptor>>
+      propertiesProtocolArray =
+          protocol::Array<InternalPropertyDescriptor>::create();
+  for (uint32_t i = 0; i < propertiesArray->Length(); i += 2) {
+    v8::Local<v8::Value> name;
+    if (hasInternalError(
+            errorString,
+            !propertiesArray->Get(scope.context(), i).ToLocal(&name)) ||
+        !name->IsString())
+      return;
+    v8::Local<v8::Value> value;
+    if (hasInternalError(
+            errorString,
+            !propertiesArray->Get(scope.context(), i + 1).ToLocal(&value)))
+      return;
+    std::unique_ptr<RemoteObject> wrappedValue =
+        scope.injectedScript()->wrapObject(errorString, value,
+                                           scope.objectGroupName());
+    if (!wrappedValue) return;
+    propertiesProtocolArray->addItem(
+        InternalPropertyDescriptor::create()
+            .setName(toProtocolString(name.As<v8::String>()))
+            .setValue(std::move(wrappedValue))
+            .build());
+  }
+  if (!propertiesProtocolArray->length()) return;
+  *internalProperties = std::move(propertiesProtocolArray);
+}
+
+void V8RuntimeAgentImpl::releaseObject(ErrorString* errorString,
+                                       const String16& objectId) {
+  InjectedScript::ObjectScope scope(errorString, m_inspector,
+                                    m_session->contextGroupId(), objectId);
+  if (!scope.initialize()) return;
+  scope.injectedScript()->releaseObject(objectId);
+}
+
+void V8RuntimeAgentImpl::releaseObjectGroup(ErrorString*,
+                                            const String16& objectGroup) {
+  m_session->releaseObjectGroup(objectGroup);
+}
+
+void V8RuntimeAgentImpl::runIfWaitingForDebugger(ErrorString* errorString) {
+  m_inspector->client()->runIfWaitingForDebugger(m_session->contextGroupId());
+}
+
+void V8RuntimeAgentImpl::setCustomObjectFormatterEnabled(ErrorString*,
+                                                         bool enabled) {
+  m_state->setBoolean(V8RuntimeAgentImplState::customObjectFormatterEnabled,
+                      enabled);
+  m_session->setCustomObjectFormatterEnabled(enabled);
+}
+
+void V8RuntimeAgentImpl::discardConsoleEntries(ErrorString*) {
+  V8ConsoleMessageStorage* storage =
+      m_inspector->ensureConsoleMessageStorage(m_session->contextGroupId());
+  storage->clear();
+}
+
+void V8RuntimeAgentImpl::compileScript(
+    ErrorString* errorString, const String16& expression,
+    const String16& sourceURL, bool persistScript,
+    const Maybe<int>& executionContextId, Maybe<String16>* scriptId,
+    Maybe<protocol::Runtime::ExceptionDetails>* exceptionDetails) {
+  if (!m_enabled) {
+    *errorString = "Runtime agent is not enabled";
+    return;
+  }
+  int contextId =
+      ensureContext(errorString, m_inspector, m_session->contextGroupId(),
+                    executionContextId);
+  if (!errorString->isEmpty()) return;
+  InjectedScript::ContextScope scope(errorString, m_inspector,
+                                     m_session->contextGroupId(), contextId);
+  if (!scope.initialize()) return;
+
+  if (!persistScript) m_inspector->debugger()->muteScriptParsedEvents();
+  v8::Local<v8::Script> script = m_inspector->compileScript(
+      scope.context(), toV8String(m_inspector->isolate(), expression),
+      sourceURL, false);
+  if (!persistScript) m_inspector->debugger()->unmuteScriptParsedEvents();
+  if (script.IsEmpty()) {
+    if (scope.tryCatch().HasCaught())
+      *exceptionDetails = scope.injectedScript()->createExceptionDetails(
+          errorString, scope.tryCatch(), String16(), false);
+    else
+      *errorString = "Script compilation failed";
+    return;
+  }
+
+  if (!persistScript) return;
+
+  String16 scriptValueId =
+      String16::fromInteger(script->GetUnboundScript()->GetId());
+  std::unique_ptr<v8::Global<v8::Script>> global(
+      new v8::Global<v8::Script>(m_inspector->isolate(), script));
+  m_compiledScripts[scriptValueId] = std::move(global);
+  *scriptId = scriptValueId;
+}
+
+void V8RuntimeAgentImpl::runScript(
+    const String16& scriptId, const Maybe<int>& executionContextId,
+    const Maybe<String16>& objectGroup, const Maybe<bool>& silent,
+    const Maybe<bool>& includeCommandLineAPI, const Maybe<bool>& returnByValue,
+    const Maybe<bool>& generatePreview, const Maybe<bool>& awaitPromise,
+    std::unique_ptr<RunScriptCallback> callback) {
+  if (!m_enabled) {
+    callback->sendFailure("Runtime agent is not enabled");
+    return;
+  }
+
+  auto it = m_compiledScripts.find(scriptId);
+  if (it == m_compiledScripts.end()) {
+    callback->sendFailure("No script with given id");
+    return;
+  }
+
+  ErrorString errorString;
+  int contextId =
+      ensureContext(&errorString, m_inspector, m_session->contextGroupId(),
+                    executionContextId);
+  if (!errorString.isEmpty()) {
+    callback->sendFailure(errorString);
+    return;
+  }
+
+  InjectedScript::ContextScope scope(&errorString, m_inspector,
+                                     m_session->contextGroupId(), contextId);
+  if (!scope.initialize()) {
+    callback->sendFailure(errorString);
+    return;
+  }
+
+  if (silent.fromMaybe(false)) scope.ignoreExceptionsAndMuteConsole();
+
+  std::unique_ptr<v8::Global<v8::Script>> scriptWrapper = std::move(it->second);
+  m_compiledScripts.erase(it);
+  v8::Local<v8::Script> script = scriptWrapper->Get(m_inspector->isolate());
+  if (script.IsEmpty()) {
+    callback->sendFailure("Script execution failed");
+    return;
+  }
+
+  if (includeCommandLineAPI.fromMaybe(false) && !scope.installCommandLineAPI())
+    return;
+
+  v8::MaybeLocal<v8::Value> maybeResultValue =
+      m_inspector->runCompiledScript(scope.context(), script);
+
+  // Re-initialize after running client's code, as it could have destroyed
+  // context or session.
+  if (!scope.initialize()) return;
+
+  if (!awaitPromise.fromMaybe(false) || scope.tryCatch().HasCaught()) {
+    wrapEvaluateResultAsync(scope.injectedScript(), maybeResultValue,
+                            scope.tryCatch(), objectGroup.fromMaybe(""),
+                            returnByValue.fromMaybe(false),
+                            generatePreview.fromMaybe(false), callback.get());
+    return;
+  }
+  ProtocolPromiseHandler<RunScriptCallback>::add(
+      m_inspector, scope.context(), maybeResultValue.ToLocalChecked(),
+      "Result of the script execution is not a promise",
+      m_session->contextGroupId(),
+      scope.injectedScript()->context()->contextId(), objectGroup.fromMaybe(""),
+      returnByValue.fromMaybe(false), generatePreview.fromMaybe(false),
+      std::move(callback));
+}
+
+void V8RuntimeAgentImpl::restore() {
+  if (!m_state->booleanProperty(V8RuntimeAgentImplState::runtimeEnabled, false))
+    return;
+  m_frontend.executionContextsCleared();
+  ErrorString error;
+  enable(&error);
+  if (m_state->booleanProperty(
+          V8RuntimeAgentImplState::customObjectFormatterEnabled, false))
+    m_session->setCustomObjectFormatterEnabled(true);
+}
+
+void V8RuntimeAgentImpl::enable(ErrorString* errorString) {
+  if (m_enabled) return;
+  m_inspector->client()->beginEnsureAllContextsInGroup(
+      m_session->contextGroupId());
+  m_enabled = true;
+  m_state->setBoolean(V8RuntimeAgentImplState::runtimeEnabled, true);
+  m_inspector->enableStackCapturingIfNeeded();
+  m_session->reportAllContexts(this);
+  V8ConsoleMessageStorage* storage =
+      m_inspector->ensureConsoleMessageStorage(m_session->contextGroupId());
+  for (const auto& message : storage->messages()) {
+    if (!reportMessage(message.get(), false)) return;
+  }
+}
+
+void V8RuntimeAgentImpl::disable(ErrorString* errorString) {
+  if (!m_enabled) return;
+  m_enabled = false;
+  m_state->setBoolean(V8RuntimeAgentImplState::runtimeEnabled, false);
+  m_inspector->disableStackCapturingIfNeeded();
+  m_session->discardInjectedScripts();
+  reset();
+  m_inspector->client()->endEnsureAllContextsInGroup(
+      m_session->contextGroupId());
+}
+
+void V8RuntimeAgentImpl::reset() {
+  m_compiledScripts.clear();
+  if (m_enabled) {
+    if (const V8InspectorImpl::ContextByIdMap* contexts =
+            m_inspector->contextGroup(m_session->contextGroupId())) {
+      for (auto& idContext : *contexts) idContext.second->setReported(false);
+    }
+    m_frontend.executionContextsCleared();
+  }
+}
+
+void V8RuntimeAgentImpl::reportExecutionContextCreated(
+    InspectedContext* context) {
+  if (!m_enabled) return;
+  context->setReported(true);
+  std::unique_ptr<protocol::Runtime::ExecutionContextDescription> description =
+      protocol::Runtime::ExecutionContextDescription::create()
+          .setId(context->contextId())
+          .setName(context->humanReadableName())
+          .setOrigin(context->origin())
+          .build();
+  if (!context->auxData().isEmpty())
+    description->setAuxData(protocol::DictionaryValue::cast(
+        protocol::parseJSON(context->auxData())));
+  m_frontend.executionContextCreated(std::move(description));
+}
+
+void V8RuntimeAgentImpl::reportExecutionContextDestroyed(
+    InspectedContext* context) {
+  if (m_enabled && context->isReported()) {
+    context->setReported(false);
+    m_frontend.executionContextDestroyed(context->contextId());
+  }
+}
+
+void V8RuntimeAgentImpl::inspect(
+    std::unique_ptr<protocol::Runtime::RemoteObject> objectToInspect,
+    std::unique_ptr<protocol::DictionaryValue> hints) {
+  if (m_enabled)
+    m_frontend.inspectRequested(std::move(objectToInspect), std::move(hints));
+}
+
+void V8RuntimeAgentImpl::messageAdded(V8ConsoleMessage* message) {
+  if (m_enabled) reportMessage(message, true);
+}
+
+bool V8RuntimeAgentImpl::reportMessage(V8ConsoleMessage* message,
+                                       bool generatePreview) {
+  message->reportToFrontend(&m_frontend, m_session, generatePreview);
+  m_frontend.flush();
+  return m_inspector->hasConsoleMessageStorage(m_session->contextGroupId());
+}
+
+}  // namespace v8_inspector
diff --git a/src/inspector/v8-runtime-agent-impl.h b/src/inspector/v8-runtime-agent-impl.h
new file mode 100644
index 0000000..edeeed4
--- /dev/null
+++ b/src/inspector/v8-runtime-agent-impl.h
@@ -0,0 +1,131 @@
+/*
+ * Copyright (C) 2011 Google Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ *     * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ *     * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef V8_INSPECTOR_V8RUNTIMEAGENTIMPL_H_
+#define V8_INSPECTOR_V8RUNTIMEAGENTIMPL_H_
+
+#include "src/base/macros.h"
+#include "src/inspector/protocol/Forward.h"
+#include "src/inspector/protocol/Runtime.h"
+
+#include "include/v8.h"
+
+namespace v8_inspector {
+
+class InjectedScript;
+class InspectedContext;
+class RemoteObjectIdBase;
+class V8ConsoleMessage;
+class V8InspectorImpl;
+class V8InspectorSessionImpl;
+
+using protocol::ErrorString;
+using protocol::Maybe;
+
+class V8RuntimeAgentImpl : public protocol::Runtime::Backend {
+ public:
+  V8RuntimeAgentImpl(V8InspectorSessionImpl*, protocol::FrontendChannel*,
+                     protocol::DictionaryValue* state);
+  ~V8RuntimeAgentImpl() override;
+  void restore();
+
+  // Part of the protocol.
+  void enable(ErrorString*) override;
+  void disable(ErrorString*) override;
+  void evaluate(const String16& expression, const Maybe<String16>& objectGroup,
+                const Maybe<bool>& includeCommandLineAPI,
+                const Maybe<bool>& silent, const Maybe<int>& executionContextId,
+                const Maybe<bool>& returnByValue,
+                const Maybe<bool>& generatePreview,
+                const Maybe<bool>& userGesture, const Maybe<bool>& awaitPromise,
+                std::unique_ptr<EvaluateCallback>) override;
+  void awaitPromise(const String16& promiseObjectId,
+                    const Maybe<bool>& returnByValue,
+                    const Maybe<bool>& generatePreview,
+                    std::unique_ptr<AwaitPromiseCallback>) override;
+  void callFunctionOn(
+      const String16& objectId, const String16& expression,
+      const Maybe<protocol::Array<protocol::Runtime::CallArgument>>&
+          optionalArguments,
+      const Maybe<bool>& silent, const Maybe<bool>& returnByValue,
+      const Maybe<bool>& generatePreview, const Maybe<bool>& userGesture,
+      const Maybe<bool>& awaitPromise,
+      std::unique_ptr<CallFunctionOnCallback>) override;
+  void releaseObject(ErrorString*, const String16& objectId) override;
+  void getProperties(
+      ErrorString*, const String16& objectId, const Maybe<bool>& ownProperties,
+      const Maybe<bool>& accessorPropertiesOnly,
+      const Maybe<bool>& generatePreview,
+      std::unique_ptr<protocol::Array<protocol::Runtime::PropertyDescriptor>>*
+          result,
+      Maybe<protocol::Array<protocol::Runtime::InternalPropertyDescriptor>>*
+          internalProperties,
+      Maybe<protocol::Runtime::ExceptionDetails>*) override;
+  void releaseObjectGroup(ErrorString*, const String16& objectGroup) override;
+  void runIfWaitingForDebugger(ErrorString*) override;
+  void setCustomObjectFormatterEnabled(ErrorString*, bool) override;
+  void discardConsoleEntries(ErrorString*) override;
+  void compileScript(ErrorString*, const String16& expression,
+                     const String16& sourceURL, bool persistScript,
+                     const Maybe<int>& executionContextId, Maybe<String16>*,
+                     Maybe<protocol::Runtime::ExceptionDetails>*) override;
+  void runScript(const String16&, const Maybe<int>& executionContextId,
+                 const Maybe<String16>& objectGroup, const Maybe<bool>& silent,
+                 const Maybe<bool>& includeCommandLineAPI,
+                 const Maybe<bool>& returnByValue,
+                 const Maybe<bool>& generatePreview,
+                 const Maybe<bool>& awaitPromise,
+                 std::unique_ptr<RunScriptCallback>) override;
+
+  void reset();
+  void reportExecutionContextCreated(InspectedContext*);
+  void reportExecutionContextDestroyed(InspectedContext*);
+  void inspect(std::unique_ptr<protocol::Runtime::RemoteObject> objectToInspect,
+               std::unique_ptr<protocol::DictionaryValue> hints);
+  void messageAdded(V8ConsoleMessage*);
+  bool enabled() const { return m_enabled; }
+
+ private:
+  bool reportMessage(V8ConsoleMessage*, bool generatePreview);
+
+  V8InspectorSessionImpl* m_session;
+  protocol::DictionaryValue* m_state;
+  protocol::Runtime::Frontend m_frontend;
+  V8InspectorImpl* m_inspector;
+  bool m_enabled;
+  protocol::HashMap<String16, std::unique_ptr<v8::Global<v8::Script>>>
+      m_compiledScripts;
+
+  DISALLOW_COPY_AND_ASSIGN(V8RuntimeAgentImpl);
+};
+
+}  // namespace v8_inspector
+
+#endif  // V8_INSPECTOR_V8RUNTIMEAGENTIMPL_H_
diff --git a/src/inspector/v8-schema-agent-impl.cc b/src/inspector/v8-schema-agent-impl.cc
new file mode 100644
index 0000000..9eed5bd
--- /dev/null
+++ b/src/inspector/v8-schema-agent-impl.cc
@@ -0,0 +1,29 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/inspector/v8-schema-agent-impl.h"
+
+#include "src/inspector/protocol/Protocol.h"
+#include "src/inspector/v8-inspector-session-impl.h"
+
+namespace v8_inspector {
+
+V8SchemaAgentImpl::V8SchemaAgentImpl(V8InspectorSessionImpl* session,
+                                     protocol::FrontendChannel* frontendChannel,
+                                     protocol::DictionaryValue* state)
+    : m_session(session), m_frontend(frontendChannel) {}
+
+V8SchemaAgentImpl::~V8SchemaAgentImpl() {}
+
+void V8SchemaAgentImpl::getDomains(
+    ErrorString*,
+    std::unique_ptr<protocol::Array<protocol::Schema::Domain>>* result) {
+  std::vector<std::unique_ptr<protocol::Schema::Domain>> domains =
+      m_session->supportedDomainsImpl();
+  *result = protocol::Array<protocol::Schema::Domain>::create();
+  for (size_t i = 0; i < domains.size(); ++i)
+    (*result)->addItem(std::move(domains[i]));
+}
+
+}  // namespace v8_inspector
diff --git a/src/inspector/v8-schema-agent-impl.h b/src/inspector/v8-schema-agent-impl.h
new file mode 100644
index 0000000..6150201
--- /dev/null
+++ b/src/inspector/v8-schema-agent-impl.h
@@ -0,0 +1,37 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_INSPECTOR_V8SCHEMAAGENTIMPL_H_
+#define V8_INSPECTOR_V8SCHEMAAGENTIMPL_H_
+
+#include "src/base/macros.h"
+#include "src/inspector/protocol/Forward.h"
+#include "src/inspector/protocol/Schema.h"
+
+namespace v8_inspector {
+
+class V8InspectorSessionImpl;
+
+using protocol::ErrorString;
+
+class V8SchemaAgentImpl : public protocol::Schema::Backend {
+ public:
+  V8SchemaAgentImpl(V8InspectorSessionImpl*, protocol::FrontendChannel*,
+                    protocol::DictionaryValue* state);
+  ~V8SchemaAgentImpl() override;
+
+  void getDomains(
+      ErrorString*,
+      std::unique_ptr<protocol::Array<protocol::Schema::Domain>>*) override;
+
+ private:
+  V8InspectorSessionImpl* m_session;
+  protocol::Schema::Frontend m_frontend;
+
+  DISALLOW_COPY_AND_ASSIGN(V8SchemaAgentImpl);
+};
+
+}  // namespace v8_inspector
+
+#endif  // V8_INSPECTOR_V8SCHEMAAGENTIMPL_H_
diff --git a/src/inspector/v8-stack-trace-impl.cc b/src/inspector/v8-stack-trace-impl.cc
new file mode 100644
index 0000000..1a38c6d
--- /dev/null
+++ b/src/inspector/v8-stack-trace-impl.cc
@@ -0,0 +1,281 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/inspector/v8-stack-trace-impl.h"
+
+#include "src/inspector/string-util.h"
+#include "src/inspector/v8-debugger.h"
+#include "src/inspector/v8-inspector-impl.h"
+#include "src/inspector/v8-profiler-agent-impl.h"
+
+#include "include/v8-debug.h"
+#include "include/v8-profiler.h"
+#include "include/v8-version.h"
+
+namespace v8_inspector {
+
+namespace {
+
+static const v8::StackTrace::StackTraceOptions stackTraceOptions =
+    static_cast<v8::StackTrace::StackTraceOptions>(
+        v8::StackTrace::kLineNumber | v8::StackTrace::kColumnOffset |
+        v8::StackTrace::kScriptId | v8::StackTrace::kScriptNameOrSourceURL |
+        v8::StackTrace::kFunctionName);
+
+V8StackTraceImpl::Frame toFrame(v8::Local<v8::StackFrame> frame) {
+  String16 scriptId = String16::fromInteger(frame->GetScriptId());
+  String16 sourceName;
+  v8::Local<v8::String> sourceNameValue(frame->GetScriptNameOrSourceURL());
+  if (!sourceNameValue.IsEmpty())
+    sourceName = toProtocolString(sourceNameValue);
+
+  String16 functionName;
+  v8::Local<v8::String> functionNameValue(frame->GetFunctionName());
+  if (!functionNameValue.IsEmpty())
+    functionName = toProtocolString(functionNameValue);
+
+  int sourceLineNumber = frame->GetLineNumber();
+  int sourceColumn = frame->GetColumn();
+  return V8StackTraceImpl::Frame(functionName, scriptId, sourceName,
+                                 sourceLineNumber, sourceColumn);
+}
+
+void toFramesVector(v8::Local<v8::StackTrace> stackTrace,
+                    std::vector<V8StackTraceImpl::Frame>& frames,
+                    size_t maxStackSize, v8::Isolate* isolate) {
+  DCHECK(isolate->InContext());
+  int frameCount = stackTrace->GetFrameCount();
+  if (frameCount > static_cast<int>(maxStackSize))
+    frameCount = static_cast<int>(maxStackSize);
+  for (int i = 0; i < frameCount; i++) {
+    v8::Local<v8::StackFrame> stackFrame = stackTrace->GetFrame(i);
+    frames.push_back(toFrame(stackFrame));
+  }
+}
+
+}  // namespace
+
+V8StackTraceImpl::Frame::Frame()
+    : m_functionName("undefined"),
+      m_scriptId(""),
+      m_scriptName("undefined"),
+      m_lineNumber(0),
+      m_columnNumber(0) {}
+
+V8StackTraceImpl::Frame::Frame(const String16& functionName,
+                               const String16& scriptId,
+                               const String16& scriptName, int lineNumber,
+                               int column)
+    : m_functionName(functionName),
+      m_scriptId(scriptId),
+      m_scriptName(scriptName),
+      m_lineNumber(lineNumber),
+      m_columnNumber(column) {
+  DCHECK(m_lineNumber != v8::Message::kNoLineNumberInfo);
+  DCHECK(m_columnNumber != v8::Message::kNoColumnInfo);
+}
+
+V8StackTraceImpl::Frame::~Frame() {}
+
+// buildInspectorObject() and SourceLocation's toTracedValue() should set the
+// same fields.
+// If either of them is modified, the other should also be updated to match.
+std::unique_ptr<protocol::Runtime::CallFrame>
+V8StackTraceImpl::Frame::buildInspectorObject() const {
+  return protocol::Runtime::CallFrame::create()
+      .setFunctionName(m_functionName)
+      .setScriptId(m_scriptId)
+      .setUrl(m_scriptName)
+      .setLineNumber(m_lineNumber - 1)
+      .setColumnNumber(m_columnNumber - 1)
+      .build();
+}
+
+V8StackTraceImpl::Frame V8StackTraceImpl::Frame::clone() const {
+  return Frame(m_functionName, m_scriptId, m_scriptName, m_lineNumber,
+               m_columnNumber);
+}
+
+// static
+void V8StackTraceImpl::setCaptureStackTraceForUncaughtExceptions(
+    v8::Isolate* isolate, bool capture) {
+  isolate->SetCaptureStackTraceForUncaughtExceptions(
+      capture, V8StackTraceImpl::maxCallStackSizeToCapture, stackTraceOptions);
+}
+
+// static
+std::unique_ptr<V8StackTraceImpl> V8StackTraceImpl::create(
+    V8Debugger* debugger, int contextGroupId,
+    v8::Local<v8::StackTrace> stackTrace, size_t maxStackSize,
+    const String16& description) {
+  v8::Isolate* isolate = v8::Isolate::GetCurrent();
+  v8::HandleScope scope(isolate);
+  std::vector<V8StackTraceImpl::Frame> frames;
+  if (!stackTrace.IsEmpty())
+    toFramesVector(stackTrace, frames, maxStackSize, isolate);
+
+  int maxAsyncCallChainDepth = 1;
+  V8StackTraceImpl* asyncCallChain = nullptr;
+  if (debugger && maxStackSize > 1) {
+    asyncCallChain = debugger->currentAsyncCallChain();
+    maxAsyncCallChainDepth = debugger->maxAsyncCallChainDepth();
+  }
+  // Do not accidentally append an async call chain from another group.
+  // This should not happen if we have proper instrumentation, but let's
+  // double-check to be safe: drop the foreign chain rather than report a
+  // misleading async stack.
+  if (contextGroupId && asyncCallChain && asyncCallChain->m_contextGroupId &&
+      asyncCallChain->m_contextGroupId != contextGroupId) {
+    asyncCallChain = nullptr;
+    maxAsyncCallChainDepth = 1;
+  }
+
+  // Only the top stack in the chain may be empty, so ensure that second stack
+  // is non-empty (it's the top of appended chain).
+  if (asyncCallChain && asyncCallChain->isEmpty())
+    asyncCallChain = asyncCallChain->m_parent.get();
+
+  if (stackTrace.IsEmpty() && !asyncCallChain) return nullptr;
+
+  std::unique_ptr<V8StackTraceImpl> result(new V8StackTraceImpl(
+      contextGroupId, description, frames,
+      asyncCallChain ? asyncCallChain->cloneImpl() : nullptr));
+
+  // Crop to not exceed maxAsyncCallChainDepth.
+  V8StackTraceImpl* deepest = result.get();
+  while (deepest && maxAsyncCallChainDepth) {
+    deepest = deepest->m_parent.get();
+    maxAsyncCallChainDepth--;
+  }
+  if (deepest) deepest->m_parent.reset();
+
+  return result;
+}
+
+// static
+std::unique_ptr<V8StackTraceImpl> V8StackTraceImpl::capture(
+    V8Debugger* debugger, int contextGroupId, size_t maxStackSize,
+    const String16& description) {
+  v8::Isolate* isolate = v8::Isolate::GetCurrent();
+  v8::HandleScope handleScope(isolate);
+  v8::Local<v8::StackTrace> stackTrace;
+  if (isolate->InContext()) {
+    if (debugger) {
+      V8InspectorImpl* inspector = debugger->inspector();
+      V8ProfilerAgentImpl* profilerAgent =
+          inspector->enabledProfilerAgentForGroup(contextGroupId);
+      if (profilerAgent) profilerAgent->collectSample();
+    }
+    stackTrace = v8::StackTrace::CurrentStackTrace(
+        isolate, static_cast<int>(maxStackSize), stackTraceOptions);
+  }
+  return V8StackTraceImpl::create(debugger, contextGroupId, stackTrace,
+                                  maxStackSize, description);
+}
+
+std::unique_ptr<V8StackTraceImpl> V8StackTraceImpl::cloneImpl() {
+  std::vector<Frame> framesCopy(m_frames);
+  return wrapUnique(
+      new V8StackTraceImpl(m_contextGroupId, m_description, framesCopy,
+                           m_parent ? m_parent->cloneImpl() : nullptr));
+}
+
+std::unique_ptr<V8StackTrace> V8StackTraceImpl::clone() {
+  std::vector<Frame> frames;
+  for (size_t i = 0; i < m_frames.size(); i++)
+    frames.push_back(m_frames.at(i).clone());
+  return wrapUnique(
+      new V8StackTraceImpl(m_contextGroupId, m_description, frames, nullptr));
+}
+
+V8StackTraceImpl::V8StackTraceImpl(int contextGroupId,
+                                   const String16& description,
+                                   std::vector<Frame>& frames,
+                                   std::unique_ptr<V8StackTraceImpl> parent)
+    : m_contextGroupId(contextGroupId),
+      m_description(description),
+      m_parent(std::move(parent)) {
+  m_frames.swap(frames);
+}
+
+V8StackTraceImpl::~V8StackTraceImpl() {}
+
+StringView V8StackTraceImpl::topSourceURL() const {
+  DCHECK(m_frames.size());
+  return toStringView(m_frames[0].m_scriptName);
+}
+
+int V8StackTraceImpl::topLineNumber() const {
+  DCHECK(m_frames.size());
+  return m_frames[0].m_lineNumber;
+}
+
+int V8StackTraceImpl::topColumnNumber() const {
+  DCHECK(m_frames.size());
+  return m_frames[0].m_columnNumber;
+}
+
+StringView V8StackTraceImpl::topFunctionName() const {
+  DCHECK(m_frames.size());
+  return toStringView(m_frames[0].m_functionName);
+}
+
+StringView V8StackTraceImpl::topScriptId() const {
+  DCHECK(m_frames.size());
+  return toStringView(m_frames[0].m_scriptId);
+}
+
+std::unique_ptr<protocol::Runtime::StackTrace>
+V8StackTraceImpl::buildInspectorObjectImpl() const {
+  std::unique_ptr<protocol::Array<protocol::Runtime::CallFrame>> frames =
+      protocol::Array<protocol::Runtime::CallFrame>::create();
+  for (size_t i = 0; i < m_frames.size(); i++)
+    frames->addItem(m_frames.at(i).buildInspectorObject());
+
+  std::unique_ptr<protocol::Runtime::StackTrace> stackTrace =
+      protocol::Runtime::StackTrace::create()
+          .setCallFrames(std::move(frames))
+          .build();
+  if (!m_description.isEmpty()) stackTrace->setDescription(m_description);
+  if (m_parent) stackTrace->setParent(m_parent->buildInspectorObjectImpl());
+  return stackTrace;
+}
+
+std::unique_ptr<protocol::Runtime::StackTrace>
+V8StackTraceImpl::buildInspectorObjectForTail(V8Debugger* debugger) const {
+  v8::HandleScope handleScope(v8::Isolate::GetCurrent());
+  // Next call collapses possible empty stack and ensures
+  // maxAsyncCallChainDepth.
+  std::unique_ptr<V8StackTraceImpl> fullChain = V8StackTraceImpl::create(
+      debugger, m_contextGroupId, v8::Local<v8::StackTrace>(),
+      V8StackTraceImpl::maxCallStackSizeToCapture);
+  if (!fullChain || !fullChain->m_parent) return nullptr;
+  return fullChain->m_parent->buildInspectorObjectImpl();
+}
+
+std::unique_ptr<protocol::Runtime::API::StackTrace>
+V8StackTraceImpl::buildInspectorObject() const {
+  return buildInspectorObjectImpl();
+}
+
+std::unique_ptr<StringBuffer> V8StackTraceImpl::toString() const {
+  String16Builder stackTrace;
+  for (size_t i = 0; i < m_frames.size(); ++i) {
+    const Frame& frame = m_frames[i];
+    stackTrace.append("\n    at " + (frame.functionName().length()
+                                         ? frame.functionName()
+                                         : "(anonymous function)"));
+    stackTrace.append(" (");
+    stackTrace.append(frame.sourceURL());
+    stackTrace.append(':');
+    stackTrace.append(String16::fromInteger(frame.lineNumber()));
+    stackTrace.append(':');
+    stackTrace.append(String16::fromInteger(frame.columnNumber()));
+    stackTrace.append(')');
+  }
+  String16 string = stackTrace.toString();
+  return StringBufferImpl::adopt(string);
+}
+
+}  // namespace v8_inspector
diff --git a/src/inspector/v8-stack-trace-impl.h b/src/inspector/v8-stack-trace-impl.h
new file mode 100644
index 0000000..f0a452e
--- /dev/null
+++ b/src/inspector/v8-stack-trace-impl.h
@@ -0,0 +1,99 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_INSPECTOR_V8STACKTRACEIMPL_H_
+#define V8_INSPECTOR_V8STACKTRACEIMPL_H_
+
+#include <vector>
+
+#include "src/base/macros.h"
+#include "src/inspector/protocol/Forward.h"
+#include "src/inspector/protocol/Runtime.h"
+
+#include "include/v8-inspector.h"
+
+namespace v8_inspector {
+
+class TracedValue;
+class V8Debugger;
+
+// Note: an async stack trace may have an empty top stack with a non-empty
+// tail, which indicates that the current native-only state still had some
+// async story behind it.
+// On the other hand, any non-top async stack is guaranteed to be non-empty.
+class V8StackTraceImpl final : public V8StackTrace {
+ public:
+  static const size_t maxCallStackSizeToCapture = 200;
+
+  class Frame {
+   public:
+    Frame();
+    Frame(const String16& functionName, const String16& scriptId,
+          const String16& scriptName, int lineNumber, int column = 0);
+    ~Frame();
+
+    const String16& functionName() const { return m_functionName; }
+    const String16& scriptId() const { return m_scriptId; }
+    const String16& sourceURL() const { return m_scriptName; }
+    int lineNumber() const { return m_lineNumber; }
+    int columnNumber() const { return m_columnNumber; }
+    Frame clone() const;
+
+   private:
+    friend class V8StackTraceImpl;
+    std::unique_ptr<protocol::Runtime::CallFrame> buildInspectorObject() const;
+    void toTracedValue(TracedValue*) const;
+
+    String16 m_functionName;
+    String16 m_scriptId;
+    String16 m_scriptName;
+    int m_lineNumber;
+    int m_columnNumber;
+  };
+
+  static void setCaptureStackTraceForUncaughtExceptions(v8::Isolate*,
+                                                        bool capture);
+  static std::unique_ptr<V8StackTraceImpl> create(
+      V8Debugger*, int contextGroupId, v8::Local<v8::StackTrace>,
+      size_t maxStackSize, const String16& description = String16());
+  static std::unique_ptr<V8StackTraceImpl> capture(
+      V8Debugger*, int contextGroupId, size_t maxStackSize,
+      const String16& description = String16());
+
+  // This method drops the async chain. Use cloneImpl() instead.
+  std::unique_ptr<V8StackTrace> clone() override;
+  std::unique_ptr<V8StackTraceImpl> cloneImpl();
+  std::unique_ptr<protocol::Runtime::StackTrace> buildInspectorObjectForTail(
+      V8Debugger*) const;
+  std::unique_ptr<protocol::Runtime::StackTrace> buildInspectorObjectImpl()
+      const;
+  ~V8StackTraceImpl() override;
+
+  // V8StackTrace implementation.
+  bool isEmpty() const override { return !m_frames.size(); };
+  StringView topSourceURL() const override;
+  int topLineNumber() const override;
+  int topColumnNumber() const override;
+  StringView topScriptId() const override;
+  StringView topFunctionName() const override;
+  std::unique_ptr<protocol::Runtime::API::StackTrace> buildInspectorObject()
+      const override;
+  std::unique_ptr<StringBuffer> toString() const override;
+
+ private:
+  V8StackTraceImpl(int contextGroupId, const String16& description,
+                   std::vector<Frame>& frames,
+                   std::unique_ptr<V8StackTraceImpl> parent);
+
+  int m_contextGroupId;
+  String16 m_description;
+  std::vector<Frame> m_frames;
+  std::unique_ptr<V8StackTraceImpl> m_parent;
+
+  DISALLOW_COPY_AND_ASSIGN(V8StackTraceImpl);
+};
+
+}  // namespace v8_inspector
+
+#endif  // V8_INSPECTOR_V8STACKTRACEIMPL_H_
diff --git a/src/inspector/v8-value-copier.cc b/src/inspector/v8-value-copier.cc
new file mode 100644
index 0000000..09d86b7
--- /dev/null
+++ b/src/inspector/v8-value-copier.cc
@@ -0,0 +1,110 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/inspector/v8-value-copier.h"
+
+namespace v8_inspector {
+
+namespace {
+
+static int kMaxDepth = 20;
+static int kMaxCalls = 1000;
+
+class V8ValueCopier {
+ public:
+  v8::MaybeLocal<v8::Value> copy(v8::Local<v8::Value> value, int depth) {
+    if (++m_calls > kMaxCalls || depth > kMaxDepth)
+      return v8::MaybeLocal<v8::Value>();
+
+    if (value.IsEmpty()) return v8::MaybeLocal<v8::Value>();
+    if (value->IsNull() || value->IsUndefined() || value->IsBoolean() ||
+        value->IsString() || value->IsNumber())
+      return value;
+    if (!value->IsObject()) return v8::MaybeLocal<v8::Value>();
+    v8::Local<v8::Object> object = value.As<v8::Object>();
+    if (object->CreationContext() != m_from) return value;
+
+    if (object->IsArray()) {
+      v8::Local<v8::Array> array = object.As<v8::Array>();
+      v8::Local<v8::Array> result = v8::Array::New(m_isolate, array->Length());
+      if (!result->SetPrototype(m_to, v8::Null(m_isolate)).FromMaybe(false))
+        return v8::MaybeLocal<v8::Value>();
+      for (uint32_t i = 0; i < array->Length(); ++i) {
+        v8::Local<v8::Value> item;
+        if (!array->Get(m_from, i).ToLocal(&item))
+          return v8::MaybeLocal<v8::Value>();
+        v8::Local<v8::Value> copied;
+        if (!copy(item, depth + 1).ToLocal(&copied))
+          return v8::MaybeLocal<v8::Value>();
+        if (!createDataProperty(m_to, result, i, copied).FromMaybe(false))
+          return v8::MaybeLocal<v8::Value>();
+      }
+      return result;
+    }
+
+    v8::Local<v8::Object> result = v8::Object::New(m_isolate);
+    if (!result->SetPrototype(m_to, v8::Null(m_isolate)).FromMaybe(false))
+      return v8::MaybeLocal<v8::Value>();
+    v8::Local<v8::Array> properties;
+    if (!object->GetOwnPropertyNames(m_from).ToLocal(&properties))
+      return v8::MaybeLocal<v8::Value>();
+    for (uint32_t i = 0; i < properties->Length(); ++i) {
+      v8::Local<v8::Value> name;
+      if (!properties->Get(m_from, i).ToLocal(&name) || !name->IsString())
+        return v8::MaybeLocal<v8::Value>();
+      v8::Local<v8::Value> property;
+      if (!object->Get(m_from, name).ToLocal(&property))
+        return v8::MaybeLocal<v8::Value>();
+      v8::Local<v8::Value> copied;
+      if (!copy(property, depth + 1).ToLocal(&copied))
+        return v8::MaybeLocal<v8::Value>();
+      if (!createDataProperty(m_to, result, v8::Local<v8::String>::Cast(name),
+                              copied)
+               .FromMaybe(false))
+        return v8::MaybeLocal<v8::Value>();
+    }
+    return result;
+  }
+
+  v8::Isolate* m_isolate;
+  v8::Local<v8::Context> m_from;
+  v8::Local<v8::Context> m_to;
+  int m_calls;
+};
+
+}  // namespace
+
+v8::MaybeLocal<v8::Value> copyValueFromDebuggerContext(
+    v8::Isolate* isolate, v8::Local<v8::Context> debuggerContext,
+    v8::Local<v8::Context> toContext, v8::Local<v8::Value> value) {
+  V8ValueCopier copier;
+  copier.m_isolate = isolate;
+  copier.m_from = debuggerContext;
+  copier.m_to = toContext;
+  copier.m_calls = 0;
+  return copier.copy(value, 0);
+}
+
+v8::Maybe<bool> createDataProperty(v8::Local<v8::Context> context,
+                                   v8::Local<v8::Object> object,
+                                   v8::Local<v8::Name> key,
+                                   v8::Local<v8::Value> value) {
+  v8::TryCatch tryCatch(context->GetIsolate());
+  v8::Isolate::DisallowJavascriptExecutionScope throwJs(
+      context->GetIsolate(),
+      v8::Isolate::DisallowJavascriptExecutionScope::THROW_ON_FAILURE);
+  return object->CreateDataProperty(context, key, value);
+}
+
+v8::Maybe<bool> createDataProperty(v8::Local<v8::Context> context,
+                                   v8::Local<v8::Array> array, int index,
+                                   v8::Local<v8::Value> value) {
+  v8::TryCatch tryCatch(context->GetIsolate());
+  v8::Isolate::DisallowJavascriptExecutionScope throwJs(
+      context->GetIsolate(),
+      v8::Isolate::DisallowJavascriptExecutionScope::THROW_ON_FAILURE);
+  return array->CreateDataProperty(context, index, value);
+}
+
+}  // namespace v8_inspector
diff --git a/src/inspector/v8-value-copier.h b/src/inspector/v8-value-copier.h
new file mode 100644
index 0000000..c24a564
--- /dev/null
+++ b/src/inspector/v8-value-copier.h
@@ -0,0 +1,24 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_INSPECTOR_V8VALUECOPIER_H_
+#define V8_INSPECTOR_V8VALUECOPIER_H_
+
+#include "include/v8.h"
+
+namespace v8_inspector {
+
+v8::MaybeLocal<v8::Value> copyValueFromDebuggerContext(
+    v8::Isolate*, v8::Local<v8::Context> debuggerContext,
+    v8::Local<v8::Context> toContext, v8::Local<v8::Value>);
+v8::Maybe<bool> createDataProperty(v8::Local<v8::Context>,
+                                   v8::Local<v8::Object>,
+                                   v8::Local<v8::Name> key,
+                                   v8::Local<v8::Value>);
+v8::Maybe<bool> createDataProperty(v8::Local<v8::Context>, v8::Local<v8::Array>,
+                                   int index, v8::Local<v8::Value>);
+
+}  // namespace v8_inspector
+
+#endif  // V8_INSPECTOR_V8VALUECOPIER_H_
diff --git a/src/interface-descriptors.cc b/src/interface-descriptors.cc
index a16cae7..2628b9f 100644
--- a/src/interface-descriptors.cc
+++ b/src/interface-descriptors.cc
@@ -7,41 +7,6 @@
 namespace v8 {
 namespace internal {
 
-namespace {
-// Constructors for common combined semantic and representation types.
-Type* SmiType(Zone* zone) {
-  return Type::Intersect(Type::SignedSmall(), Type::TaggedSigned(), zone);
-}
-
-
-Type* UntaggedIntegral32(Zone* zone) {
-  return Type::Intersect(Type::Signed32(), Type::UntaggedIntegral32(), zone);
-}
-
-
-Type* AnyTagged(Zone* zone) {
-  return Type::Intersect(
-      Type::Any(),
-      Type::Union(Type::TaggedPointer(), Type::TaggedSigned(), zone), zone);
-}
-
-
-Type* ExternalPointer(Zone* zone) {
-  return Type::Intersect(Type::Internal(), Type::UntaggedPointer(), zone);
-}
-}  // namespace
-
-FunctionType* CallInterfaceDescriptor::BuildDefaultFunctionType(
-    Isolate* isolate, int parameter_count) {
-  Zone* zone = isolate->interface_descriptor_zone();
-  FunctionType* function =
-      Type::Function(AnyTagged(zone), Type::Undefined(), parameter_count, zone)
-          ->AsFunction();
-  while (parameter_count-- != 0) {
-    function->InitParameter(parameter_count, AnyTagged(zone));
-  }
-  return function;
-}
 
 void CallInterfaceDescriptorData::InitializePlatformSpecific(
     int register_parameter_count, const Register* registers,
@@ -56,6 +21,22 @@
   }
 }
 
+void CallInterfaceDescriptorData::InitializePlatformIndependent(
+    int parameter_count, int extra_parameter_count,
+    const MachineType* machine_types) {
+  // InterfaceDescriptor owns a copy of the MachineType array.
+  // We only care about parameters, not receiver and result.
+  param_count_ = parameter_count + extra_parameter_count;
+  machine_types_.reset(NewArray<MachineType>(param_count_));
+  for (int i = 0; i < param_count_; i++) {
+    if (machine_types == NULL || i >= parameter_count) {
+      machine_types_[i] = MachineType::AnyTagged();
+    } else {
+      machine_types_[i] = machine_types[i];
+    }
+  }
+}
+
 const char* CallInterfaceDescriptor::DebugName(Isolate* isolate) const {
   CallInterfaceDescriptorData* start = isolate->call_descriptor_data(0);
   size_t index = data_ - start;
@@ -79,15 +60,12 @@
   data->InitializePlatformSpecific(0, nullptr);
 }
 
-FunctionType*
-FastNewFunctionContextDescriptor::BuildCallInterfaceDescriptorFunctionType(
-    Isolate* isolate, int parameter_count) {
-  Zone* zone = isolate->interface_descriptor_zone();
-  FunctionType* function =
-      Type::Function(AnyTagged(zone), Type::Undefined(), 2, zone)->AsFunction();
-  function->InitParameter(0, AnyTagged(zone));
-  function->InitParameter(1, UntaggedIntegral32(zone));
-  return function;
+void FastNewFunctionContextDescriptor::InitializePlatformIndependent(
+    CallInterfaceDescriptorData* data) {
+  MachineType machine_types[] = {MachineType::AnyTagged(),
+                                 MachineType::Int32()};
+  data->InitializePlatformIndependent(arraysize(machine_types), 0,
+                                      machine_types);
 }
 
 void FastNewFunctionContextDescriptor::InitializePlatformSpecific(
@@ -96,33 +74,28 @@
   data->InitializePlatformSpecific(arraysize(registers), registers);
 }
 
-FunctionType* LoadDescriptor::BuildCallInterfaceDescriptorFunctionType(
-    Isolate* isolate, int parameter_count) {
-  Zone* zone = isolate->interface_descriptor_zone();
-  FunctionType* function =
-      Type::Function(AnyTagged(zone), Type::Undefined(), kParameterCount, zone)
-          ->AsFunction();
-  function->InitParameter(kReceiver, AnyTagged(zone));
-  function->InitParameter(kName, AnyTagged(zone));
-  function->InitParameter(kSlot, SmiType(zone));
-  return function;
+void LoadDescriptor::InitializePlatformIndependent(
+    CallInterfaceDescriptorData* data) {
+  // kReceiver, kName, kSlot
+  MachineType machine_types[] = {MachineType::AnyTagged(),
+                                 MachineType::AnyTagged(),
+                                 MachineType::TaggedSigned()};
+  data->InitializePlatformIndependent(arraysize(machine_types), 0,
+                                      machine_types);
 }
 
-
 void LoadDescriptor::InitializePlatformSpecific(
     CallInterfaceDescriptorData* data) {
   Register registers[] = {ReceiverRegister(), NameRegister(), SlotRegister()};
   data->InitializePlatformSpecific(arraysize(registers), registers);
 }
 
-FunctionType* LoadGlobalDescriptor::BuildCallInterfaceDescriptorFunctionType(
-    Isolate* isolate, int parameter_count) {
-  Zone* zone = isolate->interface_descriptor_zone();
-  FunctionType* function =
-      Type::Function(AnyTagged(zone), Type::Undefined(), kParameterCount, zone)
-          ->AsFunction();
-  function->InitParameter(kSlot, SmiType(zone));
-  return function;
+void LoadGlobalDescriptor::InitializePlatformIndependent(
+    CallInterfaceDescriptorData* data) {
+  // kSlot
+  MachineType machine_types[] = {MachineType::TaggedSigned()};
+  data->InitializePlatformIndependent(arraysize(machine_types), 0,
+                                      machine_types);
 }
 
 void LoadGlobalDescriptor::InitializePlatformSpecific(
@@ -131,16 +104,13 @@
   data->InitializePlatformSpecific(arraysize(registers), registers);
 }
 
-FunctionType*
-LoadGlobalWithVectorDescriptor::BuildCallInterfaceDescriptorFunctionType(
-    Isolate* isolate, int parameter_count) {
-  Zone* zone = isolate->interface_descriptor_zone();
-  FunctionType* function =
-      Type::Function(AnyTagged(zone), Type::Undefined(), kParameterCount, zone)
-          ->AsFunction();
-  function->InitParameter(kSlot, SmiType(zone));
-  function->InitParameter(kVector, AnyTagged(zone));
-  return function;
+void LoadGlobalWithVectorDescriptor::InitializePlatformIndependent(
+    CallInterfaceDescriptorData* data) {
+  // kSlot, kVector
+  MachineType machine_types[] = {MachineType::TaggedSigned(),
+                                 MachineType::AnyTagged()};
+  data->InitializePlatformIndependent(arraysize(machine_types), 0,
+                                      machine_types);
 }
 
 void LoadGlobalWithVectorDescriptor::InitializePlatformSpecific(
@@ -150,76 +120,77 @@
   data->InitializePlatformSpecific(arraysize(registers), registers);
 }
 
-FunctionType* StoreDescriptor::BuildCallInterfaceDescriptorFunctionType(
-    Isolate* isolate, int parameter_count) {
-  Zone* zone = isolate->interface_descriptor_zone();
-  FunctionType* function =
-      Type::Function(AnyTagged(zone), Type::Undefined(), kParameterCount, zone)
-          ->AsFunction();
-  function->InitParameter(kReceiver, AnyTagged(zone));
-  function->InitParameter(kName, AnyTagged(zone));
-  function->InitParameter(kValue, AnyTagged(zone));
-  function->InitParameter(kSlot, SmiType(zone));
-  return function;
+void StoreDescriptor::InitializePlatformIndependent(
+    CallInterfaceDescriptorData* data) {
+  // kReceiver, kName, kValue, kSlot
+  MachineType machine_types[] = {
+      MachineType::AnyTagged(), MachineType::AnyTagged(),
+      MachineType::AnyTagged(), MachineType::TaggedSigned()};
+  data->InitializePlatformIndependent(arraysize(machine_types), 0,
+                                      machine_types);
 }
 
 void StoreDescriptor::InitializePlatformSpecific(
     CallInterfaceDescriptorData* data) {
   Register registers[] = {ReceiverRegister(), NameRegister(), ValueRegister(),
                           SlotRegister()};
-  data->InitializePlatformSpecific(arraysize(registers), registers);
-}
 
+  int len = arraysize(registers) - kStackArgumentsCount;
+  data->InitializePlatformSpecific(len, registers);
+}
 
 void StoreTransitionDescriptor::InitializePlatformSpecific(
     CallInterfaceDescriptorData* data) {
-  Register registers[] = {ReceiverRegister(), NameRegister(), ValueRegister(),
-                          MapRegister()};
-
-  data->InitializePlatformSpecific(arraysize(registers), registers);
+  Register registers[] = {
+      ReceiverRegister(), NameRegister(), MapRegister(),
+      ValueRegister(),    SlotRegister(), VectorRegister(),
+  };
+  int len = arraysize(registers) - kStackArgumentsCount;
+  data->InitializePlatformSpecific(len, registers);
 }
 
-
-void VectorStoreTransitionDescriptor::InitializePlatformSpecific(
+void StoreTransitionDescriptor::InitializePlatformIndependent(
     CallInterfaceDescriptorData* data) {
-  if (SlotRegister().is(no_reg)) {
-    Register registers[] = {ReceiverRegister(), NameRegister(), ValueRegister(),
-                            MapRegister(), VectorRegister()};
-    data->InitializePlatformSpecific(arraysize(registers), registers);
-  } else {
-    Register registers[] = {ReceiverRegister(), NameRegister(),
-                            ValueRegister(),    MapRegister(),
-                            SlotRegister(),     VectorRegister()};
-    data->InitializePlatformSpecific(arraysize(registers), registers);
-  }
+  // kReceiver, kName, kMap, kValue, kSlot, kVector
+  MachineType machine_types[] = {
+      MachineType::AnyTagged(),    MachineType::AnyTagged(),
+      MachineType::AnyTagged(),    MachineType::AnyTagged(),
+      MachineType::TaggedSigned(), MachineType::AnyTagged()};
+  data->InitializePlatformIndependent(arraysize(machine_types), 0,
+                                      machine_types);
 }
 
-FunctionType*
-StoreTransitionDescriptor::BuildCallInterfaceDescriptorFunctionType(
-    Isolate* isolate, int parameter_count) {
-  Zone* zone = isolate->interface_descriptor_zone();
-  FunctionType* function =
-      Type::Function(AnyTagged(zone), Type::Undefined(), kParameterCount, zone)
-          ->AsFunction();
-  function->InitParameter(kReceiver, AnyTagged(zone));
-  function->InitParameter(kName, AnyTagged(zone));
-  function->InitParameter(kValue, AnyTagged(zone));
-  function->InitParameter(kMap, AnyTagged(zone));
-  return function;
+void StoreNamedTransitionDescriptor::InitializePlatformIndependent(
+    CallInterfaceDescriptorData* data) {
+  // kReceiver, kFieldOffset, kMap, kValue, kSlot, kVector, kName
+  MachineType machine_types[] = {
+      MachineType::AnyTagged(),    MachineType::TaggedSigned(),
+      MachineType::AnyTagged(),    MachineType::AnyTagged(),
+      MachineType::TaggedSigned(), MachineType::AnyTagged(),
+      MachineType::AnyTagged()};
+  data->InitializePlatformIndependent(arraysize(machine_types), 0,
+                                      machine_types);
 }
 
-FunctionType*
-StoreGlobalViaContextDescriptor::BuildCallInterfaceDescriptorFunctionType(
-    Isolate* isolate, int parameter_count) {
-  Zone* zone = isolate->interface_descriptor_zone();
-  FunctionType* function =
-      Type::Function(AnyTagged(zone), Type::Undefined(), kParameterCount, zone)
-          ->AsFunction();
-  function->InitParameter(kSlot, UntaggedIntegral32(zone));
-  function->InitParameter(kValue, AnyTagged(zone));
-  return function;
+void StoreNamedTransitionDescriptor::InitializePlatformSpecific(
+    CallInterfaceDescriptorData* data) {
+  Register registers[] = {
+      ReceiverRegister(), FieldOffsetRegister(), MapRegister(),
+      ValueRegister(),    SlotRegister(),        VectorRegister(),
+      NameRegister(),
+  };
+  int len = arraysize(registers) - kStackArgumentsCount;
+  data->InitializePlatformSpecific(len, registers);
 }
 
+void StoreGlobalViaContextDescriptor::InitializePlatformIndependent(
+    CallInterfaceDescriptorData* data) {
+  // kSlot, kValue
+  MachineType machine_types[] = {MachineType::Int32(),
+                                 MachineType::AnyTagged()};
+  data->InitializePlatformIndependent(arraysize(machine_types), 0,
+                                      machine_types);
+}
 
 void StoreGlobalViaContextDescriptor::InitializePlatformSpecific(
     CallInterfaceDescriptorData* data) {
@@ -252,18 +223,14 @@
   data->InitializePlatformSpecific(arraysize(registers), registers);
 }
 
-FunctionType*
-LoadWithVectorDescriptor::BuildCallInterfaceDescriptorFunctionType(
-    Isolate* isolate, int parameter_count) {
-  Zone* zone = isolate->interface_descriptor_zone();
-  FunctionType* function =
-      Type::Function(AnyTagged(zone), Type::Undefined(), kParameterCount, zone)
-          ->AsFunction();
-  function->InitParameter(kReceiver, AnyTagged(zone));
-  function->InitParameter(kName, AnyTagged(zone));
-  function->InitParameter(kSlot, SmiType(zone));
-  function->InitParameter(kVector, AnyTagged(zone));
-  return function;
+void LoadWithVectorDescriptor::InitializePlatformIndependent(
+    CallInterfaceDescriptorData* data) {
+  // kReceiver, kName, kSlot, kVector
+  MachineType machine_types[] = {
+      MachineType::AnyTagged(), MachineType::AnyTagged(),
+      MachineType::TaggedSigned(), MachineType::AnyTagged()};
+  data->InitializePlatformIndependent(arraysize(machine_types), 0,
+                                      machine_types);
 }
 
 
@@ -274,63 +241,33 @@
   data->InitializePlatformSpecific(arraysize(registers), registers);
 }
 
-FunctionType*
-VectorStoreTransitionDescriptor::BuildCallInterfaceDescriptorFunctionType(
-    Isolate* isolate, int parameter_count) {
-  Zone* zone = isolate->interface_descriptor_zone();
-  bool has_slot = !VectorStoreTransitionDescriptor::SlotRegister().is(no_reg);
-  int arg_count = has_slot ? 6 : 5;
-  FunctionType* function =
-      Type::Function(AnyTagged(zone), Type::Undefined(), arg_count, zone)
-          ->AsFunction();
-  int index = 0;
-  // TODO(ishell): use ParameterIndices here
-  function->InitParameter(index++, AnyTagged(zone));  // receiver
-  function->InitParameter(index++, AnyTagged(zone));  // name
-  function->InitParameter(index++, AnyTagged(zone));  // value
-  function->InitParameter(index++, AnyTagged(zone));  // map
-  if (has_slot) {
-    function->InitParameter(index++, SmiType(zone));  // slot
-  }
-  function->InitParameter(index++, AnyTagged(zone));  // vector
-  return function;
-}
-
-FunctionType*
-StoreWithVectorDescriptor::BuildCallInterfaceDescriptorFunctionType(
-    Isolate* isolate, int parameter_count) {
-  Zone* zone = isolate->interface_descriptor_zone();
-  FunctionType* function =
-      Type::Function(AnyTagged(zone), Type::Undefined(), kParameterCount, zone)
-          ->AsFunction();
-  function->InitParameter(kReceiver, AnyTagged(zone));
-  function->InitParameter(kName, AnyTagged(zone));
-  function->InitParameter(kValue, AnyTagged(zone));
-  function->InitParameter(kSlot, SmiType(zone));
-  function->InitParameter(kVector, AnyTagged(zone));
-  return function;
+void StoreWithVectorDescriptor::InitializePlatformIndependent(
+    CallInterfaceDescriptorData* data) {
+  // kReceiver, kName, kValue, kSlot, kVector
+  MachineType machine_types[] = {
+      MachineType::AnyTagged(), MachineType::AnyTagged(),
+      MachineType::AnyTagged(), MachineType::TaggedSigned(),
+      MachineType::AnyTagged()};
+  data->InitializePlatformIndependent(arraysize(machine_types), 0,
+                                      machine_types);
 }
 
 void StoreWithVectorDescriptor::InitializePlatformSpecific(
     CallInterfaceDescriptorData* data) {
   Register registers[] = {ReceiverRegister(), NameRegister(), ValueRegister(),
                           SlotRegister(), VectorRegister()};
-  data->InitializePlatformSpecific(arraysize(registers), registers);
+  int len = arraysize(registers) - kStackArgumentsCount;
+  data->InitializePlatformSpecific(len, registers);
 }
 
-FunctionType*
-BinaryOpWithVectorDescriptor::BuildCallInterfaceDescriptorFunctionType(
-    Isolate* isolate, int parameter_count) {
-  DCHECK_EQ(parameter_count, kParameterCount);
-  Zone* zone = isolate->interface_descriptor_zone();
-  FunctionType* function =
-      Type::Function(AnyTagged(zone), Type::Undefined(), kParameterCount, zone)
-          ->AsFunction();
-  function->InitParameter(kLeft, AnyTagged(zone));
-  function->InitParameter(kRight, AnyTagged(zone));
-  function->InitParameter(kSlot, UntaggedIntegral32(zone));
-  function->InitParameter(kVector, AnyTagged(zone));
-  return function;
+void BinaryOpWithVectorDescriptor::InitializePlatformIndependent(
+    CallInterfaceDescriptorData* data) {
+  // kLeft, kRight, kSlot, kVector
+  MachineType machine_types[] = {MachineType::AnyTagged(),
+                                 MachineType::AnyTagged(), MachineType::Int32(),
+                                 MachineType::AnyTagged()};
+  data->InitializePlatformIndependent(arraysize(machine_types), 0,
+                                      machine_types);
 }
 
 const Register ApiGetterDescriptor::ReceiverRegister() {
@@ -349,291 +286,204 @@
   data->InitializePlatformSpecific(0, nullptr);
 }
 
-CallInterfaceDescriptor OnStackArgsDescriptorBase::ForArgs(
-    Isolate* isolate, int parameter_count) {
-  switch (parameter_count) {
-    case 1:
-      return OnStackWith1ArgsDescriptor(isolate);
-    case 2:
-      return OnStackWith2ArgsDescriptor(isolate);
-    case 3:
-      return OnStackWith3ArgsDescriptor(isolate);
-    case 4:
-      return OnStackWith4ArgsDescriptor(isolate);
-    case 5:
-      return OnStackWith5ArgsDescriptor(isolate);
-    case 6:
-      return OnStackWith6ArgsDescriptor(isolate);
-    case 7:
-      return OnStackWith7ArgsDescriptor(isolate);
-    default:
-      UNREACHABLE();
-      return VoidDescriptor(isolate);
-  }
-}
-
-FunctionType*
-OnStackArgsDescriptorBase::BuildCallInterfaceDescriptorFunctionTypeWithArg(
-    Isolate* isolate, int register_parameter_count, int parameter_count) {
-  DCHECK_EQ(0, register_parameter_count);
-  DCHECK_GT(parameter_count, 0);
-  Zone* zone = isolate->interface_descriptor_zone();
-  FunctionType* function =
-      Type::Function(AnyTagged(zone), AnyTagged(zone), parameter_count, zone)
-          ->AsFunction();
-  for (int i = 0; i < parameter_count; i++) {
-    function->InitParameter(i, AnyTagged(zone));
-  }
-  return function;
-}
-
-void OnStackArgsDescriptorBase::InitializePlatformSpecific(
-    CallInterfaceDescriptorData* data) {
-  data->InitializePlatformSpecific(0, nullptr);
-}
-
 void GrowArrayElementsDescriptor::InitializePlatformSpecific(
     CallInterfaceDescriptorData* data) {
   Register registers[] = {ObjectRegister(), KeyRegister()};
   data->InitializePlatformSpecific(arraysize(registers), registers);
 }
 
-FunctionType*
-VarArgFunctionDescriptor::BuildCallInterfaceDescriptorFunctionType(
-    Isolate* isolate, int parameter_count) {
-  Zone* zone = isolate->interface_descriptor_zone();
-  FunctionType* function =
-      Type::Function(AnyTagged(zone), AnyTagged(zone), kParameterCount, zone)
-          ->AsFunction();
-  function->InitParameter(kActualArgumentsCount, UntaggedIntegral32(zone));
-  return function;
+void VarArgFunctionDescriptor::InitializePlatformIndependent(
+    CallInterfaceDescriptorData* data) {
+  // kActualArgumentsCount
+  MachineType machine_types[] = {MachineType::Int32()};
+  data->InitializePlatformIndependent(arraysize(machine_types), 0,
+                                      machine_types);
 }
 
-FunctionType*
-FastCloneRegExpDescriptor::BuildCallInterfaceDescriptorFunctionType(
-    Isolate* isolate, int parameter_count) {
-  Zone* zone = isolate->interface_descriptor_zone();
-  FunctionType* function =
-      Type::Function(AnyTagged(zone), Type::Undefined(), kParameterCount, zone)
-          ->AsFunction();
-  function->InitParameter(kClosure, AnyTagged(zone));
-  function->InitParameter(kLiteralIndex, SmiType(zone));
-  function->InitParameter(kPattern, AnyTagged(zone));
-  function->InitParameter(kFlags, AnyTagged(zone));
-  return function;
+void FastCloneRegExpDescriptor::InitializePlatformIndependent(
+    CallInterfaceDescriptorData* data) {
+  // kClosure, kLiteralIndex, kPattern, kFlags
+  MachineType machine_types[] = {
+      MachineType::AnyTagged(), MachineType::TaggedSigned(),
+      MachineType::AnyTagged(), MachineType::AnyTagged()};
+  data->InitializePlatformIndependent(arraysize(machine_types), 0,
+                                      machine_types);
 }
 
-FunctionType*
-FastCloneShallowArrayDescriptor::BuildCallInterfaceDescriptorFunctionType(
-    Isolate* isolate, int parameter_count) {
-  Zone* zone = isolate->interface_descriptor_zone();
-  FunctionType* function =
-      Type::Function(AnyTagged(zone), Type::Undefined(), kParameterCount, zone)
-          ->AsFunction();
-  function->InitParameter(kClosure, AnyTagged(zone));
-  function->InitParameter(kLiteralIndex, SmiType(zone));
-  function->InitParameter(kConstantElements, AnyTagged(zone));
-  return function;
+void FastCloneShallowArrayDescriptor::InitializePlatformIndependent(
+    CallInterfaceDescriptorData* data) {
+  // kClosure, kLiteralIndex, kConstantElements
+  MachineType machine_types[] = {MachineType::AnyTagged(),
+                                 MachineType::TaggedSigned(),
+                                 MachineType::AnyTagged()};
+  data->InitializePlatformIndependent(arraysize(machine_types), 0,
+                                      machine_types);
 }
 
-FunctionType*
-CreateAllocationSiteDescriptor::BuildCallInterfaceDescriptorFunctionType(
-    Isolate* isolate, int parameter_count) {
-  Zone* zone = isolate->interface_descriptor_zone();
-  FunctionType* function =
-      Type::Function(AnyTagged(zone), Type::Undefined(), kParameterCount, zone)
-          ->AsFunction();
-  function->InitParameter(kVector, AnyTagged(zone));
-  function->InitParameter(kSlot, SmiType(zone));
-  return function;
+void CreateAllocationSiteDescriptor::InitializePlatformIndependent(
+    CallInterfaceDescriptorData* data) {
+  // kVector, kSlot
+  MachineType machine_types[] = {MachineType::AnyTagged(),
+                                 MachineType::TaggedSigned()};
+  data->InitializePlatformIndependent(arraysize(machine_types), 0,
+                                      machine_types);
 }
 
-FunctionType*
-CreateWeakCellDescriptor::BuildCallInterfaceDescriptorFunctionType(
-    Isolate* isolate, int parameter_count) {
-  Zone* zone = isolate->interface_descriptor_zone();
-  FunctionType* function =
-      Type::Function(AnyTagged(zone), Type::Undefined(), kParameterCount, zone)
-          ->AsFunction();
-  function->InitParameter(kVector, AnyTagged(zone));
-  function->InitParameter(kSlot, SmiType(zone));
-  function->InitParameter(kValue, AnyTagged(zone));
-  return function;
+void CreateWeakCellDescriptor::InitializePlatformIndependent(
+    CallInterfaceDescriptorData* data) {
+  // kVector, kSlot, kValue
+  MachineType machine_types[] = {MachineType::AnyTagged(),
+                                 MachineType::TaggedSigned(),
+                                 MachineType::AnyTagged()};
+  data->InitializePlatformIndependent(arraysize(machine_types), 0,
+                                      machine_types);
 }
 
-FunctionType*
-CallTrampolineDescriptor::BuildCallInterfaceDescriptorFunctionType(
-    Isolate* isolate, int parameter_count) {
-  Zone* zone = isolate->interface_descriptor_zone();
-  FunctionType* function =
-      Type::Function(AnyTagged(zone), Type::Undefined(), kParameterCount, zone)
-          ->AsFunction();
-  function->InitParameter(kFunction, AnyTagged(zone));
-  function->InitParameter(kActualArgumentsCount, UntaggedIntegral32(zone));
-  return function;
+void CallTrampolineDescriptor::InitializePlatformIndependent(
+    CallInterfaceDescriptorData* data) {
+  // kFunction, kActualArgumentsCount
+  MachineType machine_types[] = {MachineType::AnyTagged(),
+                                 MachineType::Int32()};
+  data->InitializePlatformIndependent(arraysize(machine_types), 0,
+                                      machine_types);
 }
 
-FunctionType* ConstructStubDescriptor::BuildCallInterfaceDescriptorFunctionType(
-    Isolate* isolate, int parameter_count) {
-  Zone* zone = isolate->interface_descriptor_zone();
-  FunctionType* function =
-      Type::Function(AnyTagged(zone), Type::Undefined(), kParameterCount, zone)
-          ->AsFunction();
-  function->InitParameter(kFunction, AnyTagged(zone));
-  function->InitParameter(kNewTarget, AnyTagged(zone));
-  function->InitParameter(kActualArgumentsCount, UntaggedIntegral32(zone));
-  function->InitParameter(kAllocationSite, AnyTagged(zone));
-  return function;
+void ConstructStubDescriptor::InitializePlatformIndependent(
+    CallInterfaceDescriptorData* data) {
+  // kFunction, kNewTarget, kActualArgumentsCount, kAllocationSite
+  MachineType machine_types[] = {MachineType::AnyTagged(),
+                                 MachineType::AnyTagged(), MachineType::Int32(),
+                                 MachineType::AnyTagged()};
+  data->InitializePlatformIndependent(arraysize(machine_types), 0,
+                                      machine_types);
 }
 
-FunctionType*
-ConstructTrampolineDescriptor::BuildCallInterfaceDescriptorFunctionType(
-    Isolate* isolate, int parameter_count) {
-  Zone* zone = isolate->interface_descriptor_zone();
-  FunctionType* function =
-      Type::Function(AnyTagged(zone), Type::Undefined(), kParameterCount, zone)
-          ->AsFunction();
-  function->InitParameter(kFunction, AnyTagged(zone));
-  function->InitParameter(kNewTarget, AnyTagged(zone));
-  function->InitParameter(kActualArgumentsCount, UntaggedIntegral32(zone));
-  return function;
+void ConstructTrampolineDescriptor::InitializePlatformIndependent(
+    CallInterfaceDescriptorData* data) {
+  // kFunction, kNewTarget, kActualArgumentsCount
+  MachineType machine_types[] = {
+      MachineType::AnyTagged(), MachineType::AnyTagged(), MachineType::Int32()};
+  data->InitializePlatformIndependent(arraysize(machine_types), 0,
+                                      machine_types);
 }
 
-FunctionType*
-CallFunctionWithFeedbackDescriptor::BuildCallInterfaceDescriptorFunctionType(
-    Isolate* isolate, int parameter_count) {
-  Zone* zone = isolate->interface_descriptor_zone();
-  FunctionType* function =
-      Type::Function(AnyTagged(zone), Type::Undefined(), kParameterCount, zone)
-          ->AsFunction();
-  function->InitParameter(kFunction, Type::Receiver());
-  function->InitParameter(kSlot, SmiType(zone));
-  return function;
+void CallFunctionWithFeedbackDescriptor::InitializePlatformIndependent(
+    CallInterfaceDescriptorData* data) {
+  // kFunction, kSlot
+  MachineType machine_types[] = {MachineType::AnyTagged(),
+                                 MachineType::TaggedSigned()};
+  data->InitializePlatformIndependent(arraysize(machine_types), 0,
+                                      machine_types);
 }
 
-FunctionType* CallFunctionWithFeedbackAndVectorDescriptor::
-    BuildCallInterfaceDescriptorFunctionType(Isolate* isolate,
-                                             int parameter_count) {
-  Zone* zone = isolate->interface_descriptor_zone();
-  FunctionType* function =
-      Type::Function(AnyTagged(zone), Type::Undefined(), kParameterCount, zone)
-          ->AsFunction();
-  function->InitParameter(kFunction, Type::Receiver());
-  function->InitParameter(kSlot, SmiType(zone));
-  function->InitParameter(kVector, AnyTagged(zone));
-  return function;
+void CallFunctionWithFeedbackAndVectorDescriptor::InitializePlatformIndependent(
+    CallInterfaceDescriptorData* data) {
+  // kFunction, kSlot, kVector
+  MachineType machine_types[] = {MachineType::TaggedPointer(),
+                                 MachineType::TaggedSigned(),
+                                 MachineType::AnyTagged()};
+  data->InitializePlatformIndependent(arraysize(machine_types), 0,
+                                      machine_types);
 }
 
-FunctionType*
-ArrayNoArgumentConstructorDescriptor::BuildCallInterfaceDescriptorFunctionType(
-    Isolate* isolate, int parameter_count) {
-  Zone* zone = isolate->interface_descriptor_zone();
-  FunctionType* function =
-      Type::Function(AnyTagged(zone), Type::Undefined(), kParameterCount, zone)
-          ->AsFunction();
-  function->InitParameter(kFunction, Type::Receiver());
-  function->InitParameter(kAllocationSite, AnyTagged(zone));
-  function->InitParameter(kActualArgumentsCount, UntaggedIntegral32(zone));
-  function->InitParameter(kFunctionParameter, AnyTagged(zone));
-  return function;
+void ArrayNoArgumentConstructorDescriptor::InitializePlatformIndependent(
+    CallInterfaceDescriptorData* data) {
+  // kFunction, kAllocationSite, kActualArgumentsCount, kFunctionParameter
+  MachineType machine_types[] = {MachineType::TaggedPointer(),
+                                 MachineType::AnyTagged(), MachineType::Int32(),
+                                 MachineType::AnyTagged()};
+  data->InitializePlatformIndependent(arraysize(machine_types), 0,
+                                      machine_types);
 }
 
-FunctionType* ArraySingleArgumentConstructorDescriptor::
-    BuildCallInterfaceDescriptorFunctionType(Isolate* isolate,
-                                             int parameter_count) {
-  Zone* zone = isolate->interface_descriptor_zone();
-  FunctionType* function =
-      Type::Function(AnyTagged(zone), Type::Undefined(), kParameterCount, zone)
-          ->AsFunction();
-  function->InitParameter(kFunction, Type::Receiver());
-  function->InitParameter(kAllocationSite, AnyTagged(zone));
-  function->InitParameter(kActualArgumentsCount, UntaggedIntegral32(zone));
-  function->InitParameter(kFunctionParameter, AnyTagged(zone));
-  function->InitParameter(kArraySizeSmiParameter, AnyTagged(zone));
-  return function;
+void ArraySingleArgumentConstructorDescriptor::InitializePlatformIndependent(
+    CallInterfaceDescriptorData* data) {
+  // kFunction, kAllocationSite, kActualArgumentsCount, kFunctionParameter,
+  // kArraySizeSmiParameter
+  MachineType machine_types[] = {
+      MachineType::TaggedPointer(), MachineType::AnyTagged(),
+      MachineType::Int32(), MachineType::AnyTagged(), MachineType::AnyTagged()};
+  data->InitializePlatformIndependent(arraysize(machine_types), 0,
+                                      machine_types);
 }
 
-FunctionType*
-ArrayNArgumentsConstructorDescriptor::BuildCallInterfaceDescriptorFunctionType(
-    Isolate* isolate, int parameter_count) {
-  Zone* zone = isolate->interface_descriptor_zone();
-  FunctionType* function =
-      Type::Function(AnyTagged(zone), Type::Undefined(), kParameterCount, zone)
-          ->AsFunction();
-  function->InitParameter(kFunction, Type::Receiver());
-  function->InitParameter(kAllocationSite, AnyTagged(zone));
-  function->InitParameter(kActualArgumentsCount, UntaggedIntegral32(zone));
-  return function;
+void ArrayNArgumentsConstructorDescriptor::InitializePlatformIndependent(
+    CallInterfaceDescriptorData* data) {
+  // kFunction, kAllocationSite, kActualArgumentsCount
+  MachineType machine_types[] = {MachineType::TaggedPointer(),
+                                 MachineType::AnyTagged(),
+                                 MachineType::Int32()};
+  data->InitializePlatformIndependent(arraysize(machine_types), 0,
+                                      machine_types);
 }
 
-FunctionType*
-ArgumentAdaptorDescriptor::BuildCallInterfaceDescriptorFunctionType(
-    Isolate* isolate, int parameter_count) {
-  Zone* zone = isolate->interface_descriptor_zone();
-  FunctionType* function =
-      Type::Function(AnyTagged(zone), Type::Undefined(), kParameterCount, zone)
-          ->AsFunction();
-  function->InitParameter(kFunction, Type::Receiver());
-  function->InitParameter(kNewTarget, AnyTagged(zone));
-  function->InitParameter(kActualArgumentsCount, UntaggedIntegral32(zone));
-  function->InitParameter(kExpectedArgumentsCount, UntaggedIntegral32(zone));
-  return function;
+void ArgumentAdaptorDescriptor::InitializePlatformIndependent(
+    CallInterfaceDescriptorData* data) {
+  // kFunction, kNewTarget, kActualArgumentsCount, kExpectedArgumentsCount
+  MachineType machine_types[] = {MachineType::TaggedPointer(),
+                                 MachineType::AnyTagged(), MachineType::Int32(),
+                                 MachineType::Int32()};
+  data->InitializePlatformIndependent(arraysize(machine_types), 0,
+                                      machine_types);
 }
 
-CallInterfaceDescriptor ApiCallbackDescriptorBase::ForArgs(Isolate* isolate,
-                                                           int argc) {
-  switch (argc) {
-    case 0:
-      return ApiCallbackWith0ArgsDescriptor(isolate);
-    case 1:
-      return ApiCallbackWith1ArgsDescriptor(isolate);
-    case 2:
-      return ApiCallbackWith2ArgsDescriptor(isolate);
-    case 3:
-      return ApiCallbackWith3ArgsDescriptor(isolate);
-    case 4:
-      return ApiCallbackWith4ArgsDescriptor(isolate);
-    case 5:
-      return ApiCallbackWith5ArgsDescriptor(isolate);
-    case 6:
-      return ApiCallbackWith6ArgsDescriptor(isolate);
-    case 7:
-      return ApiCallbackWith7ArgsDescriptor(isolate);
-    default:
-      UNREACHABLE();
-      return VoidDescriptor(isolate);
-  }
+void ApiCallbackDescriptor::InitializePlatformIndependent(
+    CallInterfaceDescriptorData* data) {
+  // kFunction, kCallData, kHolder, kApiFunctionAddress
+  MachineType machine_types[] = {
+      MachineType::AnyTagged(), MachineType::AnyTagged(),
+      MachineType::AnyTagged(), MachineType::Pointer()};
+  data->InitializePlatformIndependent(arraysize(machine_types), 0,
+                                      machine_types);
 }
 
-FunctionType*
-ApiCallbackDescriptorBase::BuildCallInterfaceDescriptorFunctionTypeWithArg(
-    Isolate* isolate, int parameter_count, int argc) {
-  Zone* zone = isolate->interface_descriptor_zone();
-  FunctionType* function = Type::Function(AnyTagged(zone), Type::Undefined(),
-                                          kParameterCount + argc, zone)
-                               ->AsFunction();
-  function->InitParameter(kFunction, AnyTagged(zone));
-  function->InitParameter(kCallData, AnyTagged(zone));
-  function->InitParameter(kHolder, AnyTagged(zone));
-  function->InitParameter(kApiFunctionAddress, ExternalPointer(zone));
-  for (int i = 0; i < argc; i++) {
-    function->InitParameter(i, AnyTagged(zone));
-  }
-  return function;
+void InterpreterDispatchDescriptor::InitializePlatformIndependent(
+    CallInterfaceDescriptorData* data) {
+  // kAccumulator, kBytecodeOffset, kBytecodeArray, kDispatchTable
+  MachineType machine_types[] = {
+      MachineType::AnyTagged(), MachineType::IntPtr(), MachineType::AnyTagged(),
+      MachineType::AnyTagged()};
+  data->InitializePlatformIndependent(arraysize(machine_types), 0,
+                                      machine_types);
 }
 
-FunctionType*
-InterpreterDispatchDescriptor::BuildCallInterfaceDescriptorFunctionType(
-    Isolate* isolate, int parameter_count) {
-  Zone* zone = isolate->interface_descriptor_zone();
-  FunctionType* function =
-      Type::Function(AnyTagged(zone), Type::Undefined(), kParameterCount, zone)
-          ->AsFunction();
-  function->InitParameter(kAccumulator, AnyTagged(zone));
-  function->InitParameter(kBytecodeOffset, UntaggedIntegral32(zone));
-  function->InitParameter(kBytecodeArray, AnyTagged(zone));
-  function->InitParameter(kDispatchTable, AnyTagged(zone));
-  return function;
+void InterpreterPushArgsAndCallDescriptor::InitializePlatformIndependent(
+    CallInterfaceDescriptorData* data) {
+  // kNumberOfArguments, kFirstArgument, kFunction
+  MachineType machine_types[] = {MachineType::Int32(), MachineType::Pointer(),
+                                 MachineType::AnyTagged()};
+  data->InitializePlatformIndependent(arraysize(machine_types), 0,
+                                      machine_types);
+}
+
+void InterpreterPushArgsAndConstructDescriptor::InitializePlatformIndependent(
+    CallInterfaceDescriptorData* data) {
+  // kNumberOfArguments, kNewTarget, kConstructor, kFeedbackElement,
+  // kFirstArgument
+  MachineType machine_types[] = {
+      MachineType::Int32(), MachineType::AnyTagged(), MachineType::AnyTagged(),
+      MachineType::AnyTagged(), MachineType::Pointer()};
+  data->InitializePlatformIndependent(arraysize(machine_types), 0,
+                                      machine_types);
+}
+
+void InterpreterPushArgsAndConstructArrayDescriptor::
+    InitializePlatformIndependent(CallInterfaceDescriptorData* data) {
+  // kNumberOfArguments, kFunction, kFeedbackElement, kFirstArgument
+  MachineType machine_types[] = {MachineType::Int32(), MachineType::AnyTagged(),
+                                 MachineType::AnyTagged(),
+                                 MachineType::Pointer()};
+  data->InitializePlatformIndependent(arraysize(machine_types), 0,
+                                      machine_types);
+}
+
+void InterpreterCEntryDescriptor::InitializePlatformIndependent(
+    CallInterfaceDescriptorData* data) {
+  // kNumberOfArguments, kFirstArgument, kFunctionEntry
+  MachineType machine_types[] = {MachineType::Int32(), MachineType::Pointer(),
+                                 MachineType::Pointer()};
+  data->InitializePlatformIndependent(arraysize(machine_types), 0,
+                                      machine_types);
 }
 
 }  // namespace internal
diff --git a/src/interface-descriptors.h b/src/interface-descriptors.h
index af59bdb..09dc377 100644
--- a/src/interface-descriptors.h
+++ b/src/interface-descriptors.h
@@ -15,137 +15,130 @@
 
 class PlatformInterfaceDescriptor;
 
-#define INTERFACE_DESCRIPTOR_LIST(V)   \
-  V(Void)                              \
-  V(ContextOnly)                       \
-  V(OnStackWith1Args)                  \
-  V(OnStackWith2Args)                  \
-  V(OnStackWith3Args)                  \
-  V(OnStackWith4Args)                  \
-  V(OnStackWith5Args)                  \
-  V(OnStackWith6Args)                  \
-  V(OnStackWith7Args)                  \
-  V(Load)                              \
-  V(LoadWithVector)                    \
-  V(LoadGlobal)                        \
-  V(LoadGlobalWithVector)              \
-  V(Store)                             \
-  V(StoreWithVector)                   \
-  V(StoreTransition)                   \
-  V(VectorStoreTransition)             \
-  V(VarArgFunction)                    \
-  V(FastNewClosure)                    \
-  V(FastNewFunctionContext)            \
-  V(FastNewObject)                     \
-  V(FastNewRestParameter)              \
-  V(FastNewSloppyArguments)            \
-  V(FastNewStrictArguments)            \
-  V(TypeConversion)                    \
-  V(Typeof)                            \
-  V(FastCloneRegExp)                   \
-  V(FastCloneShallowArray)             \
-  V(FastCloneShallowObject)            \
-  V(CreateAllocationSite)              \
-  V(CreateWeakCell)                    \
-  V(CallFunction)                      \
-  V(CallFunctionWithFeedback)          \
-  V(CallFunctionWithFeedbackAndVector) \
-  V(CallConstruct)                     \
-  V(CallTrampoline)                    \
-  V(ConstructStub)                     \
-  V(ConstructTrampoline)               \
-  V(RegExpConstructResult)             \
-  V(CopyFastSmiOrObjectElements)       \
-  V(TransitionElementsKind)            \
-  V(AllocateHeapNumber)                \
-  V(AllocateFloat32x4)                 \
-  V(AllocateInt32x4)                   \
-  V(AllocateUint32x4)                  \
-  V(AllocateBool32x4)                  \
-  V(AllocateInt16x8)                   \
-  V(AllocateUint16x8)                  \
-  V(AllocateBool16x8)                  \
-  V(AllocateInt8x16)                   \
-  V(AllocateUint8x16)                  \
-  V(AllocateBool8x16)                  \
-  V(ArrayNoArgumentConstructor)        \
-  V(ArraySingleArgumentConstructor)    \
-  V(ArrayNArgumentsConstructor)        \
-  V(Compare)                           \
-  V(BinaryOp)                          \
-  V(BinaryOpWithAllocationSite)        \
-  V(BinaryOpWithVector)                \
-  V(CountOp)                           \
-  V(StringAdd)                         \
-  V(StringCompare)                     \
-  V(Keyed)                             \
-  V(Named)                             \
-  V(HasProperty)                       \
-  V(ForInFilter)                       \
-  V(GetProperty)                       \
-  V(CallHandler)                       \
-  V(ArgumentAdaptor)                   \
-  V(ApiCallbackWith0Args)              \
-  V(ApiCallbackWith1Args)              \
-  V(ApiCallbackWith2Args)              \
-  V(ApiCallbackWith3Args)              \
-  V(ApiCallbackWith4Args)              \
-  V(ApiCallbackWith5Args)              \
-  V(ApiCallbackWith6Args)              \
-  V(ApiCallbackWith7Args)              \
-  V(ApiGetter)                         \
-  V(StoreGlobalViaContext)             \
-  V(MathPowTagged)                     \
-  V(MathPowInteger)                    \
-  V(GrowArrayElements)                 \
-  V(InterpreterDispatch)               \
-  V(InterpreterPushArgsAndCall)        \
-  V(InterpreterPushArgsAndConstruct)   \
-  V(InterpreterCEntry)                 \
+#define INTERFACE_DESCRIPTOR_LIST(V)      \
+  V(Void)                                 \
+  V(ContextOnly)                          \
+  V(Load)                                 \
+  V(LoadWithVector)                       \
+  V(LoadGlobal)                           \
+  V(LoadGlobalWithVector)                 \
+  V(Store)                                \
+  V(StoreWithVector)                      \
+  V(StoreNamedTransition)                 \
+  V(StoreTransition)                      \
+  V(VarArgFunction)                       \
+  V(FastNewClosure)                       \
+  V(FastNewFunctionContext)               \
+  V(FastNewObject)                        \
+  V(FastNewRestParameter)                 \
+  V(FastNewSloppyArguments)               \
+  V(FastNewStrictArguments)               \
+  V(TypeConversion)                       \
+  V(Typeof)                               \
+  V(FastCloneRegExp)                      \
+  V(FastCloneShallowArray)                \
+  V(FastCloneShallowObject)               \
+  V(CreateAllocationSite)                 \
+  V(CreateWeakCell)                       \
+  V(CallFunction)                         \
+  V(CallFunctionWithFeedback)             \
+  V(CallFunctionWithFeedbackAndVector)    \
+  V(CallConstruct)                        \
+  V(CallTrampoline)                       \
+  V(ConstructStub)                        \
+  V(ConstructTrampoline)                  \
+  V(RegExpExec)                           \
+  V(RegExpConstructResult)                \
+  V(CopyFastSmiOrObjectElements)          \
+  V(TransitionElementsKind)               \
+  V(AllocateHeapNumber)                   \
+  V(AllocateFloat32x4)                    \
+  V(AllocateInt32x4)                      \
+  V(AllocateUint32x4)                     \
+  V(AllocateBool32x4)                     \
+  V(AllocateInt16x8)                      \
+  V(AllocateUint16x8)                     \
+  V(AllocateBool16x8)                     \
+  V(AllocateInt8x16)                      \
+  V(AllocateUint8x16)                     \
+  V(AllocateBool8x16)                     \
+  V(ArrayNoArgumentConstructor)           \
+  V(ArraySingleArgumentConstructor)       \
+  V(ArrayNArgumentsConstructor)           \
+  V(Compare)                              \
+  V(BinaryOp)                             \
+  V(BinaryOpWithAllocationSite)           \
+  V(BinaryOpWithVector)                   \
+  V(CountOp)                              \
+  V(StringAdd)                            \
+  V(StringCompare)                        \
+  V(SubString)                            \
+  V(Keyed)                                \
+  V(Named)                                \
+  V(HasProperty)                          \
+  V(ForInFilter)                          \
+  V(GetProperty)                          \
+  V(CallHandler)                          \
+  V(ArgumentAdaptor)                      \
+  V(ApiCallback)                          \
+  V(ApiGetter)                            \
+  V(StoreGlobalViaContext)                \
+  V(MathPowTagged)                        \
+  V(MathPowInteger)                       \
+  V(GrowArrayElements)                    \
+  V(InterpreterDispatch)                  \
+  V(InterpreterPushArgsAndCall)           \
+  V(InterpreterPushArgsAndConstruct)      \
+  V(InterpreterPushArgsAndConstructArray) \
+  V(InterpreterCEntry)                    \
   V(ResumeGenerator)
 
 class CallInterfaceDescriptorData {
  public:
-  CallInterfaceDescriptorData()
-      : register_param_count_(-1), function_type_(nullptr) {}
+  CallInterfaceDescriptorData() : register_param_count_(-1), param_count_(-1) {}
 
   // A copy of the passed in registers and param_representations is made
   // and owned by the CallInterfaceDescriptorData.
 
-  void InitializePlatformIndependent(FunctionType* function_type) {
-    function_type_ = function_type;
-  }
-
-  // TODO(mvstanton): Instead of taking parallel arrays register and
-  // param_representations, how about a struct that puts the representation
-  // and register side by side (eg, RegRep(r1, Representation::Tagged()).
-  // The same should go for the CodeStubDescriptor class.
   void InitializePlatformSpecific(
       int register_parameter_count, const Register* registers,
       PlatformInterfaceDescriptor* platform_descriptor = NULL);
 
-  bool IsInitialized() const { return register_param_count_ >= 0; }
+  // if machine_types is null, then an array of size
+  // (register_parameter_count + extra_parameter_count) will be created
+  // with MachineType::AnyTagged() for each member.
+  //
+  // if machine_types is not null, then it should be of the size
+  // register_parameter_count. Those members of the parameter array
+  // will be initialized from {machine_types}, and the rest initialized
+  // to MachineType::AnyTagged().
+  void InitializePlatformIndependent(int parameter_count,
+                                     int extra_parameter_count,
+                                     const MachineType* machine_types);
 
-  int param_count() const { return function_type_->Arity(); }
+  bool IsInitialized() const {
+    return register_param_count_ >= 0 && param_count_ >= 0;
+  }
+
+  int param_count() const { return param_count_; }
   int register_param_count() const { return register_param_count_; }
   Register register_param(int index) const { return register_params_[index]; }
   Register* register_params() const { return register_params_.get(); }
-  Type* param_type(int index) const { return function_type_->Parameter(index); }
+  MachineType param_type(int index) const { return machine_types_[index]; }
   PlatformInterfaceDescriptor* platform_specific_descriptor() const {
     return platform_specific_descriptor_;
   }
 
  private:
   int register_param_count_;
+  int param_count_;
 
   // The Register params are allocated dynamically by the
   // InterfaceDescriptor, and freed on destruction. This is because static
   // arrays of Registers cause creation of runtime static initializers
   // which we don't want.
   std::unique_ptr<Register[]> register_params_;
-
-  // Specifies types for parameters and return
-  FunctionType* function_type_;
+  std::unique_ptr<MachineType[]> machine_types_;
 
   PlatformInterfaceDescriptor* platform_specific_descriptor_;
 
@@ -186,7 +179,7 @@
     return data()->register_param(index);
   }
 
-  Type* GetParameterType(int index) const {
+  MachineType GetParameterType(int index) const {
     DCHECK(index < data()->param_count());
     return data()->param_type(index);
   }
@@ -200,21 +193,18 @@
 
   const char* DebugName(Isolate* isolate) const;
 
-  static FunctionType* BuildDefaultFunctionType(Isolate* isolate,
-                                                int parameter_count);
-
  protected:
   const CallInterfaceDescriptorData* data() const { return data_; }
 
-  virtual FunctionType* BuildCallInterfaceDescriptorFunctionType(
-      Isolate* isolate, int register_param_count) {
-    return BuildDefaultFunctionType(isolate, register_param_count);
-  }
-
   virtual void InitializePlatformSpecific(CallInterfaceDescriptorData* data) {
     UNREACHABLE();
   }
 
+  virtual void InitializePlatformIndependent(
+      CallInterfaceDescriptorData* data) {
+    data->InitializePlatformIndependent(data->register_param_count(), 0, NULL);
+  }
+
   void Initialize(Isolate* isolate, CallDescriptors::Key key) {
     if (!data()->IsInitialized()) {
       // We should only initialize descriptors on the isolate's main thread.
@@ -222,9 +212,7 @@
       CallInterfaceDescriptorData* d = isolate->call_descriptor_data(key);
       DCHECK(d == data());  // d should be a modifiable pointer to data().
       InitializePlatformSpecific(d);
-      FunctionType* function_type = BuildCallInterfaceDescriptorFunctionType(
-          isolate, d->register_param_count());
-      d->InitializePlatformIndependent(function_type);
+      InitializePlatformIndependent(d);
     }
   }
 
@@ -264,23 +252,26 @@
                                                                                \
  public:
 
-#define DECLARE_DESCRIPTOR_WITH_CUSTOM_FUNCTION_TYPE(name, base) \
-  DECLARE_DESCRIPTOR(name, base)                                 \
- protected:                                                      \
-  FunctionType* BuildCallInterfaceDescriptorFunctionType(        \
-      Isolate* isolate, int register_param_count) override;      \
-                                                                 \
+#define DECLARE_DESCRIPTOR_WITH_CUSTOM_FUNCTION_TYPE(name, base)        \
+  DECLARE_DESCRIPTOR(name, base)                                        \
+ protected:                                                             \
+  void InitializePlatformIndependent(CallInterfaceDescriptorData* data) \
+      override;                                                         \
+                                                                        \
  public:
 
-#define DECLARE_DESCRIPTOR_WITH_BASE_AND_FUNCTION_TYPE_ARG(name, base, arg) \
-  DECLARE_DESCRIPTOR_WITH_BASE(name, base)                                  \
- protected:                                                                 \
-  FunctionType* BuildCallInterfaceDescriptorFunctionType(                   \
-      Isolate* isolate, int register_param_count) override {                \
-    return BuildCallInterfaceDescriptorFunctionTypeWithArg(                 \
-        isolate, register_param_count, arg);                                \
-  }                                                                         \
-                                                                            \
+#define DECLARE_DESCRIPTOR_WITH_STACK_ARGS(name, base)                  \
+  DECLARE_DESCRIPTOR_WITH_BASE(name, base)                              \
+ protected:                                                             \
+  void InitializePlatformIndependent(CallInterfaceDescriptorData* data) \
+      override {                                                        \
+    data->InitializePlatformIndependent(0, kParameterCount, NULL);      \
+  }                                                                     \
+  void InitializePlatformSpecific(CallInterfaceDescriptorData* data)    \
+      override {                                                        \
+    data->InitializePlatformSpecific(0, nullptr);                       \
+  }                                                                     \
+                                                                        \
  public:
 
 #define DEFINE_PARAMETERS(...)                          \
@@ -301,73 +292,6 @@
   DECLARE_DESCRIPTOR(ContextOnlyDescriptor, CallInterfaceDescriptor)
 };
 
-// The OnStackWith*ArgsDescriptors have a lot of boilerplate. The superclass
-// OnStackArgsDescriptorBase is not meant to be instantiated directly and has no
-// public constructors to ensure this is so.contains all the logic, and the
-//
-// Use OnStackArgsDescriptorBase::ForArgs(isolate, parameter_count) to
-// instantiate a descriptor with the number of args.
-class OnStackArgsDescriptorBase : public CallInterfaceDescriptor {
- public:
-  static CallInterfaceDescriptor ForArgs(Isolate* isolate, int parameter_count);
-
- protected:
-  OnStackArgsDescriptorBase(Isolate* isolate, CallDescriptors::Key key)
-      : CallInterfaceDescriptor(isolate, key) {}
-  void InitializePlatformSpecific(CallInterfaceDescriptorData* data) override;
-  FunctionType* BuildCallInterfaceDescriptorFunctionTypeWithArg(
-      Isolate* isolate, int register_parameter_count, int parameter_count);
-};
-
-class OnStackWith1ArgsDescriptor : public OnStackArgsDescriptorBase {
- public:
-  DECLARE_DESCRIPTOR_WITH_BASE_AND_FUNCTION_TYPE_ARG(OnStackWith1ArgsDescriptor,
-                                                     OnStackArgsDescriptorBase,
-                                                     1)
-};
-
-class OnStackWith2ArgsDescriptor : public OnStackArgsDescriptorBase {
- public:
-  DECLARE_DESCRIPTOR_WITH_BASE_AND_FUNCTION_TYPE_ARG(OnStackWith2ArgsDescriptor,
-                                                     OnStackArgsDescriptorBase,
-                                                     2)
-};
-
-class OnStackWith3ArgsDescriptor : public OnStackArgsDescriptorBase {
- public:
-  DECLARE_DESCRIPTOR_WITH_BASE_AND_FUNCTION_TYPE_ARG(OnStackWith3ArgsDescriptor,
-                                                     OnStackArgsDescriptorBase,
-                                                     3)
-};
-
-class OnStackWith4ArgsDescriptor : public OnStackArgsDescriptorBase {
- public:
-  DECLARE_DESCRIPTOR_WITH_BASE_AND_FUNCTION_TYPE_ARG(OnStackWith4ArgsDescriptor,
-                                                     OnStackArgsDescriptorBase,
-                                                     4)
-};
-
-class OnStackWith5ArgsDescriptor : public OnStackArgsDescriptorBase {
- public:
-  DECLARE_DESCRIPTOR_WITH_BASE_AND_FUNCTION_TYPE_ARG(OnStackWith5ArgsDescriptor,
-                                                     OnStackArgsDescriptorBase,
-                                                     5)
-};
-
-class OnStackWith6ArgsDescriptor : public OnStackArgsDescriptorBase {
- public:
-  DECLARE_DESCRIPTOR_WITH_BASE_AND_FUNCTION_TYPE_ARG(OnStackWith6ArgsDescriptor,
-                                                     OnStackArgsDescriptorBase,
-                                                     6)
-};
-
-class OnStackWith7ArgsDescriptor : public OnStackArgsDescriptorBase {
- public:
-  DECLARE_DESCRIPTOR_WITH_BASE_AND_FUNCTION_TYPE_ARG(OnStackWith7ArgsDescriptor,
-                                                     OnStackArgsDescriptorBase,
-                                                     7)
-};
-
 // LoadDescriptor is used by all stubs that implement Load/KeyedLoad ICs.
 class LoadDescriptor : public CallInterfaceDescriptor {
  public:
@@ -401,42 +325,47 @@
   static const Register NameRegister();
   static const Register ValueRegister();
   static const Register SlotRegister();
-};
 
+#if V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X87
+  static const bool kPassLastArgsOnStack = true;
+#else
+  static const bool kPassLastArgsOnStack = false;
+#endif
+
+  // Pass value and slot through the stack.
+  static const int kStackArgumentsCount = kPassLastArgsOnStack ? 2 : 0;
+};
 
 class StoreTransitionDescriptor : public StoreDescriptor {
  public:
-  DEFINE_PARAMETERS(kReceiver, kName, kValue, kMap)
+  DEFINE_PARAMETERS(kReceiver, kName, kMap, kValue, kSlot, kVector)
   DECLARE_DESCRIPTOR_WITH_CUSTOM_FUNCTION_TYPE(StoreTransitionDescriptor,
                                                StoreDescriptor)
 
   static const Register MapRegister();
-};
-
-
-class VectorStoreTransitionDescriptor : public StoreDescriptor {
- public:
-  DECLARE_DESCRIPTOR_WITH_CUSTOM_FUNCTION_TYPE(VectorStoreTransitionDescriptor,
-                                               StoreDescriptor)
-
-  // TODO(ishell): use DEFINE_PARAMETERS macro here
-  // Extends StoreDescriptor with Map parameter.
-  enum ParameterIndices {
-    kReceiver = 0,
-    kName = 1,
-    kValue = 2,
-
-    kMap = 3,
-
-    kSlot = 4,  // not present on ia32.
-    kVirtualSlotVector = 4,
-
-    kVector = 5
-  };
-
-  static const Register MapRegister();
   static const Register SlotRegister();
   static const Register VectorRegister();
+
+  // Pass value, slot and vector through the stack.
+  static const int kStackArgumentsCount = kPassLastArgsOnStack ? 3 : 0;
+};
+
+class StoreNamedTransitionDescriptor : public StoreTransitionDescriptor {
+ public:
+  DEFINE_PARAMETERS(kReceiver, kFieldOffset, kMap, kValue, kSlot, kVector,
+                    kName)
+  DECLARE_DESCRIPTOR_WITH_CUSTOM_FUNCTION_TYPE(StoreNamedTransitionDescriptor,
+                                               StoreTransitionDescriptor)
+
+  // Always pass name on the stack.
+  static const bool kPassLastArgsOnStack = true;
+  static const int kStackArgumentsCount =
+      StoreTransitionDescriptor::kStackArgumentsCount + 1;
+
+  static const Register NameRegister() { return no_reg; }
+  static const Register FieldOffsetRegister() {
+    return StoreTransitionDescriptor::NameRegister();
+  }
 };
 
 class StoreWithVectorDescriptor : public StoreDescriptor {
@@ -446,6 +375,9 @@
                                                StoreDescriptor)
 
   static const Register VectorRegister();
+
+  // Pass value, slot and vector through the stack.
+  static const int kStackArgumentsCount = kPassLastArgsOnStack ? 3 : 0;
 };
 
 class LoadWithVectorDescriptor : public LoadDescriptor {
@@ -632,6 +564,12 @@
   DECLARE_DESCRIPTOR(CallConstructDescriptor, CallInterfaceDescriptor)
 };
 
+class RegExpExecDescriptor : public CallInterfaceDescriptor {
+ public:
+  DEFINE_PARAMETERS(kRegExpObject, kString, kPreviousIndex, kLastMatchInfo)
+  DECLARE_DESCRIPTOR_WITH_STACK_ARGS(RegExpExecDescriptor,
+                                     CallInterfaceDescriptor)
+};
 
 class RegExpConstructResultDescriptor : public CallInterfaceDescriptor {
  public:
@@ -751,6 +689,13 @@
   static const Register RightRegister();
 };
 
+class SubStringDescriptor : public CallInterfaceDescriptor {
+ public:
+  DEFINE_PARAMETERS(kString, kFrom, kTo)
+  DECLARE_DESCRIPTOR_WITH_STACK_ARGS(SubStringDescriptor,
+                                     CallInterfaceDescriptor)
+};
+
 // TODO(ishell): not used, remove.
 class KeyedDescriptor : public CallInterfaceDescriptor {
  public:
@@ -778,79 +723,13 @@
                                                CallInterfaceDescriptor)
 };
 
-// The ApiCallback*Descriptors have a lot of boilerplate. The superclass
-// ApiCallbackDescriptorBase contains all the logic, and the
-// ApiCallbackWith*ArgsDescriptor merely instantiate these with a
-// parameter for the number of args.
-//
-// The base class is not meant to be instantiated directly and has no
-// public constructors to ensure this is so.
-//
-// The simplest usage for all the ApiCallback*Descriptors is probably
-//   ApiCallbackDescriptorBase::ForArgs(isolate, argc)
-//
-class ApiCallbackDescriptorBase : public CallInterfaceDescriptor {
+class ApiCallbackDescriptor : public CallInterfaceDescriptor {
  public:
   DEFINE_PARAMETERS(kFunction, kCallData, kHolder, kApiFunctionAddress)
-  static CallInterfaceDescriptor ForArgs(Isolate* isolate, int argc);
-
- protected:
-  ApiCallbackDescriptorBase(Isolate* isolate, CallDescriptors::Key key)
-      : CallInterfaceDescriptor(isolate, key) {}
-  void InitializePlatformSpecific(CallInterfaceDescriptorData* data) override;
-  FunctionType* BuildCallInterfaceDescriptorFunctionTypeWithArg(
-      Isolate* isolate, int parameter_count, int argc);
+  DECLARE_DESCRIPTOR_WITH_CUSTOM_FUNCTION_TYPE(ApiCallbackDescriptor,
+                                               CallInterfaceDescriptor)
 };
 
-class ApiCallbackWith0ArgsDescriptor : public ApiCallbackDescriptorBase {
- public:
-  DECLARE_DESCRIPTOR_WITH_BASE_AND_FUNCTION_TYPE_ARG(
-      ApiCallbackWith0ArgsDescriptor, ApiCallbackDescriptorBase, 0)
-};
-
-class ApiCallbackWith1ArgsDescriptor : public ApiCallbackDescriptorBase {
- public:
-  DECLARE_DESCRIPTOR_WITH_BASE_AND_FUNCTION_TYPE_ARG(
-      ApiCallbackWith1ArgsDescriptor, ApiCallbackDescriptorBase, 1)
-};
-
-class ApiCallbackWith2ArgsDescriptor : public ApiCallbackDescriptorBase {
- public:
-  DECLARE_DESCRIPTOR_WITH_BASE_AND_FUNCTION_TYPE_ARG(
-      ApiCallbackWith2ArgsDescriptor, ApiCallbackDescriptorBase, 2)
-};
-
-class ApiCallbackWith3ArgsDescriptor : public ApiCallbackDescriptorBase {
- public:
-  DECLARE_DESCRIPTOR_WITH_BASE_AND_FUNCTION_TYPE_ARG(
-      ApiCallbackWith3ArgsDescriptor, ApiCallbackDescriptorBase, 3)
-};
-
-class ApiCallbackWith4ArgsDescriptor : public ApiCallbackDescriptorBase {
- public:
-  DECLARE_DESCRIPTOR_WITH_BASE_AND_FUNCTION_TYPE_ARG(
-      ApiCallbackWith4ArgsDescriptor, ApiCallbackDescriptorBase, 4)
-};
-
-class ApiCallbackWith5ArgsDescriptor : public ApiCallbackDescriptorBase {
- public:
-  DECLARE_DESCRIPTOR_WITH_BASE_AND_FUNCTION_TYPE_ARG(
-      ApiCallbackWith5ArgsDescriptor, ApiCallbackDescriptorBase, 5)
-};
-
-class ApiCallbackWith6ArgsDescriptor : public ApiCallbackDescriptorBase {
- public:
-  DECLARE_DESCRIPTOR_WITH_BASE_AND_FUNCTION_TYPE_ARG(
-      ApiCallbackWith6ArgsDescriptor, ApiCallbackDescriptorBase, 6)
-};
-
-class ApiCallbackWith7ArgsDescriptor : public ApiCallbackDescriptorBase {
- public:
-  DECLARE_DESCRIPTOR_WITH_BASE_AND_FUNCTION_TYPE_ARG(
-      ApiCallbackWith7ArgsDescriptor, ApiCallbackDescriptorBase, 7)
-};
-
-
 class ApiGetterDescriptor : public CallInterfaceDescriptor {
  public:
   DEFINE_PARAMETERS(kReceiver, kHolder, kCallback)
@@ -904,22 +783,35 @@
 
 class InterpreterPushArgsAndCallDescriptor : public CallInterfaceDescriptor {
  public:
-  DECLARE_DESCRIPTOR(InterpreterPushArgsAndCallDescriptor,
-                     CallInterfaceDescriptor)
+  DEFINE_PARAMETERS(kNumberOfArguments, kFirstArgument, kFunction)
+  DECLARE_DESCRIPTOR_WITH_CUSTOM_FUNCTION_TYPE(
+      InterpreterPushArgsAndCallDescriptor, CallInterfaceDescriptor)
 };
 
 
 class InterpreterPushArgsAndConstructDescriptor
     : public CallInterfaceDescriptor {
  public:
-  DECLARE_DESCRIPTOR(InterpreterPushArgsAndConstructDescriptor,
-                     CallInterfaceDescriptor)
+  DEFINE_PARAMETERS(kNumberOfArguments, kNewTarget, kConstructor,
+                    kFeedbackElement, kFirstArgument)
+  DECLARE_DESCRIPTOR_WITH_CUSTOM_FUNCTION_TYPE(
+      InterpreterPushArgsAndConstructDescriptor, CallInterfaceDescriptor)
 };
 
+class InterpreterPushArgsAndConstructArrayDescriptor
+    : public CallInterfaceDescriptor {
+ public:
+  DEFINE_PARAMETERS(kNumberOfArguments, kFunction, kFeedbackElement,
+                    kFirstArgument)
+  DECLARE_DESCRIPTOR_WITH_CUSTOM_FUNCTION_TYPE(
+      InterpreterPushArgsAndConstructArrayDescriptor, CallInterfaceDescriptor)
+};
 
 class InterpreterCEntryDescriptor : public CallInterfaceDescriptor {
  public:
-  DECLARE_DESCRIPTOR(InterpreterCEntryDescriptor, CallInterfaceDescriptor)
+  DEFINE_PARAMETERS(kNumberOfArguments, kFirstArgument, kFunctionEntry)
+  DECLARE_DESCRIPTOR_WITH_CUSTOM_FUNCTION_TYPE(InterpreterCEntryDescriptor,
+                                               CallInterfaceDescriptor)
 };
 
 class ResumeGeneratorDescriptor final : public CallInterfaceDescriptor {
diff --git a/src/interpreter/OWNERS b/src/interpreter/OWNERS
index d12fcf9..4e6a721 100644
--- a/src/interpreter/OWNERS
+++ b/src/interpreter/OWNERS
@@ -3,5 +3,4 @@
 bmeurer@chromium.org
 mstarzinger@chromium.org
 mythria@chromium.org
-oth@chromium.org
 rmcilroy@chromium.org
diff --git a/src/interpreter/bytecode-array-builder.cc b/src/interpreter/bytecode-array-builder.cc
index 9bef5a5..dfa3950 100644
--- a/src/interpreter/bytecode-array-builder.cc
+++ b/src/interpreter/bytecode-array-builder.cc
@@ -4,7 +4,6 @@
 
 #include "src/interpreter/bytecode-array-builder.h"
 
-#include "src/compiler.h"
 #include "src/globals.h"
 #include "src/interpreter/bytecode-array-writer.h"
 #include "src/interpreter/bytecode-dead-code-optimizer.h"
@@ -29,7 +28,7 @@
       parameter_count_(parameter_count),
       local_register_count_(locals_count),
       context_register_count_(context_count),
-      temporary_allocator_(zone, fixed_register_count()),
+      register_allocator_(fixed_register_count()),
       bytecode_array_writer_(zone, &constant_array_builder_,
                              source_position_mode),
       pipeline_(&bytecode_array_writer_) {
@@ -47,7 +46,8 @@
 
   if (FLAG_ignition_reo) {
     pipeline_ = new (zone) BytecodeRegisterOptimizer(
-        zone, &temporary_allocator_, parameter_count, pipeline_);
+        zone, &register_allocator_, fixed_register_count(), parameter_count,
+        pipeline_);
   }
 
   return_position_ =
@@ -70,10 +70,6 @@
   return Register::FromParameterIndex(parameter_index, parameter_count());
 }
 
-bool BytecodeArrayBuilder::RegisterIsParameterOrLocal(Register reg) const {
-  return reg.is_parameter() || reg.index() < locals_count();
-}
-
 Handle<BytecodeArray> BytecodeArrayBuilder::ToBytecodeArray(Isolate* isolate) {
   DCHECK(return_seen_in_block_);
   DCHECK(!bytecode_generated_);
@@ -81,86 +77,121 @@
 
   Handle<FixedArray> handler_table =
       handler_table_builder()->ToHandlerTable(isolate);
-  return pipeline_->ToBytecodeArray(isolate, fixed_register_count(),
+  return pipeline_->ToBytecodeArray(isolate, total_register_count(),
                                     parameter_count(), handler_table);
 }
 
-namespace {
-
-static bool ExpressionPositionIsNeeded(Bytecode bytecode) {
-  // An expression position is always needed if filtering is turned
-  // off. Otherwise an expression is only needed if the bytecode has
-  // external side effects.
-  return !FLAG_ignition_filter_expression_positions ||
-         !Bytecodes::IsWithoutExternalSideEffects(bytecode);
-}
-
-}  // namespace
-
-void BytecodeArrayBuilder::AttachSourceInfo(BytecodeNode* node) {
-  if (latest_source_info_.is_valid()) {
-    // Statement positions need to be emitted immediately.  Expression
-    // positions can be pushed back until a bytecode is found that can
-    // throw. Hence we only invalidate the existing source position
-    // information if it is used.
-    if (latest_source_info_.is_statement() ||
-        ExpressionPositionIsNeeded(node->bytecode())) {
-      node->source_info().Clone(latest_source_info_);
-      latest_source_info_.set_invalid();
-    }
-  }
-}
-
 void BytecodeArrayBuilder::Output(Bytecode bytecode, uint32_t operand0,
                                   uint32_t operand1, uint32_t operand2,
                                   uint32_t operand3) {
   DCHECK(OperandsAreValid(bytecode, 4, operand0, operand1, operand2, operand3));
-  BytecodeNode node(bytecode, operand0, operand1, operand2, operand3);
-  AttachSourceInfo(&node);
+  BytecodeNode node(bytecode, operand0, operand1, operand2, operand3,
+                    &latest_source_info_);
   pipeline()->Write(&node);
 }
 
 void BytecodeArrayBuilder::Output(Bytecode bytecode, uint32_t operand0,
                                   uint32_t operand1, uint32_t operand2) {
   DCHECK(OperandsAreValid(bytecode, 3, operand0, operand1, operand2));
-  BytecodeNode node(bytecode, operand0, operand1, operand2);
-  AttachSourceInfo(&node);
+  BytecodeNode node(bytecode, operand0, operand1, operand2,
+                    &latest_source_info_);
   pipeline()->Write(&node);
 }
 
 void BytecodeArrayBuilder::Output(Bytecode bytecode, uint32_t operand0,
                                   uint32_t operand1) {
   DCHECK(OperandsAreValid(bytecode, 2, operand0, operand1));
-  BytecodeNode node(bytecode, operand0, operand1);
-  AttachSourceInfo(&node);
+  BytecodeNode node(bytecode, operand0, operand1, &latest_source_info_);
   pipeline()->Write(&node);
 }
 
 void BytecodeArrayBuilder::Output(Bytecode bytecode, uint32_t operand0) {
   DCHECK(OperandsAreValid(bytecode, 1, operand0));
-  BytecodeNode node(bytecode, operand0);
-  AttachSourceInfo(&node);
+  BytecodeNode node(bytecode, operand0, &latest_source_info_);
   pipeline()->Write(&node);
 }
 
 void BytecodeArrayBuilder::Output(Bytecode bytecode) {
   DCHECK(OperandsAreValid(bytecode, 0));
-  BytecodeNode node(bytecode);
-  AttachSourceInfo(&node);
+  BytecodeNode node(bytecode, &latest_source_info_);
   pipeline()->Write(&node);
 }
 
+void BytecodeArrayBuilder::OutputJump(Bytecode bytecode, BytecodeLabel* label) {
+  BytecodeNode node(bytecode, 0, &latest_source_info_);
+  pipeline_->WriteJump(&node, label);
+  LeaveBasicBlock();
+}
+
+void BytecodeArrayBuilder::OutputJump(Bytecode bytecode, uint32_t operand0,
+                                      BytecodeLabel* label) {
+  BytecodeNode node(bytecode, 0, operand0, &latest_source_info_);
+  pipeline_->WriteJump(&node, label);
+  LeaveBasicBlock();
+}
+
 BytecodeArrayBuilder& BytecodeArrayBuilder::BinaryOperation(Token::Value op,
                                                             Register reg,
                                                             int feedback_slot) {
-  Output(BytecodeForBinaryOperation(op), RegisterOperand(reg),
-         UnsignedOperand(feedback_slot));
+  switch (op) {
+    case Token::Value::ADD:
+      Output(Bytecode::kAdd, RegisterOperand(reg),
+             UnsignedOperand(feedback_slot));
+      break;
+    case Token::Value::SUB:
+      Output(Bytecode::kSub, RegisterOperand(reg),
+             UnsignedOperand(feedback_slot));
+      break;
+    case Token::Value::MUL:
+      Output(Bytecode::kMul, RegisterOperand(reg),
+             UnsignedOperand(feedback_slot));
+      break;
+    case Token::Value::DIV:
+      Output(Bytecode::kDiv, RegisterOperand(reg),
+             UnsignedOperand(feedback_slot));
+      break;
+    case Token::Value::MOD:
+      Output(Bytecode::kMod, RegisterOperand(reg),
+             UnsignedOperand(feedback_slot));
+      break;
+    case Token::Value::BIT_OR:
+      Output(Bytecode::kBitwiseOr, RegisterOperand(reg),
+             UnsignedOperand(feedback_slot));
+      break;
+    case Token::Value::BIT_XOR:
+      Output(Bytecode::kBitwiseXor, RegisterOperand(reg),
+             UnsignedOperand(feedback_slot));
+      break;
+    case Token::Value::BIT_AND:
+      Output(Bytecode::kBitwiseAnd, RegisterOperand(reg),
+             UnsignedOperand(feedback_slot));
+      break;
+    case Token::Value::SHL:
+      Output(Bytecode::kShiftLeft, RegisterOperand(reg),
+             UnsignedOperand(feedback_slot));
+      break;
+    case Token::Value::SAR:
+      Output(Bytecode::kShiftRight, RegisterOperand(reg),
+             UnsignedOperand(feedback_slot));
+      break;
+    case Token::Value::SHR:
+      Output(Bytecode::kShiftRightLogical, RegisterOperand(reg),
+             UnsignedOperand(feedback_slot));
+      break;
+    default:
+      UNREACHABLE();
+  }
   return *this;
 }
 
 BytecodeArrayBuilder& BytecodeArrayBuilder::CountOperation(Token::Value op,
                                                            int feedback_slot) {
-  Output(BytecodeForCountOperation(op), UnsignedOperand(feedback_slot));
+  if (op == Token::Value::ADD) {
+    Output(Bytecode::kInc, UnsignedOperand(feedback_slot));
+  } else {
+    DCHECK_EQ(op, Token::Value::SUB);
+    Output(Bytecode::kDec, UnsignedOperand(feedback_slot));
+  }
   return *this;
 }
 
@@ -169,15 +200,51 @@
   return *this;
 }
 
-
 BytecodeArrayBuilder& BytecodeArrayBuilder::TypeOf() {
   Output(Bytecode::kTypeOf);
   return *this;
 }
 
-BytecodeArrayBuilder& BytecodeArrayBuilder::CompareOperation(Token::Value op,
-                                                             Register reg) {
-  Output(BytecodeForCompareOperation(op), RegisterOperand(reg));
+BytecodeArrayBuilder& BytecodeArrayBuilder::CompareOperation(
+    Token::Value op, Register reg, int feedback_slot) {
+  switch (op) {
+    case Token::Value::EQ:
+      Output(Bytecode::kTestEqual, RegisterOperand(reg),
+             UnsignedOperand(feedback_slot));
+      break;
+    case Token::Value::NE:
+      Output(Bytecode::kTestNotEqual, RegisterOperand(reg),
+             UnsignedOperand(feedback_slot));
+      break;
+    case Token::Value::EQ_STRICT:
+      Output(Bytecode::kTestEqualStrict, RegisterOperand(reg),
+             UnsignedOperand(feedback_slot));
+      break;
+    case Token::Value::LT:
+      Output(Bytecode::kTestLessThan, RegisterOperand(reg),
+             UnsignedOperand(feedback_slot));
+      break;
+    case Token::Value::GT:
+      Output(Bytecode::kTestGreaterThan, RegisterOperand(reg),
+             UnsignedOperand(feedback_slot));
+      break;
+    case Token::Value::LTE:
+      Output(Bytecode::kTestLessThanOrEqual, RegisterOperand(reg),
+             UnsignedOperand(feedback_slot));
+      break;
+    case Token::Value::GTE:
+      Output(Bytecode::kTestGreaterThanOrEqual, RegisterOperand(reg),
+             UnsignedOperand(feedback_slot));
+      break;
+    case Token::Value::INSTANCEOF:
+      Output(Bytecode::kTestInstanceOf, RegisterOperand(reg));
+      break;
+    case Token::Value::IN:
+      Output(Bytecode::kTestIn, RegisterOperand(reg));
+      break;
+    default:
+      UNREACHABLE();
+  }
   return *this;
 }
 
@@ -250,50 +317,90 @@
 
 BytecodeArrayBuilder& BytecodeArrayBuilder::LoadGlobal(int feedback_slot,
                                                        TypeofMode typeof_mode) {
-  // TODO(rmcilroy): Potentially store typeof information in an
-  // operand rather than having extra bytecodes.
-  Bytecode bytecode = BytecodeForLoadGlobal(typeof_mode);
-  Output(bytecode, UnsignedOperand(feedback_slot));
+  if (typeof_mode == INSIDE_TYPEOF) {
+    Output(Bytecode::kLdaGlobalInsideTypeof, feedback_slot);
+  } else {
+    DCHECK_EQ(typeof_mode, NOT_INSIDE_TYPEOF);
+    Output(Bytecode::kLdaGlobal, UnsignedOperand(feedback_slot));
+  }
   return *this;
 }
 
 BytecodeArrayBuilder& BytecodeArrayBuilder::StoreGlobal(
     const Handle<String> name, int feedback_slot, LanguageMode language_mode) {
-  Bytecode bytecode = BytecodeForStoreGlobal(language_mode);
   size_t name_index = GetConstantPoolEntry(name);
-  Output(bytecode, UnsignedOperand(name_index), UnsignedOperand(feedback_slot));
+  if (language_mode == SLOPPY) {
+    Output(Bytecode::kStaGlobalSloppy, UnsignedOperand(name_index),
+           UnsignedOperand(feedback_slot));
+  } else {
+    DCHECK_EQ(language_mode, STRICT);
+    Output(Bytecode::kStaGlobalStrict, UnsignedOperand(name_index),
+           UnsignedOperand(feedback_slot));
+  }
   return *this;
 }
 
 BytecodeArrayBuilder& BytecodeArrayBuilder::LoadContextSlot(Register context,
-                                                            int slot_index) {
+                                                            int slot_index,
+                                                            int depth) {
   Output(Bytecode::kLdaContextSlot, RegisterOperand(context),
-         UnsignedOperand(slot_index));
+         UnsignedOperand(slot_index), UnsignedOperand(depth));
   return *this;
 }
 
 BytecodeArrayBuilder& BytecodeArrayBuilder::StoreContextSlot(Register context,
-                                                             int slot_index) {
+                                                             int slot_index,
+                                                             int depth) {
   Output(Bytecode::kStaContextSlot, RegisterOperand(context),
-         UnsignedOperand(slot_index));
+         UnsignedOperand(slot_index), UnsignedOperand(depth));
   return *this;
 }
 
 BytecodeArrayBuilder& BytecodeArrayBuilder::LoadLookupSlot(
     const Handle<String> name, TypeofMode typeof_mode) {
-  Bytecode bytecode = (typeof_mode == INSIDE_TYPEOF)
-                          ? Bytecode::kLdaLookupSlotInsideTypeof
-                          : Bytecode::kLdaLookupSlot;
   size_t name_index = GetConstantPoolEntry(name);
-  Output(bytecode, UnsignedOperand(name_index));
+  if (typeof_mode == INSIDE_TYPEOF) {
+    Output(Bytecode::kLdaLookupSlotInsideTypeof, UnsignedOperand(name_index));
+  } else {
+    DCHECK_EQ(typeof_mode, NOT_INSIDE_TYPEOF);
+    Output(Bytecode::kLdaLookupSlot, UnsignedOperand(name_index));
+  }
+  return *this;
+}
+
+BytecodeArrayBuilder& BytecodeArrayBuilder::LoadLookupContextSlot(
+    const Handle<String> name, TypeofMode typeof_mode, int slot_index,
+    int depth) {
+  Bytecode bytecode = (typeof_mode == INSIDE_TYPEOF)
+                          ? Bytecode::kLdaLookupContextSlotInsideTypeof
+                          : Bytecode::kLdaLookupContextSlot;
+  size_t name_index = GetConstantPoolEntry(name);
+  Output(bytecode, UnsignedOperand(name_index), UnsignedOperand(slot_index),
+         UnsignedOperand(depth));
+  return *this;
+}
+
+BytecodeArrayBuilder& BytecodeArrayBuilder::LoadLookupGlobalSlot(
+    const Handle<String> name, TypeofMode typeof_mode, int feedback_slot,
+    int depth) {
+  Bytecode bytecode = (typeof_mode == INSIDE_TYPEOF)
+                          ? Bytecode::kLdaLookupGlobalSlotInsideTypeof
+                          : Bytecode::kLdaLookupGlobalSlot;
+  size_t name_index = GetConstantPoolEntry(name);
+  Output(bytecode, UnsignedOperand(name_index), UnsignedOperand(feedback_slot),
+         UnsignedOperand(depth));
   return *this;
 }
 
 BytecodeArrayBuilder& BytecodeArrayBuilder::StoreLookupSlot(
     const Handle<String> name, LanguageMode language_mode) {
-  Bytecode bytecode = BytecodeForStoreLookupSlot(language_mode);
   size_t name_index = GetConstantPoolEntry(name);
-  Output(bytecode, UnsignedOperand(name_index));
+  if (language_mode == SLOPPY) {
+    Output(Bytecode::kStaLookupSlotSloppy, UnsignedOperand(name_index));
+  } else {
+    DCHECK_EQ(language_mode, STRICT);
+    Output(Bytecode::kStaLookupSlotStrict, UnsignedOperand(name_index));
+  }
   return *this;
 }
 
@@ -315,19 +422,29 @@
 BytecodeArrayBuilder& BytecodeArrayBuilder::StoreNamedProperty(
     Register object, const Handle<Name> name, int feedback_slot,
     LanguageMode language_mode) {
-  Bytecode bytecode = BytecodeForStoreNamedProperty(language_mode);
   size_t name_index = GetConstantPoolEntry(name);
-  Output(bytecode, RegisterOperand(object), UnsignedOperand(name_index),
-         UnsignedOperand(feedback_slot));
+  if (language_mode == SLOPPY) {
+    Output(Bytecode::kStaNamedPropertySloppy, RegisterOperand(object),
+           UnsignedOperand(name_index), UnsignedOperand(feedback_slot));
+  } else {
+    DCHECK_EQ(language_mode, STRICT);
+    Output(Bytecode::kStaNamedPropertyStrict, RegisterOperand(object),
+           UnsignedOperand(name_index), UnsignedOperand(feedback_slot));
+  }
   return *this;
 }
 
 BytecodeArrayBuilder& BytecodeArrayBuilder::StoreKeyedProperty(
     Register object, Register key, int feedback_slot,
     LanguageMode language_mode) {
-  Bytecode bytecode = BytecodeForStoreKeyedProperty(language_mode);
-  Output(bytecode, RegisterOperand(object), RegisterOperand(key),
-         UnsignedOperand(feedback_slot));
+  if (language_mode == SLOPPY) {
+    Output(Bytecode::kStaKeyedPropertySloppy, RegisterOperand(object),
+           RegisterOperand(key), UnsignedOperand(feedback_slot));
+  } else {
+    DCHECK_EQ(language_mode, STRICT);
+    Output(Bytecode::kStaKeyedPropertyStrict, RegisterOperand(object),
+           RegisterOperand(key), UnsignedOperand(feedback_slot));
+  }
   return *this;
 }
 
@@ -346,10 +463,11 @@
 }
 
 BytecodeArrayBuilder& BytecodeArrayBuilder::CreateCatchContext(
-    Register exception, Handle<String> name) {
+    Register exception, Handle<String> name, Handle<ScopeInfo> scope_info) {
   size_t name_index = GetConstantPoolEntry(name);
+  size_t scope_info_index = GetConstantPoolEntry(scope_info);
   Output(Bytecode::kCreateCatchContext, RegisterOperand(exception),
-         UnsignedOperand(name_index));
+         UnsignedOperand(name_index), UnsignedOperand(scope_info_index));
   return *this;
 }
 
@@ -358,18 +476,29 @@
   return *this;
 }
 
-BytecodeArrayBuilder& BytecodeArrayBuilder::CreateWithContext(Register object) {
-    Output(Bytecode::kCreateWithContext, RegisterOperand(object));
+BytecodeArrayBuilder& BytecodeArrayBuilder::CreateWithContext(
+    Register object, Handle<ScopeInfo> scope_info) {
+  size_t scope_info_index = GetConstantPoolEntry(scope_info);
+  Output(Bytecode::kCreateWithContext, RegisterOperand(object),
+         UnsignedOperand(scope_info_index));
   return *this;
 }
 
 BytecodeArrayBuilder& BytecodeArrayBuilder::CreateArguments(
     CreateArgumentsType type) {
-  // TODO(rmcilroy): Consider passing the type as a bytecode operand rather
-  // than having two different bytecodes once we have better support for
-  // branches in the InterpreterAssembler.
-  Bytecode bytecode = BytecodeForCreateArguments(type);
-  Output(bytecode);
+  switch (type) {
+    case CreateArgumentsType::kMappedArguments:
+      Output(Bytecode::kCreateMappedArguments);
+      break;
+    case CreateArgumentsType::kUnmappedArguments:
+      Output(Bytecode::kCreateUnmappedArguments);
+      break;
+    case CreateArgumentsType::kRestParameter:
+      Output(Bytecode::kCreateRestParameter);
+      break;
+    default:
+      UNREACHABLE();
+  }
   return *this;
 }
 
@@ -411,19 +540,19 @@
   return *this;
 }
 
-BytecodeArrayBuilder& BytecodeArrayBuilder::CastAccumulatorToJSObject(
+BytecodeArrayBuilder& BytecodeArrayBuilder::ConvertAccumulatorToObject(
     Register out) {
   Output(Bytecode::kToObject, RegisterOperand(out));
   return *this;
 }
 
-BytecodeArrayBuilder& BytecodeArrayBuilder::CastAccumulatorToName(
+BytecodeArrayBuilder& BytecodeArrayBuilder::ConvertAccumulatorToName(
     Register out) {
   Output(Bytecode::kToName, RegisterOperand(out));
   return *this;
 }
 
-BytecodeArrayBuilder& BytecodeArrayBuilder::CastAccumulatorToNumber(
+BytecodeArrayBuilder& BytecodeArrayBuilder::ConvertAccumulatorToNumber(
     Register out) {
   Output(Bytecode::kToNumber, RegisterOperand(out));
   return *this;
@@ -442,43 +571,44 @@
   return *this;
 }
 
-BytecodeArrayBuilder& BytecodeArrayBuilder::OutputJump(Bytecode jump_bytecode,
-                                                       BytecodeLabel* label) {
-  BytecodeNode node(jump_bytecode, 0);
-  AttachSourceInfo(&node);
-  pipeline_->WriteJump(&node, label);
-  LeaveBasicBlock();
-  return *this;
-}
-
 BytecodeArrayBuilder& BytecodeArrayBuilder::Jump(BytecodeLabel* label) {
-  return OutputJump(Bytecode::kJump, label);
+  OutputJump(Bytecode::kJump, label);
+  return *this;
 }
 
 BytecodeArrayBuilder& BytecodeArrayBuilder::JumpIfTrue(BytecodeLabel* label) {
   // The peephole optimizer attempts to simplify JumpIfToBooleanTrue
   // to JumpIfTrue.
-  return OutputJump(Bytecode::kJumpIfToBooleanTrue, label);
+  OutputJump(Bytecode::kJumpIfToBooleanTrue, label);
+  return *this;
 }
 
 BytecodeArrayBuilder& BytecodeArrayBuilder::JumpIfFalse(BytecodeLabel* label) {
-  // The peephole optimizer attempts to simplify JumpIfToBooleanFalse
-  // to JumpIfFalse.
-  return OutputJump(Bytecode::kJumpIfToBooleanFalse, label);
+  OutputJump(Bytecode::kJumpIfToBooleanFalse, label);
+  return *this;
 }
 
 BytecodeArrayBuilder& BytecodeArrayBuilder::JumpIfNull(BytecodeLabel* label) {
-  return OutputJump(Bytecode::kJumpIfNull, label);
+  OutputJump(Bytecode::kJumpIfNull, label);
+  return *this;
 }
 
 BytecodeArrayBuilder& BytecodeArrayBuilder::JumpIfUndefined(
     BytecodeLabel* label) {
-  return OutputJump(Bytecode::kJumpIfUndefined, label);
+  OutputJump(Bytecode::kJumpIfUndefined, label);
+  return *this;
 }
 
 BytecodeArrayBuilder& BytecodeArrayBuilder::JumpIfNotHole(
     BytecodeLabel* label) {
-  return OutputJump(Bytecode::kJumpIfNotHole, label);
+  OutputJump(Bytecode::kJumpIfNotHole, label);
+  return *this;
+}
+
+BytecodeArrayBuilder& BytecodeArrayBuilder::JumpLoop(BytecodeLabel* label,
+                                                     int loop_depth) {
+  OutputJump(Bytecode::kJumpLoop, UnsignedOperand(loop_depth), label);
+  return *this;
 }
 
 BytecodeArrayBuilder& BytecodeArrayBuilder::StackCheck(int position) {
@@ -499,11 +629,6 @@
   return *this;
 }
 
-BytecodeArrayBuilder& BytecodeArrayBuilder::OsrPoll(int loop_depth) {
-  Output(Bytecode::kOsrPoll, UnsignedOperand(loop_depth));
-  return *this;
-}
-
 BytecodeArrayBuilder& BytecodeArrayBuilder::Throw() {
   Output(Bytecode::kThrow);
   return *this;
@@ -527,24 +652,27 @@
 }
 
 BytecodeArrayBuilder& BytecodeArrayBuilder::ForInPrepare(
-    Register receiver, Register cache_info_triple) {
+    Register receiver, RegisterList cache_info_triple) {
+  DCHECK_EQ(3, cache_info_triple.register_count());
   Output(Bytecode::kForInPrepare, RegisterOperand(receiver),
-         RegisterOperand(cache_info_triple));
+         RegisterOperand(cache_info_triple.first_register()));
   return *this;
 }
 
-BytecodeArrayBuilder& BytecodeArrayBuilder::ForInDone(Register index,
-                                                      Register cache_length) {
-  Output(Bytecode::kForInDone, RegisterOperand(index),
+BytecodeArrayBuilder& BytecodeArrayBuilder::ForInContinue(
+    Register index, Register cache_length) {
+  Output(Bytecode::kForInContinue, RegisterOperand(index),
          RegisterOperand(cache_length));
   return *this;
 }
 
 BytecodeArrayBuilder& BytecodeArrayBuilder::ForInNext(
-    Register receiver, Register index, Register cache_type_array_pair,
+    Register receiver, Register index, RegisterList cache_type_array_pair,
     int feedback_slot) {
+  DCHECK_EQ(2, cache_type_array_pair.register_count());
   Output(Bytecode::kForInNext, RegisterOperand(receiver),
-         RegisterOperand(index), RegisterOperand(cache_type_array_pair),
+         RegisterOperand(index),
+         RegisterOperand(cache_type_array_pair.first_register()),
          UnsignedOperand(feedback_slot));
   return *this;
 }
@@ -591,45 +719,39 @@
   return *this;
 }
 
-void BytecodeArrayBuilder::EnsureReturn() {
-  if (!return_seen_in_block_) {
-    LoadUndefined();
-    Return();
-  }
-  DCHECK(return_seen_in_block_);
-}
-
 BytecodeArrayBuilder& BytecodeArrayBuilder::Call(Register callable,
-                                                 Register receiver_args,
-                                                 size_t receiver_args_count,
+                                                 RegisterList args,
                                                  int feedback_slot,
                                                  TailCallMode tail_call_mode) {
-  Bytecode bytecode = BytecodeForCall(tail_call_mode);
-  Output(bytecode, RegisterOperand(callable), RegisterOperand(receiver_args),
-         UnsignedOperand(receiver_args_count), UnsignedOperand(feedback_slot));
+  if (tail_call_mode == TailCallMode::kDisallow) {
+    Output(Bytecode::kCall, RegisterOperand(callable),
+           RegisterOperand(args.first_register()),
+           UnsignedOperand(args.register_count()),
+           UnsignedOperand(feedback_slot));
+  } else {
+    DCHECK(tail_call_mode == TailCallMode::kAllow);
+    Output(Bytecode::kTailCall, RegisterOperand(callable),
+           RegisterOperand(args.first_register()),
+           UnsignedOperand(args.register_count()),
+           UnsignedOperand(feedback_slot));
+  }
   return *this;
 }
 
 BytecodeArrayBuilder& BytecodeArrayBuilder::New(Register constructor,
-                                                Register first_arg,
-                                                size_t arg_count) {
-  if (!first_arg.is_valid()) {
-    DCHECK_EQ(0u, arg_count);
-    first_arg = Register(0);
-  }
+                                                RegisterList args,
+                                                int feedback_slot_id) {
   Output(Bytecode::kNew, RegisterOperand(constructor),
-         RegisterOperand(first_arg), UnsignedOperand(arg_count));
+         RegisterOperand(args.first_register()),
+         UnsignedOperand(args.register_count()),
+         UnsignedOperand(feedback_slot_id));
   return *this;
 }
 
 BytecodeArrayBuilder& BytecodeArrayBuilder::CallRuntime(
-    Runtime::FunctionId function_id, Register first_arg, size_t arg_count) {
+    Runtime::FunctionId function_id, RegisterList args) {
   DCHECK_EQ(1, Runtime::FunctionForId(function_id)->result_size);
   DCHECK(Bytecodes::SizeForUnsignedOperand(function_id) <= OperandSize::kShort);
-  if (!first_arg.is_valid()) {
-    DCHECK_EQ(0u, arg_count);
-    first_arg = Register(0);
-  }
   Bytecode bytecode;
   uint32_t id;
   if (IntrinsicsHelper::IsSupported(function_id)) {
@@ -639,35 +761,56 @@
     bytecode = Bytecode::kCallRuntime;
     id = static_cast<uint32_t>(function_id);
   }
-  Output(bytecode, id, RegisterOperand(first_arg), UnsignedOperand(arg_count));
+  Output(bytecode, id, RegisterOperand(args.first_register()),
+         UnsignedOperand(args.register_count()));
+  return *this;
+}
+
+BytecodeArrayBuilder& BytecodeArrayBuilder::CallRuntime(
+    Runtime::FunctionId function_id, Register arg) {
+  return CallRuntime(function_id, RegisterList(arg.index(), 1));
+}
+
+BytecodeArrayBuilder& BytecodeArrayBuilder::CallRuntime(
+    Runtime::FunctionId function_id) {
+  return CallRuntime(function_id, RegisterList());
+}
+
+BytecodeArrayBuilder& BytecodeArrayBuilder::CallRuntimeForPair(
+    Runtime::FunctionId function_id, RegisterList args,
+    RegisterList return_pair) {
+  DCHECK_EQ(2, Runtime::FunctionForId(function_id)->result_size);
+  DCHECK(Bytecodes::SizeForUnsignedOperand(function_id) <= OperandSize::kShort);
+  DCHECK_EQ(2, return_pair.register_count());
+  Output(Bytecode::kCallRuntimeForPair, static_cast<uint16_t>(function_id),
+         RegisterOperand(args.first_register()),
+         UnsignedOperand(args.register_count()),
+         RegisterOperand(return_pair.first_register()));
   return *this;
 }
 
 BytecodeArrayBuilder& BytecodeArrayBuilder::CallRuntimeForPair(
-    Runtime::FunctionId function_id, Register first_arg, size_t arg_count,
-    Register first_return) {
-  DCHECK_EQ(2, Runtime::FunctionForId(function_id)->result_size);
-  DCHECK(Bytecodes::SizeForUnsignedOperand(function_id) <= OperandSize::kShort);
-  if (!first_arg.is_valid()) {
-    DCHECK_EQ(0u, arg_count);
-    first_arg = Register(0);
-  }
-  Output(Bytecode::kCallRuntimeForPair, static_cast<uint16_t>(function_id),
-         RegisterOperand(first_arg), UnsignedOperand(arg_count),
-         RegisterOperand(first_return));
-  return *this;
+    Runtime::FunctionId function_id, Register arg, RegisterList return_pair) {
+  return CallRuntimeForPair(function_id, RegisterList(arg.index(), 1),
+                            return_pair);
 }
 
-BytecodeArrayBuilder& BytecodeArrayBuilder::CallJSRuntime(
-    int context_index, Register receiver_args, size_t receiver_args_count) {
+BytecodeArrayBuilder& BytecodeArrayBuilder::CallJSRuntime(int context_index,
+                                                          RegisterList args) {
   Output(Bytecode::kCallJSRuntime, UnsignedOperand(context_index),
-         RegisterOperand(receiver_args), UnsignedOperand(receiver_args_count));
+         RegisterOperand(args.first_register()),
+         UnsignedOperand(args.register_count()));
   return *this;
 }
 
 BytecodeArrayBuilder& BytecodeArrayBuilder::Delete(Register object,
                                                    LanguageMode language_mode) {
-  Output(BytecodeForDelete(language_mode), RegisterOperand(object));
+  if (language_mode == SLOPPY) {
+    Output(Bytecode::kDeletePropertySloppy, RegisterOperand(object));
+  } else {
+    DCHECK_EQ(language_mode, STRICT);
+    Output(Bytecode::kDeletePropertyStrict, RegisterOperand(object));
+  }
   return *this;
 }
 
@@ -689,29 +832,6 @@
   latest_source_info_.MakeStatementPosition(return_position_);
 }
 
-void BytecodeArrayBuilder::SetStatementPosition(Statement* stmt) {
-  if (stmt->position() == kNoSourcePosition) return;
-  latest_source_info_.MakeStatementPosition(stmt->position());
-}
-
-void BytecodeArrayBuilder::SetExpressionPosition(Expression* expr) {
-  if (expr->position() == kNoSourcePosition) return;
-  if (!latest_source_info_.is_statement()) {
-    // Ensure the current expression position is overwritten with the
-    // latest value.
-    latest_source_info_.MakeExpressionPosition(expr->position());
-  }
-}
-
-void BytecodeArrayBuilder::SetExpressionAsStatementPosition(Expression* expr) {
-  if (expr->position() == kNoSourcePosition) return;
-  latest_source_info_.MakeStatementPosition(expr->position());
-}
-
-bool BytecodeArrayBuilder::TemporaryRegisterIsLive(Register reg) const {
-  return temporary_register_allocator()->RegisterIsLive(reg);
-}
-
 bool BytecodeArrayBuilder::RegisterIsValid(Register reg) const {
   if (!reg.is_valid()) {
     return false;
@@ -726,7 +846,7 @@
   } else if (reg.index() < fixed_register_count()) {
     return true;
   } else {
-    return TemporaryRegisterIsLive(reg);
+    return register_allocator()->RegisterIsLive(reg);
   }
 }
 
@@ -743,19 +863,6 @@
     switch (operand_types[i]) {
       case OperandType::kNone:
         return false;
-      case OperandType::kRegCount: {
-        CHECK_NE(i, 0);
-        CHECK(operand_types[i - 1] == OperandType::kMaybeReg ||
-              operand_types[i - 1] == OperandType::kReg);
-        if (i > 0 && operands[i] > 0) {
-          Register start = Register::FromOperand(operands[i - 1]);
-          Register end(start.index() + static_cast<int>(operands[i]) - 1);
-          if (!RegisterIsValid(start) || !RegisterIsValid(end) || start > end) {
-            return false;
-          }
-        }
-        break;
-      }
       case OperandType::kFlag8:
       case OperandType::kIntrinsicId:
         if (Bytecodes::SizeForUnsignedOperand(operands[i]) >
@@ -770,17 +877,28 @@
         }
         break;
       case OperandType::kIdx:
-        // TODO(oth): Consider splitting OperandType::kIdx into two
-        // operand types. One which is a constant pool index that can
-        // be checked, and the other is an unsigned value.
+        // TODO(leszeks): Possibly split this up into constant pool indices and
+        // other indices, for checking.
         break;
+      case OperandType::kUImm:
       case OperandType::kImm:
         break;
-      case OperandType::kMaybeReg:
-        if (Register::FromOperand(operands[i]) == Register(0)) {
-          break;
+      case OperandType::kRegList: {
+        CHECK_LT(i, operand_count - 1);
+        CHECK(operand_types[i + 1] == OperandType::kRegCount);
+        int reg_count = static_cast<int>(operands[i + 1]);
+        if (reg_count == 0) {
+          return Register::FromOperand(operands[i]) == Register(0);
+        } else {
+          Register start = Register::FromOperand(operands[i]);
+          Register end(start.index() + reg_count - 1);
+          if (!RegisterIsValid(start) || !RegisterIsValid(end) || start > end) {
+            return false;
+          }
         }
-      // Fall-through to kReg case.
+        i++;  // Skip past kRegCount operand.
+        break;
+      }
       case OperandType::kReg:
       case OperandType::kRegOut: {
         Register reg = Register::FromOperand(operands[i]);
@@ -808,186 +926,14 @@
         }
         break;
       }
+      case OperandType::kRegCount:
+        UNREACHABLE();  // Dealt with in kRegList above.
     }
   }
 
   return true;
 }
 
-// static
-Bytecode BytecodeArrayBuilder::BytecodeForBinaryOperation(Token::Value op) {
-  switch (op) {
-    case Token::Value::ADD:
-      return Bytecode::kAdd;
-    case Token::Value::SUB:
-      return Bytecode::kSub;
-    case Token::Value::MUL:
-      return Bytecode::kMul;
-    case Token::Value::DIV:
-      return Bytecode::kDiv;
-    case Token::Value::MOD:
-      return Bytecode::kMod;
-    case Token::Value::BIT_OR:
-      return Bytecode::kBitwiseOr;
-    case Token::Value::BIT_XOR:
-      return Bytecode::kBitwiseXor;
-    case Token::Value::BIT_AND:
-      return Bytecode::kBitwiseAnd;
-    case Token::Value::SHL:
-      return Bytecode::kShiftLeft;
-    case Token::Value::SAR:
-      return Bytecode::kShiftRight;
-    case Token::Value::SHR:
-      return Bytecode::kShiftRightLogical;
-    default:
-      UNREACHABLE();
-      return Bytecode::kIllegal;
-  }
-}
-
-// static
-Bytecode BytecodeArrayBuilder::BytecodeForCountOperation(Token::Value op) {
-  switch (op) {
-    case Token::Value::ADD:
-      return Bytecode::kInc;
-    case Token::Value::SUB:
-      return Bytecode::kDec;
-    default:
-      UNREACHABLE();
-      return Bytecode::kIllegal;
-  }
-}
-
-// static
-Bytecode BytecodeArrayBuilder::BytecodeForCompareOperation(Token::Value op) {
-  switch (op) {
-    case Token::Value::EQ:
-      return Bytecode::kTestEqual;
-    case Token::Value::NE:
-      return Bytecode::kTestNotEqual;
-    case Token::Value::EQ_STRICT:
-      return Bytecode::kTestEqualStrict;
-    case Token::Value::LT:
-      return Bytecode::kTestLessThan;
-    case Token::Value::GT:
-      return Bytecode::kTestGreaterThan;
-    case Token::Value::LTE:
-      return Bytecode::kTestLessThanOrEqual;
-    case Token::Value::GTE:
-      return Bytecode::kTestGreaterThanOrEqual;
-    case Token::Value::INSTANCEOF:
-      return Bytecode::kTestInstanceOf;
-    case Token::Value::IN:
-      return Bytecode::kTestIn;
-    default:
-      UNREACHABLE();
-      return Bytecode::kIllegal;
-  }
-}
-
-// static
-Bytecode BytecodeArrayBuilder::BytecodeForStoreNamedProperty(
-    LanguageMode language_mode) {
-  switch (language_mode) {
-    case SLOPPY:
-      return Bytecode::kStaNamedPropertySloppy;
-    case STRICT:
-      return Bytecode::kStaNamedPropertyStrict;
-    default:
-      UNREACHABLE();
-  }
-  return Bytecode::kIllegal;
-}
-
-// static
-Bytecode BytecodeArrayBuilder::BytecodeForStoreKeyedProperty(
-    LanguageMode language_mode) {
-  switch (language_mode) {
-    case SLOPPY:
-      return Bytecode::kStaKeyedPropertySloppy;
-    case STRICT:
-      return Bytecode::kStaKeyedPropertyStrict;
-    default:
-      UNREACHABLE();
-  }
-  return Bytecode::kIllegal;
-}
-
-// static
-Bytecode BytecodeArrayBuilder::BytecodeForLoadGlobal(TypeofMode typeof_mode) {
-  return typeof_mode == INSIDE_TYPEOF ? Bytecode::kLdaGlobalInsideTypeof
-                                      : Bytecode::kLdaGlobal;
-}
-
-// static
-Bytecode BytecodeArrayBuilder::BytecodeForStoreGlobal(
-    LanguageMode language_mode) {
-  switch (language_mode) {
-    case SLOPPY:
-      return Bytecode::kStaGlobalSloppy;
-    case STRICT:
-      return Bytecode::kStaGlobalStrict;
-    default:
-      UNREACHABLE();
-  }
-  return Bytecode::kIllegal;
-}
-
-// static
-Bytecode BytecodeArrayBuilder::BytecodeForStoreLookupSlot(
-    LanguageMode language_mode) {
-  switch (language_mode) {
-    case SLOPPY:
-      return Bytecode::kStaLookupSlotSloppy;
-    case STRICT:
-      return Bytecode::kStaLookupSlotStrict;
-    default:
-      UNREACHABLE();
-  }
-  return Bytecode::kIllegal;
-}
-
-// static
-Bytecode BytecodeArrayBuilder::BytecodeForCreateArguments(
-    CreateArgumentsType type) {
-  switch (type) {
-    case CreateArgumentsType::kMappedArguments:
-      return Bytecode::kCreateMappedArguments;
-    case CreateArgumentsType::kUnmappedArguments:
-      return Bytecode::kCreateUnmappedArguments;
-    case CreateArgumentsType::kRestParameter:
-      return Bytecode::kCreateRestParameter;
-  }
-  UNREACHABLE();
-  return Bytecode::kIllegal;
-}
-
-// static
-Bytecode BytecodeArrayBuilder::BytecodeForDelete(LanguageMode language_mode) {
-  switch (language_mode) {
-    case SLOPPY:
-      return Bytecode::kDeletePropertySloppy;
-    case STRICT:
-      return Bytecode::kDeletePropertyStrict;
-    default:
-      UNREACHABLE();
-  }
-  return Bytecode::kIllegal;
-}
-
-// static
-Bytecode BytecodeArrayBuilder::BytecodeForCall(TailCallMode tail_call_mode) {
-  switch (tail_call_mode) {
-    case TailCallMode::kDisallow:
-      return Bytecode::kCall;
-    case TailCallMode::kAllow:
-      return Bytecode::kTailCall;
-    default:
-      UNREACHABLE();
-  }
-  return Bytecode::kIllegal;
-}
-
 }  // namespace interpreter
 }  // namespace internal
 }  // namespace v8
diff --git a/src/interpreter/bytecode-array-builder.h b/src/interpreter/bytecode-array-builder.h
index 51b6186..a9fa7a7 100644
--- a/src/interpreter/bytecode-array-builder.h
+++ b/src/interpreter/bytecode-array-builder.h
@@ -12,7 +12,7 @@
 #include "src/interpreter/bytecodes.h"
 #include "src/interpreter/constant-array-builder.h"
 #include "src/interpreter/handler-table-builder.h"
-#include "src/zone-containers.h"
+#include "src/zone/zone-containers.h"
 
 namespace v8 {
 namespace internal {
@@ -61,23 +61,14 @@
   int fixed_register_count() const { return context_count() + locals_count(); }
 
   // Returns the number of fixed and temporary registers.
-  int fixed_and_temporary_register_count() const {
-    return fixed_register_count() + temporary_register_count();
-  }
-
-  int temporary_register_count() const {
-    return temporary_register_allocator()->allocation_count();
+  int total_register_count() const {
+    DCHECK_LE(fixed_register_count(),
+              register_allocator()->maximum_register_count());
+    return register_allocator()->maximum_register_count();
   }
 
   Register Parameter(int parameter_index) const;
 
-  // Return true if the register |reg| represents a parameter or a
-  // local.
-  bool RegisterIsParameterOrLocal(Register reg) const;
-
-  // Returns true if the register |reg| is a live temporary register.
-  bool TemporaryRegisterIsLive(Register reg) const;
-
   // Constant loads to accumulator.
   BytecodeArrayBuilder& LoadConstantPoolEntry(size_t entry);
   BytecodeArrayBuilder& LoadLiteral(v8::internal::Smi* value);
@@ -94,11 +85,15 @@
                                     int feedback_slot,
                                     LanguageMode language_mode);
 
-  // Load the object at |slot_index| in |context| into the accumulator.
-  BytecodeArrayBuilder& LoadContextSlot(Register context, int slot_index);
+  // Load the object at |slot_index| at |depth| in the context chain starting
+  // with |context| into the accumulator.
+  BytecodeArrayBuilder& LoadContextSlot(Register context, int slot_index,
+                                        int depth);
 
-  // Stores the object in the accumulator into |slot_index| of |context|.
-  BytecodeArrayBuilder& StoreContextSlot(Register context, int slot_index);
+  // Stores the object in the accumulator into |slot_index| at |depth| in the
+  // context chain starting with |context|.
+  BytecodeArrayBuilder& StoreContextSlot(Register context, int slot_index,
+                                         int depth);
 
   // Register-accumulator transfers.
   BytecodeArrayBuilder& LoadAccumulatorWithRegister(Register reg);
@@ -127,6 +122,20 @@
   BytecodeArrayBuilder& LoadLookupSlot(const Handle<String> name,
                                        TypeofMode typeof_mode);
 
+  // Lookup the variable with |name|, which is known to be at |slot_index| at
+  // |depth| in the context chain if not shadowed by a context extension
+  // somewhere in that context chain.
+  BytecodeArrayBuilder& LoadLookupContextSlot(const Handle<String> name,
+                                              TypeofMode typeof_mode,
+                                              int slot_index, int depth);
+
+  // Lookup the variable with |name|, which has its feedback in |feedback_slot|
+  // and is known to be global if not shadowed by a context extension somewhere
+  // up to |depth| in that context chain.
+  BytecodeArrayBuilder& LoadLookupGlobalSlot(const Handle<String> name,
+                                             TypeofMode typeof_mode,
+                                             int feedback_slot, int depth);
+
   // Store value in the accumulator into the variable with |name|.
   BytecodeArrayBuilder& StoreLookupSlot(const Handle<String> name,
                                         LanguageMode language_mode);
@@ -139,17 +148,19 @@
   // in the accumulator.
   BytecodeArrayBuilder& CreateBlockContext(Handle<ScopeInfo> scope_info);
 
-  // Create a new context for a catch block with |exception| and |name| and the
-  // closure in the accumulator.
+  // Create a new context for a catch block with |exception|, |name|,
+  // |scope_info|, and the closure in the accumulator.
   BytecodeArrayBuilder& CreateCatchContext(Register exception,
-                                           Handle<String> name);
+                                           Handle<String> name,
+                                           Handle<ScopeInfo> scope_info);
 
   // Create a new context with size |slots|.
   BytecodeArrayBuilder& CreateFunctionContext(int slots);
 
-  // Creates a new context for a with-statement with the |object| in a register
-  // and the closure in the accumulator.
-  BytecodeArrayBuilder& CreateWithContext(Register object);
+  // Creates a new context with the given |scope_info| for a with-statement
+  // with the |object| in a register and the closure in the accumulator.
+  BytecodeArrayBuilder& CreateWithContext(Register object,
+                                          Handle<ScopeInfo> scope_info);
 
   // Create a new arguments object in the accumulator.
   BytecodeArrayBuilder& CreateArguments(CreateArgumentsType type);
@@ -171,46 +182,42 @@
   BytecodeArrayBuilder& PopContext(Register context);
 
   // Call a JS function. The JSFunction or Callable to be called should be in
-  // |callable|, the receiver should be in |receiver_args| and all subsequent
-  // arguments should be in registers <receiver_args + 1> to
-  // <receiver_args + receiver_arg_count - 1>. Type feedback is recorded in
-  // the |feedback_slot| in the type feedback vector.
+  // |callable|. The arguments should be in |args|, with the receiver in
+  // |args[0]|. Type feedback is recorded in the |feedback_slot| in the type
+  // feedback vector.
   BytecodeArrayBuilder& Call(
-      Register callable, Register receiver_args, size_t receiver_arg_count,
-      int feedback_slot, TailCallMode tail_call_mode = TailCallMode::kDisallow);
-
-  BytecodeArrayBuilder& TailCall(Register callable, Register receiver_args,
-                                 size_t receiver_arg_count, int feedback_slot) {
-    return Call(callable, receiver_args, receiver_arg_count, feedback_slot,
-                TailCallMode::kAllow);
-  }
+      Register callable, RegisterList args, int feedback_slot,
+      TailCallMode tail_call_mode = TailCallMode::kDisallow);
 
   // Call the new operator. The accumulator holds the |new_target|.
-  // The |constructor| is in a register followed by |arg_count|
-  // consecutive arguments starting at |first_arg| for the constuctor
-  // invocation.
-  BytecodeArrayBuilder& New(Register constructor, Register first_arg,
-                            size_t arg_count);
+  // The |constructor| is in a register and arguments are in |args|.
+  BytecodeArrayBuilder& New(Register constructor, RegisterList args,
+                            int feedback_slot);
 
-  // Call the runtime function with |function_id|. The first argument should be
-  // in |first_arg| and all subsequent arguments should be in registers
-  // <first_arg + 1> to <first_arg + arg_count - 1>.
+  // Call the runtime function with |function_id| and arguments |args|.
   BytecodeArrayBuilder& CallRuntime(Runtime::FunctionId function_id,
-                                    Register first_arg, size_t arg_count);
+                                    RegisterList args);
+  // Call the runtime function with |function_id| with single argument |arg|.
+  BytecodeArrayBuilder& CallRuntime(Runtime::FunctionId function_id,
+                                    Register arg);
+  // Call the runtime function with |function_id| with no arguments.
+  BytecodeArrayBuilder& CallRuntime(Runtime::FunctionId function_id);
 
-  // Call the runtime function with |function_id| that returns a pair of values.
-  // The first argument should be in |first_arg| and all subsequent arguments
-  // should be in registers <first_arg + 1> to <first_arg + arg_count - 1>. The
-  // return values will be returned in <first_return> and <first_return + 1>.
+  // Call the runtime function with |function_id| and arguments |args|, that
+  // returns a pair of values. The return values will be returned in
+  // |return_pair|.
   BytecodeArrayBuilder& CallRuntimeForPair(Runtime::FunctionId function_id,
-                                           Register first_arg, size_t arg_count,
-                                           Register first_return);
+                                           RegisterList args,
+                                           RegisterList return_pair);
+  // Call the runtime function with |function_id| with single argument |arg|
+  // that returns a pair of values. The return values will be returned in
+  // |return_pair|.
+  BytecodeArrayBuilder& CallRuntimeForPair(Runtime::FunctionId function_id,
+                                           Register arg,
+                                           RegisterList return_pair);
 
-  // Call the JS runtime function with |context_index|. The the receiver should
-  // be in |receiver_args| and all subsequent arguments should be in registers
-  // <receiver + 1> to <receiver + receiver_args_count - 1>.
-  BytecodeArrayBuilder& CallJSRuntime(int context_index, Register receiver_args,
-                                      size_t receiver_args_count);
+  // Call the JS runtime function with |context_index| and arguments |args|.
+  BytecodeArrayBuilder& CallJSRuntime(int context_index, RegisterList args);
 
   // Operators (register holds the lhs value, accumulator holds the rhs value).
   // Type feedback will be recorded in the |feedback_slot|
@@ -230,15 +237,13 @@
   BytecodeArrayBuilder& Delete(Register object, LanguageMode language_mode);
 
   // Tests.
-  BytecodeArrayBuilder& CompareOperation(Token::Value op, Register reg);
+  BytecodeArrayBuilder& CompareOperation(Token::Value op, Register reg,
+                                         int feedback_slot = kNoFeedbackSlot);
 
-  // Casts accumulator and stores result in accumulator.
-  BytecodeArrayBuilder& CastAccumulatorToBoolean();
-
-  // Casts accumulator and stores result in register |out|.
-  BytecodeArrayBuilder& CastAccumulatorToJSObject(Register out);
-  BytecodeArrayBuilder& CastAccumulatorToName(Register out);
-  BytecodeArrayBuilder& CastAccumulatorToNumber(Register out);
+  // Converts accumulator and stores result in register |out|.
+  BytecodeArrayBuilder& ConvertAccumulatorToObject(Register out);
+  BytecodeArrayBuilder& ConvertAccumulatorToName(Register out);
+  BytecodeArrayBuilder& ConvertAccumulatorToNumber(Register out);
 
   // Flow Control.
   BytecodeArrayBuilder& Bind(BytecodeLabel* label);
@@ -250,11 +255,10 @@
   BytecodeArrayBuilder& JumpIfNotHole(BytecodeLabel* label);
   BytecodeArrayBuilder& JumpIfNull(BytecodeLabel* label);
   BytecodeArrayBuilder& JumpIfUndefined(BytecodeLabel* label);
+  BytecodeArrayBuilder& JumpLoop(BytecodeLabel* label, int loop_depth);
 
   BytecodeArrayBuilder& StackCheck(int position);
 
-  BytecodeArrayBuilder& OsrPoll(int loop_depth);
-
   BytecodeArrayBuilder& Throw();
   BytecodeArrayBuilder& ReThrow();
   BytecodeArrayBuilder& Return();
@@ -264,10 +268,10 @@
 
   // Complex flow control.
   BytecodeArrayBuilder& ForInPrepare(Register receiver,
-                                     Register cache_info_triple);
-  BytecodeArrayBuilder& ForInDone(Register index, Register cache_length);
+                                     RegisterList cache_info_triple);
+  BytecodeArrayBuilder& ForInContinue(Register index, Register cache_length);
   BytecodeArrayBuilder& ForInNext(Register receiver, Register index,
-                                  Register cache_type_array_pair,
+                                  RegisterList cache_type_array_pair,
                                   int feedback_slot);
   BytecodeArrayBuilder& ForInStep(Register index);
 
@@ -292,20 +296,55 @@
 
   void InitializeReturnPosition(FunctionLiteral* literal);
 
-  void SetStatementPosition(Statement* stmt);
-  void SetExpressionPosition(Expression* expr);
-  void SetExpressionAsStatementPosition(Expression* expr);
+  void SetStatementPosition(Statement* stmt) {
+    if (stmt->position() == kNoSourcePosition) return;
+    latest_source_info_.MakeStatementPosition(stmt->position());
+  }
+
+  void SetExpressionPosition(Expression* expr) {
+    if (expr->position() == kNoSourcePosition) return;
+    if (!latest_source_info_.is_statement()) {
+      // Ensure the current expression position is overwritten with the
+      // latest value.
+      latest_source_info_.MakeExpressionPosition(expr->position());
+    }
+  }
+
+  void SetExpressionAsStatementPosition(Expression* expr) {
+    if (expr->position() == kNoSourcePosition) return;
+    latest_source_info_.MakeStatementPosition(expr->position());
+  }
+
+  bool RequiresImplicitReturn() const { return !return_seen_in_block_; }
 
   // Accessors
-  TemporaryRegisterAllocator* temporary_register_allocator() {
-    return &temporary_allocator_;
+  BytecodeRegisterAllocator* register_allocator() {
+    return &register_allocator_;
   }
-  const TemporaryRegisterAllocator* temporary_register_allocator() const {
-    return &temporary_allocator_;
+  const BytecodeRegisterAllocator* register_allocator() const {
+    return &register_allocator_;
   }
   Zone* zone() const { return zone_; }
 
-  void EnsureReturn();
+ private:
+  friend class BytecodeRegisterAllocator;
+
+  INLINE(void Output(Bytecode bytecode, uint32_t operand0, uint32_t operand1,
+                     uint32_t operand2, uint32_t operand3));
+  INLINE(void Output(Bytecode bytecode, uint32_t operand0, uint32_t operand1,
+                     uint32_t operand2));
+  INLINE(void Output(Bytecode bytecode, uint32_t operand0, uint32_t operand1));
+  INLINE(void Output(Bytecode bytecode, uint32_t operand0));
+  INLINE(void Output(Bytecode bytecode));
+
+  INLINE(void OutputJump(Bytecode bytecode, BytecodeLabel* label));
+  INLINE(void OutputJump(Bytecode bytecode, uint32_t operand0,
+                         BytecodeLabel* label));
+
+  bool RegisterIsValid(Register reg) const;
+  bool OperandsAreValid(Bytecode bytecode, int operand_count,
+                        uint32_t operand0 = 0, uint32_t operand1 = 0,
+                        uint32_t operand2 = 0, uint32_t operand3 = 0) const;
 
   static uint32_t RegisterOperand(Register reg) {
     return static_cast<uint32_t>(reg.ToOperand());
@@ -325,40 +364,6 @@
     return static_cast<uint32_t>(value);
   }
 
- private:
-  friend class BytecodeRegisterAllocator;
-
-  static Bytecode BytecodeForBinaryOperation(Token::Value op);
-  static Bytecode BytecodeForCountOperation(Token::Value op);
-  static Bytecode BytecodeForCompareOperation(Token::Value op);
-  static Bytecode BytecodeForStoreNamedProperty(LanguageMode language_mode);
-  static Bytecode BytecodeForStoreKeyedProperty(LanguageMode language_mode);
-  static Bytecode BytecodeForLoadGlobal(TypeofMode typeof_mode);
-  static Bytecode BytecodeForStoreGlobal(LanguageMode language_mode);
-  static Bytecode BytecodeForStoreLookupSlot(LanguageMode language_mode);
-  static Bytecode BytecodeForCreateArguments(CreateArgumentsType type);
-  static Bytecode BytecodeForDelete(LanguageMode language_mode);
-  static Bytecode BytecodeForCall(TailCallMode tail_call_mode);
-
-  void Output(Bytecode bytecode, uint32_t operand0, uint32_t operand1,
-              uint32_t operand2, uint32_t operand3);
-  void Output(Bytecode bytecode, uint32_t operand0, uint32_t operand1,
-              uint32_t operand2);
-  void Output(Bytecode bytecode, uint32_t operand0, uint32_t operand1);
-  void Output(Bytecode bytecode, uint32_t operand0);
-  void Output(Bytecode bytecode);
-
-  BytecodeArrayBuilder& OutputJump(Bytecode jump_bytecode,
-                                   BytecodeLabel* label);
-
-  bool RegisterIsValid(Register reg) const;
-  bool OperandsAreValid(Bytecode bytecode, int operand_count,
-                        uint32_t operand0 = 0, uint32_t operand1 = 0,
-                        uint32_t operand2 = 0, uint32_t operand3 = 0) const;
-
-  // Attach latest source position to |node|.
-  void AttachSourceInfo(BytecodeNode* node);
-
   // Set position for return.
   void SetReturnPosition();
 
@@ -395,11 +400,13 @@
   int local_register_count_;
   int context_register_count_;
   int return_position_;
-  TemporaryRegisterAllocator temporary_allocator_;
+  BytecodeRegisterAllocator register_allocator_;
   BytecodeArrayWriter bytecode_array_writer_;
   BytecodePipelineStage* pipeline_;
   BytecodeSourceInfo latest_source_info_;
 
+  static int const kNoFeedbackSlot = 0;
+
   DISALLOW_COPY_AND_ASSIGN(BytecodeArrayBuilder);
 };
 
diff --git a/src/interpreter/bytecode-array-iterator.cc b/src/interpreter/bytecode-array-iterator.cc
index 84c0028..e596b11 100644
--- a/src/interpreter/bytecode-array-iterator.cc
+++ b/src/interpreter/bytecode-array-iterator.cc
@@ -97,6 +97,13 @@
   return GetUnsignedOperand(operand_index, OperandType::kFlag8);
 }
 
+uint32_t BytecodeArrayIterator::GetUnsignedImmediateOperand(
+    int operand_index) const {
+  DCHECK_EQ(Bytecodes::GetOperandType(current_bytecode(), operand_index),
+            OperandType::kUImm);
+  return GetUnsignedOperand(operand_index, OperandType::kUImm);
+}
+
 int32_t BytecodeArrayIterator::GetImmediateOperand(int operand_index) const {
   DCHECK_EQ(Bytecodes::GetOperandType(current_bytecode(), operand_index),
             OperandType::kImm);
@@ -133,11 +140,11 @@
   DCHECK_LE(operand_index, Bytecodes::NumberOfOperands(current_bytecode()));
   const OperandType* operand_types =
       Bytecodes::GetOperandTypes(current_bytecode());
-  DCHECK(Bytecodes::IsRegisterOperandType(operand_types[operand_index]));
-  if (operand_types[operand_index + 1] == OperandType::kRegCount) {
+  OperandType operand_type = operand_types[operand_index];
+  DCHECK(Bytecodes::IsRegisterOperandType(operand_type));
+  if (operand_type == OperandType::kRegList) {
     return GetRegisterCountOperand(operand_index + 1);
   } else {
-    OperandType operand_type = operand_types[operand_index];
     return Bytecodes::GetNumberOfRegistersRepresentedBy(operand_type);
   }
 }
diff --git a/src/interpreter/bytecode-array-iterator.h b/src/interpreter/bytecode-array-iterator.h
index 0f7c6c7..0922625 100644
--- a/src/interpreter/bytecode-array-iterator.h
+++ b/src/interpreter/bytecode-array-iterator.h
@@ -31,6 +31,7 @@
   }
 
   uint32_t GetFlagOperand(int operand_index) const;
+  uint32_t GetUnsignedImmediateOperand(int operand_index) const;
   int32_t GetImmediateOperand(int operand_index) const;
   uint32_t GetIndexOperand(int operand_index) const;
   uint32_t GetRegisterCountOperand(int operand_index) const;
diff --git a/src/interpreter/bytecode-array-writer.cc b/src/interpreter/bytecode-array-writer.cc
index 6694a36..fb38768 100644
--- a/src/interpreter/bytecode-array-writer.cc
+++ b/src/interpreter/bytecode-array-writer.cc
@@ -21,27 +21,23 @@
     Zone* zone, ConstantArrayBuilder* constant_array_builder,
     SourcePositionTableBuilder::RecordingMode source_position_mode)
     : bytecodes_(zone),
-      max_register_count_(0),
       unbound_jumps_(0),
       source_position_table_builder_(zone, source_position_mode),
-      constant_array_builder_(constant_array_builder) {}
+      constant_array_builder_(constant_array_builder) {
+  bytecodes_.reserve(512);  // Derived via experimentation.
+}
 
 // override
 BytecodeArrayWriter::~BytecodeArrayWriter() {}
 
 // override
 Handle<BytecodeArray> BytecodeArrayWriter::ToBytecodeArray(
-    Isolate* isolate, int fixed_register_count, int parameter_count,
+    Isolate* isolate, int register_count, int parameter_count,
     Handle<FixedArray> handler_table) {
   DCHECK_EQ(0, unbound_jumps_);
 
   int bytecode_size = static_cast<int>(bytecodes()->size());
-
-  // All locals need a frame slot for the debugger, but may not be
-  // present in generated code.
-  int frame_size_for_locals = fixed_register_count * kPointerSize;
-  int frame_size_used = max_register_count() * kPointerSize;
-  int frame_size = std::max(frame_size_for_locals, frame_size_used);
+  int frame_size = register_count * kPointerSize;
   Handle<FixedArray> constant_pool =
       constant_array_builder()->ToFixedArray(isolate);
   Handle<BytecodeArray> bytecode_array = isolate->factory()->NewBytecodeArray(
@@ -104,116 +100,48 @@
   }
 }
 
-namespace {
-
-OperandScale ScaleForScalableByteOperand(OperandSize operand_size) {
-  STATIC_ASSERT(static_cast<int>(OperandSize::kByte) ==
-                static_cast<int>(OperandScale::kSingle));
-  STATIC_ASSERT(static_cast<int>(OperandSize::kShort) ==
-                static_cast<int>(OperandScale::kDouble));
-  STATIC_ASSERT(static_cast<int>(OperandSize::kQuad) ==
-                static_cast<int>(OperandScale::kQuadruple));
-  return static_cast<OperandScale>(operand_size);
-}
-
-OperandScale OperandScaleForScalableSignedByte(uint32_t operand_value) {
-  int32_t signed_operand = static_cast<int32_t>(operand_value);
-  OperandSize bytes_required = Bytecodes::SizeForSignedOperand(signed_operand);
-  return ScaleForScalableByteOperand(bytes_required);
-}
-
-OperandScale OperandScaleForScalableUnsignedByte(uint32_t operand_value) {
-  OperandSize bytes_required = Bytecodes::SizeForUnsignedOperand(operand_value);
-  return ScaleForScalableByteOperand(bytes_required);
-}
-
-OperandScale GetOperandScale(const BytecodeNode* const node) {
-  const OperandTypeInfo* operand_type_infos =
-      Bytecodes::GetOperandTypeInfos(node->bytecode());
-  OperandScale operand_scale = OperandScale::kSingle;
-  int operand_count = node->operand_count();
-  for (int i = 0; i < operand_count; ++i) {
-    switch (operand_type_infos[i]) {
-      case OperandTypeInfo::kScalableSignedByte: {
-        uint32_t operand = node->operand(i);
-        operand_scale =
-            std::max(operand_scale, OperandScaleForScalableSignedByte(operand));
-        break;
-      }
-      case OperandTypeInfo::kScalableUnsignedByte: {
-        uint32_t operand = node->operand(i);
-        operand_scale = std::max(operand_scale,
-                                 OperandScaleForScalableUnsignedByte(operand));
-        break;
-      }
-      case OperandTypeInfo::kFixedUnsignedByte:
-      case OperandTypeInfo::kFixedUnsignedShort:
-        break;
-      case OperandTypeInfo::kNone:
-        UNREACHABLE();
-        break;
-    }
-  }
-  return operand_scale;
-}
-
-}  // namespace
-
 void BytecodeArrayWriter::EmitBytecode(const BytecodeNode* const node) {
   DCHECK_NE(node->bytecode(), Bytecode::kIllegal);
 
-  uint8_t buffer[kMaxSizeOfPackedBytecode];
-  uint8_t* buffer_limit = buffer;
+  Bytecode bytecode = node->bytecode();
+  OperandScale operand_scale = node->operand_scale();
 
-  OperandScale operand_scale = GetOperandScale(node);
   if (operand_scale != OperandScale::kSingle) {
     Bytecode prefix = Bytecodes::OperandScaleToPrefixBytecode(operand_scale);
-    *buffer_limit++ = Bytecodes::ToByte(prefix);
+    bytecodes()->push_back(Bytecodes::ToByte(prefix));
   }
-
-  Bytecode bytecode = node->bytecode();
-  *buffer_limit++ = Bytecodes::ToByte(bytecode);
+  bytecodes()->push_back(Bytecodes::ToByte(bytecode));
 
   const uint32_t* const operands = node->operands();
-  const OperandType* operand_types = Bytecodes::GetOperandTypes(bytecode);
-  const int operand_count = Bytecodes::NumberOfOperands(bytecode);
+  const int operand_count = node->operand_count();
+  const OperandSize* operand_sizes =
+      Bytecodes::GetOperandSizes(bytecode, operand_scale);
   for (int i = 0; i < operand_count; ++i) {
-    OperandSize operand_size =
-        Bytecodes::SizeOfOperand(operand_types[i], operand_scale);
-    switch (operand_size) {
+    switch (operand_sizes[i]) {
       case OperandSize::kNone:
         UNREACHABLE();
         break;
       case OperandSize::kByte:
-        *buffer_limit++ = static_cast<uint8_t>(operands[i]);
+        bytecodes()->push_back(static_cast<uint8_t>(operands[i]));
         break;
       case OperandSize::kShort: {
-        WriteUnalignedUInt16(buffer_limit, operands[i]);
-        buffer_limit += 2;
+        uint16_t operand = static_cast<uint16_t>(operands[i]);
+        const uint8_t* raw_operand = reinterpret_cast<const uint8_t*>(&operand);
+        bytecodes()->push_back(raw_operand[0]);
+        bytecodes()->push_back(raw_operand[1]);
         break;
       }
       case OperandSize::kQuad: {
-        WriteUnalignedUInt32(buffer_limit, operands[i]);
-        buffer_limit += 4;
+        const uint8_t* raw_operand =
+            reinterpret_cast<const uint8_t*>(&operands[i]);
+        bytecodes()->push_back(raw_operand[0]);
+        bytecodes()->push_back(raw_operand[1]);
+        bytecodes()->push_back(raw_operand[2]);
+        bytecodes()->push_back(raw_operand[3]);
         break;
       }
     }
-
-    int count = Bytecodes::GetNumberOfRegistersRepresentedBy(operand_types[i]);
-    if (count == 0) {
-      continue;
-    }
-    // NB operand_types is terminated by OperandType::kNone so
-    // operand_types[i + 1] is valid whilst i < operand_count.
-    if (operand_types[i + 1] == OperandType::kRegCount) {
-      count = static_cast<int>(operands[i]);
-    }
-    Register reg = Register::FromOperand(static_cast<int32_t>(operands[i]));
-    max_register_count_ = std::max(max_register_count_, reg.index() + count);
   }
-
-  DCHECK_LE(buffer_limit, buffer + sizeof(buffer));
-  bytecodes()->insert(bytecodes()->end(), buffer, buffer_limit);
 }
 
 // static
@@ -247,18 +175,17 @@
   DCHECK(Bytecodes::IsJumpImmediate(jump_bytecode));
   size_t operand_location = jump_location + 1;
   DCHECK_EQ(bytecodes()->at(operand_location), k8BitJumpPlaceholder);
-  if (Bytecodes::SizeForSignedOperand(delta) == OperandSize::kByte) {
-    // The jump fits within the range of an Imm operand, so cancel
+  if (Bytecodes::ScaleForSignedOperand(delta) == OperandScale::kSingle) {
+    // The jump fits within the range of an Imm8 operand, so cancel
     // the reservation and jump directly.
     constant_array_builder()->DiscardReservedEntry(OperandSize::kByte);
     bytecodes()->at(operand_location) = static_cast<uint8_t>(delta);
   } else {
-    // The jump does not fit within the range of an Imm operand, so
+    // The jump does not fit within the range of an Imm8 operand, so
     // commit reservation putting the offset into the constant pool,
     // and update the jump instruction and operand.
     size_t entry = constant_array_builder()->CommitReservedEntry(
         OperandSize::kByte, Smi::FromInt(delta));
-    DCHECK_LE(entry, kMaxUInt32);
     DCHECK_EQ(Bytecodes::SizeForUnsignedOperand(static_cast<uint32_t>(entry)),
               OperandSize::kByte);
     jump_bytecode = GetJumpWithConstantOperand(jump_bytecode);
@@ -273,14 +200,21 @@
   DCHECK(Bytecodes::IsJumpImmediate(jump_bytecode));
   size_t operand_location = jump_location + 1;
   uint8_t operand_bytes[2];
-  if (Bytecodes::SizeForSignedOperand(delta) <= OperandSize::kShort) {
+  if (Bytecodes::ScaleForSignedOperand(delta) <= OperandScale::kDouble) {
+    // The jump fits within the range of an Imm16 operand, so cancel
+    // the reservation and jump directly.
     constant_array_builder()->DiscardReservedEntry(OperandSize::kShort);
     WriteUnalignedUInt16(operand_bytes, static_cast<uint16_t>(delta));
   } else {
-    jump_bytecode = GetJumpWithConstantOperand(jump_bytecode);
-    bytecodes()->at(jump_location) = Bytecodes::ToByte(jump_bytecode);
+    // The jump does not fit within the range of an Imm16 operand, so
+    // commit reservation putting the offset into the constant pool,
+    // and update the jump instruction and operand.
     size_t entry = constant_array_builder()->CommitReservedEntry(
         OperandSize::kShort, Smi::FromInt(delta));
+    DCHECK_EQ(Bytecodes::SizeForUnsignedOperand(static_cast<uint32_t>(entry)),
+              OperandSize::kShort);
+    jump_bytecode = GetJumpWithConstantOperand(jump_bytecode);
+    bytecodes()->at(jump_location) = Bytecodes::ToByte(jump_bytecode);
     WriteUnalignedUInt16(operand_bytes, static_cast<uint16_t>(entry));
   }
   DCHECK(bytecodes()->at(operand_location) == k8BitJumpPlaceholder &&
@@ -351,13 +285,14 @@
     // Label has been bound already so this is a backwards jump.
     size_t abs_delta = current_offset - label->offset();
     int delta = -static_cast<int>(abs_delta);
-    OperandSize operand_size = Bytecodes::SizeForSignedOperand(delta);
-    if (operand_size > OperandSize::kByte) {
+    OperandScale operand_scale = Bytecodes::ScaleForSignedOperand(delta);
+    if (operand_scale > OperandScale::kSingle) {
       // Adjust for scaling byte prefix for wide jump offset.
       DCHECK_LE(delta, 0);
       delta -= 1;
     }
-    node->set_bytecode(node->bytecode(), delta);
+    DCHECK_EQ(Bytecode::kJumpLoop, node->bytecode());
+    node->set_bytecode(node->bytecode(), delta, node->operand(1));
   } else {
     // The label has not yet been bound so this is a forward reference
     // that will be patched when the label is bound. We create a
@@ -369,6 +304,7 @@
     label->set_referrer(current_offset);
     OperandSize reserved_operand_size =
         constant_array_builder()->CreateReservedEntry();
+    DCHECK_NE(Bytecode::kJumpLoop, node->bytecode());
     switch (reserved_operand_size) {
       case OperandSize::kNone:
         UNREACHABLE();
diff --git a/src/interpreter/bytecode-array-writer.h b/src/interpreter/bytecode-array-writer.h
index 17fe3d4..712fcb9 100644
--- a/src/interpreter/bytecode-array-writer.h
+++ b/src/interpreter/bytecode-array-writer.h
@@ -33,7 +33,7 @@
   void BindLabel(BytecodeLabel* label) override;
   void BindLabel(const BytecodeLabel& target, BytecodeLabel* label) override;
   Handle<BytecodeArray> ToBytecodeArray(
-      Isolate* isolate, int fixed_register_count, int parameter_count,
+      Isolate* isolate, int register_count, int parameter_count,
       Handle<FixedArray> handler_table) override;
 
  private:
@@ -69,10 +69,8 @@
   ConstantArrayBuilder* constant_array_builder() {
     return constant_array_builder_;
   }
-  int max_register_count() { return max_register_count_; }
 
   ZoneVector<uint8_t> bytecodes_;
-  int max_register_count_;
   int unbound_jumps_;
   SourcePositionTableBuilder source_position_table_builder_;
   ConstantArrayBuilder* constant_array_builder_;
diff --git a/src/interpreter/bytecode-dead-code-optimizer.cc b/src/interpreter/bytecode-dead-code-optimizer.cc
index 5d301c7..848036c 100644
--- a/src/interpreter/bytecode-dead-code-optimizer.cc
+++ b/src/interpreter/bytecode-dead-code-optimizer.cc
@@ -14,10 +14,10 @@
 
 // override
 Handle<BytecodeArray> BytecodeDeadCodeOptimizer::ToBytecodeArray(
-    Isolate* isolate, int fixed_register_count, int parameter_count,
+    Isolate* isolate, int register_count, int parameter_count,
     Handle<FixedArray> handler_table) {
-  return next_stage_->ToBytecodeArray(isolate, fixed_register_count,
-                                      parameter_count, handler_table);
+  return next_stage_->ToBytecodeArray(isolate, register_count, parameter_count,
+                                      handler_table);
 }
 
 // override
diff --git a/src/interpreter/bytecode-dead-code-optimizer.h b/src/interpreter/bytecode-dead-code-optimizer.h
index 8a9732c..188d610 100644
--- a/src/interpreter/bytecode-dead-code-optimizer.h
+++ b/src/interpreter/bytecode-dead-code-optimizer.h
@@ -24,7 +24,7 @@
   void BindLabel(BytecodeLabel* label) override;
   void BindLabel(const BytecodeLabel& target, BytecodeLabel* label) override;
   Handle<BytecodeArray> ToBytecodeArray(
-      Isolate* isolate, int fixed_register_count, int parameter_count,
+      Isolate* isolate, int register_count, int parameter_count,
       Handle<FixedArray> handler_table) override;
 
  private:
diff --git a/src/interpreter/bytecode-decoder.cc b/src/interpreter/bytecode-decoder.cc
index 74c5806..4975189 100644
--- a/src/interpreter/bytecode-decoder.cc
+++ b/src/interpreter/bytecode-decoder.cc
@@ -23,6 +23,15 @@
 }
 
 // static
+RegisterList BytecodeDecoder::DecodeRegisterListOperand(
+    const uint8_t* operand_start, uint32_t count, OperandType operand_type,
+    OperandScale operand_scale) {
+  Register first_reg =
+      DecodeRegisterOperand(operand_start, operand_type, operand_scale);
+  return RegisterList(first_reg.index(), static_cast<int>(count));
+}
+
+// static
 int32_t BytecodeDecoder::DecodeSignedOperand(const uint8_t* operand_start,
                                              OperandType operand_type,
                                              OperandScale operand_scale) {
@@ -94,7 +103,6 @@
   if (Bytecodes::IsDebugBreak(bytecode)) return os;
 
   int number_of_operands = Bytecodes::NumberOfOperands(bytecode);
-  int range = 0;
   for (int i = 0; i < number_of_operands; i++) {
     OperandType op_type = Bytecodes::GetOperandType(bytecode, i);
     int operand_offset =
@@ -102,11 +110,8 @@
     const uint8_t* operand_start =
         &bytecode_start[prefix_offset + operand_offset];
     switch (op_type) {
-      case interpreter::OperandType::kRegCount:
-        os << "#"
-           << DecodeUnsignedOperand(operand_start, op_type, operand_scale);
-        break;
       case interpreter::OperandType::kIdx:
+      case interpreter::OperandType::kUImm:
       case interpreter::OperandType::kRuntimeId:
       case interpreter::OperandType::kIntrinsicId:
         os << "["
@@ -121,7 +126,6 @@
         os << "#"
            << DecodeUnsignedOperand(operand_start, op_type, operand_scale);
         break;
-      case interpreter::OperandType::kMaybeReg:
       case interpreter::OperandType::kReg:
       case interpreter::OperandType::kRegOut: {
         Register reg =
@@ -129,19 +133,40 @@
         os << reg.ToString(parameter_count);
         break;
       }
-      case interpreter::OperandType::kRegOutTriple:
-        range += 1;
+      case interpreter::OperandType::kRegOutTriple: {
+        RegisterList reg_list =
+            DecodeRegisterListOperand(operand_start, 3, op_type, operand_scale);
+        os << reg_list.first_register().ToString(parameter_count) << "-"
+           << reg_list.last_register().ToString(parameter_count);
+        break;
+      }
       case interpreter::OperandType::kRegOutPair:
       case interpreter::OperandType::kRegPair: {
-        range += 1;
-        Register first_reg =
-            DecodeRegisterOperand(operand_start, op_type, operand_scale);
-        Register last_reg = Register(first_reg.index() + range);
-        os << first_reg.ToString(parameter_count) << "-"
-           << last_reg.ToString(parameter_count);
+        RegisterList reg_list =
+            DecodeRegisterListOperand(operand_start, 2, op_type, operand_scale);
+        os << reg_list.first_register().ToString(parameter_count) << "-"
+           << reg_list.last_register().ToString(parameter_count);
+        break;
+      }
+      case interpreter::OperandType::kRegList: {
+        DCHECK_LT(i, number_of_operands - 1);
+        DCHECK_EQ(Bytecodes::GetOperandType(bytecode, i + 1),
+                  OperandType::kRegCount);
+        int reg_count_offset =
+            Bytecodes::GetOperandOffset(bytecode, i + 1, operand_scale);
+        const uint8_t* reg_count_operand =
+            &bytecode_start[prefix_offset + reg_count_offset];
+        uint32_t count = DecodeUnsignedOperand(
+            reg_count_operand, OperandType::kRegCount, operand_scale);
+        RegisterList reg_list = DecodeRegisterListOperand(
+            operand_start, count, op_type, operand_scale);
+        os << reg_list.first_register().ToString(parameter_count) << "-"
+           << reg_list.last_register().ToString(parameter_count);
+        i++;  // Skip kRegCount.
         break;
       }
       case interpreter::OperandType::kNone:
+      case interpreter::OperandType::kRegCount:  // Dealt with in kRegList.
         UNREACHABLE();
         break;
     }
diff --git a/src/interpreter/bytecode-decoder.h b/src/interpreter/bytecode-decoder.h
index 6613179..d1749ef 100644
--- a/src/interpreter/bytecode-decoder.h
+++ b/src/interpreter/bytecode-decoder.h
@@ -21,6 +21,12 @@
                                         OperandType operand_type,
                                         OperandScale operand_scale);
 
+  // Decodes a register list operand in a byte array.
+  static RegisterList DecodeRegisterListOperand(const uint8_t* operand_start,
+                                                uint32_t count,
+                                                OperandType operand_type,
+                                                OperandScale operand_scale);
+
   // Decodes a signed operand in a byte array.
   static int32_t DecodeSignedOperand(const uint8_t* operand_start,
                                      OperandType operand_type,
diff --git a/src/interpreter/bytecode-flags.cc b/src/interpreter/bytecode-flags.cc
index 9b25dbd..158af13 100644
--- a/src/interpreter/bytecode-flags.cc
+++ b/src/interpreter/bytecode-flags.cc
@@ -11,6 +11,14 @@
 namespace interpreter {
 
 // static
+uint8_t CreateArrayLiteralFlags::Encode(bool use_fast_shallow_clone,
+                                        int runtime_flags) {
+  uint8_t result = FlagsBits::encode(runtime_flags);
+  result |= FastShallowCloneBit::encode(use_fast_shallow_clone);
+  return result;
+}
+
+// static
 uint8_t CreateObjectLiteralFlags::Encode(bool fast_clone_supported,
                                          int properties_count,
                                          int runtime_flags) {
diff --git a/src/interpreter/bytecode-flags.h b/src/interpreter/bytecode-flags.h
index 1068d8a..6e87ce2 100644
--- a/src/interpreter/bytecode-flags.h
+++ b/src/interpreter/bytecode-flags.h
@@ -11,6 +11,17 @@
 namespace internal {
 namespace interpreter {
 
+class CreateArrayLiteralFlags {
+ public:
+  class FlagsBits : public BitField8<int, 0, 3> {};
+  class FastShallowCloneBit : public BitField8<bool, FlagsBits::kNext, 1> {};
+
+  static uint8_t Encode(bool use_fast_shallow_clone, int runtime_flags);
+
+ private:
+  DISALLOW_IMPLICIT_CONSTRUCTORS(CreateArrayLiteralFlags);
+};
+
 class CreateObjectLiteralFlags {
  public:
   class FlagsBits : public BitField8<int, 0, 3> {};
diff --git a/src/interpreter/bytecode-generator.cc b/src/interpreter/bytecode-generator.cc
index 6ff43a4..db5a596 100644
--- a/src/interpreter/bytecode-generator.cc
+++ b/src/interpreter/bytecode-generator.cc
@@ -4,15 +4,16 @@
 
 #include "src/interpreter/bytecode-generator.h"
 
+#include "src/ast/compile-time-value.h"
 #include "src/ast/scopes.h"
 #include "src/code-stubs.h"
+#include "src/compilation-info.h"
 #include "src/compiler.h"
 #include "src/interpreter/bytecode-flags.h"
 #include "src/interpreter/bytecode-label.h"
 #include "src/interpreter/bytecode-register-allocator.h"
 #include "src/interpreter/control-flow-builders.h"
 #include "src/objects.h"
-#include "src/parsing/parser.h"
 #include "src/parsing/token.h"
 
 namespace v8 {
@@ -216,10 +217,10 @@
       case CMD_CONTINUE:
         UNREACHABLE();
       case CMD_RETURN:
-        generator()->builder()->Return();
+        generator()->BuildReturn();
         return true;
       case CMD_RETHROW:
-        generator()->builder()->ReThrow();
+        generator()->BuildReThrow();
         return true;
     }
     return false;
@@ -310,7 +311,7 @@
       case CMD_RETURN:
         break;
       case CMD_RETHROW:
-        generator()->builder()->ReThrow();
+        generator()->BuildReThrow();
         return true;
     }
     return false;
@@ -373,75 +374,35 @@
  public:
   explicit RegisterAllocationScope(BytecodeGenerator* generator)
       : generator_(generator),
-        outer_(generator->register_allocator()),
-        allocator_(builder()->zone(),
-                   builder()->temporary_register_allocator()) {
-    generator_->set_register_allocator(this);
-  }
+        outer_next_register_index_(
+            generator->register_allocator()->next_register_index()) {}
 
   virtual ~RegisterAllocationScope() {
-    generator_->set_register_allocator(outer_);
+    generator_->register_allocator()->ReleaseRegisters(
+        outer_next_register_index_);
   }
 
-  Register NewRegister() {
-    RegisterAllocationScope* current_scope = generator()->register_allocator();
-    if ((current_scope == this) ||
-        (current_scope->outer() == this &&
-         !current_scope->allocator_.HasConsecutiveAllocations())) {
-      // Regular case - Allocating registers in current or outer context.
-      // VisitForRegisterValue allocates register in outer context.
-      return allocator_.NewRegister();
-    } else {
-      // If it is required to allocate a register other than current or outer
-      // scopes, allocate a new temporary register. It might be expensive to
-      // walk the full context chain and compute the list of consecutive
-      // reservations in the innerscopes.
-      UNIMPLEMENTED();
-      return Register::invalid_value();
-    }
-  }
-
-  void PrepareForConsecutiveAllocations(int count) {
-    allocator_.PrepareForConsecutiveAllocations(count);
-  }
-
-  Register NextConsecutiveRegister() {
-    return allocator_.NextConsecutiveRegister();
-  }
-
-  bool RegisterIsAllocatedInThisScope(Register reg) const {
-    return allocator_.RegisterIsAllocatedInThisScope(reg);
-  }
-
-  RegisterAllocationScope* outer() const { return outer_; }
-
  private:
-  BytecodeGenerator* generator() const { return generator_; }
-  BytecodeArrayBuilder* builder() const { return generator_->builder(); }
-
   BytecodeGenerator* generator_;
-  RegisterAllocationScope* outer_;
-  BytecodeRegisterAllocator allocator_;
+  int outer_next_register_index_;
 
   DISALLOW_COPY_AND_ASSIGN(RegisterAllocationScope);
 };
 
-// Scoped base class for determining where the result of an expression
-// is stored.
+// Scoped base class for determining how the result of an expression will be
+// used.
 class BytecodeGenerator::ExpressionResultScope {
  public:
   ExpressionResultScope(BytecodeGenerator* generator, Expression::Context kind)
       : generator_(generator),
         kind_(kind),
         outer_(generator->execution_result()),
-        allocator_(generator),
-        result_identified_(false) {
+        allocator_(generator) {
     generator_->set_execution_result(this);
   }
 
   virtual ~ExpressionResultScope() {
     generator_->set_execution_result(outer_);
-    DCHECK(result_identified() || generator_->HasStackOverflow());
   }
 
   bool IsEffect() const { return kind_ == Expression::kEffect; }
@@ -453,28 +414,11 @@
     return reinterpret_cast<TestResultScope*>(this);
   }
 
-  virtual void SetResultInAccumulator() = 0;
-  virtual void SetResultInRegister(Register reg) = 0;
-
- protected:
-  ExpressionResultScope* outer() const { return outer_; }
-  BytecodeArrayBuilder* builder() const { return generator_->builder(); }
-  BytecodeGenerator* generator() const { return generator_; }
-  const RegisterAllocationScope* allocator() const { return &allocator_; }
-
-  void set_result_identified() {
-    DCHECK(!result_identified());
-    result_identified_ = true;
-  }
-
-  bool result_identified() const { return result_identified_; }
-
  private:
   BytecodeGenerator* generator_;
   Expression::Context kind_;
   ExpressionResultScope* outer_;
   RegisterAllocationScope allocator_;
-  bool result_identified_;
 
   DISALLOW_COPY_AND_ASSIGN(ExpressionResultScope);
 };
@@ -485,61 +429,15 @@
     : public ExpressionResultScope {
  public:
   explicit EffectResultScope(BytecodeGenerator* generator)
-      : ExpressionResultScope(generator, Expression::kEffect) {
-    set_result_identified();
-  }
-
-  virtual void SetResultInAccumulator() {}
-  virtual void SetResultInRegister(Register reg) {}
+      : ExpressionResultScope(generator, Expression::kEffect) {}
 };
 
 // Scoped class used when the result of the current expression to be
-// evaluated should go into the interpreter's accumulator register.
-class BytecodeGenerator::AccumulatorResultScope final
-    : public ExpressionResultScope {
+// evaluated should go into the interpreter's accumulator.
+class BytecodeGenerator::ValueResultScope final : public ExpressionResultScope {
  public:
-  explicit AccumulatorResultScope(BytecodeGenerator* generator)
+  explicit ValueResultScope(BytecodeGenerator* generator)
       : ExpressionResultScope(generator, Expression::kValue) {}
-
-  virtual void SetResultInAccumulator() { set_result_identified(); }
-
-  virtual void SetResultInRegister(Register reg) {
-    builder()->LoadAccumulatorWithRegister(reg);
-    set_result_identified();
-  }
-};
-
-// Scoped class used when the result of the current expression to be
-// evaluated should go into an interpreter register.
-class BytecodeGenerator::RegisterResultScope final
-    : public ExpressionResultScope {
- public:
-  explicit RegisterResultScope(BytecodeGenerator* generator)
-      : ExpressionResultScope(generator, Expression::kValue) {}
-
-  virtual void SetResultInAccumulator() {
-    result_register_ = allocator()->outer()->NewRegister();
-    builder()->StoreAccumulatorInRegister(result_register_);
-    set_result_identified();
-  }
-
-  virtual void SetResultInRegister(Register reg) {
-    DCHECK(builder()->RegisterIsParameterOrLocal(reg) ||
-           (builder()->TemporaryRegisterIsLive(reg) &&
-            !allocator()->RegisterIsAllocatedInThisScope(reg)));
-    result_register_ = reg;
-    set_result_identified();
-  }
-
-  Register ResultRegister() {
-    if (generator()->HasStackOverflow() && !result_identified()) {
-      SetResultInAccumulator();
-    }
-    return result_register_;
-  }
-
- private:
-  Register result_register_;
 };
 
 // Scoped class used when the result of the current expression to be
@@ -554,18 +452,10 @@
         fallthrough_(fallthrough),
         result_consumed_by_test_(false) {}
 
-  virtual void SetResultInAccumulator() { set_result_identified(); }
-
-  virtual void SetResultInRegister(Register reg) {
-    builder()->LoadAccumulatorWithRegister(reg);
-    set_result_identified();
-  }
-
   // Used when code special cases for TestResultScope and consumes any
   // possible value by testing and jumping to a then/else label.
   void SetResultConsumedByTest() {
     result_consumed_by_test_ = true;
-    set_result_identified();
   }
 
   bool ResultConsumedByTest() { return result_consumed_by_test_; }
@@ -677,22 +567,17 @@
       execution_control_(nullptr),
       execution_context_(nullptr),
       execution_result_(nullptr),
-      register_allocator_(nullptr),
       generator_resume_points_(info->literal()->yield_count(), info->zone()),
       generator_state_(),
       loop_depth_(0),
       home_object_symbol_(info->isolate()->factory()->home_object_symbol()),
       prototype_string_(info->isolate()->factory()->prototype_string()) {
-  InitializeAstVisitor(info->isolate()->stack_guard()->real_climit());
 }
 
 Handle<BytecodeArray> BytecodeGenerator::FinalizeBytecode(Isolate* isolate) {
-  // Create an inner HandleScope to avoid unnecessarily canonicalizing handles
-  // created as part of bytecode finalization.
-  HandleScope scope(isolate);
   AllocateDeferredConstants();
   if (HasStackOverflow()) return Handle<BytecodeArray>();
-  return scope.CloseAndEscape(builder()->ToBytecodeArray(isolate));
+  return builder()->ToBytecodeArray(isolate);
 }
 
 void BytecodeGenerator::AllocateDeferredConstants() {
@@ -726,11 +611,13 @@
   }
 }
 
-void BytecodeGenerator::GenerateBytecode() {
+void BytecodeGenerator::GenerateBytecode(uintptr_t stack_limit) {
   DisallowHeapAllocation no_allocation;
   DisallowHandleAllocation no_handles;
   DisallowHandleDereference no_deref;
 
+  InitializeAstVisitor(stack_limit);
+
   // Initialize the incoming context.
   ContextScope incoming_context(this, scope(), false);
 
@@ -744,12 +631,11 @@
     VisitGeneratorPrologue();
   }
 
-  // Build function context only if there are context allocated variables.
   if (scope()->NeedsContext()) {
     // Push a new inner context scope for the function.
-    VisitNewLocalFunctionContext();
+    BuildNewLocalActivationContext();
     ContextScope local_function_context(this, scope(), false);
-    VisitBuildLocalActivationContext();
+    BuildLocalActivationContextInitialization();
     GenerateBytecodeBody();
   } else {
     GenerateBytecodeBody();
@@ -763,7 +649,13 @@
     if (!label.is_bound()) builder()->Bind(&label);
   }
 
-  builder()->EnsureReturn();
+  // Emit an implicit return instruction in case control flow can fall off the
+  // end of the function without an explicit return being present on all paths.
+  if (builder()->RequiresImplicitReturn()) {
+    builder()->LoadUndefined();
+    BuildReturn();
+  }
+  DCHECK(!builder()->RequiresImplicitReturn());
 }
 
 void BytecodeGenerator::GenerateBytecodeBody() {
@@ -771,8 +663,7 @@
   VisitArgumentsObject(scope()->arguments());
 
   // Build rest arguments array if it is used.
-  int rest_index;
-  Variable* rest_parameter = scope()->rest_parameter(&rest_index);
+  Variable* rest_parameter = scope()->rest_parameter();
   VisitRestArgumentsArray(rest_parameter);
 
   // Build assignment to {.this_function} variable if it is used.
@@ -781,10 +672,8 @@
   // Build assignment to {new.target} variable if it is used.
   VisitNewTargetVariable(scope()->new_target_var());
 
-  // TODO(rmcilroy): Emit tracing call if requested to do so.
-  if (FLAG_trace) {
-    UNIMPLEMENTED();
-  }
+  // Emit tracing call if requested to do so.
+  if (FLAG_trace) builder()->CallRuntime(Runtime::kTraceEnter);
 
   // Visit declarations within the function scope.
   VisitDeclarations(scope()->declarations());
@@ -829,14 +718,6 @@
 
   loop_builder->LoopHeader(&resume_points_in_loop);
 
-  // Insert an explicit {OsrPoll} right after the loop header, to trigger
-  // on-stack replacement when armed for the given loop nesting depth.
-  if (FLAG_ignition_osr) {
-    // TODO(4764): Merge this with another bytecode (e.g. {Jump} back edge).
-    int level = Min(loop_depth_, AbstractCode::kMaxLoopNestingMarker - 1);
-    builder()->OsrPoll(level);
-  }
-
   if (stmt->yield_count() > 0) {
     // If we are not resuming, fall through to loop body.
     // If we are resuming, perform state dispatch.
@@ -882,7 +763,7 @@
 void BytecodeGenerator::VisitBlock(Block* stmt) {
   // Visit declarations and statements.
   if (stmt->scope() != nullptr && stmt->scope()->NeedsContext()) {
-    VisitNewLocalBlockContext(stmt->scope());
+    BuildNewLocalBlockContext(stmt->scope());
     ContextScope scope(this, stmt->scope());
     VisitBlockDeclarationsAndStatements(stmt);
   } else {
@@ -903,7 +784,6 @@
 void BytecodeGenerator::VisitVariableDeclaration(VariableDeclaration* decl) {
   Variable* variable = decl->proxy()->var();
   switch (variable->location()) {
-    case VariableLocation::GLOBAL:
     case VariableLocation::UNALLOCATED: {
       DCHECK(!variable->binding_needs_init());
       FeedbackVectorSlot slot = decl->proxy()->VariableFeedbackSlot();
@@ -926,8 +806,9 @@
       break;
     case VariableLocation::CONTEXT:
       if (variable->binding_needs_init()) {
+        DCHECK_EQ(0, execution_context()->ContextChainDepth(variable->scope()));
         builder()->LoadTheHole().StoreContextSlot(execution_context()->reg(),
-                                                  variable->index());
+                                                  variable->index(), 0);
       }
       break;
     case VariableLocation::LOOKUP: {
@@ -939,18 +820,24 @@
       builder()
           ->LoadLiteral(variable->name())
           .StoreAccumulatorInRegister(name)
-          .CallRuntime(Runtime::kDeclareEvalVar, name, 1);
+          .CallRuntime(Runtime::kDeclareEvalVar, name);
       break;
     }
     case VariableLocation::MODULE:
-      UNREACHABLE();
+      if (variable->IsExport() && variable->binding_needs_init()) {
+        builder()->LoadTheHole();
+        VisitVariableAssignment(variable, Token::INIT,
+                                FeedbackVectorSlot::Invalid());
+      }
+      // Nothing to do for imports.
+      break;
   }
 }
 
 void BytecodeGenerator::VisitFunctionDeclaration(FunctionDeclaration* decl) {
   Variable* variable = decl->proxy()->var();
+  DCHECK(variable->mode() == LET || variable->mode() == VAR);
   switch (variable->location()) {
-    case VariableLocation::GLOBAL:
     case VariableLocation::UNALLOCATED: {
       FeedbackVectorSlot slot = decl->proxy()->VariableFeedbackSlot();
       globals_builder()->AddFunctionDeclaration(slot, decl->fun());
@@ -959,8 +846,6 @@
     case VariableLocation::PARAMETER:
     case VariableLocation::LOCAL: {
       VisitForAccumulatorValue(decl->fun());
-      DCHECK(variable->mode() == LET || variable->mode() == VAR ||
-             variable->mode() == CONST);
       VisitVariableAssignment(variable, Token::INIT,
                               FeedbackVectorSlot::Invalid());
       break;
@@ -968,23 +853,27 @@
     case VariableLocation::CONTEXT: {
       DCHECK_EQ(0, execution_context()->ContextChainDepth(variable->scope()));
       VisitForAccumulatorValue(decl->fun());
-      builder()->StoreContextSlot(execution_context()->reg(),
-                                  variable->index());
+      builder()->StoreContextSlot(execution_context()->reg(), variable->index(),
+                                  0);
       break;
     }
     case VariableLocation::LOOKUP: {
-      register_allocator()->PrepareForConsecutiveAllocations(2);
-      Register name = register_allocator()->NextConsecutiveRegister();
-      Register literal = register_allocator()->NextConsecutiveRegister();
-      builder()->LoadLiteral(variable->name()).StoreAccumulatorInRegister(name);
-
+      RegisterList args = register_allocator()->NewRegisterList(2);
+      builder()
+          ->LoadLiteral(variable->name())
+          .StoreAccumulatorInRegister(args[0]);
       VisitForAccumulatorValue(decl->fun());
-      builder()->StoreAccumulatorInRegister(literal).CallRuntime(
-          Runtime::kDeclareEvalFunction, name, 2);
+      builder()->StoreAccumulatorInRegister(args[1]).CallRuntime(
+          Runtime::kDeclareEvalFunction, args);
       break;
     }
     case VariableLocation::MODULE:
-      UNREACHABLE();
+      DCHECK_EQ(variable->mode(), LET);
+      DCHECK(variable->IsExport());
+      VisitForAccumulatorValue(decl->fun());
+      VisitVariableAssignment(variable, Token::INIT,
+                              FeedbackVectorSlot::Invalid());
+      break;
   }
 }
 
@@ -1002,20 +891,15 @@
       builder()->AllocateConstantPoolEntry());
   int encoded_flags = info()->GetDeclareGlobalsFlags();
 
-  register_allocator()->PrepareForConsecutiveAllocations(3);
-
-  Register pairs = register_allocator()->NextConsecutiveRegister();
-  Register flags = register_allocator()->NextConsecutiveRegister();
-  Register function = register_allocator()->NextConsecutiveRegister();
-
   // Emit code to declare globals.
+  RegisterList args = register_allocator()->NewRegisterList(3);
   builder()
       ->LoadConstantPoolEntry(globals_builder()->constant_pool_entry())
-      .StoreAccumulatorInRegister(pairs)
+      .StoreAccumulatorInRegister(args[0])
       .LoadLiteral(Smi::FromInt(encoded_flags))
-      .StoreAccumulatorInRegister(flags)
-      .MoveRegister(Register::function_closure(), function)
-      .CallRuntime(Runtime::kDeclareGlobalsForInterpreter, pairs, 3);
+      .StoreAccumulatorInRegister(args[1])
+      .MoveRegister(Register::function_closure(), args[2])
+      .CallRuntime(Runtime::kDeclareGlobalsForInterpreter, args);
 
   // Push and reset globals builder.
   global_declarations_.push_back(globals_builder());
@@ -1097,7 +981,7 @@
 void BytecodeGenerator::VisitWithStatement(WithStatement* stmt) {
   builder()->SetStatementPosition(stmt);
   VisitForAccumulatorValue(stmt->expression());
-  VisitNewLocalWithContext();
+  BuildNewLocalWithContext(stmt->scope());
   VisitInScope(stmt->statement(), stmt->scope());
 }
 
@@ -1126,7 +1010,9 @@
 
     // Perform label comparison as if via '===' with tag.
     VisitForAccumulatorValue(clause->label());
-    builder()->CompareOperation(Token::Value::EQ_STRICT, tag);
+    builder()->CompareOperation(
+        Token::Value::EQ_STRICT, tag,
+        feedback_index(clause->CompareOperationFeedbackSlot()));
     switch_builder.Case(i);
   }
 
@@ -1168,13 +1054,16 @@
   } else if (stmt->cond()->ToBooleanIsTrue()) {
     VisitIterationHeader(stmt, &loop_builder);
     VisitIterationBody(stmt, &loop_builder);
-    loop_builder.JumpToHeader();
+    loop_builder.JumpToHeader(loop_depth_);
   } else {
     VisitIterationHeader(stmt, &loop_builder);
     VisitIterationBody(stmt, &loop_builder);
     builder()->SetExpressionAsStatementPosition(stmt->cond());
-    VisitForTest(stmt->cond(), loop_builder.header_labels(),
-                 loop_builder.break_labels(), TestFallthrough::kElse);
+    BytecodeLabels loop_backbranch(zone());
+    VisitForTest(stmt->cond(), &loop_backbranch, loop_builder.break_labels(),
+                 TestFallthrough::kThen);
+    loop_backbranch.Bind(builder());
+    loop_builder.JumpToHeader(loop_depth_);
   }
   loop_builder.EndLoop();
 }
@@ -1195,7 +1084,7 @@
     loop_body.Bind(builder());
   }
   VisitIterationBody(stmt, &loop_builder);
-  loop_builder.JumpToHeader();
+  loop_builder.JumpToHeader(loop_depth_);
   loop_builder.EndLoop();
 }
 
@@ -1223,7 +1112,7 @@
     builder()->SetStatementPosition(stmt->next());
     Visit(stmt->next());
   }
-  loop_builder.JumpToHeader();
+  loop_builder.JumpToHeader(loop_depth_);
   loop_builder.EndLoop();
 }
 
@@ -1265,36 +1154,28 @@
     }
     case NAMED_SUPER_PROPERTY: {
       RegisterAllocationScope register_scope(this);
-      register_allocator()->PrepareForConsecutiveAllocations(4);
-      Register receiver = register_allocator()->NextConsecutiveRegister();
-      Register home_object = register_allocator()->NextConsecutiveRegister();
-      Register name = register_allocator()->NextConsecutiveRegister();
-      Register value = register_allocator()->NextConsecutiveRegister();
-      builder()->StoreAccumulatorInRegister(value);
+      RegisterList args = register_allocator()->NewRegisterList(4);
+      builder()->StoreAccumulatorInRegister(args[3]);
       SuperPropertyReference* super_property =
           property->obj()->AsSuperPropertyReference();
-      VisitForRegisterValue(super_property->this_var(), receiver);
-      VisitForRegisterValue(super_property->home_object(), home_object);
+      VisitForRegisterValue(super_property->this_var(), args[0]);
+      VisitForRegisterValue(super_property->home_object(), args[1]);
       builder()
           ->LoadLiteral(property->key()->AsLiteral()->AsPropertyName())
-          .StoreAccumulatorInRegister(name);
-      BuildNamedSuperPropertyStore(receiver, home_object, name, value);
+          .StoreAccumulatorInRegister(args[2])
+          .CallRuntime(StoreToSuperRuntimeId(), args);
       break;
     }
     case KEYED_SUPER_PROPERTY: {
       RegisterAllocationScope register_scope(this);
-      register_allocator()->PrepareForConsecutiveAllocations(4);
-      Register receiver = register_allocator()->NextConsecutiveRegister();
-      Register home_object = register_allocator()->NextConsecutiveRegister();
-      Register key = register_allocator()->NextConsecutiveRegister();
-      Register value = register_allocator()->NextConsecutiveRegister();
-      builder()->StoreAccumulatorInRegister(value);
+      RegisterList args = register_allocator()->NewRegisterList(4);
+      builder()->StoreAccumulatorInRegister(args[3]);
       SuperPropertyReference* super_property =
           property->obj()->AsSuperPropertyReference();
-      VisitForRegisterValue(super_property->this_var(), receiver);
-      VisitForRegisterValue(super_property->home_object(), home_object);
-      VisitForRegisterValue(property->key(), key);
-      BuildKeyedSuperPropertyStore(receiver, home_object, key, value);
+      VisitForRegisterValue(super_property->this_var(), args[0]);
+      VisitForRegisterValue(super_property->home_object(), args[1]);
+      VisitForRegisterValue(property->key(), args[2]);
+      builder()->CallRuntime(StoreKeyedToSuperRuntimeId(), args);
       break;
     }
   }
@@ -1316,15 +1197,12 @@
   builder()->JumpIfUndefined(&subject_undefined_label);
   builder()->JumpIfNull(&subject_null_label);
   Register receiver = register_allocator()->NewRegister();
-  builder()->CastAccumulatorToJSObject(receiver);
+  builder()->ConvertAccumulatorToObject(receiver);
 
-  register_allocator()->PrepareForConsecutiveAllocations(3);
-  Register cache_type = register_allocator()->NextConsecutiveRegister();
-  Register cache_array = register_allocator()->NextConsecutiveRegister();
-  Register cache_length = register_allocator()->NextConsecutiveRegister();
   // Used as kRegTriple and kRegPair in ForInPrepare and ForInNext.
-  USE(cache_array);
-  builder()->ForInPrepare(receiver, cache_type);
+  RegisterList triple = register_allocator()->NewRegisterList(3);
+  Register cache_length = triple[2];
+  builder()->ForInPrepare(receiver, triple);
 
   // Set up loop counter
   Register index = register_allocator()->NewRegister();
@@ -1334,17 +1212,17 @@
   // The loop
   VisitIterationHeader(stmt, &loop_builder);
   builder()->SetExpressionAsStatementPosition(stmt->each());
-  builder()->ForInDone(index, cache_length);
-  loop_builder.BreakIfTrue();
-  DCHECK(Register::AreContiguous(cache_type, cache_array));
+  builder()->ForInContinue(index, cache_length);
+  loop_builder.BreakIfFalse();
   FeedbackVectorSlot slot = stmt->ForInFeedbackSlot();
-  builder()->ForInNext(receiver, index, cache_type, feedback_index(slot));
+  builder()->ForInNext(receiver, index, triple.Truncate(2),
+                       feedback_index(slot));
   loop_builder.ContinueIfUndefined();
   VisitForInAssignment(stmt->each(), stmt->EachFeedbackSlot());
   VisitIterationBody(stmt, &loop_builder);
   builder()->ForInStep(index);
   builder()->StoreAccumulatorInRegister(index);
-  loop_builder.JumpToHeader();
+  loop_builder.JumpToHeader(loop_depth_);
   loop_builder.EndLoop();
   builder()->Bind(&subject_null_label);
   builder()->Bind(&subject_undefined_label);
@@ -1364,13 +1242,12 @@
 
   VisitForEffect(stmt->assign_each());
   VisitIterationBody(stmt, &loop_builder);
-  loop_builder.JumpToHeader();
+  loop_builder.JumpToHeader(loop_depth_);
   loop_builder.EndLoop();
 }
 
 void BytecodeGenerator::VisitTryCatchStatement(TryCatchStatement* stmt) {
   TryCatchBuilder try_control_builder(builder(), stmt->catch_prediction());
-  Register no_reg;
 
   // Preserve the context in a dedicated register, so that it can be restored
   // when the handler is entered by the stack-unwinding machinery.
@@ -1388,12 +1265,12 @@
   try_control_builder.EndTry();
 
   // Create a catch scope that binds the exception.
-  VisitNewLocalCatchContext(stmt->variable());
+  BuildNewLocalCatchContext(stmt->variable(), stmt->scope());
   builder()->StoreAccumulatorInRegister(context);
 
   // If requested, clear message object as we enter the catch block.
   if (stmt->clear_pending_message()) {
-    builder()->CallRuntime(Runtime::kInterpreterClearPendingMessage, no_reg, 0);
+    builder()->CallRuntime(Runtime::kInterpreterClearPendingMessage);
   }
 
   // Load the catch context into the accumulator.
@@ -1406,7 +1283,6 @@
 
 void BytecodeGenerator::VisitTryFinallyStatement(TryFinallyStatement* stmt) {
   TryFinallyBuilder try_control_builder(builder(), stmt->catch_prediction());
-  Register no_reg;
 
   // We keep a record of all paths that enter the finally-block to be able to
   // dispatch to the correct continuation point after the statements in the
@@ -1454,7 +1330,7 @@
 
   // Clear message object as we enter the finally block.
   builder()
-      ->CallRuntime(Runtime::kInterpreterClearPendingMessage, no_reg, 0)
+      ->CallRuntime(Runtime::kInterpreterClearPendingMessage)
       .StoreAccumulatorInRegister(message);
 
   // Evaluate the finally-block.
@@ -1462,7 +1338,7 @@
   try_control_builder.EndFinally();
 
   // Pending message object is restored on exit.
-  builder()->CallRuntime(Runtime::kInterpreterSetPendingMessage, message, 1);
+  builder()->CallRuntime(Runtime::kInterpreterSetPendingMessage, message);
 
   // Dynamic dispatch after the finally-block.
   commands.ApplyDeferredCommands();
@@ -1479,16 +1355,15 @@
   size_t entry = builder()->AllocateConstantPoolEntry();
   builder()->CreateClosure(entry, flags);
   function_literals_.push_back(std::make_pair(expr, entry));
-  execution_result()->SetResultInAccumulator();
 }
 
 void BytecodeGenerator::VisitClassLiteral(ClassLiteral* expr) {
   VisitClassLiteralForRuntimeDefinition(expr);
 
   // Load the "prototype" from the constructor.
-  register_allocator()->PrepareForConsecutiveAllocations(2);
-  Register literal = register_allocator()->NextConsecutiveRegister();
-  Register prototype = register_allocator()->NextConsecutiveRegister();
+  RegisterList args = register_allocator()->NewRegisterList(2);
+  Register literal = args[0];
+  Register prototype = args[1];
   FeedbackVectorSlot slot = expr->PrototypeSlot();
   builder()
       ->StoreAccumulatorInRegister(literal)
@@ -1496,7 +1371,7 @@
       .StoreAccumulatorInRegister(prototype);
 
   VisitClassLiteralProperties(expr, literal, prototype);
-  builder()->CallRuntime(Runtime::kToFastProperties, literal, 1);
+  builder()->CallRuntime(Runtime::kToFastProperties, literal);
   // Assign to class variable.
   if (expr->class_variable_proxy() != nullptr) {
     Variable* var = expr->class_variable_proxy()->var();
@@ -1505,49 +1380,37 @@
                                   : FeedbackVectorSlot::Invalid();
     VisitVariableAssignment(var, Token::INIT, slot);
   }
-  execution_result()->SetResultInAccumulator();
 }
 
 void BytecodeGenerator::VisitClassLiteralForRuntimeDefinition(
     ClassLiteral* expr) {
-  AccumulatorResultScope result_scope(this);
-  register_allocator()->PrepareForConsecutiveAllocations(4);
-  Register extends = register_allocator()->NextConsecutiveRegister();
-  Register constructor = register_allocator()->NextConsecutiveRegister();
-  Register start_position = register_allocator()->NextConsecutiveRegister();
-  Register end_position = register_allocator()->NextConsecutiveRegister();
-
+  RegisterAllocationScope register_scope(this);
+  RegisterList args = register_allocator()->NewRegisterList(4);
   VisitForAccumulatorValueOrTheHole(expr->extends());
-  builder()->StoreAccumulatorInRegister(extends);
-
-  VisitForAccumulatorValue(expr->constructor());
+  builder()->StoreAccumulatorInRegister(args[0]);
+  VisitForRegisterValue(expr->constructor(), args[1]);
   builder()
-      ->StoreAccumulatorInRegister(constructor)
-      .LoadLiteral(Smi::FromInt(expr->start_position()))
-      .StoreAccumulatorInRegister(start_position)
+      ->LoadLiteral(Smi::FromInt(expr->start_position()))
+      .StoreAccumulatorInRegister(args[2])
       .LoadLiteral(Smi::FromInt(expr->end_position()))
-      .StoreAccumulatorInRegister(end_position)
-      .CallRuntime(Runtime::kDefineClass, extends, 4);
-  result_scope.SetResultInAccumulator();
+      .StoreAccumulatorInRegister(args[3])
+      .CallRuntime(Runtime::kDefineClass, args);
 }
 
 void BytecodeGenerator::VisitClassLiteralProperties(ClassLiteral* expr,
                                                     Register literal,
                                                     Register prototype) {
   RegisterAllocationScope register_scope(this);
-  register_allocator()->PrepareForConsecutiveAllocations(5);
-  Register receiver = register_allocator()->NextConsecutiveRegister();
-  Register key = register_allocator()->NextConsecutiveRegister();
-  Register value = register_allocator()->NextConsecutiveRegister();
-  Register attr = register_allocator()->NextConsecutiveRegister();
-  Register set_function_name = register_allocator()->NextConsecutiveRegister();
+  RegisterList args = register_allocator()->NewRegisterList(5);
+  Register receiver = args[0], key = args[1], value = args[2], attr = args[3],
+           set_function_name = args[4];
 
   bool attr_assigned = false;
   Register old_receiver = Register::invalid_value();
 
   // Create nodes to store method values into the literal.
   for (int i = 0; i < expr->properties()->length(); i++) {
-    ObjectLiteral::Property* property = expr->properties()->at(i);
+    ClassLiteral::Property* property = expr->properties()->at(i);
 
     // Set-up receiver.
     Register new_receiver = property->is_static() ? literal : prototype;
@@ -1557,17 +1420,23 @@
     }
 
     VisitForAccumulatorValue(property->key());
-    builder()->CastAccumulatorToName(key);
-    // The static prototype property is read only. We handle the non computed
-    // property name case in the parser. Since this is the only case where we
-    // need to check for an own read only property we special case this so we do
-    // not need to do this for every property.
-    if (property->is_static() && property->is_computed_name()) {
-      VisitClassLiteralStaticPrototypeWithComputedName(key);
-    }
-    VisitForAccumulatorValue(property->value());
-    builder()->StoreAccumulatorInRegister(value);
+    builder()->ConvertAccumulatorToName(key);
 
+    if (property->is_static() && property->is_computed_name()) {
+      // The static prototype property is read only. We handle the non computed
+      // property name case in the parser. Since this is the only case where we
+      // need to check for an own read only property we special case this so we
+      // do not need to do this for every property.
+      BytecodeLabel done;
+      builder()
+          ->LoadLiteral(prototype_string())
+          .CompareOperation(Token::Value::EQ_STRICT, key)
+          .JumpIfFalse(&done)
+          .CallRuntime(Runtime::kThrowStaticPrototypeError)
+          .Bind(&done);
+    }
+
+    VisitForRegisterValue(property->value(), value);
     VisitSetHomeObject(value, receiver, property);
 
     if (!attr_assigned) {
@@ -1578,51 +1447,36 @@
     }
 
     switch (property->kind()) {
-      case ObjectLiteral::Property::CONSTANT:
-      case ObjectLiteral::Property::MATERIALIZED_LITERAL:
-      case ObjectLiteral::Property::PROTOTYPE:
-        // Invalid properties for ES6 classes.
-        UNREACHABLE();
-        break;
-      case ObjectLiteral::Property::COMPUTED: {
+      case ClassLiteral::Property::METHOD: {
         builder()
             ->LoadLiteral(Smi::FromInt(property->NeedsSetFunctionName()))
-            .StoreAccumulatorInRegister(set_function_name);
-        builder()->CallRuntime(Runtime::kDefineDataPropertyInLiteral, receiver,
-                               5);
+            .StoreAccumulatorInRegister(set_function_name)
+            .CallRuntime(Runtime::kDefineDataPropertyInLiteral, args);
         break;
       }
-      case ObjectLiteral::Property::GETTER: {
+      case ClassLiteral::Property::GETTER: {
         builder()->CallRuntime(Runtime::kDefineGetterPropertyUnchecked,
-                               receiver, 4);
+                               args.Truncate(4));
         break;
       }
-      case ObjectLiteral::Property::SETTER: {
+      case ClassLiteral::Property::SETTER: {
         builder()->CallRuntime(Runtime::kDefineSetterPropertyUnchecked,
-                               receiver, 4);
+                               args.Truncate(4));
+        break;
+      }
+      case ClassLiteral::Property::FIELD: {
+        UNREACHABLE();
         break;
       }
     }
   }
 }
 
-void BytecodeGenerator::VisitClassLiteralStaticPrototypeWithComputedName(
-    Register key) {
-  BytecodeLabel done;
-  builder()
-      ->LoadLiteral(prototype_string())
-      .CompareOperation(Token::Value::EQ_STRICT, key)
-      .JumpIfFalse(&done)
-      .CallRuntime(Runtime::kThrowStaticPrototypeError, Register(0), 0)
-      .Bind(&done);
-}
-
 void BytecodeGenerator::VisitNativeFunctionLiteral(
     NativeFunctionLiteral* expr) {
   size_t entry = builder()->AllocateConstantPoolEntry();
   builder()->CreateClosure(entry, NOT_TENURED);
   native_function_literals_.push_back(std::make_pair(expr, entry));
-  execution_result()->SetResultInAccumulator();
 }
 
 void BytecodeGenerator::VisitDoExpression(DoExpression* expr) {
@@ -1652,8 +1506,6 @@
     VisitForAccumulatorValue(expr->else_expression());
     builder()->Bind(&end_label);
   }
-
-  execution_result()->SetResultInAccumulator();
 }
 
 void BytecodeGenerator::VisitLiteral(Literal* expr) {
@@ -1674,7 +1526,6 @@
     } else {
       builder()->LoadLiteral(raw_value->value());
     }
-    execution_result()->SetResultInAccumulator();
   }
 }
 
@@ -1682,7 +1533,6 @@
   // Materialize a regular expression literal.
   builder()->CreateRegExpLiteral(expr->pattern(), expr->literal_index(),
                                  expr->flags());
-  execution_result()->SetResultInAccumulator();
 }
 
 void BytecodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
@@ -1693,7 +1543,7 @@
       expr->ComputeFlags());
   // Allocate in the outer scope since this register is used to return the
   // expression's results to the caller.
-  Register literal = register_allocator()->outer()->NewRegister();
+  Register literal = register_allocator()->NewRegister();
   builder()->CreateObjectLiteral(expr->constant_properties(),
                                  expr->literal_index(), flags, literal);
 
@@ -1737,23 +1587,17 @@
             VisitForEffect(property->value());
           }
         } else {
-          register_allocator()->PrepareForConsecutiveAllocations(4);
-          Register literal_argument =
-              register_allocator()->NextConsecutiveRegister();
-          Register key = register_allocator()->NextConsecutiveRegister();
-          Register value = register_allocator()->NextConsecutiveRegister();
-          Register language = register_allocator()->NextConsecutiveRegister();
+          RegisterList args = register_allocator()->NewRegisterList(4);
 
-          builder()->MoveRegister(literal, literal_argument);
-          VisitForAccumulatorValue(property->key());
-          builder()->StoreAccumulatorInRegister(key);
-          VisitForAccumulatorValue(property->value());
-          builder()->StoreAccumulatorInRegister(value);
+          builder()->MoveRegister(literal, args[0]);
+          VisitForRegisterValue(property->key(), args[1]);
+          VisitForRegisterValue(property->value(), args[2]);
           if (property->emit_store()) {
             builder()
                 ->LoadLiteral(Smi::FromInt(SLOPPY))
-                .StoreAccumulatorInRegister(language)
-                .CallRuntime(Runtime::kSetProperty, literal_argument, 4);
+                .StoreAccumulatorInRegister(args[3])
+                .CallRuntime(Runtime::kSetProperty, args);
+            Register value = args[2];
             VisitSetHomeObject(value, literal, property);
           }
         }
@@ -1761,15 +1605,10 @@
       }
       case ObjectLiteral::Property::PROTOTYPE: {
         DCHECK(property->emit_store());
-        register_allocator()->PrepareForConsecutiveAllocations(2);
-        Register literal_argument =
-            register_allocator()->NextConsecutiveRegister();
-        Register value = register_allocator()->NextConsecutiveRegister();
-
-        builder()->MoveRegister(literal, literal_argument);
-        VisitForAccumulatorValue(property->value());
-        builder()->StoreAccumulatorInRegister(value).CallRuntime(
-            Runtime::kInternalSetPrototype, literal_argument, 2);
+        RegisterList args = register_allocator()->NewRegisterList(2);
+        builder()->MoveRegister(literal, args[0]);
+        VisitForRegisterValue(property->value(), args[1]);
+        builder()->CallRuntime(Runtime::kInternalSetPrototype, args);
         break;
       }
       case ObjectLiteral::Property::GETTER:
@@ -1790,23 +1629,15 @@
   for (AccessorTable::Iterator it = accessor_table.begin();
        it != accessor_table.end(); ++it) {
     RegisterAllocationScope inner_register_scope(this);
-    register_allocator()->PrepareForConsecutiveAllocations(5);
-    Register literal_argument = register_allocator()->NextConsecutiveRegister();
-    Register name = register_allocator()->NextConsecutiveRegister();
-    Register getter = register_allocator()->NextConsecutiveRegister();
-    Register setter = register_allocator()->NextConsecutiveRegister();
-    Register attr = register_allocator()->NextConsecutiveRegister();
-
-    builder()->MoveRegister(literal, literal_argument);
-    VisitForAccumulatorValue(it->first);
-    builder()->StoreAccumulatorInRegister(name);
-    VisitObjectLiteralAccessor(literal, it->second->getter, getter);
-    VisitObjectLiteralAccessor(literal, it->second->setter, setter);
+    RegisterList args = register_allocator()->NewRegisterList(5);
+    builder()->MoveRegister(literal, args[0]);
+    VisitForRegisterValue(it->first, args[1]);
+    VisitObjectLiteralAccessor(literal, it->second->getter, args[2]);
+    VisitObjectLiteralAccessor(literal, it->second->setter, args[3]);
     builder()
         ->LoadLiteral(Smi::FromInt(NONE))
-        .StoreAccumulatorInRegister(attr)
-        .CallRuntime(Runtime::kDefineAccessorPropertyUnchecked,
-                     literal_argument, 5);
+        .StoreAccumulatorInRegister(args[4])
+        .CallRuntime(Runtime::kDefineAccessorPropertyUnchecked, args);
   }
 
   // Object literals have two parts. The "static" part on the left contains no
@@ -1824,66 +1655,68 @@
 
     if (property->kind() == ObjectLiteral::Property::PROTOTYPE) {
       DCHECK(property->emit_store());
-      register_allocator()->PrepareForConsecutiveAllocations(2);
-      Register literal_argument =
-          register_allocator()->NextConsecutiveRegister();
-      Register value = register_allocator()->NextConsecutiveRegister();
-
-      builder()->MoveRegister(literal, literal_argument);
-      VisitForAccumulatorValue(property->value());
-      builder()->StoreAccumulatorInRegister(value).CallRuntime(
-          Runtime::kInternalSetPrototype, literal_argument, 2);
+      RegisterList args = register_allocator()->NewRegisterList(2);
+      builder()->MoveRegister(literal, args[0]);
+      VisitForRegisterValue(property->value(), args[1]);
+      builder()->CallRuntime(Runtime::kInternalSetPrototype, args);
       continue;
     }
 
-    register_allocator()->PrepareForConsecutiveAllocations(5);
-    Register literal_argument = register_allocator()->NextConsecutiveRegister();
-    Register key = register_allocator()->NextConsecutiveRegister();
-    Register value = register_allocator()->NextConsecutiveRegister();
-    Register attr = register_allocator()->NextConsecutiveRegister();
-    DCHECK(Register::AreContiguous(literal_argument, key, value, attr));
-    Register set_function_name =
-        register_allocator()->NextConsecutiveRegister();
-
-    builder()->MoveRegister(literal, literal_argument);
-    VisitForAccumulatorValue(property->key());
-    builder()->CastAccumulatorToName(key);
-    VisitForAccumulatorValue(property->value());
-    builder()->StoreAccumulatorInRegister(value);
-    VisitSetHomeObject(value, literal, property);
-    builder()->LoadLiteral(Smi::FromInt(NONE)).StoreAccumulatorInRegister(attr);
     switch (property->kind()) {
       case ObjectLiteral::Property::CONSTANT:
       case ObjectLiteral::Property::COMPUTED:
-      case ObjectLiteral::Property::MATERIALIZED_LITERAL:
+      case ObjectLiteral::Property::MATERIALIZED_LITERAL: {
+        RegisterList args = register_allocator()->NewRegisterList(5);
+        builder()->MoveRegister(literal, args[0]);
+        VisitForAccumulatorValue(property->key());
+        builder()->ConvertAccumulatorToName(args[1]);
+        VisitForRegisterValue(property->value(), args[2]);
+        VisitSetHomeObject(args[2], literal, property);
         builder()
-            ->LoadLiteral(Smi::FromInt(property->NeedsSetFunctionName()))
-            .StoreAccumulatorInRegister(set_function_name);
-        builder()->CallRuntime(Runtime::kDefineDataPropertyInLiteral,
-                               literal_argument, 5);
+            ->LoadLiteral(Smi::FromInt(NONE))
+            .StoreAccumulatorInRegister(args[3])
+            .LoadLiteral(Smi::FromInt(property->NeedsSetFunctionName()))
+            .StoreAccumulatorInRegister(args[4]);
+        builder()->CallRuntime(Runtime::kDefineDataPropertyInLiteral, args);
         break;
+      }
+      case ObjectLiteral::Property::GETTER:
+      case ObjectLiteral::Property::SETTER: {
+        RegisterList args = register_allocator()->NewRegisterList(4);
+        builder()->MoveRegister(literal, args[0]);
+        VisitForAccumulatorValue(property->key());
+        builder()->ConvertAccumulatorToName(args[1]);
+        VisitForRegisterValue(property->value(), args[2]);
+        VisitSetHomeObject(args[2], literal, property);
+        builder()
+            ->LoadLiteral(Smi::FromInt(NONE))
+            .StoreAccumulatorInRegister(args[3]);
+        Runtime::FunctionId function_id =
+            property->kind() == ObjectLiteral::Property::GETTER
+                ? Runtime::kDefineGetterPropertyUnchecked
+                : Runtime::kDefineSetterPropertyUnchecked;
+        builder()->CallRuntime(function_id, args);
+        break;
+      }
       case ObjectLiteral::Property::PROTOTYPE:
         UNREACHABLE();  // Handled specially above.
         break;
-      case ObjectLiteral::Property::GETTER:
-        builder()->CallRuntime(Runtime::kDefineGetterPropertyUnchecked,
-                               literal_argument, 4);
-        break;
-      case ObjectLiteral::Property::SETTER:
-        builder()->CallRuntime(Runtime::kDefineSetterPropertyUnchecked,
-                               literal_argument, 4);
-        break;
     }
   }
 
-  execution_result()->SetResultInRegister(literal);
+  builder()->LoadAccumulatorWithRegister(literal);
 }
 
 void BytecodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
   // Deep-copy the literal boilerplate.
+  int runtime_flags = expr->ComputeFlags();
+  bool use_fast_shallow_clone =
+      (runtime_flags & ArrayLiteral::kShallowElements) != 0 &&
+      expr->values()->length() <= JSArray::kInitialMaxFastElementArray;
+  uint8_t flags =
+      CreateArrayLiteralFlags::Encode(use_fast_shallow_clone, runtime_flags);
   builder()->CreateArrayLiteral(expr->constant_elements(),
-                                expr->literal_index(),
-                                expr->ComputeFlags(true));
+                                expr->literal_index(), flags);
   Register index, literal;
 
   // Evaluate all the non-constant subexpressions and store them into the
@@ -1915,7 +1748,6 @@
     // Restore literal array into accumulator.
     builder()->LoadAccumulatorWithRegister(literal);
   }
-  execution_result()->SetResultInAccumulator();
 }
 
 void BytecodeGenerator::VisitVariableProxy(VariableProxy* proxy) {
@@ -1953,7 +1785,6 @@
       BuildHoleCheckForVariableLoad(variable);
       break;
     }
-    case VariableLocation::GLOBAL:
     case VariableLocation::UNALLOCATED: {
       builder()->LoadGlobal(feedback_index(slot), typeof_mode);
       break;
@@ -1964,85 +1795,82 @@
       Register context_reg;
       if (context) {
         context_reg = context->reg();
+        depth = 0;
       } else {
-        context_reg = register_allocator()->NewRegister();
-        // Walk the context chain to find the context at the given depth.
-        // TODO(rmcilroy): Perform this work in a bytecode handler once we have
-        // a generic mechanism for performing jumps in interpreter.cc.
-        // TODO(mythria): Also update bytecode graph builder with correct depth
-        // when this changes.
-        builder()
-            ->LoadAccumulatorWithRegister(execution_context()->reg())
-            .StoreAccumulatorInRegister(context_reg);
-        for (int i = 0; i < depth; ++i) {
-          builder()
-              ->LoadContextSlot(context_reg, Context::PREVIOUS_INDEX)
-              .StoreAccumulatorInRegister(context_reg);
-        }
+        context_reg = execution_context()->reg();
       }
 
-      builder()->LoadContextSlot(context_reg, variable->index());
+      builder()->LoadContextSlot(context_reg, variable->index(), depth);
       BuildHoleCheckForVariableLoad(variable);
       break;
     }
     case VariableLocation::LOOKUP: {
-      builder()->LoadLookupSlot(variable->name(), typeof_mode);
+      switch (variable->mode()) {
+        case DYNAMIC_LOCAL: {
+          Variable* local_variable = variable->local_if_not_shadowed();
+          int depth =
+              execution_context()->ContextChainDepth(local_variable->scope());
+          builder()->LoadLookupContextSlot(variable->name(), typeof_mode,
+                                           local_variable->index(), depth);
+          BuildHoleCheckForVariableLoad(variable);
+          break;
+        }
+        case DYNAMIC_GLOBAL: {
+          int depth = scope()->ContextChainLengthUntilOutermostSloppyEval();
+          builder()->LoadLookupGlobalSlot(variable->name(), typeof_mode,
+                                          feedback_index(slot), depth);
+          break;
+        }
+        default:
+          builder()->LoadLookupSlot(variable->name(), typeof_mode);
+      }
       break;
     }
-    case VariableLocation::MODULE:
-      UNREACHABLE();
+    case VariableLocation::MODULE: {
+      ModuleDescriptor* descriptor = scope()->GetModuleScope()->module();
+      if (variable->IsExport()) {
+        auto it = descriptor->regular_exports().find(variable->raw_name());
+        DCHECK(it != descriptor->regular_exports().end());
+        Register export_name = register_allocator()->NewRegister();
+        builder()
+            ->LoadLiteral(it->second->export_name->string())
+            .StoreAccumulatorInRegister(export_name)
+            .CallRuntime(Runtime::kLoadModuleExport, export_name);
+      } else {
+        auto it = descriptor->regular_imports().find(variable->raw_name());
+        DCHECK(it != descriptor->regular_imports().end());
+        RegisterList args = register_allocator()->NewRegisterList(2);
+        builder()
+            ->LoadLiteral(it->second->import_name->string())
+            .StoreAccumulatorInRegister(args[0])
+            .LoadLiteral(Smi::FromInt(it->second->module_request))
+            .StoreAccumulatorInRegister(args[1])
+            .CallRuntime(Runtime::kLoadModuleImport, args);
+      }
+      BuildHoleCheckForVariableLoad(variable);
+      break;
+    }
   }
-  execution_result()->SetResultInAccumulator();
 }
 
 void BytecodeGenerator::VisitVariableLoadForAccumulatorValue(
     Variable* variable, FeedbackVectorSlot slot, TypeofMode typeof_mode) {
-  AccumulatorResultScope accumulator_result(this);
+  ValueResultScope accumulator_result(this);
   VisitVariableLoad(variable, slot, typeof_mode);
 }
 
-Register BytecodeGenerator::VisitVariableLoadForRegisterValue(
-    Variable* variable, FeedbackVectorSlot slot, TypeofMode typeof_mode) {
-  RegisterResultScope register_scope(this);
-  VisitVariableLoad(variable, slot, typeof_mode);
-  return register_scope.ResultRegister();
+void BytecodeGenerator::BuildReturn() {
+  if (FLAG_trace) {
+    RegisterAllocationScope register_scope(this);
+    Register result = register_allocator()->NewRegister();
+    // Runtime returns {result} value, preserving accumulator.
+    builder()->StoreAccumulatorInRegister(result).CallRuntime(
+        Runtime::kTraceExit, result);
+  }
+  builder()->Return();
 }
 
-void BytecodeGenerator::BuildNamedSuperPropertyLoad(Register receiver,
-                                                    Register home_object,
-                                                    Register name) {
-  DCHECK(Register::AreContiguous(receiver, home_object, name));
-  builder()->CallRuntime(Runtime::kLoadFromSuper, receiver, 3);
-}
-
-void BytecodeGenerator::BuildKeyedSuperPropertyLoad(Register receiver,
-                                                    Register home_object,
-                                                    Register key) {
-  DCHECK(Register::AreContiguous(receiver, home_object, key));
-  builder()->CallRuntime(Runtime::kLoadKeyedFromSuper, receiver, 3);
-}
-
-void BytecodeGenerator::BuildNamedSuperPropertyStore(Register receiver,
-                                                     Register home_object,
-                                                     Register name,
-                                                     Register value) {
-  DCHECK(Register::AreContiguous(receiver, home_object, name, value));
-  Runtime::FunctionId function_id = is_strict(language_mode())
-                                        ? Runtime::kStoreToSuper_Strict
-                                        : Runtime::kStoreToSuper_Sloppy;
-  builder()->CallRuntime(function_id, receiver, 4);
-}
-
-void BytecodeGenerator::BuildKeyedSuperPropertyStore(Register receiver,
-                                                     Register home_object,
-                                                     Register key,
-                                                     Register value) {
-  DCHECK(Register::AreContiguous(receiver, home_object, key, value));
-  Runtime::FunctionId function_id = is_strict(language_mode())
-                                        ? Runtime::kStoreKeyedToSuper_Strict
-                                        : Runtime::kStoreKeyedToSuper_Sloppy;
-  builder()->CallRuntime(function_id, receiver, 4);
-}
+void BytecodeGenerator::BuildReThrow() { builder()->ReThrow(); }
 
 void BytecodeGenerator::BuildAbort(BailoutReason bailout_reason) {
   RegisterAllocationScope register_scope(this);
@@ -2050,14 +1878,14 @@
   builder()
       ->LoadLiteral(Smi::FromInt(static_cast<int>(bailout_reason)))
       .StoreAccumulatorInRegister(reason)
-      .CallRuntime(Runtime::kAbort, reason, 1);
+      .CallRuntime(Runtime::kAbort, reason);
 }
 
 void BytecodeGenerator::BuildThrowReferenceError(Handle<String> name) {
   RegisterAllocationScope register_scope(this);
   Register name_reg = register_allocator()->NewRegister();
   builder()->LoadLiteral(name).StoreAccumulatorInRegister(name_reg).CallRuntime(
-      Runtime::kThrowReferenceError, name_reg, 1);
+      Runtime::kThrowReferenceError, name_reg);
 }
 
 void BytecodeGenerator::BuildThrowIfHole(Handle<String> name) {
@@ -2083,7 +1911,6 @@
 
 void BytecodeGenerator::BuildHoleCheckForVariableAssignment(Variable* variable,
                                                             Token::Value op) {
-  DCHECK(variable->mode() != CONST_LEGACY);
   if (op != Token::INIT) {
     // Perform an initialization check for let/const declared variables.
     // E.g. let x = (x = 20); is not allowed.
@@ -2128,20 +1955,13 @@
         builder()->LoadAccumulatorWithRegister(value_temp);
       }
 
-      if ((mode == CONST || mode == CONST_LEGACY) && op != Token::INIT) {
-        if (mode == CONST || is_strict(language_mode())) {
-          builder()->CallRuntime(Runtime::kThrowConstAssignError, Register(),
-                                 0);
-        }
-        // Non-initializing assignments to legacy constants are ignored
-        // in sloppy mode. Break here to avoid storing into variable.
-        break;
+      if (mode != CONST || op == Token::INIT) {
+        builder()->StoreAccumulatorInRegister(destination);
+      } else if (variable->throw_on_const_assignment(language_mode())) {
+        builder()->CallRuntime(Runtime::kThrowConstAssignError);
       }
-
-      builder()->StoreAccumulatorInRegister(destination);
       break;
     }
-    case VariableLocation::GLOBAL:
     case VariableLocation::UNALLOCATED: {
       builder()->StoreGlobal(variable->name(), feedback_index(slot),
                              language_mode());
@@ -2154,24 +1974,9 @@
 
       if (context) {
         context_reg = context->reg();
+        depth = 0;
       } else {
-        Register value_temp = register_allocator()->NewRegister();
-        context_reg = register_allocator()->NewRegister();
-        // Walk the context chain to find the context at the given depth.
-        // TODO(rmcilroy): Perform this work in a bytecode handler once we have
-        // a generic mechanism for performing jumps in interpreter.cc.
-        // TODO(mythria): Also update bytecode graph builder with correct depth
-        // when this changes.
-        builder()
-            ->StoreAccumulatorInRegister(value_temp)
-            .LoadAccumulatorWithRegister(execution_context()->reg())
-            .StoreAccumulatorInRegister(context_reg);
-        for (int i = 0; i < depth; ++i) {
-          builder()
-              ->LoadContextSlot(context_reg, Context::PREVIOUS_INDEX)
-              .StoreAccumulatorInRegister(context_reg);
-        }
-        builder()->LoadAccumulatorWithRegister(value_temp);
+        context_reg = execution_context()->reg();
       }
 
       if (hole_check_required) {
@@ -2179,38 +1984,57 @@
         Register value_temp = register_allocator()->NewRegister();
         builder()
             ->StoreAccumulatorInRegister(value_temp)
-            .LoadContextSlot(context_reg, variable->index());
+            .LoadContextSlot(context_reg, variable->index(), depth);
 
         BuildHoleCheckForVariableAssignment(variable, op);
         builder()->LoadAccumulatorWithRegister(value_temp);
       }
 
-      if ((mode == CONST || mode == CONST_LEGACY) && op != Token::INIT) {
-        if (mode == CONST || is_strict(language_mode())) {
-          builder()->CallRuntime(Runtime::kThrowConstAssignError, Register(),
-                                 0);
-        }
-        // Non-initializing assignments to legacy constants are ignored
-        // in sloppy mode. Break here to avoid storing into variable.
-        break;
+      if (mode != CONST || op == Token::INIT) {
+        builder()->StoreContextSlot(context_reg, variable->index(), depth);
+      } else if (variable->throw_on_const_assignment(language_mode())) {
+        builder()->CallRuntime(Runtime::kThrowConstAssignError);
       }
-
-      builder()->StoreContextSlot(context_reg, variable->index());
       break;
     }
     case VariableLocation::LOOKUP: {
-      DCHECK_NE(CONST_LEGACY, variable->mode());
       builder()->StoreLookupSlot(variable->name(), language_mode());
       break;
     }
-    case VariableLocation::MODULE:
-      UNREACHABLE();
+    case VariableLocation::MODULE: {
+      DCHECK(IsDeclaredVariableMode(mode));
+
+      if (mode == CONST && op != Token::INIT) {
+        builder()->CallRuntime(Runtime::kThrowConstAssignError);
+        break;
+      }
+
+      // If we don't throw above, we know that we're dealing with an
+      // export because imports are const and we do not generate initializing
+      // assignments for them.
+      DCHECK(variable->IsExport());
+
+      ModuleDescriptor* mod = scope()->GetModuleScope()->module();
+      // There may be several export names for this local name, but it doesn't
+      // matter which one we pick, as they all map to the same cell.
+      auto it = mod->regular_exports().find(variable->raw_name());
+      DCHECK(it != mod->regular_exports().end());
+
+      RegisterList args = register_allocator()->NewRegisterList(2);
+      builder()
+          ->StoreAccumulatorInRegister(args[1])
+          .LoadLiteral(it->second->export_name->string())
+          .StoreAccumulatorInRegister(args[0])
+          .CallRuntime(Runtime::kStoreModuleExport, args);
+      break;
+    }
   }
 }
 
 void BytecodeGenerator::VisitAssignment(Assignment* expr) {
   DCHECK(expr->target()->IsValidReferenceExpressionOrThis());
-  Register object, key, home_object, value;
+  Register object, key;
+  RegisterList super_property_args;
   Handle<String> name;
 
   // Left-hand side can only be a property, a global or a variable slot.
@@ -2229,44 +2053,29 @@
     }
     case KEYED_PROPERTY: {
       object = VisitForRegisterValue(property->obj());
-      if (expr->is_compound()) {
-        // Use VisitForAccumulator and store to register so that the key is
-        // still in the accumulator for loading the old value below.
-        key = register_allocator()->NewRegister();
-        VisitForAccumulatorValue(property->key());
-        builder()->StoreAccumulatorInRegister(key);
-      } else {
-        key = VisitForRegisterValue(property->key());
-      }
+      key = VisitForRegisterValue(property->key());
       break;
     }
     case NAMED_SUPER_PROPERTY: {
-      register_allocator()->PrepareForConsecutiveAllocations(4);
-      object = register_allocator()->NextConsecutiveRegister();
-      home_object = register_allocator()->NextConsecutiveRegister();
-      key = register_allocator()->NextConsecutiveRegister();
-      value = register_allocator()->NextConsecutiveRegister();
+      super_property_args = register_allocator()->NewRegisterList(4);
       SuperPropertyReference* super_property =
           property->obj()->AsSuperPropertyReference();
-      VisitForRegisterValue(super_property->this_var(), object);
-      VisitForRegisterValue(super_property->home_object(), home_object);
+      VisitForRegisterValue(super_property->this_var(), super_property_args[0]);
+      VisitForRegisterValue(super_property->home_object(),
+                            super_property_args[1]);
       builder()
           ->LoadLiteral(property->key()->AsLiteral()->AsPropertyName())
-          .StoreAccumulatorInRegister(key);
+          .StoreAccumulatorInRegister(super_property_args[2]);
       break;
     }
     case KEYED_SUPER_PROPERTY: {
-      register_allocator()->PrepareForConsecutiveAllocations(4);
-      object = register_allocator()->NextConsecutiveRegister();
-      home_object = register_allocator()->NextConsecutiveRegister();
-      key = register_allocator()->NextConsecutiveRegister();
-      value = register_allocator()->NextConsecutiveRegister();
-      builder()->StoreAccumulatorInRegister(value);
+      super_property_args = register_allocator()->NewRegisterList(4);
       SuperPropertyReference* super_property =
           property->obj()->AsSuperPropertyReference();
-      VisitForRegisterValue(super_property->this_var(), object);
-      VisitForRegisterValue(super_property->home_object(), home_object);
-      VisitForRegisterValue(property->key(), key);
+      VisitForRegisterValue(super_property->this_var(), super_property_args[0]);
+      VisitForRegisterValue(super_property->home_object(),
+                            super_property_args[1]);
+      VisitForRegisterValue(property->key(), super_property_args[2]);
       break;
     }
   }
@@ -2274,17 +2083,16 @@
   // Evaluate the value and potentially handle compound assignments by loading
   // the left-hand side value and performing a binary operation.
   if (expr->is_compound()) {
-    Register old_value;
+    Register old_value = register_allocator()->NewRegister();
     switch (assign_type) {
       case VARIABLE: {
         VariableProxy* proxy = expr->target()->AsVariableProxy();
-        old_value = VisitVariableLoadForRegisterValue(
-            proxy->var(), proxy->VariableFeedbackSlot());
+        VisitVariableLoad(proxy->var(), proxy->VariableFeedbackSlot());
+        builder()->StoreAccumulatorInRegister(old_value);
         break;
       }
       case NAMED_PROPERTY: {
         FeedbackVectorSlot slot = property->PropertyFeedbackSlot();
-        old_value = register_allocator()->NewRegister();
         builder()
             ->LoadNamedProperty(object, name, feedback_index(slot))
             .StoreAccumulatorInRegister(old_value);
@@ -2294,22 +2102,23 @@
         // Key is already in accumulator at this point due to evaluating the
         // LHS above.
         FeedbackVectorSlot slot = property->PropertyFeedbackSlot();
-        old_value = register_allocator()->NewRegister();
         builder()
             ->LoadKeyedProperty(object, feedback_index(slot))
             .StoreAccumulatorInRegister(old_value);
         break;
       }
       case NAMED_SUPER_PROPERTY: {
-        old_value = register_allocator()->NewRegister();
-        BuildNamedSuperPropertyLoad(object, home_object, key);
-        builder()->StoreAccumulatorInRegister(old_value);
+        builder()
+            ->CallRuntime(Runtime::kLoadFromSuper,
+                          super_property_args.Truncate(3))
+            .StoreAccumulatorInRegister(old_value);
         break;
       }
       case KEYED_SUPER_PROPERTY: {
-        old_value = register_allocator()->NewRegister();
-        BuildKeyedSuperPropertyLoad(object, home_object, key);
-        builder()->StoreAccumulatorInRegister(old_value);
+        builder()
+            ->CallRuntime(Runtime::kLoadKeyedFromSuper,
+                          super_property_args.Truncate(3))
+            .StoreAccumulatorInRegister(old_value);
         break;
       }
     }
@@ -2342,17 +2151,18 @@
                                     language_mode());
       break;
     case NAMED_SUPER_PROPERTY: {
-      builder()->StoreAccumulatorInRegister(value);
-      BuildNamedSuperPropertyStore(object, home_object, key, value);
+      builder()
+          ->StoreAccumulatorInRegister(super_property_args[3])
+          .CallRuntime(StoreToSuperRuntimeId(), super_property_args);
       break;
     }
     case KEYED_SUPER_PROPERTY: {
-      builder()->StoreAccumulatorInRegister(value);
-      BuildKeyedSuperPropertyStore(object, home_object, key, value);
+      builder()
+          ->StoreAccumulatorInRegister(super_property_args[3])
+          .CallRuntime(StoreKeyedToSuperRuntimeId(), super_property_args);
       break;
     }
   }
-  execution_result()->SetResultInAccumulator();
 }
 
 void BytecodeGenerator::VisitYield(Yield* expr) {
@@ -2382,12 +2192,12 @@
 
     Register input = register_allocator()->NewRegister();
     builder()
-        ->CallRuntime(Runtime::kInlineGeneratorGetInputOrDebugPos, generator, 1)
+        ->CallRuntime(Runtime::kInlineGeneratorGetInputOrDebugPos, generator)
         .StoreAccumulatorInRegister(input);
 
     Register resume_mode = register_allocator()->NewRegister();
     builder()
-        ->CallRuntime(Runtime::kInlineGeneratorGetResumeMode, generator, 1)
+        ->CallRuntime(Runtime::kInlineGeneratorGetResumeMode, generator)
         .StoreAccumulatorInRegister(resume_mode);
 
     // Now dispatch on resume mode.
@@ -2407,14 +2217,12 @@
 
     builder()->Bind(&resume_with_return);
     {
-      register_allocator()->PrepareForConsecutiveAllocations(2);
-      Register value = register_allocator()->NextConsecutiveRegister();
-      Register done = register_allocator()->NextConsecutiveRegister();
+      RegisterList args = register_allocator()->NewRegisterList(2);
       builder()
-          ->MoveRegister(input, value)
+          ->MoveRegister(input, args[0])
           .LoadTrue()
-          .StoreAccumulatorInRegister(done)
-          .CallRuntime(Runtime::kInlineCreateIterResultObject, value, 2);
+          .StoreAccumulatorInRegister(args[1])
+          .CallRuntime(Runtime::kInlineCreateIterResultObject, args);
       execution_control()->ReturnAccumulator();
     }
 
@@ -2430,18 +2238,12 @@
     builder()->Bind(&resume_with_next);
     builder()->LoadAccumulatorWithRegister(input);
   }
-  execution_result()->SetResultInAccumulator();
 }
 
 void BytecodeGenerator::VisitThrow(Throw* expr) {
   VisitForAccumulatorValue(expr->exception());
   builder()->SetExpressionPosition(expr);
   builder()->Throw();
-  // Throw statements are modeled as expressions instead of statements. These
-  // are converted from assignment statements in Rewriter::ReWrite pass. An
-  // assignment statement expects a value in the accumulator. This is a hack to
-  // avoid DCHECK fails assert accumulator has been set.
-  execution_result()->SetResultInAccumulator();
 }
 
 void BytecodeGenerator::VisitPropertyLoad(Register obj, Property* expr) {
@@ -2469,56 +2271,45 @@
       VisitKeyedSuperPropertyLoad(expr, Register::invalid_value());
       break;
   }
-  execution_result()->SetResultInAccumulator();
 }
 
 void BytecodeGenerator::VisitPropertyLoadForAccumulator(Register obj,
                                                         Property* expr) {
-  AccumulatorResultScope result_scope(this);
+  ValueResultScope result_scope(this);
   VisitPropertyLoad(obj, expr);
 }
 
 void BytecodeGenerator::VisitNamedSuperPropertyLoad(Property* property,
                                                     Register opt_receiver_out) {
   RegisterAllocationScope register_scope(this);
-  register_allocator()->PrepareForConsecutiveAllocations(3);
-
-  Register receiver, home_object, name;
-  receiver = register_allocator()->NextConsecutiveRegister();
-  home_object = register_allocator()->NextConsecutiveRegister();
-  name = register_allocator()->NextConsecutiveRegister();
   SuperPropertyReference* super_property =
       property->obj()->AsSuperPropertyReference();
-  VisitForRegisterValue(super_property->this_var(), receiver);
-  VisitForRegisterValue(super_property->home_object(), home_object);
+  RegisterList args = register_allocator()->NewRegisterList(3);
+  VisitForRegisterValue(super_property->this_var(), args[0]);
+  VisitForRegisterValue(super_property->home_object(), args[1]);
   builder()
       ->LoadLiteral(property->key()->AsLiteral()->AsPropertyName())
-      .StoreAccumulatorInRegister(name);
-  BuildNamedSuperPropertyLoad(receiver, home_object, name);
+      .StoreAccumulatorInRegister(args[2])
+      .CallRuntime(Runtime::kLoadFromSuper, args);
 
   if (opt_receiver_out.is_valid()) {
-    builder()->MoveRegister(receiver, opt_receiver_out);
+    builder()->MoveRegister(args[0], opt_receiver_out);
   }
 }
 
 void BytecodeGenerator::VisitKeyedSuperPropertyLoad(Property* property,
                                                     Register opt_receiver_out) {
   RegisterAllocationScope register_scope(this);
-  register_allocator()->PrepareForConsecutiveAllocations(3);
-
-  Register receiver, home_object, key;
-  receiver = register_allocator()->NextConsecutiveRegister();
-  home_object = register_allocator()->NextConsecutiveRegister();
-  key = register_allocator()->NextConsecutiveRegister();
   SuperPropertyReference* super_property =
       property->obj()->AsSuperPropertyReference();
-  VisitForRegisterValue(super_property->this_var(), receiver);
-  VisitForRegisterValue(super_property->home_object(), home_object);
-  VisitForRegisterValue(property->key(), key);
-  BuildKeyedSuperPropertyLoad(receiver, home_object, key);
+  RegisterList args = register_allocator()->NewRegisterList(3);
+  VisitForRegisterValue(super_property->this_var(), args[0]);
+  VisitForRegisterValue(super_property->home_object(), args[1]);
+  VisitForRegisterValue(property->key(), args[2]);
+  builder()->CallRuntime(Runtime::kLoadKeyedFromSuper, args);
 
   if (opt_receiver_out.is_valid()) {
-    builder()->MoveRegister(receiver, opt_receiver_out);
+    builder()->MoveRegister(args[0], opt_receiver_out);
   }
 }
 
@@ -2533,36 +2324,13 @@
   }
 }
 
-Register BytecodeGenerator::VisitArguments(ZoneList<Expression*>* args) {
-  if (args->length() == 0) {
-    return Register();
+void BytecodeGenerator::VisitArguments(ZoneList<Expression*>* args,
+                                       RegisterList arg_regs,
+                                       size_t first_argument_register) {
+  // Visit arguments.
+  for (int i = 0; i < static_cast<int>(args->length()); i++) {
+    VisitForRegisterValue(args->at(i), arg_regs[first_argument_register + i]);
   }
-
-  // Visit arguments and place in a contiguous block of temporary
-  // registers.  Return the first temporary register corresponding to
-  // the first argument.
-  //
-  // NB the caller may have already called
-  // PrepareForConsecutiveAllocations() with args->length() + N. The
-  // second call here will be a no-op provided there have been N or
-  // less calls to NextConsecutiveRegister(). Otherwise, the arguments
-  // here will be consecutive, but they will not be consecutive with
-  // earlier consecutive allocations made by the caller.
-  register_allocator()->PrepareForConsecutiveAllocations(args->length());
-
-  // Visit for first argument that goes into returned register
-  Register first_arg = register_allocator()->NextConsecutiveRegister();
-  VisitForAccumulatorValue(args->at(0));
-  builder()->StoreAccumulatorInRegister(first_arg);
-
-  // Visit remaining arguments
-  for (int i = 1; i < static_cast<int>(args->length()); i++) {
-    Register ith_arg = register_allocator()->NextConsecutiveRegister();
-    VisitForAccumulatorValue(args->at(i));
-    builder()->StoreAccumulatorInRegister(ith_arg);
-    DCHECK(ith_arg.index() - i == first_arg.index());
-  }
-  return first_arg;
 }
 
 void BytecodeGenerator::VisitCall(Call* expr) {
@@ -2573,18 +2341,15 @@
     return VisitCallSuper(expr);
   }
 
+  Register callee = register_allocator()->NewRegister();
+
+  // Add an argument register for the receiver.
+  RegisterList args =
+      register_allocator()->NewRegisterList(expr->arguments()->length() + 1);
+  Register receiver = args[0];
+
   // Prepare the callee and the receiver to the function call. This depends on
   // the semantics of the underlying call type.
-
-  // The receiver and arguments need to be allocated consecutively for
-  // Call(). We allocate the callee and receiver consecutively for calls to
-  // %LoadLookupSlotForCall. Future optimizations could avoid this there are
-  // no arguments or the receiver and arguments are already consecutive.
-  ZoneList<Expression*>* args = expr->arguments();
-  register_allocator()->PrepareForConsecutiveAllocations(args->length() + 2);
-  Register callee = register_allocator()->NextConsecutiveRegister();
-  Register receiver = register_allocator()->NextConsecutiveRegister();
-
   switch (call_type) {
     case Call::NAMED_PROPERTY_CALL:
     case Call::KEYED_PROPERTY_CALL: {
@@ -2613,12 +2378,13 @@
 
         // Call %LoadLookupSlotForCall to get the callee and receiver.
         DCHECK(Register::AreContiguous(callee, receiver));
+        RegisterList result_pair(callee.index(), 2);
         Variable* variable = callee_expr->AsVariableProxy()->var();
         builder()
             ->LoadLiteral(variable->name())
             .StoreAccumulatorInRegister(name)
-            .CallRuntimeForPair(Runtime::kLoadLookupSlotForCall, name, 1,
-                                callee);
+            .CallRuntimeForPair(Runtime::kLoadLookupSlotForCall, name,
+                                result_pair);
         break;
       }
       // Fall through.
@@ -2626,8 +2392,7 @@
     }
     case Call::OTHER_CALL: {
       builder()->LoadUndefined().StoreAccumulatorInRegister(receiver);
-      VisitForAccumulatorValue(callee_expr);
-      builder()->StoreAccumulatorInRegister(callee);
+      VisitForRegisterValue(callee_expr, callee);
       break;
     }
     case Call::NAMED_SUPER_PROPERTY_CALL: {
@@ -2647,42 +2412,34 @@
       break;
   }
 
-  // Evaluate all arguments to the function call and store in sequential
+  // Evaluate all arguments to the function call and store in sequential args
   // registers.
-  Register arg = VisitArguments(args);
-  CHECK(args->length() == 0 || arg.index() == receiver.index() + 1);
+  VisitArguments(expr->arguments(), args, 1);
 
   // Resolve callee for a potential direct eval call. This block will mutate the
   // callee value.
-  if (call_type == Call::POSSIBLY_EVAL_CALL && args->length() > 0) {
+  if (call_type == Call::POSSIBLY_EVAL_CALL &&
+      expr->arguments()->length() > 0) {
     RegisterAllocationScope inner_register_scope(this);
-    register_allocator()->PrepareForConsecutiveAllocations(6);
-    Register callee_for_eval = register_allocator()->NextConsecutiveRegister();
-    Register source = register_allocator()->NextConsecutiveRegister();
-    Register function = register_allocator()->NextConsecutiveRegister();
-    Register language = register_allocator()->NextConsecutiveRegister();
-    Register eval_scope_position =
-        register_allocator()->NextConsecutiveRegister();
-    Register eval_position = register_allocator()->NextConsecutiveRegister();
-
     // Set up arguments for ResolvePossiblyDirectEval by copying callee, source
     // strings and function closure, and loading language and
     // position.
+    RegisterList runtime_call_args = register_allocator()->NewRegisterList(6);
     builder()
-        ->MoveRegister(callee, callee_for_eval)
-        .MoveRegister(arg, source)
-        .MoveRegister(Register::function_closure(), function)
+        ->MoveRegister(callee, runtime_call_args[0])
+        .MoveRegister(args[1], runtime_call_args[1])
+        .MoveRegister(Register::function_closure(), runtime_call_args[2])
         .LoadLiteral(Smi::FromInt(language_mode()))
-        .StoreAccumulatorInRegister(language)
+        .StoreAccumulatorInRegister(runtime_call_args[3])
         .LoadLiteral(
             Smi::FromInt(execution_context()->scope()->start_position()))
-        .StoreAccumulatorInRegister(eval_scope_position)
+        .StoreAccumulatorInRegister(runtime_call_args[4])
         .LoadLiteral(Smi::FromInt(expr->position()))
-        .StoreAccumulatorInRegister(eval_position);
+        .StoreAccumulatorInRegister(runtime_call_args[5]);
 
     // Call ResolvePossiblyDirectEval and modify the callee.
     builder()
-        ->CallRuntime(Runtime::kResolvePossiblyDirectEval, callee_for_eval, 6)
+        ->CallRuntime(Runtime::kResolvePossiblyDirectEval, runtime_call_args)
         .StoreAccumulatorInRegister(callee);
   }
 
@@ -2692,16 +2449,14 @@
   if (expr->CallFeedbackICSlot().IsInvalid()) {
     DCHECK(call_type == Call::POSSIBLY_EVAL_CALL);
     // Valid type feedback slots can only be greater than kReservedIndexCount.
-    // We use 0 to indicate an invalid slot it. Statically assert that 0 cannot
+    // We use 0 to indicate an invalid slot id. Statically assert that 0 cannot
     // be a valid slot id.
     STATIC_ASSERT(TypeFeedbackVector::kReservedIndexCount > 0);
     feedback_slot_index = 0;
   } else {
     feedback_slot_index = feedback_index(expr->CallFeedbackICSlot());
   }
-  builder()->Call(callee, receiver, 1 + args->length(), feedback_slot_index,
-                  expr->tail_call_mode());
-  execution_result()->SetResultInAccumulator();
+  builder()->Call(callee, args, feedback_slot_index, expr->tail_call_mode());
 }
 
 void BytecodeGenerator::VisitCallSuper(Call* expr) {
@@ -2709,17 +2464,15 @@
   SuperCallReference* super = expr->expression()->AsSuperCallReference();
 
   // Prepare the constructor to the super call.
-  Register this_function = register_allocator()->NewRegister();
-  VisitForAccumulatorValue(super->this_function_var());
-  builder()
-      ->StoreAccumulatorInRegister(this_function)
-      .CallRuntime(Runtime::kInlineGetSuperConstructor, this_function, 1);
+  Register this_function = VisitForRegisterValue(super->this_function_var());
+  builder()->CallRuntime(Runtime::kInlineGetSuperConstructor, this_function);
 
   Register constructor = this_function;  // Re-use dead this_function register.
   builder()->StoreAccumulatorInRegister(constructor);
 
-  ZoneList<Expression*>* args = expr->arguments();
-  Register first_arg = VisitArguments(args);
+  RegisterList args =
+      register_allocator()->NewRegisterList(expr->arguments()->length());
+  VisitArguments(expr->arguments(), args);
 
   // The new target is loaded into the accumulator from the
   // {new.target} variable.
@@ -2727,51 +2480,51 @@
 
   // Call construct.
   builder()->SetExpressionPosition(expr);
-  builder()->New(constructor, first_arg, args->length());
-  execution_result()->SetResultInAccumulator();
+  // Valid type feedback slots can only be greater than kReservedIndexCount.
+  // Assert that 0 cannot be a valid slot id.
+  STATIC_ASSERT(TypeFeedbackVector::kReservedIndexCount > 0);
+  // Type feedback is not necessary for super constructor calls. The type
+  // information can be inferred in most cases. Slot id 0 indicates type
+  // feedback is not required.
+  builder()->New(constructor, args, 0);
 }
 
 void BytecodeGenerator::VisitCallNew(CallNew* expr) {
-  Register constructor = register_allocator()->NewRegister();
-  VisitForAccumulatorValue(expr->expression());
-  builder()->StoreAccumulatorInRegister(constructor);
-
-  ZoneList<Expression*>* args = expr->arguments();
-  Register first_arg = VisitArguments(args);
+  Register constructor = VisitForRegisterValue(expr->expression());
+  RegisterList args =
+      register_allocator()->NewRegisterList(expr->arguments()->length());
+  VisitArguments(expr->arguments(), args);
 
   builder()->SetExpressionPosition(expr);
   // The accumulator holds new target which is the same as the
   // constructor for CallNew.
   builder()
       ->LoadAccumulatorWithRegister(constructor)
-      .New(constructor, first_arg, args->length());
-  execution_result()->SetResultInAccumulator();
+      .New(constructor, args, feedback_index(expr->CallNewFeedbackSlot()));
 }
 
 void BytecodeGenerator::VisitCallRuntime(CallRuntime* expr) {
-  ZoneList<Expression*>* args = expr->arguments();
   if (expr->is_jsruntime()) {
     // Allocate a register for the receiver and load it with undefined.
-    register_allocator()->PrepareForConsecutiveAllocations(1 + args->length());
-    Register receiver = register_allocator()->NextConsecutiveRegister();
+    RegisterList args =
+        register_allocator()->NewRegisterList(expr->arguments()->length() + 1);
+    Register receiver = args[0];
     builder()->LoadUndefined().StoreAccumulatorInRegister(receiver);
-    Register first_arg = VisitArguments(args);
-    CHECK(args->length() == 0 || first_arg.index() == receiver.index() + 1);
-    builder()->CallJSRuntime(expr->context_index(), receiver,
-                             1 + args->length());
+    VisitArguments(expr->arguments(), args, 1);
+    builder()->CallJSRuntime(expr->context_index(), args);
   } else {
     // Evaluate all arguments to the runtime call.
-    Register first_arg = VisitArguments(args);
+    RegisterList args =
+        register_allocator()->NewRegisterList(expr->arguments()->length());
+    VisitArguments(expr->arguments(), args);
     Runtime::FunctionId function_id = expr->function()->function_id;
-    builder()->CallRuntime(function_id, first_arg, args->length());
+    builder()->CallRuntime(function_id, args);
   }
-  execution_result()->SetResultInAccumulator();
 }
 
 void BytecodeGenerator::VisitVoid(UnaryOperation* expr) {
   VisitForEffect(expr->expression());
   builder()->LoadUndefined();
-  execution_result()->SetResultInAccumulator();
 }
 
 void BytecodeGenerator::VisitTypeOf(UnaryOperation* expr) {
@@ -2785,7 +2538,6 @@
     VisitForAccumulatorValue(expr->expression());
   }
   builder()->TypeOf();
-  execution_result()->SetResultInAccumulator();
 }
 
 void BytecodeGenerator::VisitNot(UnaryOperation* expr) {
@@ -2802,7 +2554,6 @@
   } else {
     VisitForAccumulatorValue(expr->expression());
     builder()->LogicalNot();
-    execution_result()->SetResultInAccumulator();
   }
 }
 
@@ -2846,16 +2597,15 @@
     Variable* variable = proxy->var();
     DCHECK(is_sloppy(language_mode()) || variable->is_this());
     switch (variable->location()) {
-      case VariableLocation::GLOBAL:
       case VariableLocation::UNALLOCATED: {
         // Global var, let, const or variables not explicitly declared.
         Register native_context = register_allocator()->NewRegister();
         Register global_object = register_allocator()->NewRegister();
         builder()
             ->LoadContextSlot(execution_context()->reg(),
-                              Context::NATIVE_CONTEXT_INDEX)
+                              Context::NATIVE_CONTEXT_INDEX, 0)
             .StoreAccumulatorInRegister(native_context)
-            .LoadContextSlot(native_context, Context::EXTENSION_INDEX)
+            .LoadContextSlot(native_context, Context::EXTENSION_INDEX, 0)
             .StoreAccumulatorInRegister(global_object)
             .LoadLiteral(variable->name())
             .Delete(global_object, language_mode());
@@ -2878,7 +2628,7 @@
         builder()
             ->LoadLiteral(variable->name())
             .StoreAccumulatorInRegister(name_reg)
-            .CallRuntime(Runtime::kDeleteLookupSlot, name_reg, 1);
+            .CallRuntime(Runtime::kDeleteLookupSlot, name_reg);
         break;
       }
       default:
@@ -2889,7 +2639,6 @@
     VisitForEffect(expr->expression());
     builder()->LoadTrue();
   }
-  execution_result()->SetResultInAccumulator();
 }
 
 void BytecodeGenerator::VisitCountOperation(CountOperation* expr) {
@@ -2902,7 +2651,8 @@
   bool is_postfix = expr->is_postfix() && !execution_result()->IsEffect();
 
   // Evaluate LHS expression and get old value.
-  Register object, home_object, key, old_value, value;
+  Register object, key, old_value;
+  RegisterList super_property_args;
   Handle<String> name;
   switch (assign_type) {
     case VARIABLE: {
@@ -2930,44 +2680,36 @@
       break;
     }
     case NAMED_SUPER_PROPERTY: {
-      register_allocator()->PrepareForConsecutiveAllocations(4);
-      object = register_allocator()->NextConsecutiveRegister();
-      home_object = register_allocator()->NextConsecutiveRegister();
-      key = register_allocator()->NextConsecutiveRegister();
-      value = register_allocator()->NextConsecutiveRegister();
+      super_property_args = register_allocator()->NewRegisterList(4);
+      RegisterList load_super_args = super_property_args.Truncate(3);
       SuperPropertyReference* super_property =
           property->obj()->AsSuperPropertyReference();
-      VisitForRegisterValue(super_property->this_var(), object);
-      VisitForRegisterValue(super_property->home_object(), home_object);
+      VisitForRegisterValue(super_property->this_var(), load_super_args[0]);
+      VisitForRegisterValue(super_property->home_object(), load_super_args[1]);
       builder()
           ->LoadLiteral(property->key()->AsLiteral()->AsPropertyName())
-          .StoreAccumulatorInRegister(key);
-      BuildNamedSuperPropertyLoad(object, home_object, key);
+          .StoreAccumulatorInRegister(load_super_args[2])
+          .CallRuntime(Runtime::kLoadFromSuper, load_super_args);
       break;
     }
     case KEYED_SUPER_PROPERTY: {
-      register_allocator()->PrepareForConsecutiveAllocations(4);
-      object = register_allocator()->NextConsecutiveRegister();
-      home_object = register_allocator()->NextConsecutiveRegister();
-      key = register_allocator()->NextConsecutiveRegister();
-      value = register_allocator()->NextConsecutiveRegister();
-      builder()->StoreAccumulatorInRegister(value);
+      super_property_args = register_allocator()->NewRegisterList(4);
+      RegisterList load_super_args = super_property_args.Truncate(3);
       SuperPropertyReference* super_property =
           property->obj()->AsSuperPropertyReference();
-      VisitForRegisterValue(super_property->this_var(), object);
-      VisitForRegisterValue(super_property->home_object(), home_object);
-      VisitForRegisterValue(property->key(), key);
-      BuildKeyedSuperPropertyLoad(object, home_object, key);
+      VisitForRegisterValue(super_property->this_var(), load_super_args[0]);
+      VisitForRegisterValue(super_property->home_object(), load_super_args[1]);
+      VisitForRegisterValue(property->key(), load_super_args[2]);
+      builder()->CallRuntime(Runtime::kLoadKeyedFromSuper, load_super_args);
       break;
     }
   }
 
   // Save result for postfix expressions.
   if (is_postfix) {
-    old_value = register_allocator()->outer()->NewRegister();
-
     // Convert old value into a number before saving it.
-    builder()->CastAccumulatorToNumber(old_value);
+    old_value = register_allocator()->NewRegister();
+    builder()->ConvertAccumulatorToNumber(old_value);
   }
 
   // Perform +1/-1 operation.
@@ -2994,22 +2736,22 @@
       break;
     }
     case NAMED_SUPER_PROPERTY: {
-      builder()->StoreAccumulatorInRegister(value);
-      BuildNamedSuperPropertyStore(object, home_object, key, value);
+      builder()
+          ->StoreAccumulatorInRegister(super_property_args[3])
+          .CallRuntime(StoreToSuperRuntimeId(), super_property_args);
       break;
     }
     case KEYED_SUPER_PROPERTY: {
-      builder()->StoreAccumulatorInRegister(value);
-      BuildKeyedSuperPropertyStore(object, home_object, key, value);
+      builder()
+          ->StoreAccumulatorInRegister(super_property_args[3])
+          .CallRuntime(StoreKeyedToSuperRuntimeId(), super_property_args);
       break;
     }
   }
 
   // Restore old value for postfix expressions.
   if (is_postfix) {
-    execution_result()->SetResultInRegister(old_value);
-  } else {
-    execution_result()->SetResultInAccumulator();
+    builder()->LoadAccumulatorWithRegister(old_value);
   }
 }
 
@@ -3034,8 +2776,8 @@
   Register lhs = VisitForRegisterValue(expr->left());
   VisitForAccumulatorValue(expr->right());
   builder()->SetExpressionPosition(expr);
-  builder()->CompareOperation(expr->op(), lhs);
-  execution_result()->SetResultInAccumulator();
+  FeedbackVectorSlot slot = expr->CompareOperationFeedbackSlot();
+  builder()->CompareOperation(expr->op(), lhs, feedback_index(slot));
 }
 
 void BytecodeGenerator::VisitArithmeticExpression(BinaryOperation* expr) {
@@ -3045,7 +2787,6 @@
   VisitForAccumulatorValue(expr->right());
   FeedbackVectorSlot slot = expr->BinaryOperationFeedbackSlot();
   builder()->BinaryOperation(expr->op(), lhs, feedback_index(slot));
-  execution_result()->SetResultInAccumulator();
 }
 
 void BytecodeGenerator::VisitSpread(Spread* expr) { UNREACHABLE(); }
@@ -3055,7 +2796,7 @@
 }
 
 void BytecodeGenerator::VisitThisFunction(ThisFunction* expr) {
-  execution_result()->SetResultInRegister(Register::function_closure());
+  builder()->LoadAccumulatorWithRegister(Register::function_closure());
 }
 
 void BytecodeGenerator::VisitSuperCallReference(SuperCallReference* expr) {
@@ -3065,8 +2806,7 @@
 
 void BytecodeGenerator::VisitSuperPropertyReference(
     SuperPropertyReference* expr) {
-  builder()->CallRuntime(Runtime::kThrowUnsupportedSuperError, Register(0), 0);
-  execution_result()->SetResultInAccumulator();
+  builder()->CallRuntime(Runtime::kThrowUnsupportedSuperError);
 }
 
 void BytecodeGenerator::VisitCommaExpression(BinaryOperation* binop) {
@@ -3106,7 +2846,6 @@
       VisitForAccumulatorValue(right);
       builder()->Bind(&end_label);
     }
-    execution_result()->SetResultInAccumulator();
   }
 }
 
@@ -3142,7 +2881,6 @@
       VisitForAccumulatorValue(right);
       builder()->Bind(&end_label);
     }
-    execution_result()->SetResultInAccumulator();
   }
 }
 
@@ -3150,35 +2888,45 @@
   Visit(expr->expression());
 }
 
-void BytecodeGenerator::VisitNewLocalFunctionContext() {
-  AccumulatorResultScope accumulator_execution_result(this);
+void BytecodeGenerator::BuildNewLocalActivationContext() {
+  ValueResultScope value_execution_result(this);
   Scope* scope = this->scope();
 
-  // Allocate a new local context.
+  // Create the appropriate context.
   if (scope->is_script_scope()) {
-    RegisterAllocationScope register_scope(this);
-    Register closure = register_allocator()->NewRegister();
-    Register scope_info = register_allocator()->NewRegister();
-    DCHECK(Register::AreContiguous(closure, scope_info));
+    RegisterList args = register_allocator()->NewRegisterList(2);
     builder()
         ->LoadAccumulatorWithRegister(Register::function_closure())
-        .StoreAccumulatorInRegister(closure)
+        .StoreAccumulatorInRegister(args[0])
         .LoadLiteral(scope->scope_info())
-        .StoreAccumulatorInRegister(scope_info)
-        .CallRuntime(Runtime::kNewScriptContext, closure, 2);
+        .StoreAccumulatorInRegister(args[1])
+        .CallRuntime(Runtime::kNewScriptContext, args);
+  } else if (scope->is_module_scope()) {
+    // We don't need to do anything for the outer script scope.
+    DCHECK(scope->outer_scope()->is_script_scope());
+
+    // A JSFunction representing a module is called with the module object as
+    // its sole argument, which we pass on to PushModuleContext.
+    RegisterList args = register_allocator()->NewRegisterList(3);
+    builder()
+        ->MoveRegister(builder()->Parameter(1), args[0])
+        .LoadAccumulatorWithRegister(Register::function_closure())
+        .StoreAccumulatorInRegister(args[1])
+        .LoadLiteral(scope->scope_info())
+        .StoreAccumulatorInRegister(args[2])
+        .CallRuntime(Runtime::kPushModuleContext, args);
   } else {
     int slot_count = scope->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
     if (slot_count <= FastNewFunctionContextStub::kMaximumSlots) {
       builder()->CreateFunctionContext(slot_count);
     } else {
       builder()->CallRuntime(Runtime::kNewFunctionContext,
-                             Register::function_closure(), 1);
+                             Register::function_closure());
     }
   }
-  execution_result()->SetResultInAccumulator();
 }
 
-void BytecodeGenerator::VisitBuildLocalActivationContext() {
+void BytecodeGenerator::BuildLocalActivationContextInitialization() {
   DeclarationScope* scope = this->scope();
 
   if (scope->has_this_declaration() && scope->receiver()->IsContextSlot()) {
@@ -3187,7 +2935,7 @@
     // Context variable (at bottom of the context chain).
     DCHECK_EQ(0, scope->ContextChainLength(variable->scope()));
     builder()->LoadAccumulatorWithRegister(receiver).StoreContextSlot(
-        execution_context()->reg(), variable->index());
+        execution_context()->reg(), variable->index(), 0);
   }
 
   // Copy parameters into context if necessary.
@@ -3201,56 +2949,53 @@
     Register parameter(builder()->Parameter(i + 1));
     // Context variable (at bottom of the context chain).
     DCHECK_EQ(0, scope->ContextChainLength(variable->scope()));
-    builder()->LoadAccumulatorWithRegister(parameter)
-        .StoreContextSlot(execution_context()->reg(), variable->index());
+    builder()->LoadAccumulatorWithRegister(parameter).StoreContextSlot(
+        execution_context()->reg(), variable->index(), 0);
   }
 }
 
-void BytecodeGenerator::VisitNewLocalBlockContext(Scope* scope) {
-  AccumulatorResultScope accumulator_execution_result(this);
+void BytecodeGenerator::BuildNewLocalBlockContext(Scope* scope) {
+  ValueResultScope value_execution_result(this);
   DCHECK(scope->is_block_scope());
 
   VisitFunctionClosureForContext();
   builder()->CreateBlockContext(scope->scope_info());
-  execution_result()->SetResultInAccumulator();
 }
 
-void BytecodeGenerator::VisitNewLocalWithContext() {
-  AccumulatorResultScope accumulator_execution_result(this);
+void BytecodeGenerator::BuildNewLocalWithContext(Scope* scope) {
+  ValueResultScope value_execution_result(this);
 
   Register extension_object = register_allocator()->NewRegister();
 
-  builder()->CastAccumulatorToJSObject(extension_object);
+  builder()->ConvertAccumulatorToObject(extension_object);
   VisitFunctionClosureForContext();
-  builder()->CreateWithContext(extension_object);
-  execution_result()->SetResultInAccumulator();
+  builder()->CreateWithContext(extension_object, scope->scope_info());
 }
 
-void BytecodeGenerator::VisitNewLocalCatchContext(Variable* variable) {
-  AccumulatorResultScope accumulator_execution_result(this);
+void BytecodeGenerator::BuildNewLocalCatchContext(Variable* variable,
+                                                  Scope* scope) {
+  ValueResultScope value_execution_result(this);
   DCHECK(variable->IsContextSlot());
 
   Register exception = register_allocator()->NewRegister();
   builder()->StoreAccumulatorInRegister(exception);
   VisitFunctionClosureForContext();
-  builder()->CreateCatchContext(exception, variable->name());
-  execution_result()->SetResultInAccumulator();
+  builder()->CreateCatchContext(exception, variable->name(),
+                                scope->scope_info());
 }
 
 void BytecodeGenerator::VisitObjectLiteralAccessor(
     Register home_object, ObjectLiteralProperty* property, Register value_out) {
-  // TODO(rmcilroy): Replace value_out with VisitForRegister();
   if (property == nullptr) {
     builder()->LoadNull().StoreAccumulatorInRegister(value_out);
   } else {
-    VisitForAccumulatorValue(property->value());
-    builder()->StoreAccumulatorInRegister(value_out);
+    VisitForRegisterValue(property->value(), value_out);
     VisitSetHomeObject(value_out, home_object, property);
   }
 }
 
 void BytecodeGenerator::VisitSetHomeObject(Register value, Register home_object,
-                                           ObjectLiteralProperty* property,
+                                           LiteralProperty* property,
                                            int slot_number) {
   Expression* expr = property->value();
   if (FunctionLiteral::NeedsHomeObject(expr)) {
@@ -3302,38 +3047,44 @@
   // Store the new target we were called with in the given variable.
   builder()->LoadAccumulatorWithRegister(Register::new_target());
   VisitVariableAssignment(variable, Token::INIT, FeedbackVectorSlot::Invalid());
+
+  // TODO(mstarzinger): The <new.target> register is not set by the deoptimizer
+  // and we need to make sure {BytecodeRegisterOptimizer} flushes its state
+  // before a local variable containing the <new.target> is used. Using a label
+  // as below flushes the entire pipeline, we should be more specific here.
+  BytecodeLabel flush_state_label;
+  builder()->Bind(&flush_state_label);
 }
 
 void BytecodeGenerator::VisitFunctionClosureForContext() {
-  AccumulatorResultScope accumulator_execution_result(this);
+  ValueResultScope value_execution_result(this);
   DeclarationScope* closure_scope =
       execution_context()->scope()->GetClosureScope();
-  if (closure_scope->is_script_scope() ||
-      closure_scope->is_module_scope()) {
+  if (closure_scope->is_script_scope()) {
     // Contexts nested in the native context have a canonical empty function as
     // their closure, not the anonymous closure containing the global code.
     Register native_context = register_allocator()->NewRegister();
     builder()
         ->LoadContextSlot(execution_context()->reg(),
-                          Context::NATIVE_CONTEXT_INDEX)
+                          Context::NATIVE_CONTEXT_INDEX, 0)
         .StoreAccumulatorInRegister(native_context)
-        .LoadContextSlot(native_context, Context::CLOSURE_INDEX);
+        .LoadContextSlot(native_context, Context::CLOSURE_INDEX, 0);
   } else if (closure_scope->is_eval_scope()) {
     // Contexts created by a call to eval have the same closure as the
     // context calling eval, not the anonymous closure containing the eval
     // code. Fetch it from the context.
     builder()->LoadContextSlot(execution_context()->reg(),
-                               Context::CLOSURE_INDEX);
+                               Context::CLOSURE_INDEX, 0);
   } else {
-    DCHECK(closure_scope->is_function_scope());
+    DCHECK(closure_scope->is_function_scope() ||
+           closure_scope->is_module_scope());
     builder()->LoadAccumulatorWithRegister(Register::function_closure());
   }
-  execution_result()->SetResultInAccumulator();
 }
 
 // Visits the expression |expr| and places the result in the accumulator.
 void BytecodeGenerator::VisitForAccumulatorValue(Expression* expr) {
-  AccumulatorResultScope accumulator_scope(this);
+  ValueResultScope accumulator_scope(this);
   Visit(expr);
 }
 
@@ -3354,16 +3105,17 @@
 // Visits the expression |expr| and returns the register containing
 // the expression result.
 Register BytecodeGenerator::VisitForRegisterValue(Expression* expr) {
-  RegisterResultScope register_scope(this);
-  Visit(expr);
-  return register_scope.ResultRegister();
+  VisitForAccumulatorValue(expr);
+  Register result = register_allocator()->NewRegister();
+  builder()->StoreAccumulatorInRegister(result);
+  return result;
 }
 
 // Visits the expression |expr| and stores the expression result in
 // |destination|.
 void BytecodeGenerator::VisitForRegisterValue(Expression* expr,
                                               Register destination) {
-  AccumulatorResultScope register_scope(this);
+  ValueResultScope register_scope(this);
   Visit(expr);
   builder()->StoreAccumulatorInRegister(destination);
 }
@@ -3412,6 +3164,16 @@
   return TypeFeedbackVector::GetIndex(slot);
 }
 
+Runtime::FunctionId BytecodeGenerator::StoreToSuperRuntimeId() {
+  return is_strict(language_mode()) ? Runtime::kStoreToSuper_Strict
+                                    : Runtime::kStoreToSuper_Sloppy;
+}
+
+Runtime::FunctionId BytecodeGenerator::StoreKeyedToSuperRuntimeId() {
+  return is_strict(language_mode()) ? Runtime::kStoreKeyedToSuper_Strict
+                                    : Runtime::kStoreKeyedToSuper_Sloppy;
+}
+
 }  // namespace interpreter
 }  // namespace internal
 }  // namespace v8
diff --git a/src/interpreter/bytecode-generator.h b/src/interpreter/bytecode-generator.h
index ee72135..03067de 100644
--- a/src/interpreter/bytecode-generator.h
+++ b/src/interpreter/bytecode-generator.h
@@ -24,7 +24,7 @@
  public:
   explicit BytecodeGenerator(CompilationInfo* info);
 
-  void GenerateBytecode();
+  void GenerateBytecode(uintptr_t stack_limit);
   Handle<BytecodeArray> FinalizeBytecode(Isolate* isolate);
 
 #define DECLARE_VISIT(type) void Visit##type(type* node);
@@ -36,7 +36,6 @@
   void VisitStatements(ZoneList<Statement*>* statments);
 
  private:
-  class AccumulatorResultScope;
   class ContextScope;
   class ControlScope;
   class ControlScopeForBreakable;
@@ -47,9 +46,9 @@
   class ExpressionResultScope;
   class EffectResultScope;
   class GlobalDeclarationsBuilder;
-  class RegisterResultScope;
   class RegisterAllocationScope;
   class TestResultScope;
+  class ValueResultScope;
 
   enum class TestFallthrough { kThen, kElse, kNone };
 
@@ -73,8 +72,10 @@
   // Used by flow control routines to evaluate loop condition.
   void VisitCondition(Expression* expr);
 
-  // Helper visitors which perform common operations.
-  Register VisitArguments(ZoneList<Expression*>* arguments);
+  // Visit the arguments expressions in |args| and store them in |args_regs|
+  // starting at register |first_argument_register| in the list.
+  void VisitArguments(ZoneList<Expression*>* args, RegisterList arg_regs,
+                      size_t first_argument_register = 0);
 
   // Visit a keyed super property load. The optional
   // |opt_receiver_out| register will have the receiver stored to it
@@ -104,15 +105,8 @@
   void VisitVariableAssignment(Variable* variable, Token::Value op,
                                FeedbackVectorSlot slot);
 
-  void BuildNamedSuperPropertyStore(Register receiver, Register home_object,
-                                    Register name, Register value);
-  void BuildKeyedSuperPropertyStore(Register receiver, Register home_object,
-                                    Register key, Register value);
-  void BuildNamedSuperPropertyLoad(Register receiver, Register home_object,
-                                   Register name);
-  void BuildKeyedSuperPropertyLoad(Register receiver, Register home_object,
-                                   Register key);
-
+  void BuildReturn();
+  void BuildReThrow();
   void BuildAbort(BailoutReason bailout_reason);
   void BuildThrowIfHole(Handle<String> name);
   void BuildThrowIfNotHole(Handle<String> name);
@@ -125,6 +119,12 @@
   void BuildIndexedJump(Register value, size_t start_index, size_t size,
                         ZoneVector<BytecodeLabel>& targets);
 
+  void BuildNewLocalActivationContext();
+  void BuildLocalActivationContextInitialization();
+  void BuildNewLocalBlockContext(Scope* scope);
+  void BuildNewLocalCatchContext(Variable* variable, Scope* scope);
+  void BuildNewLocalWithContext(Scope* scope);
+
   void VisitGeneratorPrologue();
 
   void VisitArgumentsObject(Variable* variable);
@@ -133,18 +133,12 @@
   void VisitClassLiteralForRuntimeDefinition(ClassLiteral* expr);
   void VisitClassLiteralProperties(ClassLiteral* expr, Register literal,
                                    Register prototype);
-  void VisitClassLiteralStaticPrototypeWithComputedName(Register name);
   void VisitThisFunctionVariable(Variable* variable);
   void VisitNewTargetVariable(Variable* variable);
-  void VisitNewLocalFunctionContext();
-  void VisitBuildLocalActivationContext();
   void VisitBlockDeclarationsAndStatements(Block* stmt);
-  void VisitNewLocalBlockContext(Scope* scope);
-  void VisitNewLocalCatchContext(Variable* variable);
-  void VisitNewLocalWithContext();
   void VisitFunctionClosureForContext();
   void VisitSetHomeObject(Register value, Register home_object,
-                          ObjectLiteralProperty* property, int slot_number = 0);
+                          LiteralProperty* property, int slot_number = 0);
   void VisitObjectLiteralAccessor(Register home_object,
                                   ObjectLiteralProperty* property,
                                   Register value_out);
@@ -168,13 +162,10 @@
   void VisitForTest(Expression* expr, BytecodeLabels* then_labels,
                     BytecodeLabels* else_labels, TestFallthrough fallthrough);
 
-  // Methods for tracking and remapping register.
-  void RecordStoreToRegister(Register reg);
-  Register LoadFromAliasedRegister(Register reg);
-
-  // Initialize an array of temporary registers with consecutive registers.
-  template <size_t N>
-  void InitializeWithConsecutiveRegisters(Register (&registers)[N]);
+  // Returns the runtime function id for a store to super for the function's
+  // language mode.
+  inline Runtime::FunctionId StoreToSuperRuntimeId();
+  inline Runtime::FunctionId StoreKeyedToSuperRuntimeId();
 
   inline BytecodeArrayBuilder* builder() const { return builder_; }
   inline Zone* zone() const { return zone_; }
@@ -193,12 +184,8 @@
     execution_result_ = execution_result;
   }
   ExpressionResultScope* execution_result() const { return execution_result_; }
-  inline void set_register_allocator(
-      RegisterAllocationScope* register_allocator) {
-    register_allocator_ = register_allocator;
-  }
-  RegisterAllocationScope* register_allocator() const {
-    return register_allocator_;
+  BytecodeRegisterAllocator* register_allocator() const {
+    return builder()->register_allocator();
   }
 
   GlobalDeclarationsBuilder* globals_builder() { return globals_builder_; }
@@ -222,7 +209,6 @@
   ControlScope* execution_control_;
   ContextScope* execution_context_;
   ExpressionResultScope* execution_result_;
-  RegisterAllocationScope* register_allocator_;
 
   ZoneVector<BytecodeLabel> generator_resume_points_;
   Register generator_state_;
diff --git a/src/interpreter/bytecode-label.h b/src/interpreter/bytecode-label.h
index d96cf66..b5f602d 100644
--- a/src/interpreter/bytecode-label.h
+++ b/src/interpreter/bytecode-label.h
@@ -5,7 +5,7 @@
 #ifndef V8_INTERPRETER_BYTECODE_LABEL_H_
 #define V8_INTERPRETER_BYTECODE_LABEL_H_
 
-#include "src/zone-containers.h"
+#include "src/zone/zone-containers.h"
 
 namespace v8 {
 namespace internal {
diff --git a/src/interpreter/bytecode-operands.cc b/src/interpreter/bytecode-operands.cc
new file mode 100644
index 0000000..6be81fe
--- /dev/null
+++ b/src/interpreter/bytecode-operands.cc
@@ -0,0 +1,89 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/interpreter/bytecode-operands.h"
+
+#include <iomanip>
+
+namespace v8 {
+namespace internal {
+namespace interpreter {
+
+namespace {
+
+const char* AccumulatorUseToString(AccumulatorUse accumulator_use) {
+  switch (accumulator_use) {
+    case AccumulatorUse::kNone:
+      return "None";
+    case AccumulatorUse::kRead:
+      return "Read";
+    case AccumulatorUse::kWrite:
+      return "Write";
+    case AccumulatorUse::kReadWrite:
+      return "ReadWrite";
+  }
+  UNREACHABLE();
+  return "";
+}
+
+const char* OperandTypeToString(OperandType operand_type) {
+  switch (operand_type) {
+#define CASE(Name, _)        \
+  case OperandType::k##Name: \
+    return #Name;
+    OPERAND_TYPE_LIST(CASE)
+#undef CASE
+  }
+  UNREACHABLE();
+  return "";
+}
+
+const char* OperandScaleToString(OperandScale operand_scale) {
+  switch (operand_scale) {
+#define CASE(Name, _)         \
+  case OperandScale::k##Name: \
+    return #Name;
+    OPERAND_SCALE_LIST(CASE)
+#undef CASE
+  }
+  UNREACHABLE();
+  return "";
+}
+
+const char* OperandSizeToString(OperandSize operand_size) {
+  switch (operand_size) {
+    case OperandSize::kNone:
+      return "None";
+    case OperandSize::kByte:
+      return "Byte";
+    case OperandSize::kShort:
+      return "Short";
+    case OperandSize::kQuad:
+      return "Quad";
+  }
+  UNREACHABLE();
+  return "";
+}
+
+}  // namespace
+
+std::ostream& operator<<(std::ostream& os, const AccumulatorUse& use) {
+  return os << AccumulatorUseToString(use);
+}
+
+std::ostream& operator<<(std::ostream& os, const OperandSize& operand_size) {
+  return os << OperandSizeToString(operand_size);
+}
+
+std::ostream& operator<<(std::ostream& os, const OperandScale& operand_scale) {
+  return os << OperandScaleToString(operand_scale);
+}
+
+std::ostream& operator<<(std::ostream& os, const OperandType& operand_type) {
+  return os << OperandTypeToString(operand_type);
+}
+
+}  // namespace interpreter
+}  // namespace internal
+}  // namespace v8
diff --git a/src/interpreter/bytecode-operands.h b/src/interpreter/bytecode-operands.h
new file mode 100644
index 0000000..b35c486
--- /dev/null
+++ b/src/interpreter/bytecode-operands.h
@@ -0,0 +1,126 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_INTERPRETER_BYTECODE_OPERANDS_H_
+#define V8_INTERPRETER_BYTECODE_OPERANDS_H_
+
+#include "src/globals.h"
+
+namespace v8 {
+namespace internal {
+namespace interpreter {
+
+#define INVALID_OPERAND_TYPE_LIST(V) V(None, OperandTypeInfo::kNone)
+
+#define REGISTER_INPUT_OPERAND_TYPE_LIST(V)        \
+  V(RegList, OperandTypeInfo::kScalableSignedByte) \
+  V(Reg, OperandTypeInfo::kScalableSignedByte)     \
+  V(RegPair, OperandTypeInfo::kScalableSignedByte)
+
+#define REGISTER_OUTPUT_OPERAND_TYPE_LIST(V)          \
+  V(RegOut, OperandTypeInfo::kScalableSignedByte)     \
+  V(RegOutPair, OperandTypeInfo::kScalableSignedByte) \
+  V(RegOutTriple, OperandTypeInfo::kScalableSignedByte)
+
+#define SCALAR_OPERAND_TYPE_LIST(V)                   \
+  V(Flag8, OperandTypeInfo::kFixedUnsignedByte)       \
+  V(IntrinsicId, OperandTypeInfo::kFixedUnsignedByte) \
+  V(Idx, OperandTypeInfo::kScalableUnsignedByte)      \
+  V(UImm, OperandTypeInfo::kScalableUnsignedByte)     \
+  V(Imm, OperandTypeInfo::kScalableSignedByte)        \
+  V(RegCount, OperandTypeInfo::kScalableUnsignedByte) \
+  V(RuntimeId, OperandTypeInfo::kFixedUnsignedShort)
+
+#define REGISTER_OPERAND_TYPE_LIST(V) \
+  REGISTER_INPUT_OPERAND_TYPE_LIST(V) \
+  REGISTER_OUTPUT_OPERAND_TYPE_LIST(V)
+
+#define NON_REGISTER_OPERAND_TYPE_LIST(V) \
+  INVALID_OPERAND_TYPE_LIST(V)            \
+  SCALAR_OPERAND_TYPE_LIST(V)
+
+// The list of operand types used by bytecodes.
+#define OPERAND_TYPE_LIST(V)        \
+  NON_REGISTER_OPERAND_TYPE_LIST(V) \
+  REGISTER_OPERAND_TYPE_LIST(V)
+
+// Enumeration of scaling factors applicable to scalable operands. Code
+// relies on being able to cast values to integer scaling values.
+#define OPERAND_SCALE_LIST(V) \
+  V(Single, 1)                \
+  V(Double, 2)                \
+  V(Quadruple, 4)
+
+enum class OperandScale : uint8_t {
+#define DECLARE_OPERAND_SCALE(Name, Scale) k##Name = Scale,
+  OPERAND_SCALE_LIST(DECLARE_OPERAND_SCALE)
+#undef DECLARE_OPERAND_SCALE
+      kLast = kQuadruple
+};
+
+// Enumeration of the size classes of operand types used by
+// bytecodes. Code relies on being able to cast values to integer
+// types to get the size in bytes.
+enum class OperandSize : uint8_t {
+  kNone = 0,
+  kByte = 1,
+  kShort = 2,
+  kQuad = 4,
+  kLast = kQuad
+};
+
+// Primitive operand info used that summarize properties of operands.
+// Columns are Name, IsScalable, IsUnsigned, UnscaledSize.
+#define OPERAND_TYPE_INFO_LIST(V)                         \
+  V(None, false, false, OperandSize::kNone)               \
+  V(ScalableSignedByte, true, false, OperandSize::kByte)  \
+  V(ScalableUnsignedByte, true, true, OperandSize::kByte) \
+  V(FixedUnsignedByte, false, true, OperandSize::kByte)   \
+  V(FixedUnsignedShort, false, true, OperandSize::kShort)
+
+enum class OperandTypeInfo : uint8_t {
+#define DECLARE_OPERAND_TYPE_INFO(Name, ...) k##Name,
+  OPERAND_TYPE_INFO_LIST(DECLARE_OPERAND_TYPE_INFO)
+#undef DECLARE_OPERAND_TYPE_INFO
+};
+
+// Enumeration of operand types used by bytecodes.
+enum class OperandType : uint8_t {
+#define DECLARE_OPERAND_TYPE(Name, _) k##Name,
+  OPERAND_TYPE_LIST(DECLARE_OPERAND_TYPE)
+#undef DECLARE_OPERAND_TYPE
+#define COUNT_OPERAND_TYPES(x, _) +1
+  // The COUNT_OPERAND macro will turn this into kLast = -1 +1 +1... which will
+  // evaluate to the same value as the last operand.
+  kLast = -1 OPERAND_TYPE_LIST(COUNT_OPERAND_TYPES)
+#undef COUNT_OPERAND_TYPES
+};
+
+enum class AccumulatorUse : uint8_t {
+  kNone = 0,
+  kRead = 1 << 0,
+  kWrite = 1 << 1,
+  kReadWrite = kRead | kWrite
+};
+
+inline AccumulatorUse operator&(AccumulatorUse lhs, AccumulatorUse rhs) {
+  int result = static_cast<int>(lhs) & static_cast<int>(rhs);
+  return static_cast<AccumulatorUse>(result);
+}
+
+inline AccumulatorUse operator|(AccumulatorUse lhs, AccumulatorUse rhs) {
+  int result = static_cast<int>(lhs) | static_cast<int>(rhs);
+  return static_cast<AccumulatorUse>(result);
+}
+
+std::ostream& operator<<(std::ostream& os, const AccumulatorUse& use);
+std::ostream& operator<<(std::ostream& os, const OperandScale& operand_scale);
+std::ostream& operator<<(std::ostream& os, const OperandSize& operand_size);
+std::ostream& operator<<(std::ostream& os, const OperandType& operand_type);
+
+}  // namespace interpreter
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_INTERPRETER_BYTECODE_OPERANDS_H_
diff --git a/src/interpreter/bytecode-peephole-optimizer.cc b/src/interpreter/bytecode-peephole-optimizer.cc
index 11aebb6..c87d31c 100644
--- a/src/interpreter/bytecode-peephole-optimizer.cc
+++ b/src/interpreter/bytecode-peephole-optimizer.cc
@@ -13,17 +13,17 @@
 
 BytecodePeepholeOptimizer::BytecodePeepholeOptimizer(
     BytecodePipelineStage* next_stage)
-    : next_stage_(next_stage) {
+    : next_stage_(next_stage), last_(Bytecode::kIllegal) {
   InvalidateLast();
 }
 
 // override
 Handle<BytecodeArray> BytecodePeepholeOptimizer::ToBytecodeArray(
-    Isolate* isolate, int fixed_register_count, int parameter_count,
+    Isolate* isolate, int register_count, int parameter_count,
     Handle<FixedArray> handler_table) {
   Flush();
-  return next_stage_->ToBytecodeArray(isolate, fixed_register_count,
-                                      parameter_count, handler_table);
+  return next_stage_->ToBytecodeArray(isolate, register_count, parameter_count,
+                                      handler_table);
 }
 
 // override
@@ -142,7 +142,7 @@
   current->set_bytecode(new_bytecode, last->operand(0), current->operand(0),
                         current->operand(1));
   if (last->source_info().is_valid()) {
-    current->source_info().Clone(last->source_info());
+    current->source_info_ptr()->Clone(last->source_info());
   }
 }
 
@@ -153,7 +153,7 @@
   current->set_bytecode(new_bytecode, 0, current->operand(0),
                         current->operand(1));
   if (last->source_info().is_valid()) {
-    current->source_info().Clone(last->source_info());
+    current->source_info_ptr()->Clone(last->source_info());
   }
 }
 
@@ -223,7 +223,7 @@
       // |node| can not have a valid source position if the source
       // position of last() is valid (per rules in
       // CanElideLastBasedOnSourcePosition()).
-      node->source_info().Clone(last()->source_info());
+      node->source_info_ptr()->Clone(last()->source_info());
     }
     SetLast(node);
   } else {
@@ -314,7 +314,7 @@
   if (!CanElideLastBasedOnSourcePosition(node)) {
     next_stage()->Write(last());
   } else if (!node->source_info().is_valid()) {
-    node->source_info().Clone(last()->source_info());
+    node->source_info_ptr()->Clone(last()->source_info());
   }
   InvalidateLast();
 }
diff --git a/src/interpreter/bytecode-peephole-optimizer.h b/src/interpreter/bytecode-peephole-optimizer.h
index 2f4a35f..cedd742 100644
--- a/src/interpreter/bytecode-peephole-optimizer.h
+++ b/src/interpreter/bytecode-peephole-optimizer.h
@@ -28,7 +28,7 @@
   void BindLabel(BytecodeLabel* label) override;
   void BindLabel(const BytecodeLabel& target, BytecodeLabel* label) override;
   Handle<BytecodeArray> ToBytecodeArray(
-      Isolate* isolate, int fixed_register_count, int parameter_count,
+      Isolate* isolate, int register_count, int parameter_count,
       Handle<FixedArray> handler_table) override;
 
  private:
diff --git a/src/interpreter/bytecode-pipeline.cc b/src/interpreter/bytecode-pipeline.cc
index 66b8bdf..6e6a6b6 100644
--- a/src/interpreter/bytecode-pipeline.cc
+++ b/src/interpreter/bytecode-pipeline.cc
@@ -11,45 +11,6 @@
 namespace internal {
 namespace interpreter {
 
-BytecodeNode::BytecodeNode(Bytecode bytecode) {
-  DCHECK_EQ(Bytecodes::NumberOfOperands(bytecode), 0);
-  bytecode_ = bytecode;
-}
-
-BytecodeNode::BytecodeNode(Bytecode bytecode, uint32_t operand0) {
-  DCHECK_EQ(Bytecodes::NumberOfOperands(bytecode), 1);
-  bytecode_ = bytecode;
-  operands_[0] = operand0;
-}
-
-BytecodeNode::BytecodeNode(Bytecode bytecode, uint32_t operand0,
-                           uint32_t operand1) {
-  DCHECK_EQ(Bytecodes::NumberOfOperands(bytecode), 2);
-  bytecode_ = bytecode;
-  operands_[0] = operand0;
-  operands_[1] = operand1;
-}
-
-BytecodeNode::BytecodeNode(Bytecode bytecode, uint32_t operand0,
-                           uint32_t operand1, uint32_t operand2) {
-  DCHECK_EQ(Bytecodes::NumberOfOperands(bytecode), 3);
-  bytecode_ = bytecode;
-  operands_[0] = operand0;
-  operands_[1] = operand1;
-  operands_[2] = operand2;
-}
-
-BytecodeNode::BytecodeNode(Bytecode bytecode, uint32_t operand0,
-                           uint32_t operand1, uint32_t operand2,
-                           uint32_t operand3) {
-  DCHECK_EQ(Bytecodes::NumberOfOperands(bytecode), 4);
-  bytecode_ = bytecode;
-  operands_[0] = operand0;
-  operands_[1] = operand1;
-  operands_[2] = operand2;
-  operands_[3] = operand3;
-}
-
 BytecodeNode::BytecodeNode(const BytecodeNode& other) {
   memcpy(this, &other, sizeof(other));
 }
@@ -83,23 +44,6 @@
 #endif  // DEBUG
 }
 
-void BytecodeNode::Transform(Bytecode new_bytecode, uint32_t extra_operand) {
-  DCHECK_EQ(Bytecodes::NumberOfOperands(new_bytecode),
-            Bytecodes::NumberOfOperands(bytecode()) + 1);
-  DCHECK(Bytecodes::NumberOfOperands(bytecode()) < 1 ||
-         Bytecodes::GetOperandType(new_bytecode, 0) ==
-             Bytecodes::GetOperandType(bytecode(), 0));
-  DCHECK(Bytecodes::NumberOfOperands(bytecode()) < 2 ||
-         Bytecodes::GetOperandType(new_bytecode, 1) ==
-             Bytecodes::GetOperandType(bytecode(), 1));
-  DCHECK(Bytecodes::NumberOfOperands(bytecode()) < 3 ||
-         Bytecodes::GetOperandType(new_bytecode, 2) ==
-             Bytecodes::GetOperandType(bytecode(), 2));
-  DCHECK(Bytecodes::NumberOfOperands(bytecode()) < 4);
-  operands_[operand_count()] = extra_operand;
-  bytecode_ = new_bytecode;
-}
-
 bool BytecodeNode::operator==(const BytecodeNode& other) const {
   if (this == &other) {
     return true;
diff --git a/src/interpreter/bytecode-pipeline.h b/src/interpreter/bytecode-pipeline.h
index 1668bab..0b1a1f1 100644
--- a/src/interpreter/bytecode-pipeline.h
+++ b/src/interpreter/bytecode-pipeline.h
@@ -9,7 +9,7 @@
 #include "src/interpreter/bytecode-register.h"
 #include "src/interpreter/bytecodes.h"
 #include "src/objects.h"
-#include "src/zone-containers.h"
+#include "src/zone/zone-containers.h"
 
 namespace v8 {
 namespace internal {
@@ -47,7 +47,7 @@
 
   // Flush the pipeline and generate a bytecode array.
   virtual Handle<BytecodeArray> ToBytecodeArray(
-      Isolate* isolate, int fixed_register_count, int parameter_count,
+      Isolate* isolate, int register_count, int parameter_count,
       Handle<FixedArray> handler_table) = 0;
 };
 
@@ -134,21 +134,69 @@
 
   PositionType position_type_;
   int source_position_;
-
-  DISALLOW_COPY_AND_ASSIGN(BytecodeSourceInfo);
 };
 
 // A container for a generated bytecode, it's operands, and source information.
 // These must be allocated by a BytecodeNodeAllocator instance.
 class BytecodeNode final : ZoneObject {
  public:
-  explicit BytecodeNode(Bytecode bytecode = Bytecode::kIllegal);
-  BytecodeNode(Bytecode bytecode, uint32_t operand0);
-  BytecodeNode(Bytecode bytecode, uint32_t operand0, uint32_t operand1);
-  BytecodeNode(Bytecode bytecode, uint32_t operand0, uint32_t operand1,
-               uint32_t operand2);
-  BytecodeNode(Bytecode bytecode, uint32_t operand0, uint32_t operand1,
-               uint32_t operand2, uint32_t operand3);
+  INLINE(BytecodeNode(const Bytecode bytecode,
+                      BytecodeSourceInfo* source_info = nullptr))
+      : bytecode_(bytecode),
+        operand_count_(0),
+        operand_scale_(OperandScale::kSingle) {
+    DCHECK_EQ(Bytecodes::NumberOfOperands(bytecode), operand_count());
+    AttachSourceInfo(source_info);
+  }
+
+  INLINE(BytecodeNode(const Bytecode bytecode, uint32_t operand0,
+                      BytecodeSourceInfo* source_info = nullptr))
+      : bytecode_(bytecode),
+        operand_count_(1),
+        operand_scale_(OperandScale::kSingle) {
+    DCHECK_EQ(Bytecodes::NumberOfOperands(bytecode), operand_count());
+    SetOperand(0, operand0);
+    AttachSourceInfo(source_info);
+  }
+
+  INLINE(BytecodeNode(const Bytecode bytecode, uint32_t operand0,
+                      uint32_t operand1,
+                      BytecodeSourceInfo* source_info = nullptr))
+      : bytecode_(bytecode),
+        operand_count_(2),
+        operand_scale_(OperandScale::kSingle) {
+    DCHECK_EQ(Bytecodes::NumberOfOperands(bytecode), operand_count());
+    SetOperand(0, operand0);
+    SetOperand(1, operand1);
+    AttachSourceInfo(source_info);
+  }
+
+  INLINE(BytecodeNode(const Bytecode bytecode, uint32_t operand0,
+                      uint32_t operand1, uint32_t operand2,
+                      BytecodeSourceInfo* source_info = nullptr))
+      : bytecode_(bytecode),
+        operand_count_(3),
+        operand_scale_(OperandScale::kSingle) {
+    DCHECK_EQ(Bytecodes::NumberOfOperands(bytecode), operand_count());
+    SetOperand(0, operand0);
+    SetOperand(1, operand1);
+    SetOperand(2, operand2);
+    AttachSourceInfo(source_info);
+  }
+
+  INLINE(BytecodeNode(const Bytecode bytecode, uint32_t operand0,
+                      uint32_t operand1, uint32_t operand2, uint32_t operand3,
+                      BytecodeSourceInfo* source_info = nullptr))
+      : bytecode_(bytecode),
+        operand_count_(4),
+        operand_scale_(OperandScale::kSingle) {
+    DCHECK_EQ(Bytecodes::NumberOfOperands(bytecode), operand_count());
+    SetOperand(0, operand0);
+    SetOperand(1, operand1);
+    SetOperand(2, operand2);
+    SetOperand(3, operand3);
+    AttachSourceInfo(source_info);
+  }
 
   BytecodeNode(const BytecodeNode& other);
   BytecodeNode& operator=(const BytecodeNode& other);
@@ -162,25 +210,33 @@
   void set_bytecode(Bytecode bytecode) {
     DCHECK_EQ(Bytecodes::NumberOfOperands(bytecode), 0);
     bytecode_ = bytecode;
+    operand_count_ = 0;
+    operand_scale_ = OperandScale::kSingle;
   }
   void set_bytecode(Bytecode bytecode, uint32_t operand0) {
     DCHECK_EQ(Bytecodes::NumberOfOperands(bytecode), 1);
     bytecode_ = bytecode;
-    operands_[0] = operand0;
+    operand_count_ = 1;
+    operand_scale_ = OperandScale::kSingle;
+    SetOperand(0, operand0);
   }
   void set_bytecode(Bytecode bytecode, uint32_t operand0, uint32_t operand1) {
     DCHECK_EQ(Bytecodes::NumberOfOperands(bytecode), 2);
     bytecode_ = bytecode;
-    operands_[0] = operand0;
-    operands_[1] = operand1;
+    operand_count_ = 2;
+    operand_scale_ = OperandScale::kSingle;
+    SetOperand(0, operand0);
+    SetOperand(1, operand1);
   }
   void set_bytecode(Bytecode bytecode, uint32_t operand0, uint32_t operand1,
                     uint32_t operand2) {
     DCHECK_EQ(Bytecodes::NumberOfOperands(bytecode), 3);
     bytecode_ = bytecode;
-    operands_[0] = operand0;
-    operands_[1] = operand1;
-    operands_[2] = operand2;
+    operand_count_ = 3;
+    operand_scale_ = OperandScale::kSingle;
+    SetOperand(0, operand0);
+    SetOperand(1, operand1);
+    SetOperand(2, operand2);
   }
 
   // Clone |other|.
@@ -191,7 +247,36 @@
 
   // Transform to a node representing |new_bytecode| which has one
   // operand more than the current bytecode.
-  void Transform(Bytecode new_bytecode, uint32_t extra_operand);
+  void Transform(Bytecode new_bytecode, uint32_t extra_operand) {
+    DCHECK_EQ(Bytecodes::NumberOfOperands(new_bytecode),
+              Bytecodes::NumberOfOperands(bytecode()) + 1);
+    DCHECK(Bytecodes::NumberOfOperands(bytecode()) < 1 ||
+           Bytecodes::GetOperandType(new_bytecode, 0) ==
+               Bytecodes::GetOperandType(bytecode(), 0));
+    DCHECK(Bytecodes::NumberOfOperands(bytecode()) < 2 ||
+           Bytecodes::GetOperandType(new_bytecode, 1) ==
+               Bytecodes::GetOperandType(bytecode(), 1));
+    DCHECK(Bytecodes::NumberOfOperands(bytecode()) < 3 ||
+           Bytecodes::GetOperandType(new_bytecode, 2) ==
+               Bytecodes::GetOperandType(bytecode(), 2));
+    DCHECK(Bytecodes::NumberOfOperands(bytecode()) < 4);
+
+    bytecode_ = new_bytecode;
+    operand_count_++;
+    SetOperand(operand_count() - 1, extra_operand);
+  }
+
+  // Updates the operand at |operand_index| to |operand|.
+  void UpdateOperand(int operand_index, uint32_t operand) {
+    DCHECK_LE(operand_index, Bytecodes::NumberOfOperands(bytecode()));
+    operands_[operand_index] = operand;
+    if ((Bytecodes::OperandIsScalableSignedByte(bytecode(), operand_index) &&
+         Bytecodes::ScaleForSignedOperand(operand) != operand_scale_) ||
+        (Bytecodes::OperandIsScalableUnsignedByte(bytecode(), operand_index) &&
+         Bytecodes::ScaleForUnsignedOperand(operand) != operand_scale_)) {
+      UpdateScale();
+    }
+  }
 
   Bytecode bytecode() const { return bytecode_; }
 
@@ -199,22 +284,60 @@
     DCHECK_LT(i, operand_count());
     return operands_[i];
   }
-  uint32_t* operands() { return operands_; }
   const uint32_t* operands() const { return operands_; }
 
-  int operand_count() const { return Bytecodes::NumberOfOperands(bytecode_); }
+  int operand_count() const { return operand_count_; }
+  OperandScale operand_scale() const { return operand_scale_; }
 
   const BytecodeSourceInfo& source_info() const { return source_info_; }
-  BytecodeSourceInfo& source_info() { return source_info_; }
+  BytecodeSourceInfo* source_info_ptr() { return &source_info_; }
 
   bool operator==(const BytecodeNode& other) const;
   bool operator!=(const BytecodeNode& other) const { return !(*this == other); }
 
  private:
-  static const int kInvalidPosition = kMinInt;
+  INLINE(void AttachSourceInfo(BytecodeSourceInfo* source_info)) {
+    if (source_info && source_info->is_valid()) {
+      // Statement positions need to be emitted immediately.  Expression
+      // positions can be pushed back until a bytecode is found that can
+      // throw (if expression position filtering is turned on). We only
+      // invalidate the existing source position information if it is used.
+      if (source_info->is_statement() ||
+          !FLAG_ignition_filter_expression_positions ||
+          !Bytecodes::IsWithoutExternalSideEffects(bytecode())) {
+        source_info_.Clone(*source_info);
+        source_info->set_invalid();
+      }
+    }
+  }
+
+  INLINE(void UpdateScaleForOperand(int operand_index, uint32_t operand)) {
+    if (Bytecodes::OperandIsScalableSignedByte(bytecode(), operand_index)) {
+      operand_scale_ =
+          std::max(operand_scale_, Bytecodes::ScaleForSignedOperand(operand));
+    } else if (Bytecodes::OperandIsScalableUnsignedByte(bytecode(),
+                                                        operand_index)) {
+      operand_scale_ =
+          std::max(operand_scale_, Bytecodes::ScaleForUnsignedOperand(operand));
+    }
+  }
+
+  INLINE(void SetOperand(int operand_index, uint32_t operand)) {
+    operands_[operand_index] = operand;
+    UpdateScaleForOperand(operand_index, operand);
+  }
+
+  void UpdateScale() {
+    operand_scale_ = OperandScale::kSingle;
+    for (int i = 0; i < operand_count(); i++) {
+      UpdateScaleForOperand(i, operands_[i]);
+    }
+  }
 
   Bytecode bytecode_;
   uint32_t operands_[Bytecodes::kMaxOperands];
+  int operand_count_;
+  OperandScale operand_scale_;
   BytecodeSourceInfo source_info_;
 };
 
diff --git a/src/interpreter/bytecode-register-allocator.cc b/src/interpreter/bytecode-register-allocator.cc
deleted file mode 100644
index 10afcdc..0000000
--- a/src/interpreter/bytecode-register-allocator.cc
+++ /dev/null
@@ -1,210 +0,0 @@
-// Copyright 2015 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/interpreter/bytecode-register-allocator.h"
-
-#include "src/interpreter/bytecode-array-builder.h"
-
-namespace v8 {
-namespace internal {
-namespace interpreter {
-
-TemporaryRegisterAllocator::TemporaryRegisterAllocator(Zone* zone,
-                                                       int allocation_base)
-    : free_temporaries_(zone),
-      allocation_base_(allocation_base),
-      allocation_count_(0),
-      observer_(nullptr) {}
-
-Register TemporaryRegisterAllocator::first_temporary_register() const {
-  DCHECK(allocation_count() > 0);
-  return Register(allocation_base());
-}
-
-Register TemporaryRegisterAllocator::last_temporary_register() const {
-  DCHECK(allocation_count() > 0);
-  return Register(allocation_base() + allocation_count() - 1);
-}
-
-void TemporaryRegisterAllocator::set_observer(
-    TemporaryRegisterObserver* observer) {
-  DCHECK(observer_ == nullptr);
-  observer_ = observer;
-}
-
-int TemporaryRegisterAllocator::AllocateTemporaryRegister() {
-  allocation_count_ += 1;
-  return allocation_base() + allocation_count() - 1;
-}
-
-int TemporaryRegisterAllocator::BorrowTemporaryRegister() {
-  if (free_temporaries_.empty()) {
-    return AllocateTemporaryRegister();
-  } else {
-    auto pos = free_temporaries_.begin();
-    int retval = *pos;
-    free_temporaries_.erase(pos);
-    return retval;
-  }
-}
-
-int TemporaryRegisterAllocator::BorrowTemporaryRegisterNotInRange(
-    int start_index, int end_index) {
-  if (free_temporaries_.empty()) {
-    int next_allocation = allocation_base() + allocation_count();
-    while (next_allocation >= start_index && next_allocation <= end_index) {
-      free_temporaries_.insert(AllocateTemporaryRegister());
-      next_allocation += 1;
-    }
-    return AllocateTemporaryRegister();
-  }
-
-  ZoneSet<int>::iterator index = free_temporaries_.lower_bound(start_index);
-  if (index == free_temporaries_.begin()) {
-    // If start_index is the first free register, check for a register
-    // greater than end_index.
-    index = free_temporaries_.upper_bound(end_index);
-    if (index == free_temporaries_.end()) {
-      return AllocateTemporaryRegister();
-    }
-  } else {
-    // If there is a free register < start_index
-    index--;
-  }
-
-  int retval = *index;
-  free_temporaries_.erase(index);
-  return retval;
-}
-
-int TemporaryRegisterAllocator::PrepareForConsecutiveTemporaryRegisters(
-    size_t count) {
-  if (count == 0) {
-    return -1;
-  }
-
-  // TODO(oth): replace use of set<> here for free_temporaries with a
-  // more efficient structure. And/or partition into two searches -
-  // one before the translation window and one after.
-
-  // A run will require at least |count| free temporaries.
-  while (free_temporaries_.size() < count) {
-    free_temporaries_.insert(AllocateTemporaryRegister());
-  }
-
-  // Search within existing temporaries for a run.
-  auto start = free_temporaries_.begin();
-  size_t run_length = 0;
-  for (auto run_end = start; run_end != free_temporaries_.end(); run_end++) {
-    int expected = *start + static_cast<int>(run_length);
-    if (*run_end != expected) {
-      start = run_end;
-      run_length = 0;
-    }
-    if (++run_length == count) {
-      return *start;
-    }
-  }
-
-  // Continue run if possible across existing last temporary.
-  if (allocation_count_ > 0 && (start == free_temporaries_.end() ||
-                                *start + static_cast<int>(run_length) !=
-                                    last_temporary_register().index() + 1)) {
-    run_length = 0;
-  }
-
-  // Pad temporaries if extended run would cross translation boundary.
-  Register reg_first(*start);
-  Register reg_last(*start + static_cast<int>(count) - 1);
-
-  // Ensure enough registers for run.
-  while (run_length++ < count) {
-    free_temporaries_.insert(AllocateTemporaryRegister());
-  }
-
-  int run_start =
-      last_temporary_register().index() - static_cast<int>(count) + 1;
-  return run_start;
-}
-
-bool TemporaryRegisterAllocator::RegisterIsLive(Register reg) const {
-  if (allocation_count_ > 0) {
-    DCHECK(reg >= first_temporary_register() &&
-           reg <= last_temporary_register());
-    return free_temporaries_.find(reg.index()) == free_temporaries_.end();
-  } else {
-    return false;
-  }
-}
-
-void TemporaryRegisterAllocator::BorrowConsecutiveTemporaryRegister(
-    int reg_index) {
-  DCHECK(free_temporaries_.find(reg_index) != free_temporaries_.end());
-  free_temporaries_.erase(reg_index);
-}
-
-void TemporaryRegisterAllocator::ReturnTemporaryRegister(int reg_index) {
-  DCHECK(free_temporaries_.find(reg_index) == free_temporaries_.end());
-  free_temporaries_.insert(reg_index);
-  if (observer_) {
-    observer_->TemporaryRegisterFreeEvent(Register(reg_index));
-  }
-}
-
-BytecodeRegisterAllocator::BytecodeRegisterAllocator(
-    Zone* zone, TemporaryRegisterAllocator* allocator)
-    : base_allocator_(allocator),
-      allocated_(zone),
-      next_consecutive_register_(-1),
-      next_consecutive_count_(-1) {}
-
-BytecodeRegisterAllocator::~BytecodeRegisterAllocator() {
-  for (auto i = allocated_.rbegin(); i != allocated_.rend(); i++) {
-    base_allocator()->ReturnTemporaryRegister(*i);
-  }
-  allocated_.clear();
-}
-
-Register BytecodeRegisterAllocator::NewRegister() {
-  int allocated = -1;
-  if (next_consecutive_count_ <= 0) {
-    allocated = base_allocator()->BorrowTemporaryRegister();
-  } else {
-    allocated = base_allocator()->BorrowTemporaryRegisterNotInRange(
-        next_consecutive_register_,
-        next_consecutive_register_ + next_consecutive_count_ - 1);
-  }
-  allocated_.push_back(allocated);
-  return Register(allocated);
-}
-
-bool BytecodeRegisterAllocator::RegisterIsAllocatedInThisScope(
-    Register reg) const {
-  for (auto i = allocated_.begin(); i != allocated_.end(); i++) {
-    if (*i == reg.index()) return true;
-  }
-  return false;
-}
-
-void BytecodeRegisterAllocator::PrepareForConsecutiveAllocations(size_t count) {
-  if (static_cast<int>(count) > next_consecutive_count_) {
-    next_consecutive_register_ =
-        base_allocator()->PrepareForConsecutiveTemporaryRegisters(count);
-    next_consecutive_count_ = static_cast<int>(count);
-  }
-}
-
-Register BytecodeRegisterAllocator::NextConsecutiveRegister() {
-  DCHECK_GE(next_consecutive_register_, 0);
-  DCHECK_GT(next_consecutive_count_, 0);
-  base_allocator()->BorrowConsecutiveTemporaryRegister(
-      next_consecutive_register_);
-  allocated_.push_back(next_consecutive_register_);
-  next_consecutive_count_--;
-  return Register(next_consecutive_register_++);
-}
-
-}  // namespace interpreter
-}  // namespace internal
-}  // namespace v8
diff --git a/src/interpreter/bytecode-register-allocator.h b/src/interpreter/bytecode-register-allocator.h
index b8f737b..e9de466 100644
--- a/src/interpreter/bytecode-register-allocator.h
+++ b/src/interpreter/bytecode-register-allocator.h
@@ -5,106 +5,76 @@
 #ifndef V8_INTERPRETER_BYTECODE_REGISTER_ALLOCATOR_H_
 #define V8_INTERPRETER_BYTECODE_REGISTER_ALLOCATOR_H_
 
+#include "src/interpreter/bytecode-register.h"
 #include "src/interpreter/bytecodes.h"
-#include "src/zone-containers.h"
+#include "src/zone/zone-containers.h"
 
 namespace v8 {
 namespace internal {
 namespace interpreter {
 
-class BytecodeArrayBuilder;
-class Register;
-class TemporaryRegisterObserver;
-
-class TemporaryRegisterAllocator final {
- public:
-  TemporaryRegisterAllocator(Zone* zone, int start_index);
-
-  // Borrow a temporary register.
-  int BorrowTemporaryRegister();
-
-  // Borrow a temporary register from the register range outside of
-  // |start_index| to |end_index|.
-  int BorrowTemporaryRegisterNotInRange(int start_index, int end_index);
-
-  // Return a temporary register when no longer used.
-  void ReturnTemporaryRegister(int reg_index);
-
-  // Ensure a run of consecutive registers is available. Each register in
-  // the range should be borrowed with BorrowConsecutiveTemporaryRegister().
-  // Returns the start index of the run.
-  int PrepareForConsecutiveTemporaryRegisters(size_t count);
-
-  // Borrow a register from a range prepared with
-  // PrepareForConsecutiveTemporaryRegisters().
-  void BorrowConsecutiveTemporaryRegister(int reg_index);
-
-  // Returns true if |reg| is a temporary register and is currently
-  // borrowed.
-  bool RegisterIsLive(Register reg) const;
-
-  // Returns the first register in the range of temporary registers.
-  Register first_temporary_register() const;
-
-  // Returns the last register in the range of temporary registers.
-  Register last_temporary_register() const;
-
-  // Returns the start index of temporary register allocations.
-  int allocation_base() const { return allocation_base_; }
-
-  // Returns the number of temporary register allocations made.
-  int allocation_count() const { return allocation_count_; }
-
-  // Sets an observer for temporary register events.
-  void set_observer(TemporaryRegisterObserver* observer);
-
- private:
-  // Allocate a temporary register.
-  int AllocateTemporaryRegister();
-
-  ZoneSet<int> free_temporaries_;
-  int allocation_base_;
-  int allocation_count_;
-  TemporaryRegisterObserver* observer_;
-
-  DISALLOW_COPY_AND_ASSIGN(TemporaryRegisterAllocator);
-};
-
-class TemporaryRegisterObserver {
- public:
-  virtual ~TemporaryRegisterObserver() {}
-  virtual void TemporaryRegisterFreeEvent(Register reg) = 0;
-};
-
-// A class that allows the instantiator to allocate temporary registers that are
-// cleaned up when scope is closed.
+// A class that allows the allocation of contiguous temporary registers.
 class BytecodeRegisterAllocator final {
  public:
-  explicit BytecodeRegisterAllocator(Zone* zone,
-                                     TemporaryRegisterAllocator* allocator);
-  ~BytecodeRegisterAllocator();
-  Register NewRegister();
+  // Enables observation of register allocation and free events.
+  class Observer {
+   public:
+    virtual ~Observer() {}
+    virtual void RegisterAllocateEvent(Register reg) = 0;
+    virtual void RegisterListAllocateEvent(RegisterList reg_list) = 0;
+    virtual void RegisterListFreeEvent(RegisterList reg_list) = 0;
+  };
 
-  // Ensure |count| consecutive allocations are available.
-  void PrepareForConsecutiveAllocations(size_t count);
+  explicit BytecodeRegisterAllocator(int start_index)
+      : next_register_index_(start_index),
+        max_register_count_(start_index),
+        observer_(nullptr) {}
+  ~BytecodeRegisterAllocator() {}
 
-  // Get the next consecutive allocation after calling
-  // PrepareForConsecutiveAllocations.
-  Register NextConsecutiveRegister();
+  // Returns a new register.
+  Register NewRegister() {
+    Register reg(next_register_index_++);
+    max_register_count_ = std::max(next_register_index_, max_register_count_);
+    if (observer_) {
+      observer_->RegisterAllocateEvent(reg);
+    }
+    return reg;
+  }
 
-  // Returns true if |reg| is allocated in this allocator.
-  bool RegisterIsAllocatedInThisScope(Register reg) const;
+  // Returns a consecutive list of |count| new registers.
+  RegisterList NewRegisterList(int count) {
+    RegisterList reg_list(next_register_index_, count);
+    next_register_index_ += count;
+    max_register_count_ = std::max(next_register_index_, max_register_count_);
+    if (observer_) {
+      observer_->RegisterListAllocateEvent(reg_list);
+    }
+    return reg_list;
+  }
 
-  // Returns true if unused consecutive allocations remain.
-  bool HasConsecutiveAllocations() const { return next_consecutive_count_ > 0; }
+  // Release all registers above |register_index|.
+  void ReleaseRegisters(int register_index) {
+    if (observer_) {
+      observer_->RegisterListFreeEvent(
+          RegisterList(register_index, next_register_index_ - register_index));
+    }
+    next_register_index_ = register_index;
+  }
+
+  // Returns true if the register |reg| is a live register.
+  bool RegisterIsLive(Register reg) const {
+    return reg.index() < next_register_index_;
+  }
+
+  void set_observer(Observer* observer) { observer_ = observer; }
+
+  int next_register_index() const { return next_register_index_; }
+  int maximum_register_count() const { return max_register_count_; }
 
  private:
-  TemporaryRegisterAllocator* base_allocator() const { return base_allocator_; }
-
-  TemporaryRegisterAllocator* base_allocator_;
-  ZoneVector<int> allocated_;
-  int next_consecutive_register_;
-  int next_consecutive_count_;
+  int next_register_index_;
+  int max_register_count_;
+  Observer* observer_;
 
   DISALLOW_COPY_AND_ASSIGN(BytecodeRegisterAllocator);
 };
diff --git a/src/interpreter/bytecode-register-optimizer.cc b/src/interpreter/bytecode-register-optimizer.cc
index d28f215..acbe0ba 100644
--- a/src/interpreter/bytecode-register-optimizer.cc
+++ b/src/interpreter/bytecode-register-optimizer.cc
@@ -15,10 +15,12 @@
 // register is materialized in the bytecode stream.
 class BytecodeRegisterOptimizer::RegisterInfo final : public ZoneObject {
  public:
-  RegisterInfo(Register reg, uint32_t equivalence_id, bool materialized)
+  RegisterInfo(Register reg, uint32_t equivalence_id, bool materialized,
+               bool allocated)
       : register_(reg),
         equivalence_id_(equivalence_id),
         materialized_(materialized),
+        allocated_(allocated),
         next_(this),
         prev_(this) {}
 
@@ -48,12 +50,17 @@
   // exists.
   RegisterInfo* GetEquivalentToMaterialize();
 
+  // Marks all temporary registers of the equivalence set as unmaterialized.
+  void MarkTemporariesAsUnmaterialized(Register temporary_base);
+
   // Get an equivalent register. Returns this if none exists.
   RegisterInfo* GetEquivalent();
 
   Register register_value() const { return register_; }
   bool materialized() const { return materialized_; }
   void set_materialized(bool materialized) { materialized_ = materialized; }
+  bool allocated() const { return allocated_; }
+  void set_allocated(bool allocated) { allocated_ = allocated; }
   void set_equivalence_id(uint32_t equivalence_id) {
     equivalence_id_ = equivalence_id;
   }
@@ -63,6 +70,7 @@
   Register register_;
   uint32_t equivalence_id_;
   bool materialized_;
+  bool allocated_;
 
   // Equivalence set pointers.
   RegisterInfo* next_;
@@ -155,8 +163,9 @@
     if (visitor->materialized()) {
       return nullptr;
     }
-    if (best_info == nullptr ||
-        visitor->register_value() < best_info->register_value()) {
+    if (visitor->allocated() &&
+        (best_info == nullptr ||
+         visitor->register_value() < best_info->register_value())) {
       best_info = visitor;
     }
     visitor = visitor->next_;
@@ -164,16 +173,31 @@
   return best_info;
 }
 
+void BytecodeRegisterOptimizer::RegisterInfo::MarkTemporariesAsUnmaterialized(
+    Register temporary_base) {
+  DCHECK(this->register_value() < temporary_base);
+  DCHECK(this->materialized());
+  RegisterInfo* visitor = this->next_;
+  while (visitor != this) {
+    if (visitor->register_value() >= temporary_base) {
+      visitor->set_materialized(false);
+    }
+    visitor = visitor->next_;
+  }
+}
+
 BytecodeRegisterOptimizer::RegisterInfo*
 BytecodeRegisterOptimizer::RegisterInfo::GetEquivalent() {
   return next_;
 }
 
 BytecodeRegisterOptimizer::BytecodeRegisterOptimizer(
-    Zone* zone, TemporaryRegisterAllocator* register_allocator,
-    int parameter_count, BytecodePipelineStage* next_stage)
+    Zone* zone, BytecodeRegisterAllocator* register_allocator,
+    int fixed_registers_count, int parameter_count,
+    BytecodePipelineStage* next_stage)
     : accumulator_(Register::virtual_accumulator()),
-      temporary_base_(register_allocator->allocation_base()),
+      temporary_base_(fixed_registers_count),
+      max_register_index_(fixed_registers_count - 1),
       register_info_table_(zone),
       equivalence_id_(0),
       next_stage_(next_stage),
@@ -198,7 +222,7 @@
                               static_cast<size_t>(temporary_base_.index()));
   for (size_t i = 0; i < register_info_table_.size(); ++i) {
     register_info_table_[i] = new (zone) RegisterInfo(
-        RegisterFromRegisterInfoTableIndex(i), NextEquivalenceId(), true);
+        RegisterFromRegisterInfoTableIndex(i), NextEquivalenceId(), true, true);
     DCHECK_EQ(register_info_table_[i]->register_value().index(),
               RegisterFromRegisterInfoTableIndex(i).index());
   }
@@ -208,15 +232,17 @@
 
 // override
 Handle<BytecodeArray> BytecodeRegisterOptimizer::ToBytecodeArray(
-    Isolate* isolate, int fixed_register_count, int parameter_count,
+    Isolate* isolate, int register_count, int parameter_count,
     Handle<FixedArray> handler_table) {
   FlushState();
-  return next_stage_->ToBytecodeArray(isolate, fixed_register_count,
+  return next_stage_->ToBytecodeArray(isolate, max_register_index_ + 1,
                                       parameter_count, handler_table);
 }
 
 // override
 void BytecodeRegisterOptimizer::Write(BytecodeNode* node) {
+  // Jumps are handled by WriteJump.
+  DCHECK(!Bytecodes::IsJump(node->bytecode()));
   //
   // Transfers with observable registers as the destination will be
   // immediately materialized so the source position information will
@@ -245,18 +271,16 @@
       break;
   }
 
-  if (Bytecodes::IsJump(node->bytecode()) ||
-      node->bytecode() == Bytecode::kDebugger ||
+  if (node->bytecode() == Bytecode::kDebugger ||
       node->bytecode() == Bytecode::kSuspendGenerator) {
     // All state must be flushed before emitting
-    // - a jump (due to how bytecode offsets for jumps are evaluated),
     // - a call to the debugger (as it can manipulate locals and parameters),
     // - a generator suspend (as this involves saving all registers).
     FlushState();
   }
 
   PrepareOperands(node);
-  WriteToNextStage(node);
+  next_stage_->Write(node);
 }
 
 // override
@@ -295,7 +319,7 @@
       // own equivalence set.
       RegisterInfo* equivalent;
       while ((equivalent = reg_info->GetEquivalent()) != reg_info) {
-        if (!equivalent->materialized()) {
+        if (equivalent->allocated() && !equivalent->materialized()) {
           OutputRegisterTransfer(reg_info, equivalent);
         }
         equivalent->MoveToNewEquivalenceSet(NextEquivalenceId(), true);
@@ -306,38 +330,29 @@
   flush_required_ = false;
 }
 
-void BytecodeRegisterOptimizer::WriteToNextStage(BytecodeNode* node) const {
-  next_stage_->Write(node);
-}
-
-void BytecodeRegisterOptimizer::WriteToNextStage(
-    BytecodeNode* node, const BytecodeSourceInfo& source_info) const {
-  if (source_info.is_valid()) {
-    node->source_info().Clone(source_info);
-  }
-  next_stage_->Write(node);
-}
-
 void BytecodeRegisterOptimizer::OutputRegisterTransfer(
     RegisterInfo* input_info, RegisterInfo* output_info,
-    const BytecodeSourceInfo& source_info) {
+    BytecodeSourceInfo* source_info) {
   Register input = input_info->register_value();
   Register output = output_info->register_value();
   DCHECK_NE(input.index(), output.index());
 
   if (input == accumulator_) {
     uint32_t operand = static_cast<uint32_t>(output.ToOperand());
-    BytecodeNode node(Bytecode::kStar, operand);
-    WriteToNextStage(&node, source_info);
+    BytecodeNode node(Bytecode::kStar, operand, source_info);
+    next_stage_->Write(&node);
   } else if (output == accumulator_) {
     uint32_t operand = static_cast<uint32_t>(input.ToOperand());
-    BytecodeNode node(Bytecode::kLdar, operand);
-    WriteToNextStage(&node, source_info);
+    BytecodeNode node(Bytecode::kLdar, operand, source_info);
+    next_stage_->Write(&node);
   } else {
     uint32_t operand0 = static_cast<uint32_t>(input.ToOperand());
     uint32_t operand1 = static_cast<uint32_t>(output.ToOperand());
-    BytecodeNode node(Bytecode::kMov, operand0, operand1);
-    WriteToNextStage(&node, source_info);
+    BytecodeNode node(Bytecode::kMov, operand0, operand1, source_info);
+    next_stage_->Write(&node);
+  }
+  if (output != accumulator_) {
+    max_register_index_ = std::max(max_register_index_, output.index());
   }
   output_info->set_materialized(true);
 }
@@ -389,7 +404,7 @@
 
 void BytecodeRegisterOptimizer::RegisterTransfer(
     RegisterInfo* input_info, RegisterInfo* output_info,
-    const BytecodeSourceInfo& source_info) {
+    BytecodeSourceInfo* source_info) {
   // Materialize an alternate in the equivalence set that
   // |output_info| is leaving.
   if (output_info->materialized()) {
@@ -408,42 +423,48 @@
     output_info->set_materialized(false);
     RegisterInfo* materialized_info = input_info->GetMaterializedEquivalent();
     OutputRegisterTransfer(materialized_info, output_info, source_info);
-  } else if (source_info.is_valid()) {
+  } else if (source_info->is_valid()) {
     // Emit a placeholder nop to maintain source position info.
     EmitNopForSourceInfo(source_info);
   }
+
+  bool input_is_observable = RegisterIsObservable(input_info->register_value());
+  if (input_is_observable) {
+    // If input is observable by the debugger, mark all other temporaries
+    // registers as unmaterialized so that this register is used in preference.
+    input_info->MarkTemporariesAsUnmaterialized(temporary_base_);
+  }
 }
 
 void BytecodeRegisterOptimizer::EmitNopForSourceInfo(
-    const BytecodeSourceInfo& source_info) const {
-  DCHECK(source_info.is_valid());
-  BytecodeNode nop(Bytecode::kNop);
-  nop.source_info().Clone(source_info);
-  WriteToNextStage(&nop);
+    BytecodeSourceInfo* source_info) const {
+  DCHECK(source_info->is_valid());
+  BytecodeNode nop(Bytecode::kNop, source_info);
+  next_stage_->Write(&nop);
 }
 
-void BytecodeRegisterOptimizer::DoLdar(const BytecodeNode* const node) {
+void BytecodeRegisterOptimizer::DoLdar(BytecodeNode* node) {
   Register input = GetRegisterInputOperand(
       0, node->bytecode(), node->operands(), node->operand_count());
   RegisterInfo* input_info = GetRegisterInfo(input);
-  RegisterTransfer(input_info, accumulator_info_, node->source_info());
+  RegisterTransfer(input_info, accumulator_info_, node->source_info_ptr());
 }
 
-void BytecodeRegisterOptimizer::DoMov(const BytecodeNode* const node) {
+void BytecodeRegisterOptimizer::DoMov(BytecodeNode* node) {
   Register input = GetRegisterInputOperand(
       0, node->bytecode(), node->operands(), node->operand_count());
   RegisterInfo* input_info = GetRegisterInfo(input);
   Register output = GetRegisterOutputOperand(
       1, node->bytecode(), node->operands(), node->operand_count());
-  RegisterInfo* output_info = GetOrCreateRegisterInfo(output);
-  RegisterTransfer(input_info, output_info, node->source_info());
+  RegisterInfo* output_info = GetRegisterInfo(output);
+  RegisterTransfer(input_info, output_info, node->source_info_ptr());
 }
 
-void BytecodeRegisterOptimizer::DoStar(const BytecodeNode* const node) {
+void BytecodeRegisterOptimizer::DoStar(BytecodeNode* node) {
   Register output = GetRegisterOutputOperand(
       0, node->bytecode(), node->operands(), node->operand_count());
-  RegisterInfo* output_info = GetOrCreateRegisterInfo(output);
-  RegisterTransfer(accumulator_info_, output_info, node->source_info());
+  RegisterInfo* output_info = GetRegisterInfo(output);
+  RegisterTransfer(accumulator_info_, output_info, node->source_info_ptr());
 }
 
 void BytecodeRegisterOptimizer::PrepareRegisterOutputOperand(
@@ -451,6 +472,8 @@
   if (reg_info->materialized()) {
     CreateMaterializedEquivalent(reg_info);
   }
+  max_register_index_ =
+      std::max(max_register_index_, reg_info->register_value().index());
   reg_info->MoveToNewEquivalenceSet(NextEquivalenceId(), true);
 }
 
@@ -458,7 +481,7 @@
     Register start, int count) {
   for (int i = 0; i < count; ++i) {
     Register reg(start.index() + i);
-    RegisterInfo* reg_info = GetOrCreateRegisterInfo(reg);
+    RegisterInfo* reg_info = GetRegisterInfo(reg);
     PrepareRegisterOutputOperand(reg_info);
   }
 }
@@ -468,7 +491,7 @@
   // For a temporary register, RegInfo state may need be created. For
   // locals and parameters, the RegInfo state is created in the
   // BytecodeRegisterOptimizer constructor.
-  RegisterInfo* reg_info = GetOrCreateRegisterInfo(reg);
+  RegisterInfo* reg_info = GetRegisterInfo(reg);
   if (reg_info->materialized()) {
     return reg;
   } else {
@@ -481,8 +504,8 @@
 void BytecodeRegisterOptimizer::PrepareRegisterInputOperand(
     BytecodeNode* const node, Register reg, int operand_index) {
   Register equivalent = GetEquivalentRegisterForInputOperand(reg);
-  node->operands()[operand_index] =
-      static_cast<uint32_t>(equivalent.ToOperand());
+  node->UpdateOperand(operand_index,
+                      static_cast<uint32_t>(equivalent.ToOperand()));
 }
 
 void BytecodeRegisterOptimizer::PrepareRegisterRangeInputOperand(Register start,
@@ -510,9 +533,9 @@
       Bytecodes::GetOperandTypes(node->bytecode());
   for (int i = 0; i < operand_count; ++i) {
     int count;
-    // operand_types is terminated by OperandType::kNone so this does not
-    // go out of bounds.
-    if (operand_types[i + 1] == OperandType::kRegCount) {
+    if (operand_types[i] == OperandType::kRegList) {
+      DCHECK_LT(i, operand_count - 1);
+      DCHECK(operand_types[i + 1] == OperandType::kRegCount);
       count = static_cast<int>(operands[i + 1]);
     } else {
       count = Bytecodes::GetNumberOfRegistersRepresentedBy(operand_types[i]);
@@ -577,8 +600,8 @@
 BytecodeRegisterOptimizer::RegisterInfo*
 BytecodeRegisterOptimizer::GetRegisterInfo(Register reg) {
   size_t index = GetRegisterInfoTableIndex(reg);
-  return (index < register_info_table_.size()) ? register_info_table_[index]
-                                               : nullptr;
+  DCHECK_LT(index, register_info_table_.size());
+  return register_info_table_[index];
 }
 
 BytecodeRegisterOptimizer::RegisterInfo*
@@ -599,26 +622,37 @@
 void BytecodeRegisterOptimizer::GrowRegisterMap(Register reg) {
   DCHECK(RegisterIsTemporary(reg));
   size_t index = GetRegisterInfoTableIndex(reg);
-  DCHECK_GE(index, register_info_table_.size());
-  size_t new_size = index + 1;
-  size_t old_size = register_info_table_.size();
-  register_info_table_.resize(new_size);
-  for (size_t i = old_size; i < new_size; ++i) {
-    register_info_table_[i] = new (zone()) RegisterInfo(
-        RegisterFromRegisterInfoTableIndex(i), NextEquivalenceId(), false);
+  if (index >= register_info_table_.size()) {
+    size_t new_size = index + 1;
+    size_t old_size = register_info_table_.size();
+    register_info_table_.resize(new_size);
+    for (size_t i = old_size; i < new_size; ++i) {
+      register_info_table_[i] =
+          new (zone()) RegisterInfo(RegisterFromRegisterInfoTableIndex(i),
+                                    NextEquivalenceId(), false, false);
+    }
   }
 }
 
-void BytecodeRegisterOptimizer::TemporaryRegisterFreeEvent(Register reg) {
-  RegisterInfo* info = GetRegisterInfo(reg);
-  if (info != nullptr) {
-    // If register is materialized and part of equivalence set, make
-    // sure another member of the set holds the value before the
-    // temporary register is removed.
-    if (info->materialized()) {
-      CreateMaterializedEquivalent(info);
+void BytecodeRegisterOptimizer::RegisterAllocateEvent(Register reg) {
+  GetOrCreateRegisterInfo(reg)->set_allocated(true);
+}
+
+void BytecodeRegisterOptimizer::RegisterListAllocateEvent(
+    RegisterList reg_list) {
+  if (reg_list.register_count() != 0) {
+    int first_index = reg_list.first_register().index();
+    GrowRegisterMap(Register(first_index + reg_list.register_count() - 1));
+    for (int i = 0; i < reg_list.register_count(); i++) {
+      GetRegisterInfo(Register(first_index + i))->set_allocated(true);
     }
-    info->MoveToNewEquivalenceSet(kInvalidEquivalenceId, false);
+  }
+}
+
+void BytecodeRegisterOptimizer::RegisterListFreeEvent(RegisterList reg_list) {
+  int first_index = reg_list.first_register().index();
+  for (int i = 0; i < reg_list.register_count(); i++) {
+    GetRegisterInfo(Register(first_index + i))->set_allocated(false);
   }
 }
 
diff --git a/src/interpreter/bytecode-register-optimizer.h b/src/interpreter/bytecode-register-optimizer.h
index fb087b5..eda22e5 100644
--- a/src/interpreter/bytecode-register-optimizer.h
+++ b/src/interpreter/bytecode-register-optimizer.h
@@ -15,13 +15,14 @@
 // registers. The bytecode generator uses temporary registers
 // liberally for correctness and convenience and this stage removes
 // transfers that are not required and preserves correctness.
-class BytecodeRegisterOptimizer final : public BytecodePipelineStage,
-                                        public TemporaryRegisterObserver,
-                                        public ZoneObject {
+class BytecodeRegisterOptimizer final
+    : public BytecodePipelineStage,
+      public BytecodeRegisterAllocator::Observer,
+      public ZoneObject {
  public:
   BytecodeRegisterOptimizer(Zone* zone,
-                            TemporaryRegisterAllocator* register_allocator,
-                            int parameter_count,
+                            BytecodeRegisterAllocator* register_allocator,
+                            int fixed_registers_count, int parameter_count,
                             BytecodePipelineStage* next_stage);
   virtual ~BytecodeRegisterOptimizer() {}
 
@@ -31,7 +32,7 @@
   void BindLabel(BytecodeLabel* label) override;
   void BindLabel(const BytecodeLabel& target, BytecodeLabel* label) override;
   Handle<BytecodeArray> ToBytecodeArray(
-      Isolate* isolate, int fixed_register_count, int parameter_count,
+      Isolate* isolate, int register_count, int parameter_count,
       Handle<FixedArray> handler_table) override;
 
  private:
@@ -39,34 +40,32 @@
 
   class RegisterInfo;
 
-  // TemporaryRegisterObserver interface.
-  void TemporaryRegisterFreeEvent(Register reg) override;
+  // BytecodeRegisterAllocator::Observer interface.
+  void RegisterAllocateEvent(Register reg) override;
+  void RegisterListAllocateEvent(RegisterList reg_list) override;
+  void RegisterListFreeEvent(RegisterList reg) override;
 
   // Helpers for BytecodePipelineStage interface.
   void FlushState();
-  void WriteToNextStage(BytecodeNode* node) const;
-  void WriteToNextStage(BytecodeNode* node,
-                        const BytecodeSourceInfo& output_info) const;
 
   // Update internal state for register transfer from |input| to
   // |output| using |source_info| as source position information if
   // any bytecodes are emitted due to transfer.
   void RegisterTransfer(RegisterInfo* input, RegisterInfo* output,
-                        const BytecodeSourceInfo& source_info);
+                        BytecodeSourceInfo* source_info);
 
   // Emit a register transfer bytecode from |input| to |output|.
-  void OutputRegisterTransfer(
-      RegisterInfo* input, RegisterInfo* output,
-      const BytecodeSourceInfo& source_info = BytecodeSourceInfo());
+  void OutputRegisterTransfer(RegisterInfo* input, RegisterInfo* output,
+                              BytecodeSourceInfo* source_info = nullptr);
 
   // Emits a Nop to preserve source position information in the
   // bytecode pipeline.
-  void EmitNopForSourceInfo(const BytecodeSourceInfo& source_info) const;
+  void EmitNopForSourceInfo(BytecodeSourceInfo* source_info) const;
 
   // Handlers for bytecode nodes for register to register transfers.
-  void DoLdar(const BytecodeNode* const node);
-  void DoMov(const BytecodeNode* const node);
-  void DoStar(const BytecodeNode* const node);
+  void DoLdar(BytecodeNode* node);
+  void DoMov(BytecodeNode* node);
+  void DoStar(BytecodeNode* node);
 
   // Operand processing methods for bytecodes other than those
   // performing register to register transfers.
@@ -133,6 +132,7 @@
   const Register accumulator_;
   RegisterInfo* accumulator_info_;
   const Register temporary_base_;
+  int max_register_index_;
 
   // Direct mapping to register info.
   ZoneVector<RegisterInfo*> register_info_table_;
diff --git a/src/interpreter/bytecode-register.cc b/src/interpreter/bytecode-register.cc
index 31e3b90..1ce512b 100644
--- a/src/interpreter/bytecode-register.cc
+++ b/src/interpreter/bytecode-register.cc
@@ -121,7 +121,7 @@
   return true;
 }
 
-std::string Register::ToString(int parameter_count) {
+std::string Register::ToString(int parameter_count) const {
   if (is_current_context()) {
     return std::string("<context>");
   } else if (is_function_closure()) {
diff --git a/src/interpreter/bytecode-register.h b/src/interpreter/bytecode-register.h
index b698da6..d698d40 100644
--- a/src/interpreter/bytecode-register.h
+++ b/src/interpreter/bytecode-register.h
@@ -66,7 +66,7 @@
                             Register reg4 = Register(),
                             Register reg5 = Register());
 
-  std::string ToString(int parameter_count);
+  std::string ToString(int parameter_count) const;
 
   bool operator==(const Register& other) const {
     return index() == other.index();
@@ -98,6 +98,40 @@
   int index_;
 };
 
+class RegisterList {
+ public:
+  RegisterList() : first_reg_index_(Register().index()), register_count_(0) {}
+  RegisterList(int first_reg_index, int register_count)
+      : first_reg_index_(first_reg_index), register_count_(register_count) {}
+
+  // Returns a new RegisterList which is a truncated version of this list, with
+  // |count| registers.
+  const RegisterList Truncate(int new_count) {
+    DCHECK_GE(new_count, 0);
+    DCHECK_LT(new_count, register_count_);
+    return RegisterList(first_reg_index_, new_count);
+  }
+
+  const Register operator[](size_t i) const {
+    DCHECK_LT(static_cast<int>(i), register_count_);
+    return Register(first_reg_index_ + static_cast<int>(i));
+  }
+
+  const Register first_register() const {
+    return (register_count() == 0) ? Register(0) : (*this)[0];
+  }
+
+  const Register last_register() const {
+    return (register_count() == 0) ? Register(0) : (*this)[register_count_ - 1];
+  }
+
+  int register_count() const { return register_count_; }
+
+ private:
+  int first_reg_index_;
+  int register_count_;
+};
+
 }  // namespace interpreter
 }  // namespace internal
 }  // namespace v8
diff --git a/src/interpreter/bytecode-traits.h b/src/interpreter/bytecode-traits.h
index 672a687..f71598c 100644
--- a/src/interpreter/bytecode-traits.h
+++ b/src/interpreter/bytecode-traits.h
@@ -5,7 +5,7 @@
 #ifndef V8_INTERPRETER_BYTECODE_TRAITS_H_
 #define V8_INTERPRETER_BYTECODE_TRAITS_H_
 
-#include "src/interpreter/bytecodes.h"
+#include "src/interpreter/bytecode-operands.h"
 
 namespace v8 {
 namespace internal {
@@ -65,208 +65,88 @@
   static const OperandSize kOperandSize = static_cast<OperandSize>(kSize);
 };
 
-template <OperandType>
-struct RegisterOperandTraits {
-  static const int kIsRegisterOperand = 0;
+template <int... values>
+struct SumHelper;
+template <int value>
+struct SumHelper<value> {
+  static const int kValue = value;
+};
+template <int value, int... values>
+struct SumHelper<value, values...> {
+  static const int kValue = value + SumHelper<values...>::kValue;
 };
 
-#define DECLARE_REGISTER_OPERAND(Name, _)              \
-  template <>                                          \
-  struct RegisterOperandTraits<OperandType::k##Name> { \
-    static const int kIsRegisterOperand = 1;           \
-  };
-REGISTER_OPERAND_TYPE_LIST(DECLARE_REGISTER_OPERAND)
-#undef DECLARE_REGISTER_OPERAND
-
-template <AccumulatorUse, OperandType...>
-struct BytecodeTraits {};
-
-template <AccumulatorUse accumulator_use, OperandType operand_0,
-          OperandType operand_1, OperandType operand_2, OperandType operand_3>
-struct BytecodeTraits<accumulator_use, operand_0, operand_1, operand_2,
-                      operand_3> {
-  static const OperandType* GetOperandTypes() {
-    static const OperandType operand_types[] = {operand_0, operand_1, operand_2,
-                                                operand_3, OperandType::kNone};
-    return operand_types;
-  }
-
-  static const OperandTypeInfo* GetOperandTypeInfos() {
-    static const OperandTypeInfo operand_type_infos[] = {
-        OperandTraits<operand_0>::kOperandTypeInfo,
-        OperandTraits<operand_1>::kOperandTypeInfo,
-        OperandTraits<operand_2>::kOperandTypeInfo,
-        OperandTraits<operand_3>::kOperandTypeInfo, OperandTypeInfo::kNone};
-    return operand_type_infos;
-  }
-
-  template <OperandType ot>
-  static inline bool HasAnyOperandsOfType() {
-    return operand_0 == ot || operand_1 == ot || operand_2 == ot ||
-           operand_3 == ot;
-  }
-
-  static inline bool IsScalable() {
-    return (OperandTraits<operand_0>::TypeInfoTraits::kIsScalable |
-            OperandTraits<operand_1>::TypeInfoTraits::kIsScalable |
-            OperandTraits<operand_2>::TypeInfoTraits::kIsScalable |
-            OperandTraits<operand_3>::TypeInfoTraits::kIsScalable);
-  }
-
+template <AccumulatorUse accumulator_use, OperandType... operands>
+struct BytecodeTraits {
+  static const OperandType kOperandTypes[];
+  static const OperandTypeInfo kOperandTypeInfos[];
+  static const OperandSize kSingleScaleOperandSizes[];
+  static const OperandSize kDoubleScaleOperandSizes[];
+  static const OperandSize kQuadrupleScaleOperandSizes[];
+  static const int kSingleScaleSize = SumHelper<
+      1, OperandScaler<operands, OperandScale::kSingle>::kSize...>::kValue;
+  static const int kDoubleScaleSize = SumHelper<
+      1, OperandScaler<operands, OperandScale::kDouble>::kSize...>::kValue;
+  static const int kQuadrupleScaleSize = SumHelper<
+      1, OperandScaler<operands, OperandScale::kQuadruple>::kSize...>::kValue;
   static const AccumulatorUse kAccumulatorUse = accumulator_use;
-  static const int kOperandCount = 4;
-  static const int kRegisterOperandCount =
-      RegisterOperandTraits<operand_0>::kIsRegisterOperand +
-      RegisterOperandTraits<operand_1>::kIsRegisterOperand +
-      RegisterOperandTraits<operand_2>::kIsRegisterOperand +
-      RegisterOperandTraits<operand_3>::kIsRegisterOperand;
+  static const int kOperandCount = sizeof...(operands);
 };
 
-template <AccumulatorUse accumulator_use, OperandType operand_0,
-          OperandType operand_1, OperandType operand_2>
-struct BytecodeTraits<accumulator_use, operand_0, operand_1, operand_2> {
-  static const OperandType* GetOperandTypes() {
-    static const OperandType operand_types[] = {operand_0, operand_1, operand_2,
-                                                OperandType::kNone};
-    return operand_types;
-  }
-
-  static const OperandTypeInfo* GetOperandTypeInfos() {
-    static const OperandTypeInfo operand_type_infos[] = {
-        OperandTraits<operand_0>::kOperandTypeInfo,
-        OperandTraits<operand_1>::kOperandTypeInfo,
-        OperandTraits<operand_2>::kOperandTypeInfo, OperandTypeInfo::kNone};
-    return operand_type_infos;
-  }
-
-  template <OperandType ot>
-  static inline bool HasAnyOperandsOfType() {
-    return operand_0 == ot || operand_1 == ot || operand_2 == ot;
-  }
-
-  static inline bool IsScalable() {
-    return (OperandTraits<operand_0>::TypeInfoTraits::kIsScalable |
-            OperandTraits<operand_1>::TypeInfoTraits::kIsScalable |
-            OperandTraits<operand_2>::TypeInfoTraits::kIsScalable);
-  }
-
-  static const AccumulatorUse kAccumulatorUse = accumulator_use;
-  static const int kOperandCount = 3;
-  static const int kRegisterOperandCount =
-      RegisterOperandTraits<operand_0>::kIsRegisterOperand +
-      RegisterOperandTraits<operand_1>::kIsRegisterOperand +
-      RegisterOperandTraits<operand_2>::kIsRegisterOperand;
-};
-
-template <AccumulatorUse accumulator_use, OperandType operand_0,
-          OperandType operand_1>
-struct BytecodeTraits<accumulator_use, operand_0, operand_1> {
-  static const OperandType* GetOperandTypes() {
-    static const OperandType operand_types[] = {operand_0, operand_1,
-                                                OperandType::kNone};
-    return operand_types;
-  }
-
-  static const OperandTypeInfo* GetOperandTypeInfos() {
-    static const OperandTypeInfo operand_type_infos[] = {
-        OperandTraits<operand_0>::kOperandTypeInfo,
-        OperandTraits<operand_1>::kOperandTypeInfo, OperandTypeInfo::kNone};
-    return operand_type_infos;
-  }
-
-  template <OperandType ot>
-  static inline bool HasAnyOperandsOfType() {
-    return operand_0 == ot || operand_1 == ot;
-  }
-
-  static inline bool IsScalable() {
-    return (OperandTraits<operand_0>::TypeInfoTraits::kIsScalable |
-            OperandTraits<operand_1>::TypeInfoTraits::kIsScalable);
-  }
-
-  static const AccumulatorUse kAccumulatorUse = accumulator_use;
-  static const int kOperandCount = 2;
-  static const int kRegisterOperandCount =
-      RegisterOperandTraits<operand_0>::kIsRegisterOperand +
-      RegisterOperandTraits<operand_1>::kIsRegisterOperand;
-};
-
-template <AccumulatorUse accumulator_use, OperandType operand_0>
-struct BytecodeTraits<accumulator_use, operand_0> {
-  static const OperandType* GetOperandTypes() {
-    static const OperandType operand_types[] = {operand_0, OperandType::kNone};
-    return operand_types;
-  }
-
-  static const OperandTypeInfo* GetOperandTypeInfos() {
-    static const OperandTypeInfo operand_type_infos[] = {
-        OperandTraits<operand_0>::kOperandTypeInfo, OperandTypeInfo::kNone};
-    return operand_type_infos;
-  }
-
-  template <OperandType ot>
-  static inline bool HasAnyOperandsOfType() {
-    return operand_0 == ot;
-  }
-
-  static inline bool IsScalable() {
-    return OperandTraits<operand_0>::TypeInfoTraits::kIsScalable;
-  }
-
-  static const AccumulatorUse kAccumulatorUse = accumulator_use;
-  static const int kOperandCount = 1;
-  static const int kRegisterOperandCount =
-      RegisterOperandTraits<operand_0>::kIsRegisterOperand;
-};
+template <AccumulatorUse accumulator_use, OperandType... operands>
+STATIC_CONST_MEMBER_DEFINITION const OperandType
+    BytecodeTraits<accumulator_use, operands...>::kOperandTypes[] = {
+        operands...};
+template <AccumulatorUse accumulator_use, OperandType... operands>
+STATIC_CONST_MEMBER_DEFINITION const OperandTypeInfo
+    BytecodeTraits<accumulator_use, operands...>::kOperandTypeInfos[] = {
+        OperandTraits<operands>::kOperandTypeInfo...};
+template <AccumulatorUse accumulator_use, OperandType... operands>
+STATIC_CONST_MEMBER_DEFINITION const OperandSize
+    BytecodeTraits<accumulator_use, operands...>::kSingleScaleOperandSizes[] = {
+        OperandScaler<operands, OperandScale::kSingle>::kOperandSize...};
+template <AccumulatorUse accumulator_use, OperandType... operands>
+STATIC_CONST_MEMBER_DEFINITION const OperandSize
+    BytecodeTraits<accumulator_use, operands...>::kDoubleScaleOperandSizes[] = {
+        OperandScaler<operands, OperandScale::kDouble>::kOperandSize...};
+template <AccumulatorUse accumulator_use, OperandType... operands>
+STATIC_CONST_MEMBER_DEFINITION const OperandSize BytecodeTraits<
+    accumulator_use, operands...>::kQuadrupleScaleOperandSizes[] = {
+    OperandScaler<operands, OperandScale::kQuadruple>::kOperandSize...};
 
 template <AccumulatorUse accumulator_use>
 struct BytecodeTraits<accumulator_use> {
-  static const OperandType* GetOperandTypes() {
-    static const OperandType operand_types[] = {OperandType::kNone};
-    return operand_types;
-  }
-
-  static const OperandTypeInfo* GetOperandTypeInfos() {
-    static const OperandTypeInfo operand_type_infos[] = {
-        OperandTypeInfo::kNone};
-    return operand_type_infos;
-  }
-
-  template <OperandType ot>
-  static inline bool HasAnyOperandsOfType() {
-    return false;
-  }
-
-  static inline bool IsScalable() { return false; }
-
+  static const OperandType kOperandTypes[];
+  static const OperandTypeInfo kOperandTypeInfos[];
+  static const OperandSize kSingleScaleOperandSizes[];
+  static const OperandSize kDoubleScaleOperandSizes[];
+  static const OperandSize kQuadrupleScaleOperandSizes[];
+  static const int kSingleScaleSize = 1;
+  static const int kDoubleScaleSize = 1;
+  static const int kQuadrupleScaleSize = 1;
   static const AccumulatorUse kAccumulatorUse = accumulator_use;
   static const int kOperandCount = 0;
-  static const int kRegisterOperandCount = 0;
 };
 
-static OperandSize ScaledOperandSize(OperandType operand_type,
-                                     OperandScale operand_scale) {
-  STATIC_ASSERT(static_cast<int>(OperandScale::kQuadruple) == 4 &&
-                OperandScale::kLast == OperandScale::kQuadruple);
-  int index = static_cast<int>(operand_scale) >> 1;
-  switch (operand_type) {
-#define CASE(Name, TypeInfo)                                    \
-  case OperandType::k##Name: {                                  \
-    static const OperandSize kOperandSizes[] = {                \
-        OperandScaler<OperandType::k##Name,                     \
-                      OperandScale::kSingle>::kOperandSize,     \
-        OperandScaler<OperandType::k##Name,                     \
-                      OperandScale::kDouble>::kOperandSize,     \
-        OperandScaler<OperandType::k##Name,                     \
-                      OperandScale::kQuadruple>::kOperandSize}; \
-    return kOperandSizes[index];                                \
-  }
-    OPERAND_TYPE_LIST(CASE)
-#undef CASE
-  }
-  UNREACHABLE();
-  return OperandSize::kNone;
-}
+template <AccumulatorUse accumulator_use>
+STATIC_CONST_MEMBER_DEFINITION const OperandType
+    BytecodeTraits<accumulator_use>::kOperandTypes[] = {OperandType::kNone};
+template <AccumulatorUse accumulator_use>
+STATIC_CONST_MEMBER_DEFINITION const OperandTypeInfo
+    BytecodeTraits<accumulator_use>::kOperandTypeInfos[] = {
+        OperandTypeInfo::kNone};
+template <AccumulatorUse accumulator_use>
+STATIC_CONST_MEMBER_DEFINITION const OperandSize
+    BytecodeTraits<accumulator_use>::kSingleScaleOperandSizes[] = {
+        OperandSize::kNone};
+template <AccumulatorUse accumulator_use>
+STATIC_CONST_MEMBER_DEFINITION const OperandSize
+    BytecodeTraits<accumulator_use>::kDoubleScaleOperandSizes[] = {
+        OperandSize::kNone};
+template <AccumulatorUse accumulator_use>
+STATIC_CONST_MEMBER_DEFINITION const OperandSize
+    BytecodeTraits<accumulator_use>::kQuadrupleScaleOperandSizes[] = {
+        OperandSize::kNone};
 
 }  // namespace interpreter
 }  // namespace internal
diff --git a/src/interpreter/bytecodes.cc b/src/interpreter/bytecodes.cc
index 09bcd22..c58f468 100644
--- a/src/interpreter/bytecodes.cc
+++ b/src/interpreter/bytecodes.cc
@@ -7,14 +7,55 @@
 #include <iomanip>
 
 #include "src/base/bits.h"
-#include "src/globals.h"
 #include "src/interpreter/bytecode-traits.h"
 
 namespace v8 {
 namespace internal {
 namespace interpreter {
 
-STATIC_CONST_MEMBER_DEFINITION const int Bytecodes::kMaxOperands;
+// clang-format off
+const OperandType* const Bytecodes::kOperandTypes[] = {
+#define ENTRY(Name, ...) BytecodeTraits<__VA_ARGS__>::kOperandTypes,
+  BYTECODE_LIST(ENTRY)
+#undef ENTRY
+};
+
+const OperandTypeInfo* const Bytecodes::kOperandTypeInfos[] = {
+#define ENTRY(Name, ...) BytecodeTraits<__VA_ARGS__>::kOperandTypeInfos,
+  BYTECODE_LIST(ENTRY)
+#undef ENTRY
+};
+
+const int Bytecodes::kOperandCount[] = {
+#define ENTRY(Name, ...) BytecodeTraits<__VA_ARGS__>::kOperandCount,
+  BYTECODE_LIST(ENTRY)
+#undef ENTRY
+};
+
+const AccumulatorUse Bytecodes::kAccumulatorUse[] = {
+#define ENTRY(Name, ...) BytecodeTraits<__VA_ARGS__>::kAccumulatorUse,
+  BYTECODE_LIST(ENTRY)
+#undef ENTRY
+};
+
+const int Bytecodes::kBytecodeSizes[][3] = {
+#define ENTRY(Name, ...)                            \
+  { BytecodeTraits<__VA_ARGS__>::kSingleScaleSize,  \
+    BytecodeTraits<__VA_ARGS__>::kDoubleScaleSize,  \
+    BytecodeTraits<__VA_ARGS__>::kQuadrupleScaleSize },
+  BYTECODE_LIST(ENTRY)
+#undef ENTRY
+};
+
+const OperandSize* const Bytecodes::kOperandSizes[][3] = {
+#define ENTRY(Name, ...)                                    \
+  { BytecodeTraits<__VA_ARGS__>::kSingleScaleOperandSizes,  \
+    BytecodeTraits<__VA_ARGS__>::kDoubleScaleOperandSizes,  \
+    BytecodeTraits<__VA_ARGS__>::kQuadrupleScaleOperandSizes },
+  BYTECODE_LIST(ENTRY)
+#undef ENTRY
+};
+// clang-format on
 
 // static
 const char* Bytecodes::ToString(Bytecode bytecode) {
@@ -44,77 +85,6 @@
 }
 
 // static
-const char* Bytecodes::AccumulatorUseToString(AccumulatorUse accumulator_use) {
-  switch (accumulator_use) {
-    case AccumulatorUse::kNone:
-      return "None";
-    case AccumulatorUse::kRead:
-      return "Read";
-    case AccumulatorUse::kWrite:
-      return "Write";
-    case AccumulatorUse::kReadWrite:
-      return "ReadWrite";
-  }
-  UNREACHABLE();
-  return "";
-}
-
-// static
-const char* Bytecodes::OperandTypeToString(OperandType operand_type) {
-  switch (operand_type) {
-#define CASE(Name, _)        \
-  case OperandType::k##Name: \
-    return #Name;
-    OPERAND_TYPE_LIST(CASE)
-#undef CASE
-  }
-  UNREACHABLE();
-  return "";
-}
-
-// static
-const char* Bytecodes::OperandScaleToString(OperandScale operand_scale) {
-  switch (operand_scale) {
-#define CASE(Name, _)         \
-  case OperandScale::k##Name: \
-    return #Name;
-    OPERAND_SCALE_LIST(CASE)
-#undef CASE
-  }
-  UNREACHABLE();
-  return "";
-}
-
-// static
-const char* Bytecodes::OperandSizeToString(OperandSize operand_size) {
-  switch (operand_size) {
-    case OperandSize::kNone:
-      return "None";
-    case OperandSize::kByte:
-      return "Byte";
-    case OperandSize::kShort:
-      return "Short";
-    case OperandSize::kQuad:
-      return "Quad";
-  }
-  UNREACHABLE();
-  return "";
-}
-
-// static
-uint8_t Bytecodes::ToByte(Bytecode bytecode) {
-  DCHECK_LE(bytecode, Bytecode::kLast);
-  return static_cast<uint8_t>(bytecode);
-}
-
-// static
-Bytecode Bytecodes::FromByte(uint8_t value) {
-  Bytecode bytecode = static_cast<Bytecode>(value);
-  DCHECK(bytecode <= Bytecode::kLast);
-  return bytecode;
-}
-
-// static
 Bytecode Bytecodes::GetDebugBreak(Bytecode bytecode) {
   DCHECK(!IsDebugBreak(bytecode));
   if (bytecode == Bytecode::kWide) {
@@ -124,7 +94,7 @@
     return Bytecode::kDebugBreakExtraWide;
   }
   int bytecode_size = Size(bytecode, OperandScale::kSingle);
-#define RETURN_IF_DEBUG_BREAK_SIZE_MATCHES(Name, ...)                    \
+#define RETURN_IF_DEBUG_BREAK_SIZE_MATCHES(Name)                         \
   if (bytecode_size == Size(Bytecode::k##Name, OperandScale::kSingle)) { \
     return Bytecode::k##Name;                                            \
   }
@@ -135,224 +105,6 @@
 }
 
 // static
-int Bytecodes::Size(Bytecode bytecode, OperandScale operand_scale) {
-  int size = 1;
-  for (int i = 0; i < NumberOfOperands(bytecode); i++) {
-    OperandSize operand_size = GetOperandSize(bytecode, i, operand_scale);
-    int delta = static_cast<int>(operand_size);
-    DCHECK(base::bits::IsPowerOfTwo32(static_cast<uint32_t>(delta)));
-    size += delta;
-  }
-  return size;
-}
-
-// static
-size_t Bytecodes::ReturnCount(Bytecode bytecode) {
-  return bytecode == Bytecode::kReturn ? 1 : 0;
-}
-
-// static
-int Bytecodes::NumberOfOperands(Bytecode bytecode) {
-  DCHECK(bytecode <= Bytecode::kLast);
-  switch (bytecode) {
-#define CASE(Name, ...)   \
-  case Bytecode::k##Name: \
-    return BytecodeTraits<__VA_ARGS__>::kOperandCount;
-    BYTECODE_LIST(CASE)
-#undef CASE
-  }
-  UNREACHABLE();
-  return 0;
-}
-
-// static
-int Bytecodes::NumberOfRegisterOperands(Bytecode bytecode) {
-  DCHECK(bytecode <= Bytecode::kLast);
-  switch (bytecode) {
-#define CASE(Name, ...)                              \
-  case Bytecode::k##Name:                            \
-    typedef BytecodeTraits<__VA_ARGS__> Name##Trait; \
-    return Name##Trait::kRegisterOperandCount;
-    BYTECODE_LIST(CASE)
-#undef CASE
-  }
-  UNREACHABLE();
-  return false;
-}
-
-// static
-Bytecode Bytecodes::OperandScaleToPrefixBytecode(OperandScale operand_scale) {
-  switch (operand_scale) {
-    case OperandScale::kQuadruple:
-      return Bytecode::kExtraWide;
-    case OperandScale::kDouble:
-      return Bytecode::kWide;
-    default:
-      UNREACHABLE();
-      return Bytecode::kIllegal;
-  }
-}
-
-// static
-bool Bytecodes::OperandScaleRequiresPrefixBytecode(OperandScale operand_scale) {
-  return operand_scale != OperandScale::kSingle;
-}
-
-// static
-OperandScale Bytecodes::PrefixBytecodeToOperandScale(Bytecode bytecode) {
-  switch (bytecode) {
-    case Bytecode::kExtraWide:
-    case Bytecode::kDebugBreakExtraWide:
-      return OperandScale::kQuadruple;
-    case Bytecode::kWide:
-    case Bytecode::kDebugBreakWide:
-      return OperandScale::kDouble;
-    default:
-      UNREACHABLE();
-      return OperandScale::kSingle;
-  }
-}
-
-// static
-AccumulatorUse Bytecodes::GetAccumulatorUse(Bytecode bytecode) {
-  DCHECK(bytecode <= Bytecode::kLast);
-  switch (bytecode) {
-#define CASE(Name, ...)   \
-  case Bytecode::k##Name: \
-    return BytecodeTraits<__VA_ARGS__>::kAccumulatorUse;
-    BYTECODE_LIST(CASE)
-#undef CASE
-  }
-  UNREACHABLE();
-  return AccumulatorUse::kNone;
-}
-
-// static
-bool Bytecodes::ReadsAccumulator(Bytecode bytecode) {
-  return (GetAccumulatorUse(bytecode) & AccumulatorUse::kRead) ==
-         AccumulatorUse::kRead;
-}
-
-// static
-bool Bytecodes::WritesAccumulator(Bytecode bytecode) {
-  return (GetAccumulatorUse(bytecode) & AccumulatorUse::kWrite) ==
-         AccumulatorUse::kWrite;
-}
-
-// static
-bool Bytecodes::WritesBooleanToAccumulator(Bytecode bytecode) {
-  switch (bytecode) {
-    case Bytecode::kLdaTrue:
-    case Bytecode::kLdaFalse:
-    case Bytecode::kToBooleanLogicalNot:
-    case Bytecode::kLogicalNot:
-    case Bytecode::kTestEqual:
-    case Bytecode::kTestNotEqual:
-    case Bytecode::kTestEqualStrict:
-    case Bytecode::kTestLessThan:
-    case Bytecode::kTestLessThanOrEqual:
-    case Bytecode::kTestGreaterThan:
-    case Bytecode::kTestGreaterThanOrEqual:
-    case Bytecode::kTestInstanceOf:
-    case Bytecode::kTestIn:
-    case Bytecode::kForInDone:
-      return true;
-    default:
-      return false;
-  }
-}
-
-// static
-bool Bytecodes::IsAccumulatorLoadWithoutEffects(Bytecode bytecode) {
-  switch (bytecode) {
-    case Bytecode::kLdaZero:
-    case Bytecode::kLdaSmi:
-    case Bytecode::kLdaUndefined:
-    case Bytecode::kLdaNull:
-    case Bytecode::kLdaTheHole:
-    case Bytecode::kLdaTrue:
-    case Bytecode::kLdaFalse:
-    case Bytecode::kLdaConstant:
-    case Bytecode::kLdar:
-      return true;
-    default:
-      return false;
-  }
-}
-
-// static
-bool Bytecodes::IsJumpWithoutEffects(Bytecode bytecode) {
-  return IsJump(bytecode) && !IsJumpIfToBoolean(bytecode);
-}
-
-// static
-bool Bytecodes::IsRegisterLoadWithoutEffects(Bytecode bytecode) {
-  switch (bytecode) {
-    case Bytecode::kMov:
-    case Bytecode::kPopContext:
-    case Bytecode::kPushContext:
-    case Bytecode::kStar:
-    case Bytecode::kLdrUndefined:
-      return true;
-    default:
-      return false;
-  }
-}
-
-// static
-bool Bytecodes::IsWithoutExternalSideEffects(Bytecode bytecode) {
-  // These bytecodes only manipulate interpreter frame state and will
-  // never throw.
-  return (IsAccumulatorLoadWithoutEffects(bytecode) ||
-          IsRegisterLoadWithoutEffects(bytecode) ||
-          bytecode == Bytecode::kNop || IsJumpWithoutEffects(bytecode));
-}
-
-// static
-OperandType Bytecodes::GetOperandType(Bytecode bytecode, int i) {
-  DCHECK_LE(bytecode, Bytecode::kLast);
-  DCHECK_LT(i, NumberOfOperands(bytecode));
-  DCHECK_GE(i, 0);
-  return GetOperandTypes(bytecode)[i];
-}
-
-// static
-const OperandType* Bytecodes::GetOperandTypes(Bytecode bytecode) {
-  DCHECK(bytecode <= Bytecode::kLast);
-  switch (bytecode) {
-#define CASE(Name, ...)   \
-  case Bytecode::k##Name: \
-    return BytecodeTraits<__VA_ARGS__>::GetOperandTypes();
-    BYTECODE_LIST(CASE)
-#undef CASE
-  }
-  UNREACHABLE();
-  return nullptr;
-}
-
-// static
-const OperandTypeInfo* Bytecodes::GetOperandTypeInfos(Bytecode bytecode) {
-  DCHECK(bytecode <= Bytecode::kLast);
-  switch (bytecode) {
-#define CASE(Name, ...)   \
-  case Bytecode::k##Name: \
-    return BytecodeTraits<__VA_ARGS__>::GetOperandTypeInfos();
-    BYTECODE_LIST(CASE)
-#undef CASE
-  }
-  UNREACHABLE();
-  return nullptr;
-}
-
-// static
-OperandSize Bytecodes::GetOperandSize(Bytecode bytecode, int i,
-                                      OperandScale operand_scale) {
-  DCHECK_LT(i, NumberOfOperands(bytecode));
-  OperandType operand_type = GetOperandType(bytecode, i);
-  return SizeOfOperand(operand_type, operand_scale);
-}
-
-// static
 int Bytecodes::GetOperandOffset(Bytecode bytecode, int i,
                                 OperandScale operand_scale) {
   DCHECK_LT(i, Bytecodes::NumberOfOperands(bytecode));
@@ -367,67 +119,6 @@
 }
 
 // static
-OperandSize Bytecodes::SizeOfOperand(OperandType operand_type,
-                                     OperandScale operand_scale) {
-  return static_cast<OperandSize>(
-      ScaledOperandSize(operand_type, operand_scale));
-}
-
-// static
-bool Bytecodes::IsConditionalJumpImmediate(Bytecode bytecode) {
-  return bytecode == Bytecode::kJumpIfTrue ||
-         bytecode == Bytecode::kJumpIfFalse ||
-         bytecode == Bytecode::kJumpIfToBooleanTrue ||
-         bytecode == Bytecode::kJumpIfToBooleanFalse ||
-         bytecode == Bytecode::kJumpIfNotHole ||
-         bytecode == Bytecode::kJumpIfNull ||
-         bytecode == Bytecode::kJumpIfUndefined;
-}
-
-// static
-bool Bytecodes::IsConditionalJumpConstant(Bytecode bytecode) {
-  return bytecode == Bytecode::kJumpIfTrueConstant ||
-         bytecode == Bytecode::kJumpIfFalseConstant ||
-         bytecode == Bytecode::kJumpIfToBooleanTrueConstant ||
-         bytecode == Bytecode::kJumpIfToBooleanFalseConstant ||
-         bytecode == Bytecode::kJumpIfNotHoleConstant ||
-         bytecode == Bytecode::kJumpIfNullConstant ||
-         bytecode == Bytecode::kJumpIfUndefinedConstant;
-}
-
-// static
-bool Bytecodes::IsConditionalJump(Bytecode bytecode) {
-  return IsConditionalJumpImmediate(bytecode) ||
-         IsConditionalJumpConstant(bytecode);
-}
-
-
-// static
-bool Bytecodes::IsJumpImmediate(Bytecode bytecode) {
-  return bytecode == Bytecode::kJump || IsConditionalJumpImmediate(bytecode);
-}
-
-
-// static
-bool Bytecodes::IsJumpConstant(Bytecode bytecode) {
-  return bytecode == Bytecode::kJumpConstant ||
-         IsConditionalJumpConstant(bytecode);
-}
-
-// static
-bool Bytecodes::IsJump(Bytecode bytecode) {
-  return IsJumpImmediate(bytecode) || IsJumpConstant(bytecode);
-}
-
-// static
-bool Bytecodes::IsJumpIfToBoolean(Bytecode bytecode) {
-  return bytecode == Bytecode::kJumpIfToBooleanTrue ||
-         bytecode == Bytecode::kJumpIfToBooleanFalse ||
-         bytecode == Bytecode::kJumpIfToBooleanTrueConstant ||
-         bytecode == Bytecode::kJumpIfToBooleanFalseConstant;
-}
-
-// static
 Bytecode Bytecodes::GetJumpWithoutToBoolean(Bytecode bytecode) {
   switch (bytecode) {
     case Bytecode::kJumpIfToBooleanTrue:
@@ -446,19 +137,6 @@
 }
 
 // static
-bool Bytecodes::IsCallOrNew(Bytecode bytecode) {
-  return bytecode == Bytecode::kCall || bytecode == Bytecode::kTailCall ||
-         bytecode == Bytecode::kNew;
-}
-
-// static
-bool Bytecodes::IsCallRuntime(Bytecode bytecode) {
-  return bytecode == Bytecode::kCallRuntime ||
-         bytecode == Bytecode::kCallRuntimeForPair ||
-         bytecode == Bytecode::kInvokeIntrinsic;
-}
-
-// static
 bool Bytecodes::IsDebugBreak(Bytecode bytecode) {
   switch (bytecode) {
 #define CASE(Name, ...) case Bytecode::k##Name:
@@ -472,53 +150,6 @@
 }
 
 // static
-bool Bytecodes::IsLdarOrStar(Bytecode bytecode) {
-  return bytecode == Bytecode::kLdar || bytecode == Bytecode::kStar;
-}
-
-// static
-bool Bytecodes::IsBytecodeWithScalableOperands(Bytecode bytecode) {
-  switch (bytecode) {
-#define CASE(Name, ...)                              \
-  case Bytecode::k##Name:                            \
-    typedef BytecodeTraits<__VA_ARGS__> Name##Trait; \
-    return Name##Trait::IsScalable();
-    BYTECODE_LIST(CASE)
-#undef CASE
-  }
-  UNREACHABLE();
-  return false;
-}
-
-// static
-bool Bytecodes::IsPrefixScalingBytecode(Bytecode bytecode) {
-  switch (bytecode) {
-    case Bytecode::kExtraWide:
-    case Bytecode::kDebugBreakExtraWide:
-    case Bytecode::kWide:
-    case Bytecode::kDebugBreakWide:
-      return true;
-    default:
-      return false;
-  }
-}
-
-// static
-bool Bytecodes::PutsNameInAccumulator(Bytecode bytecode) {
-  return bytecode == Bytecode::kTypeOf;
-}
-
-// static
-bool Bytecodes::IsJumpOrReturn(Bytecode bytecode) {
-  return bytecode == Bytecode::kReturn || IsJump(bytecode);
-}
-
-// static
-bool Bytecodes::IsMaybeRegisterOperandType(OperandType operand_type) {
-  return operand_type == OperandType::kMaybeReg;
-}
-
-// static
 bool Bytecodes::IsRegisterOperandType(OperandType operand_type) {
   switch (operand_type) {
 #define CASE(Name, _)        \
@@ -599,21 +230,11 @@
 }
 
 // static
-int Bytecodes::GetNumberOfRegistersRepresentedBy(OperandType operand_type) {
-  switch (operand_type) {
-    case OperandType::kMaybeReg:
-    case OperandType::kReg:
-    case OperandType::kRegOut:
-      return 1;
-    case OperandType::kRegPair:
-    case OperandType::kRegOutPair:
-      return 2;
-    case OperandType::kRegOutTriple:
-      return 3;
-    default:
-      return 0;
+bool Bytecodes::IsBytecodeWithScalableOperands(Bytecode bytecode) {
+  for (int i = 0; i < NumberOfOperands(bytecode); i++) {
+    if (OperandIsScalable(bytecode, i)) return true;
   }
-  return 0;
+  return false;
 }
 
 // static
@@ -630,25 +251,28 @@
 }
 
 // static
-OperandSize Bytecodes::SizeForSignedOperand(int value) {
-  if (value >= kMinInt8 && value <= kMaxInt8) {
-    return OperandSize::kByte;
-  } else if (value >= kMinInt16 && value <= kMaxInt16) {
-    return OperandSize::kShort;
-  } else {
-    return OperandSize::kQuad;
-  }
-}
-
-// static
-OperandSize Bytecodes::SizeForUnsignedOperand(uint32_t value) {
-  if (value <= kMaxUInt8) {
-    return OperandSize::kByte;
-  } else if (value <= kMaxUInt16) {
-    return OperandSize::kShort;
-  } else {
-    return OperandSize::kQuad;
-  }
+OperandSize Bytecodes::SizeOfOperand(OperandType operand_type,
+                                     OperandScale operand_scale) {
+  DCHECK_LE(operand_type, OperandType::kLast);
+  DCHECK_GE(operand_scale, OperandScale::kSingle);
+  DCHECK_LE(operand_scale, OperandScale::kLast);
+  STATIC_ASSERT(static_cast<int>(OperandScale::kQuadruple) == 4 &&
+                OperandScale::kLast == OperandScale::kQuadruple);
+  int scale_index = static_cast<int>(operand_scale) >> 1;
+  // clang-format off
+  static const OperandSize kOperandSizes[][3] = {
+#define ENTRY(Name, ...)                                \
+  { OperandScaler<OperandType::k##Name,                 \
+                 OperandScale::kSingle>::kOperandSize,  \
+    OperandScaler<OperandType::k##Name,                 \
+                 OperandScale::kDouble>::kOperandSize,  \
+    OperandScaler<OperandType::k##Name,                 \
+                 OperandScale::kQuadruple>::kOperandSize },
+    OPERAND_TYPE_LIST(ENTRY)
+#undef ENTRY
+  };
+  // clang-format on
+  return kOperandSizes[static_cast<size_t>(operand_type)][scale_index];
 }
 
 // static
@@ -662,22 +286,6 @@
   return os << Bytecodes::ToString(bytecode);
 }
 
-std::ostream& operator<<(std::ostream& os, const AccumulatorUse& use) {
-  return os << Bytecodes::AccumulatorUseToString(use);
-}
-
-std::ostream& operator<<(std::ostream& os, const OperandSize& operand_size) {
-  return os << Bytecodes::OperandSizeToString(operand_size);
-}
-
-std::ostream& operator<<(std::ostream& os, const OperandScale& operand_scale) {
-  return os << Bytecodes::OperandScaleToString(operand_scale);
-}
-
-std::ostream& operator<<(std::ostream& os, const OperandType& operand_type) {
-  return os << Bytecodes::OperandTypeToString(operand_type);
-}
-
 }  // namespace interpreter
 }  // namespace internal
 }  // namespace v8
diff --git a/src/interpreter/bytecodes.h b/src/interpreter/bytecodes.h
index 036ae72..6232966 100644
--- a/src/interpreter/bytecodes.h
+++ b/src/interpreter/bytecodes.h
@@ -9,6 +9,9 @@
 #include <iosfwd>
 #include <string>
 
+#include "src/globals.h"
+#include "src/interpreter/bytecode-operands.h"
+
 // This interface and it's implementation are independent of the
 // libv8_base library as they are used by the interpreter and the
 // standalone mkpeephole table generator program.
@@ -17,64 +20,8 @@
 namespace internal {
 namespace interpreter {
 
-#define INVALID_OPERAND_TYPE_LIST(V) V(None, OperandTypeInfo::kNone)
-
-#define REGISTER_INPUT_OPERAND_TYPE_LIST(V)         \
-  V(MaybeReg, OperandTypeInfo::kScalableSignedByte) \
-  V(Reg, OperandTypeInfo::kScalableSignedByte)      \
-  V(RegPair, OperandTypeInfo::kScalableSignedByte)
-
-#define REGISTER_OUTPUT_OPERAND_TYPE_LIST(V)          \
-  V(RegOut, OperandTypeInfo::kScalableSignedByte)     \
-  V(RegOutPair, OperandTypeInfo::kScalableSignedByte) \
-  V(RegOutTriple, OperandTypeInfo::kScalableSignedByte)
-
-#define SCALAR_OPERAND_TYPE_LIST(V)                   \
-  V(Flag8, OperandTypeInfo::kFixedUnsignedByte)       \
-  V(IntrinsicId, OperandTypeInfo::kFixedUnsignedByte) \
-  V(Idx, OperandTypeInfo::kScalableUnsignedByte)      \
-  V(Imm, OperandTypeInfo::kScalableSignedByte)        \
-  V(RegCount, OperandTypeInfo::kScalableUnsignedByte) \
-  V(RuntimeId, OperandTypeInfo::kFixedUnsignedShort)
-
-#define REGISTER_OPERAND_TYPE_LIST(V) \
-  REGISTER_INPUT_OPERAND_TYPE_LIST(V) \
-  REGISTER_OUTPUT_OPERAND_TYPE_LIST(V)
-
-#define NON_REGISTER_OPERAND_TYPE_LIST(V) \
-  INVALID_OPERAND_TYPE_LIST(V)            \
-  SCALAR_OPERAND_TYPE_LIST(V)
-
-// The list of operand types used by bytecodes.
-#define OPERAND_TYPE_LIST(V)        \
-  NON_REGISTER_OPERAND_TYPE_LIST(V) \
-  REGISTER_OPERAND_TYPE_LIST(V)
-
-// Define one debug break bytecode for each possible size of unscaled
-// bytecodes. Format is V(<bytecode>, <accumulator_use>, <operands>).
-#define DEBUG_BREAK_PLAIN_BYTECODE_LIST(V)                                    \
-  V(DebugBreak0, AccumulatorUse::kRead)                                       \
-  V(DebugBreak1, AccumulatorUse::kRead, OperandType::kReg)                    \
-  V(DebugBreak2, AccumulatorUse::kRead, OperandType::kReg, OperandType::kReg) \
-  V(DebugBreak3, AccumulatorUse::kRead, OperandType::kReg, OperandType::kReg, \
-    OperandType::kReg)                                                        \
-  V(DebugBreak4, AccumulatorUse::kRead, OperandType::kReg, OperandType::kReg, \
-    OperandType::kReg, OperandType::kReg)                                     \
-  V(DebugBreak5, AccumulatorUse::kRead, OperandType::kRuntimeId,              \
-    OperandType::kReg, OperandType::kReg)                                     \
-  V(DebugBreak6, AccumulatorUse::kRead, OperandType::kRuntimeId,              \
-    OperandType::kReg, OperandType::kReg, OperandType::kReg)
-
-// Define one debug break for each widening prefix.
-#define DEBUG_BREAK_PREFIX_BYTECODE_LIST(V) \
-  V(DebugBreakWide, AccumulatorUse::kRead)  \
-  V(DebugBreakExtraWide, AccumulatorUse::kRead)
-
-#define DEBUG_BREAK_BYTECODE_LIST(V) \
-  DEBUG_BREAK_PLAIN_BYTECODE_LIST(V) \
-  DEBUG_BREAK_PREFIX_BYTECODE_LIST(V)
-
 // The list of bytecodes which are interpreted by the interpreter.
+// Format is V(<bytecode>, <accumulator_use>, <operands>).
 #define BYTECODE_LIST(V)                                                       \
   /* Extended width operands */                                                \
   V(Wide, AccumulatorUse::kNone)                                               \
@@ -106,15 +53,23 @@
   V(PushContext, AccumulatorUse::kRead, OperandType::kRegOut)                  \
   V(PopContext, AccumulatorUse::kNone, OperandType::kReg)                      \
   V(LdaContextSlot, AccumulatorUse::kWrite, OperandType::kReg,                 \
-    OperandType::kIdx)                                                         \
+    OperandType::kIdx, OperandType::kUImm)                                     \
   V(LdrContextSlot, AccumulatorUse::kNone, OperandType::kReg,                  \
-    OperandType::kIdx, OperandType::kRegOut)                                   \
+    OperandType::kIdx, OperandType::kUImm, OperandType::kRegOut)               \
   V(StaContextSlot, AccumulatorUse::kRead, OperandType::kReg,                  \
-    OperandType::kIdx)                                                         \
+    OperandType::kIdx, OperandType::kUImm)                                     \
                                                                                \
   /* Load-Store lookup slots */                                                \
   V(LdaLookupSlot, AccumulatorUse::kWrite, OperandType::kIdx)                  \
+  V(LdaLookupContextSlot, AccumulatorUse::kWrite, OperandType::kIdx,           \
+    OperandType::kIdx, OperandType::kUImm)                                     \
+  V(LdaLookupGlobalSlot, AccumulatorUse::kWrite, OperandType::kIdx,            \
+    OperandType::kIdx, OperandType::kUImm)                                     \
   V(LdaLookupSlotInsideTypeof, AccumulatorUse::kWrite, OperandType::kIdx)      \
+  V(LdaLookupContextSlotInsideTypeof, AccumulatorUse::kWrite,                  \
+    OperandType::kIdx, OperandType::kIdx, OperandType::kUImm)                  \
+  V(LdaLookupGlobalSlotInsideTypeof, AccumulatorUse::kWrite,                   \
+    OperandType::kIdx, OperandType::kIdx, OperandType::kUImm)                  \
   V(StaLookupSlotSloppy, AccumulatorUse::kReadWrite, OperandType::kIdx)        \
   V(StaLookupSlotStrict, AccumulatorUse::kReadWrite, OperandType::kIdx)        \
                                                                                \
@@ -188,33 +143,40 @@
   V(DeletePropertySloppy, AccumulatorUse::kReadWrite, OperandType::kReg)       \
                                                                                \
   /* Call operations */                                                        \
-  V(Call, AccumulatorUse::kWrite, OperandType::kReg, OperandType::kReg,        \
+  V(Call, AccumulatorUse::kWrite, OperandType::kReg, OperandType::kRegList,    \
     OperandType::kRegCount, OperandType::kIdx)                                 \
-  V(TailCall, AccumulatorUse::kWrite, OperandType::kReg, OperandType::kReg,    \
-    OperandType::kRegCount, OperandType::kIdx)                                 \
+  V(TailCall, AccumulatorUse::kWrite, OperandType::kReg,                       \
+    OperandType::kRegList, OperandType::kRegCount, OperandType::kIdx)          \
   V(CallRuntime, AccumulatorUse::kWrite, OperandType::kRuntimeId,              \
-    OperandType::kMaybeReg, OperandType::kRegCount)                            \
+    OperandType::kRegList, OperandType::kRegCount)                             \
   V(CallRuntimeForPair, AccumulatorUse::kNone, OperandType::kRuntimeId,        \
-    OperandType::kMaybeReg, OperandType::kRegCount, OperandType::kRegOutPair)  \
+    OperandType::kRegList, OperandType::kRegCount, OperandType::kRegOutPair)   \
   V(CallJSRuntime, AccumulatorUse::kWrite, OperandType::kIdx,                  \
-    OperandType::kReg, OperandType::kRegCount)                                 \
+    OperandType::kRegList, OperandType::kRegCount)                             \
                                                                                \
   /* Intrinsics */                                                             \
   V(InvokeIntrinsic, AccumulatorUse::kWrite, OperandType::kIntrinsicId,        \
-    OperandType::kMaybeReg, OperandType::kRegCount)                            \
+    OperandType::kRegList, OperandType::kRegCount)                             \
                                                                                \
   /* New operator */                                                           \
-  V(New, AccumulatorUse::kReadWrite, OperandType::kReg,                        \
-    OperandType::kMaybeReg, OperandType::kRegCount)                            \
+  V(New, AccumulatorUse::kReadWrite, OperandType::kReg, OperandType::kRegList, \
+    OperandType::kRegCount, OperandType::kIdx)                                 \
                                                                                \
   /* Test Operators */                                                         \
-  V(TestEqual, AccumulatorUse::kReadWrite, OperandType::kReg)                  \
-  V(TestNotEqual, AccumulatorUse::kReadWrite, OperandType::kReg)               \
-  V(TestEqualStrict, AccumulatorUse::kReadWrite, OperandType::kReg)            \
-  V(TestLessThan, AccumulatorUse::kReadWrite, OperandType::kReg)               \
-  V(TestGreaterThan, AccumulatorUse::kReadWrite, OperandType::kReg)            \
-  V(TestLessThanOrEqual, AccumulatorUse::kReadWrite, OperandType::kReg)        \
-  V(TestGreaterThanOrEqual, AccumulatorUse::kReadWrite, OperandType::kReg)     \
+  V(TestEqual, AccumulatorUse::kReadWrite, OperandType::kReg,                  \
+    OperandType::kIdx)                                                         \
+  V(TestNotEqual, AccumulatorUse::kReadWrite, OperandType::kReg,               \
+    OperandType::kIdx)                                                         \
+  V(TestEqualStrict, AccumulatorUse::kReadWrite, OperandType::kReg,            \
+    OperandType::kIdx)                                                         \
+  V(TestLessThan, AccumulatorUse::kReadWrite, OperandType::kReg,               \
+    OperandType::kIdx)                                                         \
+  V(TestGreaterThan, AccumulatorUse::kReadWrite, OperandType::kReg,            \
+    OperandType::kIdx)                                                         \
+  V(TestLessThanOrEqual, AccumulatorUse::kReadWrite, OperandType::kReg,        \
+    OperandType::kIdx)                                                         \
+  V(TestGreaterThanOrEqual, AccumulatorUse::kReadWrite, OperandType::kReg,     \
+    OperandType::kIdx)                                                         \
   V(TestInstanceOf, AccumulatorUse::kReadWrite, OperandType::kReg)             \
   V(TestIn, AccumulatorUse::kReadWrite, OperandType::kReg)                     \
                                                                                \
@@ -238,10 +200,10 @@
   /* Context allocation */                                                     \
   V(CreateBlockContext, AccumulatorUse::kReadWrite, OperandType::kIdx)         \
   V(CreateCatchContext, AccumulatorUse::kReadWrite, OperandType::kReg,         \
+    OperandType::kIdx, OperandType::kIdx)                                      \
+  V(CreateFunctionContext, AccumulatorUse::kWrite, OperandType::kUImm)         \
+  V(CreateWithContext, AccumulatorUse::kReadWrite, OperandType::kReg,          \
     OperandType::kIdx)                                                         \
-  /* TODO(klaasb) rename Idx or add unsigned Imm OperandType? */               \
-  V(CreateFunctionContext, AccumulatorUse::kWrite, OperandType::kIdx)          \
-  V(CreateWithContext, AccumulatorUse::kReadWrite, OperandType::kReg)          \
                                                                                \
   /* Arguments allocation */                                                   \
   V(CreateMappedArguments, AccumulatorUse::kWrite)                             \
@@ -265,11 +227,13 @@
   V(JumpIfUndefinedConstant, AccumulatorUse::kRead, OperandType::kIdx)         \
   V(JumpIfNotHole, AccumulatorUse::kRead, OperandType::kImm)                   \
   V(JumpIfNotHoleConstant, AccumulatorUse::kRead, OperandType::kIdx)           \
+  V(JumpLoop, AccumulatorUse::kNone, OperandType::kImm, OperandType::kImm)     \
                                                                                \
   /* Complex flow control For..in */                                           \
   V(ForInPrepare, AccumulatorUse::kNone, OperandType::kReg,                    \
     OperandType::kRegOutTriple)                                                \
-  V(ForInDone, AccumulatorUse::kWrite, OperandType::kReg, OperandType::kReg)   \
+  V(ForInContinue, AccumulatorUse::kWrite, OperandType::kReg,                  \
+    OperandType::kReg)                                                         \
   V(ForInNext, AccumulatorUse::kWrite, OperandType::kReg, OperandType::kReg,   \
     OperandType::kRegPair, OperandType::kIdx)                                  \
   V(ForInStep, AccumulatorUse::kWrite, OperandType::kReg)                      \
@@ -277,9 +241,6 @@
   /* Perform a stack guard check */                                            \
   V(StackCheck, AccumulatorUse::kNone)                                         \
                                                                                \
-  /* Perform a check to trigger on-stack replacement */                        \
-  V(OsrPoll, AccumulatorUse::kNone, OperandType::kImm)                         \
-                                                                               \
   /* Non-local flow control */                                                 \
   V(Throw, AccumulatorUse::kRead)                                              \
   V(ReThrow, AccumulatorUse::kRead)                                            \
@@ -291,7 +252,22 @@
                                                                                \
   /* Debugger */                                                               \
   V(Debugger, AccumulatorUse::kNone)                                           \
-  DEBUG_BREAK_BYTECODE_LIST(V)                                                 \
+                                                                               \
+  /* Debug Breakpoints - one for each possible size of unscaled bytecodes */   \
+  /* and one for each operand widening prefix bytecode                    */   \
+  V(DebugBreak0, AccumulatorUse::kRead)                                        \
+  V(DebugBreak1, AccumulatorUse::kRead, OperandType::kReg)                     \
+  V(DebugBreak2, AccumulatorUse::kRead, OperandType::kReg, OperandType::kReg)  \
+  V(DebugBreak3, AccumulatorUse::kRead, OperandType::kReg, OperandType::kReg,  \
+    OperandType::kReg)                                                         \
+  V(DebugBreak4, AccumulatorUse::kRead, OperandType::kReg, OperandType::kReg,  \
+    OperandType::kReg, OperandType::kReg)                                      \
+  V(DebugBreak5, AccumulatorUse::kRead, OperandType::kRuntimeId,               \
+    OperandType::kReg, OperandType::kReg)                                      \
+  V(DebugBreak6, AccumulatorUse::kRead, OperandType::kRuntimeId,               \
+    OperandType::kReg, OperandType::kReg, OperandType::kReg)                   \
+  V(DebugBreakWide, AccumulatorUse::kRead)                                     \
+  V(DebugBreakExtraWide, AccumulatorUse::kRead)                                \
                                                                                \
   /* Illegal bytecode (terminates execution) */                                \
   V(Illegal, AccumulatorUse::kNone)                                            \
@@ -300,74 +276,23 @@
   /* eliminated bytecodes). */                                                 \
   V(Nop, AccumulatorUse::kNone)
 
-enum class AccumulatorUse : uint8_t {
-  kNone = 0,
-  kRead = 1 << 0,
-  kWrite = 1 << 1,
-  kReadWrite = kRead | kWrite
-};
+// List of debug break bytecodes.
+#define DEBUG_BREAK_PLAIN_BYTECODE_LIST(V) \
+  V(DebugBreak0)                           \
+  V(DebugBreak1)                           \
+  V(DebugBreak2)                           \
+  V(DebugBreak3)                           \
+  V(DebugBreak4)                           \
+  V(DebugBreak5)                           \
+  V(DebugBreak6)
 
-inline AccumulatorUse operator&(AccumulatorUse lhs, AccumulatorUse rhs) {
-  int result = static_cast<int>(lhs) & static_cast<int>(rhs);
-  return static_cast<AccumulatorUse>(result);
-}
+#define DEBUG_BREAK_PREFIX_BYTECODE_LIST(V) \
+  V(DebugBreakWide)                         \
+  V(DebugBreakExtraWide)
 
-inline AccumulatorUse operator|(AccumulatorUse lhs, AccumulatorUse rhs) {
-  int result = static_cast<int>(lhs) | static_cast<int>(rhs);
-  return static_cast<AccumulatorUse>(result);
-}
-
-// Enumeration of scaling factors applicable to scalable operands. Code
-// relies on being able to cast values to integer scaling values.
-#define OPERAND_SCALE_LIST(V) \
-  V(Single, 1)                \
-  V(Double, 2)                \
-  V(Quadruple, 4)
-
-enum class OperandScale : uint8_t {
-#define DECLARE_OPERAND_SCALE(Name, Scale) k##Name = Scale,
-  OPERAND_SCALE_LIST(DECLARE_OPERAND_SCALE)
-#undef DECLARE_OPERAND_SCALE
-      kLast = kQuadruple
-};
-
-// Enumeration of the size classes of operand types used by
-// bytecodes. Code relies on being able to cast values to integer
-// types to get the size in bytes.
-enum class OperandSize : uint8_t {
-  kNone = 0,
-  kByte = 1,
-  kShort = 2,
-  kQuad = 4,
-  kLast = kQuad
-};
-
-// Primitive operand info used that summarize properties of operands.
-// Columns are Name, IsScalable, IsUnsigned, UnscaledSize.
-#define OPERAND_TYPE_INFO_LIST(V)                         \
-  V(None, false, false, OperandSize::kNone)               \
-  V(ScalableSignedByte, true, false, OperandSize::kByte)  \
-  V(ScalableUnsignedByte, true, true, OperandSize::kByte) \
-  V(FixedUnsignedByte, false, true, OperandSize::kByte)   \
-  V(FixedUnsignedShort, false, true, OperandSize::kShort)
-
-enum class OperandTypeInfo : uint8_t {
-#define DECLARE_OPERAND_TYPE_INFO(Name, ...) k##Name,
-  OPERAND_TYPE_INFO_LIST(DECLARE_OPERAND_TYPE_INFO)
-#undef DECLARE_OPERAND_TYPE_INFO
-};
-
-// Enumeration of operand types used by bytecodes.
-enum class OperandType : uint8_t {
-#define DECLARE_OPERAND_TYPE(Name, _) k##Name,
-  OPERAND_TYPE_LIST(DECLARE_OPERAND_TYPE)
-#undef DECLARE_OPERAND_TYPE
-#define COUNT_OPERAND_TYPES(x, _) +1
-  // The COUNT_OPERAND macro will turn this into kLast = -1 +1 +1... which will
-  // evaluate to the same value as the last operand.
-  kLast = -1 OPERAND_TYPE_LIST(COUNT_OPERAND_TYPES)
-#undef COUNT_OPERAND_TYPES
-};
+#define DEBUG_BREAK_BYTECODE_LIST(V) \
+  DEBUG_BREAK_PLAIN_BYTECODE_LIST(V) \
+  DEBUG_BREAK_PREFIX_BYTECODE_LIST(V)
 
 // Enumeration of interpreter bytecodes.
 enum class Bytecode : uint8_t {
@@ -381,6 +306,14 @@
 #undef COUNT_BYTECODE
 };
 
+// TODO(rmcilroy): Remove once we switch to MSVC 2015 which supports constexpr.
+// See crbug.com/603131.
+#if V8_CC_MSVC
+#define CONSTEXPR const
+#else
+#define CONSTEXPR constexpr
+#endif
+
 class Bytecodes final {
  public:
   //  The maximum number of operands a bytecode may have.
@@ -392,157 +325,315 @@
   // Returns string representation of |bytecode|.
   static std::string ToString(Bytecode bytecode, OperandScale operand_scale);
 
-  // Returns string representation of |accumulator_use|.
-  static const char* AccumulatorUseToString(AccumulatorUse accumulator_use);
-
-  // Returns string representation of |operand_type|.
-  static const char* OperandTypeToString(OperandType operand_type);
-
-  // Returns string representation of |operand_scale|.
-  static const char* OperandScaleToString(OperandScale operand_scale);
-
-  // Returns string representation of |operand_size|.
-  static const char* OperandSizeToString(OperandSize operand_size);
-
   // Returns byte value of bytecode.
-  static uint8_t ToByte(Bytecode bytecode);
+  static uint8_t ToByte(Bytecode bytecode) {
+    DCHECK_LE(bytecode, Bytecode::kLast);
+    return static_cast<uint8_t>(bytecode);
+  }
 
   // Returns bytecode for |value|.
-  static Bytecode FromByte(uint8_t value);
-
-  // Returns the number of operands expected by |bytecode|.
-  static int NumberOfOperands(Bytecode bytecode);
-
-  // Returns the number of register operands expected by |bytecode|.
-  static int NumberOfRegisterOperands(Bytecode bytecode);
+  static Bytecode FromByte(uint8_t value) {
+    Bytecode bytecode = static_cast<Bytecode>(value);
+    DCHECK(bytecode <= Bytecode::kLast);
+    return bytecode;
+  }
 
   // Returns the prefix bytecode representing an operand scale to be
   // applied to a a bytecode.
-  static Bytecode OperandScaleToPrefixBytecode(OperandScale operand_scale);
+  static Bytecode OperandScaleToPrefixBytecode(OperandScale operand_scale) {
+    switch (operand_scale) {
+      case OperandScale::kQuadruple:
+        return Bytecode::kExtraWide;
+      case OperandScale::kDouble:
+        return Bytecode::kWide;
+      default:
+        UNREACHABLE();
+        return Bytecode::kIllegal;
+    }
+  }
 
   // Returns true if the operand scale requires a prefix bytecode.
-  static bool OperandScaleRequiresPrefixBytecode(OperandScale operand_scale);
+  static bool OperandScaleRequiresPrefixBytecode(OperandScale operand_scale) {
+    return operand_scale != OperandScale::kSingle;
+  }
 
   // Returns the scaling applied to scalable operands if bytecode is
   // is a scaling prefix.
-  static OperandScale PrefixBytecodeToOperandScale(Bytecode bytecode);
+  static OperandScale PrefixBytecodeToOperandScale(Bytecode bytecode) {
+    switch (bytecode) {
+      case Bytecode::kExtraWide:
+      case Bytecode::kDebugBreakExtraWide:
+        return OperandScale::kQuadruple;
+      case Bytecode::kWide:
+      case Bytecode::kDebugBreakWide:
+        return OperandScale::kDouble;
+      default:
+        UNREACHABLE();
+        return OperandScale::kSingle;
+    }
+  }
 
   // Returns how accumulator is used by |bytecode|.
-  static AccumulatorUse GetAccumulatorUse(Bytecode bytecode);
+  static AccumulatorUse GetAccumulatorUse(Bytecode bytecode) {
+    DCHECK(bytecode <= Bytecode::kLast);
+    return kAccumulatorUse[static_cast<size_t>(bytecode)];
+  }
 
   // Returns true if |bytecode| reads the accumulator.
-  static bool ReadsAccumulator(Bytecode bytecode);
+  static bool ReadsAccumulator(Bytecode bytecode) {
+    return (GetAccumulatorUse(bytecode) & AccumulatorUse::kRead) ==
+           AccumulatorUse::kRead;
+  }
 
   // Returns true if |bytecode| writes the accumulator.
-  static bool WritesAccumulator(Bytecode bytecode);
+  static bool WritesAccumulator(Bytecode bytecode) {
+    return (GetAccumulatorUse(bytecode) & AccumulatorUse::kWrite) ==
+           AccumulatorUse::kWrite;
+  }
 
   // Return true if |bytecode| writes the accumulator with a boolean value.
-  static bool WritesBooleanToAccumulator(Bytecode bytecode);
+  static bool WritesBooleanToAccumulator(Bytecode bytecode) {
+    switch (bytecode) {
+      case Bytecode::kLdaTrue:
+      case Bytecode::kLdaFalse:
+      case Bytecode::kToBooleanLogicalNot:
+      case Bytecode::kLogicalNot:
+      case Bytecode::kTestEqual:
+      case Bytecode::kTestNotEqual:
+      case Bytecode::kTestEqualStrict:
+      case Bytecode::kTestLessThan:
+      case Bytecode::kTestLessThanOrEqual:
+      case Bytecode::kTestGreaterThan:
+      case Bytecode::kTestGreaterThanOrEqual:
+      case Bytecode::kTestInstanceOf:
+      case Bytecode::kTestIn:
+      case Bytecode::kForInContinue:
+        return true;
+      default:
+        return false;
+    }
+  }
 
   // Return true if |bytecode| is an accumulator load without effects,
   // e.g. LdaConstant, LdaTrue, Ldar.
-  static bool IsAccumulatorLoadWithoutEffects(Bytecode bytecode);
+  static CONSTEXPR bool IsAccumulatorLoadWithoutEffects(Bytecode bytecode) {
+    return bytecode == Bytecode::kLdar || bytecode == Bytecode::kLdaZero ||
+           bytecode == Bytecode::kLdaSmi || bytecode == Bytecode::kLdaNull ||
+           bytecode == Bytecode::kLdaTrue || bytecode == Bytecode::kLdaFalse ||
+           bytecode == Bytecode::kLdaUndefined ||
+           bytecode == Bytecode::kLdaTheHole ||
+           bytecode == Bytecode::kLdaConstant;
+  }
+
+  // Return true if |bytecode| is a register load without effects,
+  // e.g. Mov, Star, LdrUndefined.
+  static CONSTEXPR bool IsRegisterLoadWithoutEffects(Bytecode bytecode) {
+    return bytecode == Bytecode::kMov || bytecode == Bytecode::kPopContext ||
+           bytecode == Bytecode::kPushContext || bytecode == Bytecode::kStar ||
+           bytecode == Bytecode::kLdrUndefined;
+  }
+
+  // Returns true if the bytecode is a conditional jump taking
+  // an immediate byte operand (OperandType::kImm).
+  static CONSTEXPR bool IsConditionalJumpImmediate(Bytecode bytecode) {
+    return bytecode == Bytecode::kJumpIfTrue ||
+           bytecode == Bytecode::kJumpIfFalse ||
+           bytecode == Bytecode::kJumpIfToBooleanTrue ||
+           bytecode == Bytecode::kJumpIfToBooleanFalse ||
+           bytecode == Bytecode::kJumpIfNotHole ||
+           bytecode == Bytecode::kJumpIfNull ||
+           bytecode == Bytecode::kJumpIfUndefined;
+  }
+
+  // Returns true if the bytecode is a conditional jump taking
+  // a constant pool entry (OperandType::kIdx).
+  static CONSTEXPR bool IsConditionalJumpConstant(Bytecode bytecode) {
+    return bytecode == Bytecode::kJumpIfTrueConstant ||
+           bytecode == Bytecode::kJumpIfFalseConstant ||
+           bytecode == Bytecode::kJumpIfToBooleanTrueConstant ||
+           bytecode == Bytecode::kJumpIfToBooleanFalseConstant ||
+           bytecode == Bytecode::kJumpIfNotHoleConstant ||
+           bytecode == Bytecode::kJumpIfNullConstant ||
+           bytecode == Bytecode::kJumpIfUndefinedConstant;
+  }
+
+  // Returns true if the bytecode is a conditional jump taking
+  // any kind of operand.
+  static CONSTEXPR bool IsConditionalJump(Bytecode bytecode) {
+    return IsConditionalJumpImmediate(bytecode) ||
+           IsConditionalJumpConstant(bytecode);
+  }
+
+  // Returns true if the bytecode is a jump or a conditional jump taking
+  // an immediate byte operand (OperandType::kImm).
+  static CONSTEXPR bool IsJumpImmediate(Bytecode bytecode) {
+    return bytecode == Bytecode::kJump || bytecode == Bytecode::kJumpLoop ||
+           IsConditionalJumpImmediate(bytecode);
+  }
+
+  // Returns true if the bytecode is a jump or conditional jump taking a
+  // constant pool entry (OperandType::kIdx).
+  static CONSTEXPR bool IsJumpConstant(Bytecode bytecode) {
+    return bytecode == Bytecode::kJumpConstant ||
+           IsConditionalJumpConstant(bytecode);
+  }
+
+  // Returns true if the bytecode is a jump that internally coerces the
+  // accumulator to a boolean.
+  static CONSTEXPR bool IsJumpIfToBoolean(Bytecode bytecode) {
+    return bytecode == Bytecode::kJumpIfToBooleanTrue ||
+           bytecode == Bytecode::kJumpIfToBooleanFalse ||
+           bytecode == Bytecode::kJumpIfToBooleanTrueConstant ||
+           bytecode == Bytecode::kJumpIfToBooleanFalseConstant;
+  }
+
+  // Returns true if the bytecode is a jump or conditional jump taking
+  // any kind of operand.
+  static CONSTEXPR bool IsJump(Bytecode bytecode) {
+    return IsJumpImmediate(bytecode) || IsJumpConstant(bytecode);
+  }
+
+  // Returns true if the bytecode is a conditional jump, a jump, or a return.
+  static CONSTEXPR bool IsJumpOrReturn(Bytecode bytecode) {
+    return bytecode == Bytecode::kReturn || IsJump(bytecode);
+  }
 
   // Return true if |bytecode| is a jump without effects,
   // e.g.  any jump excluding those that include type coercion like
   // JumpIfTrueToBoolean.
-  static bool IsJumpWithoutEffects(Bytecode bytecode);
+  static CONSTEXPR bool IsJumpWithoutEffects(Bytecode bytecode) {
+    return IsJump(bytecode) && !IsJumpIfToBoolean(bytecode);
+  }
 
-  // Return true if |bytecode| is a register load without effects,
-  // e.g. Mov, Star, LdrUndefined.
-  static bool IsRegisterLoadWithoutEffects(Bytecode bytecode);
+  // Returns true if |bytecode| has no effects. These bytecodes only manipulate
+  // interpreter frame state and will never throw.
+  static CONSTEXPR bool IsWithoutExternalSideEffects(Bytecode bytecode) {
+    return (IsAccumulatorLoadWithoutEffects(bytecode) ||
+            IsRegisterLoadWithoutEffects(bytecode) ||
+            bytecode == Bytecode::kNop || IsJumpWithoutEffects(bytecode));
+  }
 
-  // Returns true if |bytecode| has no effects.
-  static bool IsWithoutExternalSideEffects(Bytecode bytecode);
+  // Returns true if the bytecode is Ldar or Star.
+  static CONSTEXPR bool IsLdarOrStar(Bytecode bytecode) {
+    return bytecode == Bytecode::kLdar || bytecode == Bytecode::kStar;
+  }
+
+  // Returns true if |bytecode| puts a name in the accumulator.
+  static CONSTEXPR bool PutsNameInAccumulator(Bytecode bytecode) {
+    return bytecode == Bytecode::kTypeOf;
+  }
+
+  // Returns true if the bytecode is a call or a constructor call.
+  static CONSTEXPR bool IsCallOrNew(Bytecode bytecode) {
+    return bytecode == Bytecode::kCall || bytecode == Bytecode::kTailCall ||
+           bytecode == Bytecode::kNew;
+  }
+
+  // Returns true if the bytecode is a call to the runtime.
+  static CONSTEXPR bool IsCallRuntime(Bytecode bytecode) {
+    return bytecode == Bytecode::kCallRuntime ||
+           bytecode == Bytecode::kCallRuntimeForPair ||
+           bytecode == Bytecode::kInvokeIntrinsic;
+  }
+
+  // Returns true if the bytecode is a scaling prefix bytecode.
+  static CONSTEXPR bool IsPrefixScalingBytecode(Bytecode bytecode) {
+    return bytecode == Bytecode::kExtraWide || bytecode == Bytecode::kWide ||
+           bytecode == Bytecode::kDebugBreakExtraWide ||
+           bytecode == Bytecode::kDebugBreakWide;
+  }
+
+  // Returns the number of values which |bytecode| returns.
+  static CONSTEXPR size_t ReturnCount(Bytecode bytecode) {
+    return bytecode == Bytecode::kReturn ? 1 : 0;
+  }
+
+  // Returns the number of operands expected by |bytecode|.
+  static int NumberOfOperands(Bytecode bytecode) {
+    DCHECK(bytecode <= Bytecode::kLast);
+    return kOperandCount[static_cast<size_t>(bytecode)];
+  }
 
   // Returns the i-th operand of |bytecode|.
-  static OperandType GetOperandType(Bytecode bytecode, int i);
+  static OperandType GetOperandType(Bytecode bytecode, int i) {
+    DCHECK_LE(bytecode, Bytecode::kLast);
+    DCHECK_LT(i, NumberOfOperands(bytecode));
+    DCHECK_GE(i, 0);
+    return GetOperandTypes(bytecode)[i];
+  }
 
   // Returns a pointer to an array of operand types terminated in
   // OperandType::kNone.
-  static const OperandType* GetOperandTypes(Bytecode bytecode);
+  static const OperandType* GetOperandTypes(Bytecode bytecode) {
+    DCHECK(bytecode <= Bytecode::kLast);
+    return kOperandTypes[static_cast<size_t>(bytecode)];
+  }
 
-  // Returns a pointer to an array of operand type info terminated in
-  // OperandTypeInfo::kNone.
-  static const OperandTypeInfo* GetOperandTypeInfos(Bytecode bytecode);
+  static bool OperandIsScalableSignedByte(Bytecode bytecode,
+                                          int operand_index) {
+    DCHECK(bytecode <= Bytecode::kLast);
+    return kOperandTypeInfos[static_cast<size_t>(bytecode)][operand_index] ==
+           OperandTypeInfo::kScalableSignedByte;
+  }
+
+  static bool OperandIsScalableUnsignedByte(Bytecode bytecode,
+                                            int operand_index) {
+    DCHECK(bytecode <= Bytecode::kLast);
+    return kOperandTypeInfos[static_cast<size_t>(bytecode)][operand_index] ==
+           OperandTypeInfo::kScalableUnsignedByte;
+  }
+
+  static bool OperandIsScalable(Bytecode bytecode, int operand_index) {
+    return OperandIsScalableSignedByte(bytecode, operand_index) ||
+           OperandIsScalableUnsignedByte(bytecode, operand_index);
+  }
+
+  // Returns true if the bytecode has wider operand forms.
+  static bool IsBytecodeWithScalableOperands(Bytecode bytecode);
 
   // Returns the size of the i-th operand of |bytecode|.
   static OperandSize GetOperandSize(Bytecode bytecode, int i,
-                                    OperandScale operand_scale);
+                                    OperandScale operand_scale) {
+    CHECK_LT(i, NumberOfOperands(bytecode));
+    return GetOperandSizes(bytecode, operand_scale)[i];
+  }
+
+  // Returns the operand sizes of |bytecode| with scale |operand_scale|.
+  static const OperandSize* GetOperandSizes(Bytecode bytecode,
+                                            OperandScale operand_scale) {
+    DCHECK(bytecode <= Bytecode::kLast);
+    DCHECK_GE(operand_scale, OperandScale::kSingle);
+    DCHECK_LE(operand_scale, OperandScale::kLast);
+    STATIC_ASSERT(static_cast<int>(OperandScale::kQuadruple) == 4 &&
+                  OperandScale::kLast == OperandScale::kQuadruple);
+    int scale_index = static_cast<int>(operand_scale) >> 1;
+    return kOperandSizes[static_cast<size_t>(bytecode)][scale_index];
+  }
 
   // Returns the offset of the i-th operand of |bytecode| relative to the start
   // of the bytecode.
   static int GetOperandOffset(Bytecode bytecode, int i,
                               OperandScale operand_scale);
 
-  // Returns a debug break bytecode to replace |bytecode|.
-  static Bytecode GetDebugBreak(Bytecode bytecode);
-
   // Returns the size of the bytecode including its operands for the
   // given |operand_scale|.
-  static int Size(Bytecode bytecode, OperandScale operand_scale);
+  static int Size(Bytecode bytecode, OperandScale operand_scale) {
+    DCHECK(bytecode <= Bytecode::kLast);
+    STATIC_ASSERT(static_cast<int>(OperandScale::kQuadruple) == 4 &&
+                  OperandScale::kLast == OperandScale::kQuadruple);
+    int scale_index = static_cast<int>(operand_scale) >> 1;
+    return kBytecodeSizes[static_cast<size_t>(bytecode)][scale_index];
+  }
 
-  // Returns the size of |operand|.
-  static OperandSize SizeOfOperand(OperandType operand, OperandScale scale);
-
-  // Returns the number of values which |bytecode| returns.
-  static size_t ReturnCount(Bytecode bytecode);
-
-  // Returns true if the bytecode is a conditional jump taking
-  // an immediate byte operand (OperandType::kImm).
-  static bool IsConditionalJumpImmediate(Bytecode bytecode);
-
-  // Returns true if the bytecode is a conditional jump taking
-  // a constant pool entry (OperandType::kIdx).
-  static bool IsConditionalJumpConstant(Bytecode bytecode);
-
-  // Returns true if the bytecode is a conditional jump taking
-  // any kind of operand.
-  static bool IsConditionalJump(Bytecode bytecode);
-
-  // Returns true if the bytecode is a jump or a conditional jump taking
-  // an immediate byte operand (OperandType::kImm).
-  static bool IsJumpImmediate(Bytecode bytecode);
-
-  // Returns true if the bytecode is a jump or conditional jump taking a
-  // constant pool entry (OperandType::kIdx).
-  static bool IsJumpConstant(Bytecode bytecode);
-
-  // Returns true if the bytecode is a jump or conditional jump taking
-  // any kind of operand.
-  static bool IsJump(Bytecode bytecode);
-
-  // Returns true if the bytecode is a jump that internally coerces the
-  // accumulator to a boolean.
-  static bool IsJumpIfToBoolean(Bytecode bytecode);
+  // Returns a debug break bytecode to replace |bytecode|.
+  static Bytecode GetDebugBreak(Bytecode bytecode);
 
   // Returns the equivalent jump bytecode without the accumulator coercion.
   static Bytecode GetJumpWithoutToBoolean(Bytecode bytecode);
 
-  // Returns true if the bytecode is a conditional jump, a jump, or a return.
-  static bool IsJumpOrReturn(Bytecode bytecode);
-
-  // Returns true if the bytecode is a call or a constructor call.
-  static bool IsCallOrNew(Bytecode bytecode);
-
-  // Returns true if the bytecode is a call to the runtime.
-  static bool IsCallRuntime(Bytecode bytecode);
-
   // Returns true if the bytecode is a debug break.
   static bool IsDebugBreak(Bytecode bytecode);
 
-  // Returns true if the bytecode is Ldar or Star.
-  static bool IsLdarOrStar(Bytecode bytecode);
-
-  // Returns true if the bytecode has wider operand forms.
-  static bool IsBytecodeWithScalableOperands(Bytecode bytecode);
-
-  // Returns true if the bytecode is a scaling prefix bytecode.
-  static bool IsPrefixScalingBytecode(Bytecode bytecode);
-
-  // Returns true if |bytecode| puts a name in the accumulator.
-  static bool PutsNameInAccumulator(Bytecode bytecode);
-
   // Returns true if |operand_type| is any type of register operand.
   static bool IsRegisterOperandType(OperandType operand_type);
 
@@ -557,12 +648,30 @@
   static bool IsStarLookahead(Bytecode bytecode, OperandScale operand_scale);
 
   // Returns the number of registers represented by a register operand. For
-  // instance, a RegPair represents two registers.
-  static int GetNumberOfRegistersRepresentedBy(OperandType operand_type);
+  // instance, a RegPair represents two registers. Should not be called for
+  // kRegList which has a variable number of registers based on the following
+  // kRegCount operand.
+  static int GetNumberOfRegistersRepresentedBy(OperandType operand_type) {
+    switch (operand_type) {
+      case OperandType::kReg:
+      case OperandType::kRegOut:
+        return 1;
+      case OperandType::kRegPair:
+      case OperandType::kRegOutPair:
+        return 2;
+      case OperandType::kRegOutTriple:
+        return 3;
+      case OperandType::kRegList:
+        UNREACHABLE();
+        return 0;
+      default:
+        return 0;
+    }
+    return 0;
+  }
 
-  // Returns true if |operand_type| is a maybe register operand
-  // (kMaybeReg).
-  static bool IsMaybeRegisterOperandType(OperandType operand_type);
+  // Returns the size of |operand| for |operand_scale|.
+  static OperandSize SizeOfOperand(OperandType operand, OperandScale scale);
 
   // Returns true if |operand_type| is a runtime-id operand (kRuntimeId).
   static bool IsRuntimeIdOperandType(OperandType operand_type);
@@ -576,18 +685,55 @@
   // OperandScale values.
   static bool BytecodeHasHandler(Bytecode bytecode, OperandScale operand_scale);
 
-  // Return the operand size required to hold a signed operand.
-  static OperandSize SizeForSignedOperand(int value);
+  // Return the operand scale required to hold a signed operand with |value|.
+  static OperandScale ScaleForSignedOperand(int32_t value) {
+    if (value >= kMinInt8 && value <= kMaxInt8) {
+      return OperandScale::kSingle;
+    } else if (value >= kMinInt16 && value <= kMaxInt16) {
+      return OperandScale::kDouble;
+    } else {
+      return OperandScale::kQuadruple;
+    }
+  }
 
-  // Return the operand size required to hold an unsigned operand.
-  static OperandSize SizeForUnsignedOperand(uint32_t value);
+  // Return the operand scale required to hold an unsigned operand with |value|.
+  static OperandScale ScaleForUnsignedOperand(uint32_t value) {
+    if (value <= kMaxUInt8) {
+      return OperandScale::kSingle;
+    } else if (value <= kMaxUInt16) {
+      return OperandScale::kDouble;
+    } else {
+      return OperandScale::kQuadruple;
+    }
+  }
+
+  // Return the operand size required to hold an unsigned operand with |value|.
+  static OperandSize SizeForUnsignedOperand(uint32_t value) {
+    if (value <= kMaxUInt8) {
+      return OperandSize::kByte;
+    } else if (value <= kMaxUInt16) {
+      return OperandSize::kShort;
+    } else {
+      return OperandSize::kQuad;
+    }
+  }
+
+ private:
+  static const OperandType* const kOperandTypes[];
+  static const OperandTypeInfo* const kOperandTypeInfos[];
+  static const int kOperandCount[];
+  static const int kNumberOfRegisterOperands[];
+  static const AccumulatorUse kAccumulatorUse[];
+  static const bool kIsScalable[];
+  static const int kBytecodeSizes[][3];
+  static const OperandSize* const kOperandSizes[][3];
 };
 
+// TODO(rmcilroy): Remove once we switch to MSVC 2015 which supports constexpr.
+// See crbug.com/603131.
+#undef CONSTEXPR
+
 std::ostream& operator<<(std::ostream& os, const Bytecode& bytecode);
-std::ostream& operator<<(std::ostream& os, const AccumulatorUse& use);
-std::ostream& operator<<(std::ostream& os, const OperandScale& operand_scale);
-std::ostream& operator<<(std::ostream& os, const OperandSize& operand_size);
-std::ostream& operator<<(std::ostream& os, const OperandType& operand_type);
 
 }  // namespace interpreter
 }  // namespace internal
diff --git a/src/interpreter/constant-array-builder.cc b/src/interpreter/constant-array-builder.cc
index ff3823f..d2b7995 100644
--- a/src/interpreter/constant-array-builder.cc
+++ b/src/interpreter/constant-array-builder.cc
@@ -4,6 +4,7 @@
 
 #include "src/interpreter/constant-array-builder.h"
 
+#include <functional>
 #include <set>
 
 #include "src/isolate.h"
@@ -72,9 +73,11 @@
 
 ConstantArrayBuilder::ConstantArrayBuilder(Zone* zone,
                                            Handle<Object> the_hole_value)
-    : constants_map_(zone),
+    : constants_map_(16, base::KeyEqualityMatcher<Address>(),
+                     ZoneAllocationPolicy(zone)),
       smi_map_(zone),
       smi_pairs_(zone),
+      zone_(zone),
       the_hole_value_(the_hole_value) {
   idx_slice_[0] =
       new (zone) ConstantArraySlice(zone, 0, k8BitCapacity, OperandSize::kByte);
@@ -153,16 +156,11 @@
 }
 
 size_t ConstantArrayBuilder::Insert(Handle<Object> object) {
-  auto entry = constants_map_.find(object.address());
-  return (entry == constants_map_.end()) ? AllocateEntry(object)
-                                         : entry->second;
-}
-
-ConstantArrayBuilder::index_t ConstantArrayBuilder::AllocateEntry(
-    Handle<Object> object) {
-  index_t index = AllocateIndex(object);
-  constants_map_[object.address()] = index;
-  return index;
+  return constants_map_
+      .LookupOrInsert(object.address(), ObjectHash(object.address()),
+                      [&]() { return AllocateIndex(object); },
+                      ZoneAllocationPolicy(zone_))
+      ->value;
 }
 
 ConstantArrayBuilder::index_t ConstantArrayBuilder::AllocateIndex(
diff --git a/src/interpreter/constant-array-builder.h b/src/interpreter/constant-array-builder.h
index 2018f25..78d36f5 100644
--- a/src/interpreter/constant-array-builder.h
+++ b/src/interpreter/constant-array-builder.h
@@ -7,7 +7,7 @@
 
 #include "src/identity-map.h"
 #include "src/interpreter/bytecodes.h"
-#include "src/zone-containers.h"
+#include "src/zone/zone-containers.h"
 
 namespace v8 {
 namespace internal {
@@ -70,7 +70,6 @@
  private:
   typedef uint32_t index_t;
 
-  index_t AllocateEntry(Handle<Object> object);
   index_t AllocateIndex(Handle<Object> object);
   index_t AllocateReservedEntry(Smi* value);
 
@@ -108,9 +107,12 @@
   Handle<Object> the_hole_value() const { return the_hole_value_; }
 
   ConstantArraySlice* idx_slice_[3];
-  ZoneMap<Address, index_t> constants_map_;
+  base::TemplateHashMapImpl<Address, index_t, base::KeyEqualityMatcher<Address>,
+                            ZoneAllocationPolicy>
+      constants_map_;
   ZoneMap<Smi*, index_t> smi_map_;
   ZoneVector<std::pair<Smi*, index_t>> smi_pairs_;
+  Zone* zone_;
   Handle<Object> the_hole_value_;
 };
 
diff --git a/src/interpreter/control-flow-builders.cc b/src/interpreter/control-flow-builders.cc
index 56cd481..0e71b96 100644
--- a/src/interpreter/control-flow-builders.cc
+++ b/src/interpreter/control-flow-builders.cc
@@ -60,18 +60,14 @@
   }
 }
 
-void LoopBuilder::JumpToHeader() {
+void LoopBuilder::JumpToHeader(int loop_depth) {
+  // Pass the proper loop nesting level to the backwards branch, to trigger
+  // on-stack replacement when armed for the given loop nesting depth.
+  int level = Min(loop_depth, AbstractCode::kMaxLoopNestingMarker - 1);
   // Loop must have closed form, i.e. all loop elements are within the loop,
   // the loop header precedes the body and next elements in the loop.
   DCHECK(loop_header_.is_bound());
-  builder()->Jump(&loop_header_);
-}
-
-void LoopBuilder::JumpToHeaderIfTrue() {
-  // Loop must have closed form, i.e. all loop elements are within the loop,
-  // the loop header precedes the body and next elements in the loop.
-  DCHECK(loop_header_.is_bound());
-  builder()->JumpIfTrue(&loop_header_);
+  builder()->JumpLoop(&loop_header_, level);
 }
 
 void LoopBuilder::EndLoop() {
diff --git a/src/interpreter/control-flow-builders.h b/src/interpreter/control-flow-builders.h
index 5cd9b5b..3174db5 100644
--- a/src/interpreter/control-flow-builders.h
+++ b/src/interpreter/control-flow-builders.h
@@ -8,7 +8,7 @@
 #include "src/interpreter/bytecode-array-builder.h"
 
 #include "src/interpreter/bytecode-label.h"
-#include "src/zone-containers.h"
+#include "src/zone/zone-containers.h"
 
 namespace v8 {
 namespace internal {
@@ -86,8 +86,7 @@
   ~LoopBuilder();
 
   void LoopHeader(ZoneVector<BytecodeLabel>* additional_labels);
-  void JumpToHeader();
-  void JumpToHeaderIfTrue();
+  void JumpToHeader(int loop_depth);
   void BindContinueTarget();
   void EndLoop();
 
@@ -99,9 +98,6 @@
   void ContinueIfUndefined() { EmitJumpIfUndefined(&continue_labels_); }
   void ContinueIfNull() { EmitJumpIfNull(&continue_labels_); }
 
-  BytecodeLabels* header_labels() { return &header_labels_; }
-  BytecodeLabels* continue_labels() { return &continue_labels_; }
-
  private:
   BytecodeLabel loop_header_;
 
diff --git a/src/interpreter/handler-table-builder.h b/src/interpreter/handler-table-builder.h
index 26c45f4..25147ca 100644
--- a/src/interpreter/handler-table-builder.h
+++ b/src/interpreter/handler-table-builder.h
@@ -8,7 +8,7 @@
 #include "src/handles.h"
 #include "src/interpreter/bytecode-register.h"
 #include "src/interpreter/bytecodes.h"
-#include "src/zone-containers.h"
+#include "src/zone/zone-containers.h"
 
 namespace v8 {
 namespace internal {
diff --git a/src/interpreter/interpreter-assembler.cc b/src/interpreter/interpreter-assembler.cc
index 227fd39..5767ffa 100644
--- a/src/interpreter/interpreter-assembler.cc
+++ b/src/interpreter/interpreter-assembler.cc
@@ -14,7 +14,7 @@
 #include "src/interpreter/interpreter.h"
 #include "src/machine-type.h"
 #include "src/macro-assembler.h"
-#include "src/zone.h"
+#include "src/zone/zone.h"
 
 namespace v8 {
 namespace internal {
@@ -84,6 +84,71 @@
   StoreRegister(value, Register::current_context());
 }
 
+Node* InterpreterAssembler::GetContextAtDepth(Node* context, Node* depth) {
+  Variable cur_context(this, MachineRepresentation::kTaggedPointer);
+  cur_context.Bind(context);
+
+  Variable cur_depth(this, MachineRepresentation::kWord32);
+  cur_depth.Bind(depth);
+
+  Label context_found(this);
+
+  Variable* context_search_loop_variables[2] = {&cur_depth, &cur_context};
+  Label context_search(this, 2, context_search_loop_variables);
+
+  // Fast path if the depth is 0.
+  BranchIfWord32Equal(depth, Int32Constant(0), &context_found, &context_search);
+
+  // Loop until the depth is 0.
+  Bind(&context_search);
+  {
+    cur_depth.Bind(Int32Sub(cur_depth.value(), Int32Constant(1)));
+    cur_context.Bind(
+        LoadContextSlot(cur_context.value(), Context::PREVIOUS_INDEX));
+
+    BranchIfWord32Equal(cur_depth.value(), Int32Constant(0), &context_found,
+                        &context_search);
+  }
+
+  Bind(&context_found);
+  return cur_context.value();
+}
+
+void InterpreterAssembler::GotoIfHasContextExtensionUpToDepth(Node* context,
+                                                              Node* depth,
+                                                              Label* target) {
+  Variable cur_context(this, MachineRepresentation::kTaggedPointer);
+  cur_context.Bind(context);
+
+  Variable cur_depth(this, MachineRepresentation::kWord32);
+  cur_depth.Bind(depth);
+
+  Variable* context_search_loop_variables[2] = {&cur_depth, &cur_context};
+  Label context_search(this, 2, context_search_loop_variables);
+
+  // Loop until the depth is 0.
+  Goto(&context_search);
+  Bind(&context_search);
+  {
+    // TODO(leszeks): We only need to do this check if the context had a sloppy
+    // eval, we could pass in a context chain bitmask to figure out which
+    // contexts actually need to be checked.
+
+    Node* extension_slot =
+        LoadContextSlot(cur_context.value(), Context::EXTENSION_INDEX);
+
+    // Jump to the target if the extension slot is not a hole.
+    GotoIf(WordNotEqual(extension_slot, TheHoleConstant()), target);
+
+    cur_depth.Bind(Int32Sub(cur_depth.value(), Int32Constant(1)));
+    cur_context.Bind(
+        LoadContextSlot(cur_context.value(), Context::PREVIOUS_INDEX));
+
+    GotoIf(Word32NotEqual(cur_depth.value(), Int32Constant(0)),
+           &context_search);
+  }
+}
+
 Node* InterpreterAssembler::BytecodeOffset() {
   return bytecode_offset_.value();
 }
@@ -341,6 +406,14 @@
   return BytecodeUnsignedOperand(operand_index, operand_size);
 }
 
+Node* InterpreterAssembler::BytecodeOperandUImm(int operand_index) {
+  DCHECK_EQ(OperandType::kUImm,
+            Bytecodes::GetOperandType(bytecode_, operand_index));
+  OperandSize operand_size =
+      Bytecodes::GetOperandSize(bytecode_, operand_index, operand_scale());
+  return BytecodeUnsignedOperand(operand_index, operand_size);
+}
+
 Node* InterpreterAssembler::BytecodeOperandImm(int operand_index) {
   DCHECK_EQ(OperandType::kImm,
             Bytecodes::GetOperandType(bytecode_, operand_index));
@@ -460,6 +533,18 @@
   }
 }
 
+Node* InterpreterAssembler::IncrementCallCount(Node* type_feedback_vector,
+                                               Node* slot_id) {
+  Comment("increment call count");
+  Node* call_count_slot = IntPtrAdd(slot_id, IntPtrConstant(1));
+  Node* call_count =
+      LoadFixedArrayElement(type_feedback_vector, call_count_slot);
+  Node* new_count = SmiAdd(call_count, SmiTag(Int32Constant(1)));
+  // Count is Smi, so we don't need a write barrier.
+  return StoreFixedArrayElement(type_feedback_vector, call_count_slot,
+                                new_count, SKIP_WRITE_BARRIER);
+}
+
 Node* InterpreterAssembler::CallJSWithFeedback(Node* function, Node* context,
                                                Node* first_arg, Node* arg_count,
                                                Node* slot_id,
@@ -481,15 +566,16 @@
                 WeakCell::kValueOffset == Symbol::kHashFieldSlot);
 
   Variable return_value(this, MachineRepresentation::kTagged);
-  Label handle_monomorphic(this), extra_checks(this), end(this), call(this);
+  Label handle_monomorphic(this), extra_checks(this), end(this), call(this),
+      call_function(this), call_without_feedback(this);
 
   // Slot id of 0 is used to indicate no typefeedback is available. Call using
   // call builtin.
   STATIC_ASSERT(TypeFeedbackVector::kReservedIndexCount > 0);
   Node* is_feedback_unavailable = Word32Equal(slot_id, Int32Constant(0));
-  GotoIf(is_feedback_unavailable, &call);
+  GotoIf(is_feedback_unavailable, &call_without_feedback);
 
-  // The checks. First, does rdi match the recorded monomorphic target?
+  // The checks. First, does function match the recorded monomorphic target?
   Node* feedback_element = LoadFixedArrayElement(type_feedback_vector, slot_id);
   Node* feedback_value = LoadWeakCellValue(feedback_element);
   Node* is_monomorphic = WordEqual(function, feedback_value);
@@ -503,13 +589,7 @@
     GotoIf(is_smi, &extra_checks);
 
     // Increment the call count.
-    Node* call_count_slot = IntPtrAdd(slot_id, IntPtrConstant(1));
-    Node* call_count =
-        LoadFixedArrayElement(type_feedback_vector, call_count_slot);
-    Node* new_count = SmiAdd(call_count, SmiTag(Int32Constant(1)));
-    // Count is Smi, so we don't need a write barrier.
-    StoreFixedArrayElement(type_feedback_vector, call_count_slot, new_count,
-                           SKIP_WRITE_BARRIER);
+    IncrementCallCount(type_feedback_vector, slot_id);
 
     // Call using call function builtin.
     Callable callable = CodeFactory::InterpreterPushArgsAndCall(
@@ -523,12 +603,42 @@
 
   Bind(&extra_checks);
   {
-    Label check_initialized(this, Label::kDeferred), mark_megamorphic(this);
+    Label check_initialized(this, Label::kDeferred), mark_megamorphic(this),
+        check_allocation_site(this),
+        create_allocation_site(this, Label::kDeferred);
     // Check if it is a megamorphic target
     Node* is_megamorphic = WordEqual(
         feedback_element,
         HeapConstant(TypeFeedbackVector::MegamorphicSentinel(isolate())));
-    BranchIf(is_megamorphic, &call, &check_initialized);
+    BranchIf(is_megamorphic, &call, &check_allocation_site);
+
+    Bind(&check_allocation_site);
+    {
+      Node* is_allocation_site =
+          WordEqual(LoadMap(feedback_element),
+                    LoadRoot(Heap::kAllocationSiteMapRootIndex));
+      GotoUnless(is_allocation_site, &check_initialized);
+
+      // If it is not the Array() function, mark megamorphic.
+      Node* context_slot =
+          LoadFixedArrayElement(LoadNativeContext(context),
+                                Int32Constant(Context::ARRAY_FUNCTION_INDEX));
+      Node* is_array_function = WordEqual(context_slot, function);
+      GotoUnless(is_array_function, &mark_megamorphic);
+
+      // It is a monomorphic Array function. Increment the call count.
+      IncrementCallCount(type_feedback_vector, slot_id);
+
+      // Call ArrayConstructorStub.
+      Callable callable_call =
+          CodeFactory::InterpreterPushArgsAndConstructArray(isolate());
+      Node* code_target_call = HeapConstant(callable_call.code());
+      Node* ret_value =
+          CallStub(callable_call.descriptor(), code_target_call, context,
+                   arg_count, function, feedback_element, first_arg);
+      return_value.Bind(ret_value);
+      Goto(&end);
+    }
 
     Bind(&check_initialized);
     {
@@ -548,12 +658,12 @@
           WordEqual(instance_type, Int32Constant(JS_FUNCTION_TYPE));
       GotoUnless(is_js_function, &mark_megamorphic);
 
-      // Check that it is not the Array() function.
+      // Check if it is the Array() function.
       Node* context_slot =
           LoadFixedArrayElement(LoadNativeContext(context),
                                 Int32Constant(Context::ARRAY_FUNCTION_INDEX));
       Node* is_array_function = WordEqual(context_slot, function);
-      GotoIf(is_array_function, &mark_megamorphic);
+      GotoIf(is_array_function, &create_allocation_site);
 
       // Check if the function belongs to the same native context
       Node* native_context = LoadNativeContext(
@@ -562,23 +672,22 @@
           WordEqual(native_context, LoadNativeContext(context));
       GotoUnless(is_same_native_context, &mark_megamorphic);
 
-      // Initialize it to a monomorphic target.
-      Node* call_count_slot = IntPtrAdd(slot_id, IntPtrConstant(1));
-      // Count is Smi, so we don't need a write barrier.
-      StoreFixedArrayElement(type_feedback_vector, call_count_slot,
-                             SmiTag(Int32Constant(1)), SKIP_WRITE_BARRIER);
-
       CreateWeakCellInFeedbackVector(type_feedback_vector, SmiTag(slot_id),
                                      function);
 
       // Call using call function builtin.
-      Callable callable = CodeFactory::InterpreterPushArgsAndCall(
-          isolate(), tail_call_mode, CallableType::kJSFunction);
-      Node* code_target = HeapConstant(callable.code());
-      Node* ret_value = CallStub(callable.descriptor(), code_target, context,
-                                 arg_count, first_arg, function);
-      return_value.Bind(ret_value);
-      Goto(&end);
+      Goto(&call_function);
+    }
+
+    Bind(&create_allocation_site);
+    {
+      CreateAllocationSiteInFeedbackVector(type_feedback_vector,
+                                           SmiTag(slot_id));
+
+      // Call using CallFunction builtin. CallICs have a PREMONOMORPHIC state.
+      // They start collecting feedback only when a call is executed the second
+      // time. So, do not pass any feedback here.
+      Goto(&call_function);
     }
 
     Bind(&mark_megamorphic);
@@ -595,8 +704,37 @@
     }
   }
 
+  Bind(&call_function);
+  {
+    // Increment the call count.
+    IncrementCallCount(type_feedback_vector, slot_id);
+
+    Callable callable_call = CodeFactory::InterpreterPushArgsAndCall(
+        isolate(), tail_call_mode, CallableType::kJSFunction);
+    Node* code_target_call = HeapConstant(callable_call.code());
+    Node* ret_value = CallStub(callable_call.descriptor(), code_target_call,
+                               context, arg_count, first_arg, function);
+    return_value.Bind(ret_value);
+    Goto(&end);
+  }
+
   Bind(&call);
   {
+    // Increment the call count.
+    IncrementCallCount(type_feedback_vector, slot_id);
+
+    // Call using call builtin.
+    Callable callable_call = CodeFactory::InterpreterPushArgsAndCall(
+        isolate(), tail_call_mode, CallableType::kAny);
+    Node* code_target_call = HeapConstant(callable_call.code());
+    Node* ret_value = CallStub(callable_call.descriptor(), code_target_call,
+                               context, arg_count, first_arg, function);
+    return_value.Bind(ret_value);
+    Goto(&end);
+  }
+
+  Bind(&call_without_feedback);
+  {
     // Call using call builtin.
     Callable callable_call = CodeFactory::InterpreterPushArgsAndCall(
         isolate(), tail_call_mode, CallableType::kAny);
@@ -623,11 +761,169 @@
 
 Node* InterpreterAssembler::CallConstruct(Node* constructor, Node* context,
                                           Node* new_target, Node* first_arg,
-                                          Node* arg_count) {
-  Callable callable = CodeFactory::InterpreterPushArgsAndConstruct(isolate());
-  Node* code_target = HeapConstant(callable.code());
-  return CallStub(callable.descriptor(), code_target, context, arg_count,
-                  new_target, constructor, first_arg);
+                                          Node* arg_count, Node* slot_id,
+                                          Node* type_feedback_vector) {
+  Label call_construct(this), js_function(this), end(this);
+  Variable return_value(this, MachineRepresentation::kTagged);
+  Variable allocation_feedback(this, MachineRepresentation::kTagged);
+  allocation_feedback.Bind(UndefinedConstant());
+
+  // Slot id of 0 is used to indicate no type feedback is available.
+  STATIC_ASSERT(TypeFeedbackVector::kReservedIndexCount > 0);
+  Node* is_feedback_unavailable = Word32Equal(slot_id, Int32Constant(0));
+  GotoIf(is_feedback_unavailable, &call_construct);
+
+  // Check that the constructor is not a smi.
+  Node* is_smi = WordIsSmi(constructor);
+  GotoIf(is_smi, &call_construct);
+
+  // Check that constructor is a JSFunction.
+  Node* instance_type = LoadInstanceType(constructor);
+  Node* is_js_function =
+      WordEqual(instance_type, Int32Constant(JS_FUNCTION_TYPE));
+  BranchIf(is_js_function, &js_function, &call_construct);
+
+  Bind(&js_function);
+  {
+    // Cache the called function in a feedback vector slot. Cache states
+    // are uninitialized, monomorphic (indicated by a JSFunction), and
+    // megamorphic.
+    // TODO(mythria/v8:5210): Check if it is better to mark extra_checks as a
+    // deferred block so that call_construct_function will be scheduled.
+    Label extra_checks(this), call_construct_function(this);
+
+    Node* feedback_element =
+        LoadFixedArrayElement(type_feedback_vector, slot_id);
+    Node* feedback_value = LoadWeakCellValue(feedback_element);
+    Node* is_monomorphic = WordEqual(constructor, feedback_value);
+    BranchIf(is_monomorphic, &call_construct_function, &extra_checks);
+
+    Bind(&extra_checks);
+    {
+      Label mark_megamorphic(this), initialize(this),
+          check_allocation_site(this), check_initialized(this),
+          set_alloc_feedback_and_call(this);
+      {
+        // Check if it is a megamorphic target
+        Comment("check if megamorphic");
+        Node* is_megamorphic = WordEqual(
+            feedback_element,
+            HeapConstant(TypeFeedbackVector::MegamorphicSentinel(isolate())));
+        GotoIf(is_megamorphic, &call_construct_function);
+
+        Comment("check if weak cell");
+        Node* is_weak_cell = WordEqual(LoadMap(feedback_element),
+                                       LoadRoot(Heap::kWeakCellMapRootIndex));
+        GotoUnless(is_weak_cell, &check_allocation_site);
+        // If the weak cell is cleared, we have a new chance to become
+        // monomorphic.
+        Comment("check if weak cell is cleared");
+        Node* is_smi = WordIsSmi(feedback_value);
+        BranchIf(is_smi, &initialize, &mark_megamorphic);
+      }
+
+      Bind(&check_allocation_site);
+      {
+        Comment("check if it is an allocation site");
+        Node* is_allocation_site =
+            WordEqual(LoadObjectField(feedback_element, 0),
+                      LoadRoot(Heap::kAllocationSiteMapRootIndex));
+        GotoUnless(is_allocation_site, &check_initialized);
+
+        // Make sure the function is the Array() function
+        Node* context_slot =
+            LoadFixedArrayElement(LoadNativeContext(context),
+                                  Int32Constant(Context::ARRAY_FUNCTION_INDEX));
+        Node* is_array_function = WordEqual(context_slot, constructor);
+        BranchIf(is_array_function, &set_alloc_feedback_and_call,
+                 &mark_megamorphic);
+      }
+
+      Bind(&set_alloc_feedback_and_call);
+      {
+        allocation_feedback.Bind(feedback_element);
+        Goto(&call_construct_function);
+      }
+
+      Bind(&check_initialized);
+      {
+        // Check if it is uninitialized.
+        Comment("check if uninitialized");
+        Node* is_uninitialized = WordEqual(
+            feedback_element, LoadRoot(Heap::kuninitialized_symbolRootIndex));
+        BranchIf(is_uninitialized, &initialize, &mark_megamorphic);
+      }
+
+      Bind(&initialize);
+      {
+        Label create_weak_cell(this), create_allocation_site(this);
+        Comment("initialize the feedback element");
+        // Check that it is the Array() function.
+        Node* context_slot =
+            LoadFixedArrayElement(LoadNativeContext(context),
+                                  Int32Constant(Context::ARRAY_FUNCTION_INDEX));
+        Node* is_array_function = WordEqual(context_slot, constructor);
+        BranchIf(is_array_function, &create_allocation_site, &create_weak_cell);
+
+        Bind(&create_allocation_site);
+        {
+          Node* site = CreateAllocationSiteInFeedbackVector(
+              type_feedback_vector, SmiTag(slot_id));
+          allocation_feedback.Bind(site);
+          Goto(&call_construct_function);
+        }
+
+        Bind(&create_weak_cell);
+        {
+          CreateWeakCellInFeedbackVector(type_feedback_vector, SmiTag(slot_id),
+                                         constructor);
+          Goto(&call_construct_function);
+        }
+      }
+
+      Bind(&mark_megamorphic);
+      {
+        // MegamorphicSentinel is an immortal immovable object so
+        // write-barrier is not needed.
+        Comment("transition to megamorphic");
+        DCHECK(
+            Heap::RootIsImmortalImmovable(Heap::kmegamorphic_symbolRootIndex));
+        StoreFixedArrayElement(
+            type_feedback_vector, slot_id,
+            HeapConstant(TypeFeedbackVector::MegamorphicSentinel(isolate())),
+            SKIP_WRITE_BARRIER);
+        Goto(&call_construct_function);
+      }
+    }
+
+    Bind(&call_construct_function);
+    {
+      Comment("call using callConstructFunction");
+      IncrementCallCount(type_feedback_vector, slot_id);
+      Callable callable_function = CodeFactory::InterpreterPushArgsAndConstruct(
+          isolate(), CallableType::kJSFunction);
+      return_value.Bind(CallStub(callable_function.descriptor(),
+                                 HeapConstant(callable_function.code()),
+                                 context, arg_count, new_target, constructor,
+                                 allocation_feedback.value(), first_arg));
+      Goto(&end);
+    }
+  }
+
+  Bind(&call_construct);
+  {
+    Comment("call using callConstruct builtin");
+    Callable callable = CodeFactory::InterpreterPushArgsAndConstruct(
+        isolate(), CallableType::kAny);
+    Node* code_target = HeapConstant(callable.code());
+    return_value.Bind(CallStub(callable.descriptor(), code_target, context,
+                               arg_count, new_target, constructor,
+                               UndefinedConstant(), first_arg));
+    Goto(&end);
+  }
+
+  Bind(&end);
+  return return_value.value();
 }
 
 Node* InterpreterAssembler::CallRuntimeN(Node* function_id, Node* context,
@@ -651,6 +947,9 @@
 }
 
 void InterpreterAssembler::UpdateInterruptBudget(Node* weight) {
+  // TODO(rmcilroy): It might be worthwhile to only update the budget for
+  // backwards branches. Those are distinguishable by the {JumpLoop} bytecode.
+
   Label ok(this), interrupt_check(this, Label::kDeferred), end(this);
   Node* budget_offset =
       IntPtrConstant(BytecodeArray::kInterruptBudgetOffset - kHeapObjectTag);
diff --git a/src/interpreter/interpreter-assembler.h b/src/interpreter/interpreter-assembler.h
index b3fa42f..9dda20a 100644
--- a/src/interpreter/interpreter-assembler.h
+++ b/src/interpreter/interpreter-assembler.h
@@ -32,6 +32,9 @@
   // Returns the index immediate for bytecode operand |operand_index| in the
   // current bytecode.
   compiler::Node* BytecodeOperandIdx(int operand_index);
+  // Returns the UImm8 immediate for bytecode operand |operand_index| in the
+  // current bytecode.
+  compiler::Node* BytecodeOperandUImm(int operand_index);
   // Returns the Imm8 immediate for bytecode operand |operand_index| in the
   // current bytecode.
   compiler::Node* BytecodeOperandImm(int operand_index);
@@ -53,6 +56,15 @@
   compiler::Node* GetContext();
   void SetContext(compiler::Node* value);
 
+  // Context at |depth| in the context chain starting at |context|.
+  compiler::Node* GetContextAtDepth(compiler::Node* context,
+                                    compiler::Node* depth);
+
+  // Goto the given |target| if the context chain starting at |context| has any
+  // extensions up to the given |depth|.
+  void GotoIfHasContextExtensionUpToDepth(compiler::Node* context,
+                                          compiler::Node* depth, Label* target);
+
   // Number of registers.
   compiler::Node* RegisterCount();
 
@@ -92,6 +104,11 @@
   // Load the TypeFeedbackVector for the current function.
   compiler::Node* LoadTypeFeedbackVector();
 
+  // Increment the call count for a CALL_IC or construct call.
+  // The call count is located at feedback_vector[slot_id + 1].
+  compiler::Node* IncrementCallCount(compiler::Node* type_feedback_vector,
+                                     compiler::Node* slot_id);
+
   // Call JSFunction or Callable |function| with |arg_count|
   // arguments (not including receiver) and the first argument
   // located at |first_arg|. Type feedback is collected in the
@@ -120,7 +137,9 @@
                                 compiler::Node* context,
                                 compiler::Node* new_target,
                                 compiler::Node* first_arg,
-                                compiler::Node* arg_count);
+                                compiler::Node* arg_count,
+                                compiler::Node* slot_id,
+                                compiler::Node* type_feedback_vector);
 
   // Call runtime function with |arg_count| arguments and the first argument
   // located at |first_arg|.
diff --git a/src/interpreter/interpreter.cc b/src/interpreter/interpreter.cc
index 68f0342..4100302 100644
--- a/src/interpreter/interpreter.cc
+++ b/src/interpreter/interpreter.cc
@@ -9,6 +9,7 @@
 
 #include "src/ast/prettyprinter.h"
 #include "src/code-factory.h"
+#include "src/compilation-info.h"
 #include "src/compiler.h"
 #include "src/factory.h"
 #include "src/interpreter/bytecode-flags.h"
@@ -17,7 +18,7 @@
 #include "src/interpreter/interpreter-assembler.h"
 #include "src/interpreter/interpreter-intrinsics.h"
 #include "src/log.h"
-#include "src/zone.h"
+#include "src/zone/zone.h"
 
 namespace v8 {
 namespace internal {
@@ -150,14 +151,39 @@
 }
 
 InterpreterCompilationJob::InterpreterCompilationJob(CompilationInfo* info)
-    : CompilationJob(info, "Ignition"), generator_(info) {}
+    : CompilationJob(info->isolate(), info, "Ignition"), generator_(info) {}
 
 InterpreterCompilationJob::Status InterpreterCompilationJob::PrepareJobImpl() {
+  if (FLAG_print_bytecode || FLAG_print_ast) {
+    OFStream os(stdout);
+    std::unique_ptr<char[]> name = info()->GetDebugName();
+    os << "[generating bytecode for function: " << info()->GetDebugName().get()
+       << "]" << std::endl
+       << std::flush;
+  }
+
+#ifdef DEBUG
+  if (info()->parse_info() && FLAG_print_ast) {
+    OFStream os(stdout);
+    os << "--- AST ---" << std::endl
+       << AstPrinter(info()->isolate()).PrintProgram(info()->literal())
+       << std::endl
+       << std::flush;
+  }
+#endif  // DEBUG
+
   return SUCCEEDED;
 }
 
 InterpreterCompilationJob::Status InterpreterCompilationJob::ExecuteJobImpl() {
-  generator()->GenerateBytecode();
+  // TODO(5203): These timers aren't thread safe, move to using the CompilerJob
+  // timers.
+  RuntimeCallTimerScope runtimeTimer(info()->isolate(),
+                                     &RuntimeCallStats::CompileIgnition);
+  TimerEventScope<TimerEventCompileIgnition> timer(info()->isolate());
+  TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"), "V8.CompileIgnition");
+
+  generator()->GenerateBytecode(stack_limit());
 
   if (generator()->HasStackOverflow()) {
     return FAILED;
@@ -182,34 +208,8 @@
   return SUCCEEDED;
 }
 
-bool Interpreter::MakeBytecode(CompilationInfo* info) {
-  RuntimeCallTimerScope runtimeTimer(info->isolate(),
-                                     &RuntimeCallStats::CompileIgnition);
-  TimerEventScope<TimerEventCompileIgnition> timer(info->isolate());
-  TRACE_EVENT_RUNTIME_CALL_STATS_TRACING_SCOPED(
-      info->isolate(), &tracing::TraceEventStatsTable::CompileIgnition);
-
-  if (FLAG_print_bytecode || FLAG_print_ast) {
-    OFStream os(stdout);
-    std::unique_ptr<char[]> name = info->GetDebugName();
-    os << "[generating bytecode for function: " << info->GetDebugName().get()
-       << "]" << std::endl
-       << std::flush;
-  }
-
-#ifdef DEBUG
-  if (info->parse_info() && FLAG_print_ast) {
-    OFStream os(stdout);
-    os << "--- AST ---" << std::endl
-       << AstPrinter(info->isolate()).PrintProgram(info->literal()) << std::endl
-       << std::flush;
-  }
-#endif  // DEBUG
-
-  InterpreterCompilationJob job(info);
-  if (job.PrepareJob() != CompilationJob::SUCCEEDED) return false;
-  if (job.ExecuteJob() != CompilationJob::SUCCEEDED) return false;
-  return job.FinalizeJob() == CompilationJob::SUCCEEDED;
+CompilationJob* Interpreter::NewCompilationJob(CompilationInfo* info) {
+  return new InterpreterCompilationJob(info);
 }
 
 bool Interpreter::IsDispatchTableInitialized() {
@@ -421,16 +421,14 @@
   __ Dispatch();
 }
 
-Node* Interpreter::BuildLoadGlobal(Callable ic,
+Node* Interpreter::BuildLoadGlobal(Callable ic, Node* context,
+                                   Node* feedback_slot,
                                    InterpreterAssembler* assembler) {
   typedef LoadGlobalWithVectorDescriptor Descriptor;
-  // Get the global object.
-  Node* context = __ GetContext();
 
   // Load the global via the LoadGlobalIC.
   Node* code_target = __ HeapConstant(ic.code());
-  Node* raw_slot = __ BytecodeOperandIdx(0);
-  Node* smi_slot = __ SmiTag(raw_slot);
+  Node* smi_slot = __ SmiTag(feedback_slot);
   Node* type_feedback_vector = __ LoadTypeFeedbackVector();
   return __ CallStub(ic.descriptor(), code_target, context,
                      Arg(Descriptor::kSlot, smi_slot),
@@ -444,7 +442,11 @@
 void Interpreter::DoLdaGlobal(InterpreterAssembler* assembler) {
   Callable ic =
       CodeFactory::LoadGlobalICInOptimizedCode(isolate_, NOT_INSIDE_TYPEOF);
-  Node* result = BuildLoadGlobal(ic, assembler);
+
+  Node* context = __ GetContext();
+
+  Node* raw_slot = __ BytecodeOperandIdx(0);
+  Node* result = BuildLoadGlobal(ic, context, raw_slot, assembler);
   __ SetAccumulator(result);
   __ Dispatch();
 }
@@ -456,7 +458,11 @@
 void Interpreter::DoLdrGlobal(InterpreterAssembler* assembler) {
   Callable ic =
       CodeFactory::LoadGlobalICInOptimizedCode(isolate_, NOT_INSIDE_TYPEOF);
-  Node* result = BuildLoadGlobal(ic, assembler);
+
+  Node* context = __ GetContext();
+
+  Node* raw_slot = __ BytecodeOperandIdx(0);
+  Node* result = BuildLoadGlobal(ic, context, raw_slot, assembler);
   Node* destination = __ BytecodeOperandReg(1);
   __ StoreRegister(result, destination);
   __ Dispatch();
@@ -469,7 +475,11 @@
 void Interpreter::DoLdaGlobalInsideTypeof(InterpreterAssembler* assembler) {
   Callable ic =
       CodeFactory::LoadGlobalICInOptimizedCode(isolate_, INSIDE_TYPEOF);
-  Node* result = BuildLoadGlobal(ic, assembler);
+
+  Node* context = __ GetContext();
+
+  Node* raw_slot = __ BytecodeOperandIdx(0);
+  Node* result = BuildLoadGlobal(ic, context, raw_slot, assembler);
   __ SetAccumulator(result);
   __ Dispatch();
 }
@@ -520,44 +530,51 @@
   Node* reg_index = __ BytecodeOperandReg(0);
   Node* context = __ LoadRegister(reg_index);
   Node* slot_index = __ BytecodeOperandIdx(1);
-  return __ LoadContextSlot(context, slot_index);
+  Node* depth = __ BytecodeOperandUImm(2);
+  Node* slot_context = __ GetContextAtDepth(context, depth);
+  return __ LoadContextSlot(slot_context, slot_index);
 }
 
-// LdaContextSlot <context> <slot_index>
+// LdaContextSlot <context> <slot_index> <depth>
 //
-// Load the object in |slot_index| of |context| into the accumulator.
+// Load the object in |slot_index| of the context at |depth| in the context
+// chain starting at |context| into the accumulator.
 void Interpreter::DoLdaContextSlot(InterpreterAssembler* assembler) {
   Node* result = BuildLoadContextSlot(assembler);
   __ SetAccumulator(result);
   __ Dispatch();
 }
 
-// LdrContextSlot <context> <slot_index> <reg>
+// LdrContextSlot <context> <slot_index> <depth> <reg>
 //
-// Load the object in <slot_index> of <context> into register <reg>.
+// Load the object in |slot_index| of the context at |depth| in the context
+// chain of |context| into register |reg|.
 void Interpreter::DoLdrContextSlot(InterpreterAssembler* assembler) {
   Node* result = BuildLoadContextSlot(assembler);
-  Node* destination = __ BytecodeOperandReg(2);
+  Node* destination = __ BytecodeOperandReg(3);
   __ StoreRegister(result, destination);
   __ Dispatch();
 }
 
-// StaContextSlot <context> <slot_index>
+// StaContextSlot <context> <slot_index> <depth>
 //
-// Stores the object in the accumulator into |slot_index| of |context|.
+// Stores the object in the accumulator into |slot_index| of the context at
+// |depth| in the context chain starting at |context|.
 void Interpreter::DoStaContextSlot(InterpreterAssembler* assembler) {
   Node* value = __ GetAccumulator();
   Node* reg_index = __ BytecodeOperandReg(0);
   Node* context = __ LoadRegister(reg_index);
   Node* slot_index = __ BytecodeOperandIdx(1);
-  __ StoreContextSlot(context, slot_index, value);
+  Node* depth = __ BytecodeOperandUImm(2);
+  Node* slot_context = __ GetContextAtDepth(context, depth);
+  __ StoreContextSlot(slot_context, slot_index, value);
   __ Dispatch();
 }
 
 void Interpreter::DoLdaLookupSlot(Runtime::FunctionId function_id,
                                   InterpreterAssembler* assembler) {
-  Node* index = __ BytecodeOperandIdx(0);
-  Node* name = __ LoadConstantPoolEntry(index);
+  Node* name_index = __ BytecodeOperandIdx(0);
+  Node* name = __ LoadConstantPoolEntry(name_index);
   Node* context = __ GetContext();
   Node* result = __ CallRuntime(function_id, context, name);
   __ SetAccumulator(result);
@@ -580,6 +597,103 @@
   DoLdaLookupSlot(Runtime::kLoadLookupSlotInsideTypeof, assembler);
 }
 
+void Interpreter::DoLdaLookupContextSlot(Runtime::FunctionId function_id,
+                                         InterpreterAssembler* assembler) {
+  Node* context = __ GetContext();
+  Node* name_index = __ BytecodeOperandIdx(0);
+  Node* slot_index = __ BytecodeOperandIdx(1);
+  Node* depth = __ BytecodeOperandUImm(2);
+
+  Label slowpath(assembler, Label::kDeferred);
+
+  // Check for context extensions to allow the fast path.
+  __ GotoIfHasContextExtensionUpToDepth(context, depth, &slowpath);
+
+  // Fast path does a normal load context.
+  {
+    Node* slot_context = __ GetContextAtDepth(context, depth);
+    Node* result = __ LoadContextSlot(slot_context, slot_index);
+    __ SetAccumulator(result);
+    __ Dispatch();
+  }
+
+  // Slow path when we have to call out to the runtime.
+  __ Bind(&slowpath);
+  {
+    Node* name = __ LoadConstantPoolEntry(name_index);
+    Node* result = __ CallRuntime(function_id, context, name);
+    __ SetAccumulator(result);
+    __ Dispatch();
+  }
+}
+
+// LdaLookupContextSlot <name_index> <slot_index> <depth>
+//
+// Lookup the object with the name in constant pool entry |name_index|
+// dynamically.
+void Interpreter::DoLdaLookupContextSlot(InterpreterAssembler* assembler) {
+  DoLdaLookupContextSlot(Runtime::kLoadLookupSlot, assembler);
+}
+
+// LdaLookupContextSlotInsideTypeof <name_index> <slot_index> <depth>
+//
+// Lookup the object with the name in constant pool entry |name_index|
+// dynamically without causing a NoReferenceError.
+void Interpreter::DoLdaLookupContextSlotInsideTypeof(
+    InterpreterAssembler* assembler) {
+  DoLdaLookupContextSlot(Runtime::kLoadLookupSlotInsideTypeof, assembler);
+}
+
+void Interpreter::DoLdaLookupGlobalSlot(Runtime::FunctionId function_id,
+                                        InterpreterAssembler* assembler) {
+  Node* context = __ GetContext();
+  Node* name_index = __ BytecodeOperandIdx(0);
+  Node* feedback_slot = __ BytecodeOperandIdx(1);
+  Node* depth = __ BytecodeOperandUImm(2);
+
+  Label slowpath(assembler, Label::kDeferred);
+
+  // Check for context extensions to allow the fast path
+  __ GotoIfHasContextExtensionUpToDepth(context, depth, &slowpath);
+
+  // Fast path does a normal load global
+  {
+    Callable ic = CodeFactory::LoadGlobalICInOptimizedCode(
+        isolate_, function_id == Runtime::kLoadLookupSlotInsideTypeof
+                      ? INSIDE_TYPEOF
+                      : NOT_INSIDE_TYPEOF);
+    Node* result = BuildLoadGlobal(ic, context, feedback_slot, assembler);
+    __ SetAccumulator(result);
+    __ Dispatch();
+  }
+
+  // Slow path when we have to call out to the runtime
+  __ Bind(&slowpath);
+  {
+    Node* name = __ LoadConstantPoolEntry(name_index);
+    Node* result = __ CallRuntime(function_id, context, name);
+    __ SetAccumulator(result);
+    __ Dispatch();
+  }
+}
+
+// LdaLookupGlobalSlot <name_index> <feedback_slot> <depth>
+//
+// Lookup the object with the name in constant pool entry |name_index|
+// dynamically.
+void Interpreter::DoLdaLookupGlobalSlot(InterpreterAssembler* assembler) {
+  DoLdaLookupGlobalSlot(Runtime::kLoadLookupSlot, assembler);
+}
+
+// LdaLookupGlobalSlotInsideTypeof <name_index> <feedback_slot> <depth>
+//
+// Lookup the object with the name in constant pool entry |name_index|
+// dynamically without causing a NoReferenceError.
+void Interpreter::DoLdaLookupGlobalSlotInsideTypeof(
+    InterpreterAssembler* assembler) {
+  DoLdaLookupGlobalSlot(Runtime::kLoadLookupSlotInsideTypeof, assembler);
+}
+
 void Interpreter::DoStaLookupSlot(LanguageMode language_mode,
                                   InterpreterAssembler* assembler) {
   Node* value = __ GetAccumulator();
@@ -816,6 +930,80 @@
   __ Dispatch();
 }
 
+template <class Generator>
+void Interpreter::DoCompareOpWithFeedback(InterpreterAssembler* assembler) {
+  Node* reg_index = __ BytecodeOperandReg(0);
+  Node* lhs = __ LoadRegister(reg_index);
+  Node* rhs = __ GetAccumulator();
+  Node* context = __ GetContext();
+  Node* slot_index = __ BytecodeOperandIdx(1);
+  Node* type_feedback_vector = __ LoadTypeFeedbackVector();
+
+  // TODO(interpreter): the only reason this check is here is because we
+  // sometimes emit comparisons that shouldn't collect feedback (e.g.
+  // try-finally blocks and generators), and we could get rid of this by
+  // introducing Smi equality tests.
+  Label skip_feedback_update(assembler);
+  __ GotoIf(__ WordEqual(slot_index, __ IntPtrConstant(0)),
+            &skip_feedback_update);
+
+  Variable var_type_feedback(assembler, MachineRepresentation::kWord32);
+  Label lhs_is_smi(assembler), lhs_is_not_smi(assembler),
+      gather_rhs_type(assembler), do_compare(assembler);
+  __ Branch(__ WordIsSmi(lhs), &lhs_is_smi, &lhs_is_not_smi);
+
+  __ Bind(&lhs_is_smi);
+  var_type_feedback.Bind(
+      __ Int32Constant(CompareOperationFeedback::kSignedSmall));
+  __ Goto(&gather_rhs_type);
+
+  __ Bind(&lhs_is_not_smi);
+  {
+    Label lhs_is_number(assembler), lhs_is_not_number(assembler);
+    Node* lhs_map = __ LoadMap(lhs);
+    __ Branch(__ WordEqual(lhs_map, __ HeapNumberMapConstant()), &lhs_is_number,
+              &lhs_is_not_number);
+
+    __ Bind(&lhs_is_number);
+    var_type_feedback.Bind(__ Int32Constant(CompareOperationFeedback::kNumber));
+    __ Goto(&gather_rhs_type);
+
+    __ Bind(&lhs_is_not_number);
+    var_type_feedback.Bind(__ Int32Constant(CompareOperationFeedback::kAny));
+    __ Goto(&do_compare);
+  }
+
+  __ Bind(&gather_rhs_type);
+  {
+    Label rhs_is_smi(assembler);
+    __ GotoIf(__ WordIsSmi(rhs), &rhs_is_smi);
+
+    Node* rhs_map = __ LoadMap(rhs);
+    Node* rhs_type =
+        __ Select(__ WordEqual(rhs_map, __ HeapNumberMapConstant()),
+                  __ Int32Constant(CompareOperationFeedback::kNumber),
+                  __ Int32Constant(CompareOperationFeedback::kAny));
+    var_type_feedback.Bind(__ Word32Or(var_type_feedback.value(), rhs_type));
+    __ Goto(&do_compare);
+
+    __ Bind(&rhs_is_smi);
+    var_type_feedback.Bind(
+        __ Word32Or(var_type_feedback.value(),
+                    __ Int32Constant(CompareOperationFeedback::kSignedSmall)));
+    __ Goto(&do_compare);
+  }
+
+  __ Bind(&do_compare);
+  __ UpdateFeedback(var_type_feedback.value(), type_feedback_vector,
+                    slot_index);
+  __ Goto(&skip_feedback_update);
+
+  __ Bind(&skip_feedback_update);
+  Node* result = Generator::Generate(assembler, lhs, rhs, context);
+  __ SetAccumulator(result);
+  __ Dispatch();
+}
+
 // Add <src>
 //
 // Add register <src> to accumulator.
@@ -1227,25 +1415,29 @@
 
 // ToName
 //
-// Cast the object referenced by the accumulator to a name.
+// Convert the object referenced by the accumulator to a name.
 void Interpreter::DoToName(InterpreterAssembler* assembler) {
-  Node* result = BuildUnaryOp(CodeFactory::ToName(isolate_), assembler);
+  Node* object = __ GetAccumulator();
+  Node* context = __ GetContext();
+  Node* result = __ ToName(context, object);
   __ StoreRegister(result, __ BytecodeOperandReg(0));
   __ Dispatch();
 }
 
 // ToNumber
 //
-// Cast the object referenced by the accumulator to a number.
+// Convert the object referenced by the accumulator to a number.
 void Interpreter::DoToNumber(InterpreterAssembler* assembler) {
-  Node* result = BuildUnaryOp(CodeFactory::ToNumber(isolate_), assembler);
+  Node* object = __ GetAccumulator();
+  Node* context = __ GetContext();
+  Node* result = __ ToNumber(context, object);
   __ StoreRegister(result, __ BytecodeOperandReg(0));
   __ Dispatch();
 }
 
 // ToObject
 //
-// Cast the object referenced by the accumulator to a JSObject.
+// Convert the object referenced by the accumulator to a JSReceiver.
 void Interpreter::DoToObject(InterpreterAssembler* assembler) {
   Node* result = BuildUnaryOp(CodeFactory::ToObject(isolate_), assembler);
   __ StoreRegister(result, __ BytecodeOperandReg(0));
@@ -1395,7 +1587,12 @@
   DoJSCall(assembler, TailCallMode::kAllow);
 }
 
-void Interpreter::DoCallRuntimeCommon(InterpreterAssembler* assembler) {
+// CallRuntime <function_id> <first_arg> <arg_count>
+//
+// Call the runtime function |function_id| with the first argument in
+// register |first_arg| and |arg_count| arguments in subsequent
+// registers.
+void Interpreter::DoCallRuntime(InterpreterAssembler* assembler) {
   Node* function_id = __ BytecodeOperandRuntimeId(0);
   Node* first_arg_reg = __ BytecodeOperandReg(1);
   Node* first_arg = __ RegisterLocation(first_arg_reg);
@@ -1406,15 +1603,6 @@
   __ Dispatch();
 }
 
-// CallRuntime <function_id> <first_arg> <arg_count>
-//
-// Call the runtime function |function_id| with the first argument in
-// register |first_arg| and |arg_count| arguments in subsequent
-// registers.
-void Interpreter::DoCallRuntime(InterpreterAssembler* assembler) {
-  DoCallRuntimeCommon(assembler);
-}
-
 // InvokeIntrinsic <function_id> <first_arg> <arg_count>
 //
 // Implements the semantic equivalent of calling the runtime function
@@ -1432,7 +1620,13 @@
   __ Dispatch();
 }
 
-void Interpreter::DoCallRuntimeForPairCommon(InterpreterAssembler* assembler) {
+// CallRuntimeForPair <function_id> <first_arg> <arg_count> <first_return>
+//
+// Call the runtime function |function_id| which returns a pair, with the
+// first argument in register |first_arg| and |arg_count| arguments in
+// subsequent registers. Returns the result in <first_return> and
+// <first_return + 1>
+void Interpreter::DoCallRuntimeForPair(InterpreterAssembler* assembler) {
   // Call the runtime function.
   Node* function_id = __ BytecodeOperandRuntimeId(0);
   Node* first_arg_reg = __ BytecodeOperandReg(1);
@@ -1452,17 +1646,11 @@
   __ Dispatch();
 }
 
-// CallRuntimeForPair <function_id> <first_arg> <arg_count> <first_return>
+// CallJSRuntime <context_index> <receiver> <arg_count>
 //
-// Call the runtime function |function_id| which returns a pair, with the
-// first argument in register |first_arg| and |arg_count| arguments in
-// subsequent registers. Returns the result in <first_return> and
-// <first_return + 1>
-void Interpreter::DoCallRuntimeForPair(InterpreterAssembler* assembler) {
-  DoCallRuntimeForPairCommon(assembler);
-}
-
-void Interpreter::DoCallJSRuntimeCommon(InterpreterAssembler* assembler) {
+// Call the JS runtime function that has the |context_index| with the receiver
+// in register |receiver| and |arg_count| arguments in subsequent registers.
+void Interpreter::DoCallJSRuntime(InterpreterAssembler* assembler) {
   Node* context_index = __ BytecodeOperandIdx(0);
   Node* receiver_reg = __ BytecodeOperandReg(1);
   Node* first_arg = __ RegisterLocation(receiver_reg);
@@ -1483,29 +1671,6 @@
   __ Dispatch();
 }
 
-// CallJSRuntime <context_index> <receiver> <arg_count>
-//
-// Call the JS runtime function that has the |context_index| with the receiver
-// in register |receiver| and |arg_count| arguments in subsequent registers.
-void Interpreter::DoCallJSRuntime(InterpreterAssembler* assembler) {
-  DoCallJSRuntimeCommon(assembler);
-}
-
-void Interpreter::DoCallConstruct(InterpreterAssembler* assembler) {
-  Callable ic = CodeFactory::InterpreterPushArgsAndConstruct(isolate_);
-  Node* new_target = __ GetAccumulator();
-  Node* constructor_reg = __ BytecodeOperandReg(0);
-  Node* constructor = __ LoadRegister(constructor_reg);
-  Node* first_arg_reg = __ BytecodeOperandReg(1);
-  Node* first_arg = __ RegisterLocation(first_arg_reg);
-  Node* args_count = __ BytecodeOperandCount(2);
-  Node* context = __ GetContext();
-  Node* result =
-      __ CallConstruct(constructor, context, new_target, first_arg, args_count);
-  __ SetAccumulator(result);
-  __ Dispatch();
-}
-
 // New <constructor> <first_arg> <arg_count>
 //
 // Call operator new with |constructor| and the first argument in
@@ -1513,42 +1678,55 @@
 // registers. The new.target is in the accumulator.
 //
 void Interpreter::DoNew(InterpreterAssembler* assembler) {
-  DoCallConstruct(assembler);
+  Callable ic = CodeFactory::InterpreterPushArgsAndConstruct(isolate_);
+  Node* new_target = __ GetAccumulator();
+  Node* constructor_reg = __ BytecodeOperandReg(0);
+  Node* constructor = __ LoadRegister(constructor_reg);
+  Node* first_arg_reg = __ BytecodeOperandReg(1);
+  Node* first_arg = __ RegisterLocation(first_arg_reg);
+  Node* args_count = __ BytecodeOperandCount(2);
+  Node* slot_id = __ BytecodeOperandIdx(3);
+  Node* type_feedback_vector = __ LoadTypeFeedbackVector();
+  Node* context = __ GetContext();
+  Node* result = __ CallConstruct(constructor, context, new_target, first_arg,
+                                  args_count, slot_id, type_feedback_vector);
+  __ SetAccumulator(result);
+  __ Dispatch();
 }
 
 // TestEqual <src>
 //
 // Test if the value in the <src> register equals the accumulator.
 void Interpreter::DoTestEqual(InterpreterAssembler* assembler) {
-  DoBinaryOp<EqualStub>(assembler);
+  DoCompareOpWithFeedback<EqualStub>(assembler);
 }
 
 // TestNotEqual <src>
 //
 // Test if the value in the <src> register is not equal to the accumulator.
 void Interpreter::DoTestNotEqual(InterpreterAssembler* assembler) {
-  DoBinaryOp<NotEqualStub>(assembler);
+  DoCompareOpWithFeedback<NotEqualStub>(assembler);
 }
 
 // TestEqualStrict <src>
 //
 // Test if the value in the <src> register is strictly equal to the accumulator.
 void Interpreter::DoTestEqualStrict(InterpreterAssembler* assembler) {
-  DoBinaryOp<StrictEqualStub>(assembler);
+  DoCompareOpWithFeedback<StrictEqualStub>(assembler);
 }
 
 // TestLessThan <src>
 //
 // Test if the value in the <src> register is less than the accumulator.
 void Interpreter::DoTestLessThan(InterpreterAssembler* assembler) {
-  DoBinaryOp<LessThanStub>(assembler);
+  DoCompareOpWithFeedback<LessThanStub>(assembler);
 }
 
 // TestGreaterThan <src>
 //
 // Test if the value in the <src> register is greater than the accumulator.
 void Interpreter::DoTestGreaterThan(InterpreterAssembler* assembler) {
-  DoBinaryOp<GreaterThanStub>(assembler);
+  DoCompareOpWithFeedback<GreaterThanStub>(assembler);
 }
 
 // TestLessThanOrEqual <src>
@@ -1556,7 +1734,7 @@
 // Test if the value in the <src> register is less than or equal to the
 // accumulator.
 void Interpreter::DoTestLessThanOrEqual(InterpreterAssembler* assembler) {
-  DoBinaryOp<LessThanOrEqualStub>(assembler);
+  DoCompareOpWithFeedback<LessThanOrEqualStub>(assembler);
 }
 
 // TestGreaterThanOrEqual <src>
@@ -1564,7 +1742,7 @@
 // Test if the value in the <src> register is greater than or equal to the
 // accumulator.
 void Interpreter::DoTestGreaterThanOrEqual(InterpreterAssembler* assembler) {
-  DoBinaryOp<GreaterThanOrEqualStub>(assembler);
+  DoCompareOpWithFeedback<GreaterThanOrEqualStub>(assembler);
 }
 
 // TestIn <src>
@@ -1783,6 +1961,35 @@
   __ JumpIfWordNotEqual(accumulator, the_hole_value, relative_jump);
 }
 
+// JumpLoop <imm> <loop_depth>
+//
+// Jump by number of bytes represented by the immediate operand |imm|. Also
+// performs a loop nesting check and potentially triggers OSR in case the
+// current OSR level matches (or exceeds) the specified |loop_depth|.
+void Interpreter::DoJumpLoop(InterpreterAssembler* assembler) {
+  Node* relative_jump = __ BytecodeOperandImm(0);
+  Node* loop_depth = __ BytecodeOperandImm(1);
+  Node* osr_level = __ LoadOSRNestingLevel();
+
+  // Check if OSR points at the given {loop_depth} are armed by comparing it to
+  // the current {osr_level} loaded from the header of the BytecodeArray.
+  Label ok(assembler), osr_armed(assembler, Label::kDeferred);
+  Node* condition = __ Int32GreaterThanOrEqual(loop_depth, osr_level);
+  __ Branch(condition, &ok, &osr_armed);
+
+  __ Bind(&ok);
+  __ Jump(relative_jump);
+
+  __ Bind(&osr_armed);
+  {
+    Callable callable = CodeFactory::InterpreterOnStackReplacement(isolate_);
+    Node* target = __ HeapConstant(callable.code());
+    Node* context = __ GetContext();
+    __ CallStub(callable.descriptor(), target, context);
+    __ Jump(relative_jump);
+  }
+}
+
 // CreateRegExpLiteral <pattern_idx> <literal_idx> <flags>
 //
 // Creates a regular expression literal for literal index <literal_idx> with
@@ -1804,21 +2011,47 @@
 
 // CreateArrayLiteral <element_idx> <literal_idx> <flags>
 //
-// Creates an array literal for literal index <literal_idx> with flags <flags>
-// and constant elements in <element_idx>.
+// Creates an array literal for literal index <literal_idx> with
+// CreateArrayLiteral flags <flags> and constant elements in <element_idx>.
 void Interpreter::DoCreateArrayLiteral(InterpreterAssembler* assembler) {
-  Node* index = __ BytecodeOperandIdx(0);
-  Node* constant_elements = __ LoadConstantPoolEntry(index);
   Node* literal_index_raw = __ BytecodeOperandIdx(1);
   Node* literal_index = __ SmiTag(literal_index_raw);
-  Node* flags_raw = __ BytecodeOperandFlag(2);
-  Node* flags = __ SmiTag(flags_raw);
   Node* closure = __ LoadRegister(Register::function_closure());
   Node* context = __ GetContext();
-  Node* result = __ CallRuntime(Runtime::kCreateArrayLiteral, context, closure,
-                                literal_index, constant_elements, flags);
-  __ SetAccumulator(result);
-  __ Dispatch();
+  Node* bytecode_flags = __ BytecodeOperandFlag(2);
+
+  Label fast_shallow_clone(assembler),
+      call_runtime(assembler, Label::kDeferred);
+  Node* use_fast_shallow_clone = __ Word32And(
+      bytecode_flags,
+      __ Int32Constant(CreateArrayLiteralFlags::FastShallowCloneBit::kMask));
+  __ BranchIf(use_fast_shallow_clone, &fast_shallow_clone, &call_runtime);
+
+  __ Bind(&fast_shallow_clone);
+  {
+    DCHECK(FLAG_allocation_site_pretenuring);
+    Node* result = FastCloneShallowArrayStub::Generate(
+        assembler, closure, literal_index, context, &call_runtime,
+        TRACK_ALLOCATION_SITE);
+    __ SetAccumulator(result);
+    __ Dispatch();
+  }
+
+  __ Bind(&call_runtime);
+  {
+    STATIC_ASSERT(CreateArrayLiteralFlags::FlagsBits::kShift == 0);
+    Node* flags_raw = __ Word32And(
+        bytecode_flags,
+        __ Int32Constant(CreateArrayLiteralFlags::FlagsBits::kMask));
+    Node* flags = __ SmiTag(flags_raw);
+    Node* index = __ BytecodeOperandIdx(0);
+    Node* constant_elements = __ LoadConstantPoolEntry(index);
+    Node* result =
+        __ CallRuntime(Runtime::kCreateArrayLiteral, context, closure,
+                       literal_index, constant_elements, flags);
+    __ SetAccumulator(result);
+    __ Dispatch();
+  }
 }
 
 // CreateObjectLiteral <element_idx> <literal_idx> <flags>
@@ -1915,19 +2148,22 @@
   __ Dispatch();
 }
 
-// CreateCatchContext <exception> <index>
+// CreateCatchContext <exception> <name_idx> <scope_info_idx>
 //
 // Creates a new context for a catch block with the |exception| in a register,
-// the variable name at |index| and the closure in the accumulator.
+// the variable name at |name_idx|, the ScopeInfo at |scope_info_idx|, and the
+// closure in the accumulator.
 void Interpreter::DoCreateCatchContext(InterpreterAssembler* assembler) {
   Node* exception_reg = __ BytecodeOperandReg(0);
   Node* exception = __ LoadRegister(exception_reg);
-  Node* index = __ BytecodeOperandIdx(1);
-  Node* name = __ LoadConstantPoolEntry(index);
+  Node* name_idx = __ BytecodeOperandIdx(1);
+  Node* name = __ LoadConstantPoolEntry(name_idx);
+  Node* scope_info_idx = __ BytecodeOperandIdx(2);
+  Node* scope_info = __ LoadConstantPoolEntry(scope_info_idx);
   Node* closure = __ GetAccumulator();
   Node* context = __ GetContext();
   __ SetAccumulator(__ CallRuntime(Runtime::kPushCatchContext, context, name,
-                                   exception, closure));
+                                   exception, scope_info, closure));
   __ Dispatch();
 }
 
@@ -1936,24 +2172,27 @@
 // Creates a new context with number of |slots| for the function closure.
 void Interpreter::DoCreateFunctionContext(InterpreterAssembler* assembler) {
   Node* closure = __ LoadRegister(Register::function_closure());
-  Node* slots = __ BytecodeOperandIdx(0);
+  Node* slots = __ BytecodeOperandUImm(0);
   Node* context = __ GetContext();
   __ SetAccumulator(
       FastNewFunctionContextStub::Generate(assembler, closure, slots, context));
   __ Dispatch();
 }
 
-// CreateWithContext <register>
+// CreateWithContext <register> <scope_info_idx>
 //
-// Creates a new context for a with-statement with the object in |register| and
-// the closure in the accumulator.
+// Creates a new context with the ScopeInfo at |scope_info_idx| for a
+// with-statement with the object in |register| and the closure in the
+// accumulator.
 void Interpreter::DoCreateWithContext(InterpreterAssembler* assembler) {
   Node* reg_index = __ BytecodeOperandReg(0);
   Node* object = __ LoadRegister(reg_index);
+  Node* scope_info_idx = __ BytecodeOperandIdx(1);
+  Node* scope_info = __ LoadConstantPoolEntry(scope_info_idx);
   Node* closure = __ GetAccumulator();
   Node* context = __ GetContext();
-  __ SetAccumulator(
-      __ CallRuntime(Runtime::kPushWithContext, context, object, closure));
+  __ SetAccumulator(__ CallRuntime(Runtime::kPushWithContext, context, object,
+                                   scope_info, closure));
   __ Dispatch();
 }
 
@@ -2047,32 +2286,6 @@
   }
 }
 
-// OsrPoll <loop_depth>
-//
-// Performs a loop nesting check and potentially triggers OSR.
-void Interpreter::DoOsrPoll(InterpreterAssembler* assembler) {
-  Node* loop_depth = __ BytecodeOperandImm(0);
-  Node* osr_level = __ LoadOSRNestingLevel();
-
-  // Check if OSR points at the given {loop_depth} are armed by comparing it to
-  // the current {osr_level} loaded from the header of the BytecodeArray.
-  Label ok(assembler), osr_armed(assembler, Label::kDeferred);
-  Node* condition = __ Int32GreaterThanOrEqual(loop_depth, osr_level);
-  __ Branch(condition, &ok, &osr_armed);
-
-  __ Bind(&ok);
-  __ Dispatch();
-
-  __ Bind(&osr_armed);
-  {
-    Callable callable = CodeFactory::InterpreterOnStackReplacement(isolate_);
-    Node* target = __ HeapConstant(callable.code());
-    Node* context = __ GetContext();
-    __ CallStub(callable.descriptor(), target, context);
-    __ Dispatch();
-  }
-}
-
 // Throw
 //
 // Throws the exception in the accumulator.
@@ -2158,9 +2371,8 @@
   if (FLAG_debug_code) {
     Label already_receiver(assembler), abort(assembler);
     Node* instance_type = __ LoadInstanceType(receiver);
-    Node* first_receiver_type = __ Int32Constant(FIRST_JS_RECEIVER_TYPE);
-    __ BranchIfInt32GreaterThanOrEqual(instance_type, first_receiver_type,
-                                       &already_receiver, &abort);
+    __ Branch(__ IsJSReceiverInstanceType(instance_type), &already_receiver,
+              &abort);
     __ Bind(&abort);
     {
       __ Abort(kExpectedJSReceiver);
@@ -2260,10 +2472,10 @@
   }
 }
 
-// ForInDone <index> <cache_length>
+// ForInContinue <index> <cache_length>
 //
-// Returns true if the end of the enumerable properties has been reached.
-void Interpreter::DoForInDone(InterpreterAssembler* assembler) {
+// Returns false if the end of the enumerable properties has been reached.
+void Interpreter::DoForInContinue(InterpreterAssembler* assembler) {
   Node* index_reg = __ BytecodeOperandReg(0);
   Node* index = __ LoadRegister(index_reg);
   Node* cache_length_reg = __ BytecodeOperandReg(1);
@@ -2274,12 +2486,12 @@
   __ BranchIfWordEqual(index, cache_length, &if_true, &if_false);
   __ Bind(&if_true);
   {
-    __ SetAccumulator(__ BooleanConstant(true));
+    __ SetAccumulator(__ BooleanConstant(false));
     __ Goto(&end);
   }
   __ Bind(&if_false);
   {
-    __ SetAccumulator(__ BooleanConstant(false));
+    __ SetAccumulator(__ BooleanConstant(true));
     __ Goto(&end);
   }
   __ Bind(&end);
diff --git a/src/interpreter/interpreter.h b/src/interpreter/interpreter.h
index bbd0102..b646bf8 100644
--- a/src/interpreter/interpreter.h
+++ b/src/interpreter/interpreter.h
@@ -22,6 +22,7 @@
 class Isolate;
 class Callable;
 class CompilationInfo;
+class CompilationJob;
 
 namespace compiler {
 class Node;
@@ -42,8 +43,8 @@
   // Returns the interrupt budget which should be used for the profiler counter.
   static int InterruptBudget();
 
-  // Generate bytecode for |info|.
-  static bool MakeBytecode(CompilationInfo* info);
+  // Creates a compilation job which will generate bytecode for |info|.
+  static CompilationJob* NewCompilationJob(CompilationInfo* info);
 
   // Return bytecode handler for |bytecode|.
   Code* GetBytecodeHandler(Bytecode bytecode, OperandScale operand_scale);
@@ -55,7 +56,7 @@
   void TraceCodegen(Handle<Code> code);
   const char* LookupNameOfBytecodeHandler(Code* code);
 
-  Local<v8::Object> GetDispatchCountersObject();
+  V8_EXPORT_PRIVATE Local<v8::Object> GetDispatchCountersObject();
 
   Address dispatch_table_address() {
     return reinterpret_cast<Address>(&dispatch_table_[0]);
@@ -83,6 +84,11 @@
   template <class Generator>
   void DoBinaryOpWithFeedback(InterpreterAssembler* assembler);
 
+  // Generates code to perform the comparison via |Generator| while gathering
+  // type feedback.
+  template <class Generator>
+  void DoCompareOpWithFeedback(InterpreterAssembler* assembler);
+
   // Generates code to perform the bitwise binary operation corresponding to
   // |bitwise_op| while gathering type feedback.
   void DoBitwiseBinaryOp(Token::Value bitwise_op,
@@ -118,18 +124,6 @@
   // Generates code to perform a JS call that collects type feedback.
   void DoJSCall(InterpreterAssembler* assembler, TailCallMode tail_call_mode);
 
-  // Generates code to perform a runtime call.
-  void DoCallRuntimeCommon(InterpreterAssembler* assembler);
-
-  // Generates code to perform a runtime call returning a pair.
-  void DoCallRuntimeForPairCommon(InterpreterAssembler* assembler);
-
-  // Generates code to perform a JS runtime call.
-  void DoCallJSRuntimeCommon(InterpreterAssembler* assembler);
-
-  // Generates code to perform a constructor call.
-  void DoCallConstruct(InterpreterAssembler* assembler);
-
   // Generates code to perform delete via function_id.
   void DoDelete(Runtime::FunctionId function_id,
                 InterpreterAssembler* assembler);
@@ -138,18 +132,28 @@
   void DoLdaLookupSlot(Runtime::FunctionId function_id,
                        InterpreterAssembler* assembler);
 
-  // Generates code to perform a lookup slot store depending on |language_mode|.
+  // Generates code to perform a lookup slot load via |function_id| that can
+  // fast path to a context slot load.
+  void DoLdaLookupContextSlot(Runtime::FunctionId function_id,
+                              InterpreterAssembler* assembler);
+
+  // Generates code to perform a lookup slot load via |function_id| that can
+  // fast path to a global load.
+  void DoLdaLookupGlobalSlot(Runtime::FunctionId function_id,
+                             InterpreterAssembler* assembler);
+
+  // Generates code to perform a lookup slot store depending on
+  // |language_mode|.
   void DoStaLookupSlot(LanguageMode language_mode,
                        InterpreterAssembler* assembler);
 
-  // Generates a node with the undefined constant.
-  compiler::Node* BuildLoadUndefined(InterpreterAssembler* assembler);
-
   // Generates code to load a context slot.
   compiler::Node* BuildLoadContextSlot(InterpreterAssembler* assembler);
 
   // Generates code to load a global.
-  compiler::Node* BuildLoadGlobal(Callable ic, InterpreterAssembler* assembler);
+  compiler::Node* BuildLoadGlobal(Callable ic, compiler::Node* context,
+                                  compiler::Node* feedback_slot,
+                                  InterpreterAssembler* assembler);
 
   // Generates code to load a named property.
   compiler::Node* BuildLoadNamedProperty(Callable ic,
diff --git a/src/interpreter/mkpeephole.cc b/src/interpreter/mkpeephole.cc
index 8e9d5fe..270fe83 100644
--- a/src/interpreter/mkpeephole.cc
+++ b/src/interpreter/mkpeephole.cc
@@ -146,6 +146,9 @@
             Bytecode::kIllegal};
   }
 
+  // TODO(rmcilroy): Add elide for consecutive mov to and from the same
+  // register.
+
   // Remove ToBoolean coercion from conditional jumps where possible.
   if (Bytecodes::WritesBooleanToAccumulator(last)) {
     if (Bytecodes::IsJumpIfToBoolean(current)) {
diff --git a/src/isolate-inl.h b/src/isolate-inl.h
index 5c71d91..34c98bb 100644
--- a/src/isolate-inl.h
+++ b/src/isolate-inl.h
@@ -76,6 +76,11 @@
   return exception != heap()->termination_exception();
 }
 
+bool Isolate::is_catchable_by_wasm(Object* exception) {
+  return is_catchable_by_javascript(exception) &&
+         (exception->IsNumber() || exception->IsSmi());
+}
+
 void Isolate::FireBeforeCallEnteredCallback() {
   for (int i = 0; i < before_call_entered_callbacks_.length(); i++) {
     before_call_entered_callbacks_.at(i)(reinterpret_cast<v8::Isolate*>(this));
@@ -100,20 +105,6 @@
   isolate_->set_pending_exception(*pending_exception_);
 }
 
-SaveContext::SaveContext(Isolate* isolate)
-    : isolate_(isolate), prev_(isolate->save_context()) {
-  if (isolate->context() != NULL) {
-    context_ = Handle<Context>(isolate->context());
-  }
-  isolate->set_save_context(this);
-  c_entry_fp_ = isolate->c_entry_fp(isolate->thread_local_top());
-}
-
-SaveContext::~SaveContext() {
-  isolate_->set_context(context_.is_null() ? NULL : *context_);
-  isolate_->set_save_context(prev_);
-}
-
 #define NATIVE_CONTEXT_FIELD_ACCESSOR(index, type, name)     \
   Handle<type> Isolate::name() {                             \
     return Handle<type>(raw_native_context()->name(), this); \
@@ -147,6 +138,11 @@
   return has_instance_cell->value() == Smi::FromInt(kArrayProtectorValid);
 }
 
+bool Isolate::IsStringLengthOverflowIntact() {
+  PropertyCell* has_instance_cell = heap()->string_length_protector();
+  return has_instance_cell->value() == Smi::FromInt(kArrayProtectorValid);
+}
+
 }  // namespace internal
 }  // namespace v8
 
diff --git a/src/isolate.cc b/src/isolate.cc
index e14db60..63c927b 100644
--- a/src/isolate.cc
+++ b/src/isolate.cc
@@ -10,7 +10,6 @@
 #include <sstream>
 
 #include "src/ast/context-slot-cache.h"
-#include "src/base/accounting-allocator.h"
 #include "src/base/hashmap.h"
 #include "src/base/platform/platform.h"
 #include "src/base/sys-info.h"
@@ -28,6 +27,7 @@
 #include "src/external-reference-table.h"
 #include "src/frames-inl.h"
 #include "src/ic/stub-cache.h"
+#include "src/interface-descriptors.h"
 #include "src/interpreter/interpreter.h"
 #include "src/isolate-inl.h"
 #include "src/libsampler/sampler.h"
@@ -43,6 +43,7 @@
 #include "src/version.h"
 #include "src/vm-state-inl.h"
 #include "src/wasm/wasm-module.h"
+#include "src/zone/accounting-allocator.h"
 
 namespace v8 {
 namespace internal {
@@ -315,21 +316,7 @@
   base::OS::Abort();
 }
 
-static Handle<FixedArray> MaybeGrow(Isolate* isolate,
-                                    Handle<FixedArray> elements,
-                                    int cur_position, int new_size) {
-  if (new_size > elements->length()) {
-    int new_capacity = JSObject::NewElementsCapacity(elements->length());
-    Handle<FixedArray> new_elements =
-        isolate->factory()->NewFixedArrayWithHoles(new_capacity);
-    for (int i = 0; i < cur_position; i++) {
-      new_elements->set(i, elements->get(i));
-    }
-    elements = new_elements;
-  }
-  DCHECK(new_size <= elements->length());
-  return elements;
-}
+namespace {
 
 class StackTraceHelper {
  public:
@@ -351,21 +338,17 @@
         break;
     }
     encountered_strict_function_ = false;
-    sloppy_frames_ = 0;
   }
 
+  // Poison stack frames below the first strict mode frame.
   // The stack trace API should not expose receivers and function
   // objects on frames deeper than the top-most one with a strict mode
-  // function. The number of sloppy frames is stored as first element in
-  // the result array.
-  void CountSloppyFrames(JSFunction* fun) {
+  // function.
+  bool IsStrictFrame(JSFunction* fun) {
     if (!encountered_strict_function_) {
-      if (is_strict(fun->shared()->language_mode())) {
-        encountered_strict_function_ = true;
-      } else {
-        sloppy_frames_++;
-      }
+      encountered_strict_function_ = is_strict(fun->shared()->language_mode());
     }
+    return encountered_strict_function_;
   }
 
   // Determines whether the given stack frame should be displayed in a stack
@@ -375,8 +358,6 @@
            IsInSameSecurityContext(fun);
   }
 
-  int sloppy_frames() const { return sloppy_frames_; }
-
  private:
   // This mechanism excludes a number of uninteresting frames from the stack
   // trace. This can be be the first frame (which will be a builtin-exit frame
@@ -422,12 +403,9 @@
   const Handle<Object> caller_;
   bool skip_next_frame_;
 
-  int sloppy_frames_;
   bool encountered_strict_function_;
 };
 
-namespace {
-
 // TODO(jgruber): Fix all cases in which frames give us a hole value (e.g. the
 // receiver in RegExp constructor frames.
 Handle<Object> TheHoleToUndefined(Isolate* isolate, Handle<Object> in) {
@@ -435,35 +413,36 @@
              ? Handle<Object>::cast(isolate->factory()->undefined_value())
              : in;
 }
+
+bool GetStackTraceLimit(Isolate* isolate, int* result) {
+  Handle<JSObject> error = isolate->error_function();
+
+  Handle<String> key = isolate->factory()->stackTraceLimit_string();
+  Handle<Object> stack_trace_limit = JSReceiver::GetDataProperty(error, key);
+  if (!stack_trace_limit->IsNumber()) return false;
+
+  // Ensure that limit is not negative.
+  *result = Max(FastD2IChecked(stack_trace_limit->Number()), 0);
+  return true;
 }
 
+}  // namespace
+
 Handle<Object> Isolate::CaptureSimpleStackTrace(Handle<JSReceiver> error_object,
                                                 FrameSkipMode mode,
                                                 Handle<Object> caller) {
   DisallowJavascriptExecution no_js(this);
 
-  // Get stack trace limit.
-  Handle<JSObject> error = error_function();
-  Handle<String> stackTraceLimit =
-      factory()->InternalizeUtf8String("stackTraceLimit");
-  DCHECK(!stackTraceLimit.is_null());
-  Handle<Object> stack_trace_limit =
-      JSReceiver::GetDataProperty(error, stackTraceLimit);
-  if (!stack_trace_limit->IsNumber()) return factory()->undefined_value();
-  int limit = FastD2IChecked(stack_trace_limit->Number());
-  limit = Max(limit, 0);  // Ensure that limit is not negative.
+  int limit;
+  if (!GetStackTraceLimit(this, &limit)) return factory()->undefined_value();
 
-  int initial_size = Min(limit, 10);
-  Handle<FixedArray> elements =
-      factory()->NewFixedArrayWithHoles(initial_size * 4 + 1);
+  const int initial_size = Min(limit, 10);
+  Handle<FrameArray> elements = factory()->NewFrameArray(initial_size);
 
   StackTraceHelper helper(this, mode, caller);
 
-  // First element is reserved to store the number of sloppy frames.
-  int cursor = 1;
-  int frames_seen = 0;
-  for (StackFrameIterator iter(this); !iter.done() && frames_seen < limit;
-       iter.Advance()) {
+  for (StackFrameIterator iter(this);
+       !iter.done() && elements->FrameCount() < limit; iter.Advance()) {
     StackFrame* frame = iter.frame();
 
     switch (frame->type()) {
@@ -481,26 +460,27 @@
 
           // Filter out internal frames that we do not want to show.
           if (!helper.IsVisibleInStackTrace(*fun)) continue;
-          helper.CountSloppyFrames(*fun);
 
           Handle<Object> recv = frames[i].receiver();
           Handle<AbstractCode> abstract_code = frames[i].abstract_code();
+          const int offset = frames[i].code_offset();
+
+          bool force_constructor = false;
           if (frame->type() == StackFrame::BUILTIN) {
             // Help CallSite::IsConstructor correctly detect hand-written
             // construct stubs.
-            Code* code = Code::cast(*abstract_code);
-            if (code->is_construct_stub()) {
-              recv = handle(heap()->call_site_constructor_symbol(), this);
+            if (Code::cast(*abstract_code)->is_construct_stub()) {
+              force_constructor = true;
             }
           }
-          Handle<Smi> offset(Smi::FromInt(frames[i].code_offset()), this);
 
-          elements = MaybeGrow(this, elements, cursor, cursor + 4);
-          elements->set(cursor++, *TheHoleToUndefined(this, recv));
-          elements->set(cursor++, *fun);
-          elements->set(cursor++, *abstract_code);
-          elements->set(cursor++, *offset);
-          frames_seen++;
+          int flags = 0;
+          if (helper.IsStrictFrame(*fun)) flags |= FrameArray::kIsStrict;
+          if (force_constructor) flags |= FrameArray::kForceConstructor;
+
+          elements = FrameArray::AppendJSFrame(
+              elements, TheHoleToUndefined(this, recv), fun, abstract_code,
+              offset, flags);
         }
       } break;
 
@@ -510,54 +490,49 @@
 
         // Filter out internal frames that we do not want to show.
         if (!helper.IsVisibleInStackTrace(*fun)) continue;
-        helper.CountSloppyFrames(*fun);
 
-        Handle<Code> code = handle(exit_frame->LookupCode(), this);
-        int offset =
+        Handle<Object> recv(exit_frame->receiver(), this);
+        Handle<Code> code(exit_frame->LookupCode(), this);
+        const int offset =
             static_cast<int>(exit_frame->pc() - code->instruction_start());
 
-        // In order to help CallSite::IsConstructor detect builtin constructors,
-        // we reuse the receiver field to pass along a special symbol.
-        Handle<Object> recv;
-        if (exit_frame->IsConstructor()) {
-          recv = factory()->call_site_constructor_symbol();
-        } else {
-          recv = handle(exit_frame->receiver(), this);
-        }
+        int flags = 0;
+        if (helper.IsStrictFrame(*fun)) flags |= FrameArray::kIsStrict;
+        if (exit_frame->IsConstructor()) flags |= FrameArray::kForceConstructor;
 
-        elements = MaybeGrow(this, elements, cursor, cursor + 4);
-        elements->set(cursor++, *recv);
-        elements->set(cursor++, *fun);
-        elements->set(cursor++, *code);
-        elements->set(cursor++, Smi::FromInt(offset));
-        frames_seen++;
+        elements = FrameArray::AppendJSFrame(elements, recv, fun,
+                                             Handle<AbstractCode>::cast(code),
+                                             offset, flags);
       } break;
 
       case StackFrame::WASM: {
         WasmFrame* wasm_frame = WasmFrame::cast(frame);
+        Handle<Object> wasm_object(wasm_frame->wasm_obj(), this);
+        const int wasm_function_index = wasm_frame->function_index();
         Code* code = wasm_frame->unchecked_code();
-        Handle<AbstractCode> abstract_code =
-            Handle<AbstractCode>(AbstractCode::cast(code), this);
-        int offset =
+        Handle<AbstractCode> abstract_code(AbstractCode::cast(code), this);
+        const int offset =
             static_cast<int>(wasm_frame->pc() - code->instruction_start());
-        elements = MaybeGrow(this, elements, cursor, cursor + 4);
-        elements->set(cursor++, wasm_frame->wasm_obj());
-        elements->set(cursor++, Smi::FromInt(wasm_frame->function_index()));
-        elements->set(cursor++, *abstract_code);
-        elements->set(cursor++, Smi::FromInt(offset));
-        frames_seen++;
+
+        // TODO(wasm): The wasm object returned by the WasmFrame should always
+        //             be a wasm object.
+        DCHECK(wasm::IsWasmObject(*wasm_object) ||
+               wasm_object->IsUndefined(this));
+
+        elements = FrameArray::AppendWasmFrame(
+            elements, wasm_object, wasm_function_index, abstract_code, offset,
+            FrameArray::kIsWasmFrame);
       } break;
 
       default:
         break;
     }
   }
-  elements->set(0, Smi::FromInt(helper.sloppy_frames()));
-  elements->Shrink(cursor);
-  Handle<JSArray> result = factory()->NewJSArrayWithElements(elements);
-  result->set_length(Smi::FromInt(cursor));
+
+  elements->ShrinkToFit();
+
   // TODO(yangguo): Queue this structured stack trace for preprocessing on GC.
-  return result;
+  return factory()->NewJSArrayWithElements(elements);
 }
 
 MaybeHandle<JSReceiver> Isolate::CaptureAndSetDetailedStackTrace(
@@ -764,19 +739,6 @@
   Handle<String> constructor_key_;
 };
 
-
-int PositionFromStackTrace(Handle<FixedArray> elements, int index) {
-  DisallowHeapAllocation no_gc;
-  Object* maybe_code = elements->get(index + 2);
-  if (maybe_code->IsSmi()) {
-    return Smi::cast(maybe_code)->value();
-  } else {
-    AbstractCode* abstract_code = AbstractCode::cast(maybe_code);
-    int code_offset = Smi::cast(elements->get(index + 3))->value();
-    return abstract_code->SourcePosition(code_offset);
-  }
-}
-
 Handle<JSArray> Isolate::CaptureCurrentStackTrace(
     int frame_limit, StackTrace::StackTraceOptions options) {
   DisallowJavascriptExecution no_js(this);
@@ -963,6 +925,10 @@
 
 
 Object* Isolate::StackOverflow() {
+  if (FLAG_abort_on_stack_overflow) {
+    FATAL("Aborting on stack overflow");
+  }
+
   DisallowJavascriptExecution no_js(this);
   HandleScope scope(this);
 
@@ -979,7 +945,8 @@
 
 #ifdef VERIFY_HEAP
   if (FLAG_verify_heap && FLAG_stress_compaction) {
-    heap()->CollectAllGarbage(Heap::kNoGCFlags, "trigger compaction");
+    heap()->CollectAllGarbage(Heap::kNoGCFlags,
+                              GarbageCollectionReason::kTesting);
   }
 #endif  // VERIFY_HEAP
 
@@ -1017,6 +984,8 @@
 
 
 void Isolate::InvokeApiInterruptCallbacks() {
+  RuntimeCallTimerScope runtimeTimer(
+      this, &RuntimeCallStats::InvokeApiInterruptCallbacks);
   // Note: callback below should be called outside of execution access lock.
   while (true) {
     InterruptEntry entry;
@@ -1180,8 +1149,8 @@
   Address handler_sp = nullptr;
   Address handler_fp = nullptr;
 
-  // Special handling of termination exceptions, uncatchable by JavaScript code,
-  // we unwind the handlers until the top ENTRY handler is found.
+  // Special handling of termination exceptions, uncatchable by JavaScript and
+  // Wasm code, we unwind the handlers until the top ENTRY handler is found.
   bool catchable_by_js = is_catchable_by_javascript(exception);
 
   // Compute handler and stack unwinding information by performing a full walk
@@ -1203,6 +1172,28 @@
       break;
     }
 
+    if (FLAG_wasm_eh_prototype) {
+      if (frame->is_wasm() && is_catchable_by_wasm(exception)) {
+        int stack_slots = 0;  // Will contain stack slot count of frame.
+        WasmFrame* wasm_frame = static_cast<WasmFrame*>(frame);
+        offset = wasm_frame->LookupExceptionHandlerInTable(&stack_slots);
+        if (offset >= 0) {
+          // Compute the stack pointer from the frame pointer. This ensures that
+          // argument slots on the stack are dropped as returning would.
+          Address return_sp = frame->fp() +
+                              StandardFrameConstants::kFixedFrameSizeAboveFp -
+                              stack_slots * kPointerSize;
+
+          // Gather information from the frame.
+          code = frame->LookupCode();
+
+          handler_sp = return_sp;
+          handler_fp = frame->fp();
+          break;
+        }
+      }
+    }
+
     // For optimized frames we perform a lookup in the handler table.
     if (frame->is_optimized() && catchable_by_js) {
       OptimizedFrame* js_frame = static_cast<OptimizedFrame*>(frame);
@@ -1349,6 +1340,8 @@
       JavaScriptFrame* js_frame = static_cast<JavaScriptFrame*>(frame);
       HandlerTable::CatchPrediction prediction = PredictException(js_frame);
       if (prediction == HandlerTable::DESUGARING) return CAUGHT_BY_DESUGARING;
+      if (prediction == HandlerTable::ASYNC_AWAIT) return CAUGHT_BY_ASYNC_AWAIT;
+      if (prediction == HandlerTable::PROMISE) return CAUGHT_BY_PROMISE;
       if (prediction != HandlerTable::UNCAUGHT) return CAUGHT_BY_JAVASCRIPT;
     }
 
@@ -1425,36 +1418,20 @@
 
 
 void Isolate::PrintCurrentStackTrace(FILE* out) {
-  StackTraceFrameIterator it(this);
-  while (!it.done()) {
+  for (StackTraceFrameIterator it(this); !it.done(); it.Advance()) {
+    if (!it.is_javascript()) continue;
+
     HandleScope scope(this);
-    // Find code position if recorded in relocation info.
-    StandardFrame* frame = it.frame();
-    AbstractCode* abstract_code;
-    int code_offset;
-    if (frame->is_interpreted()) {
-      InterpretedFrame* iframe = reinterpret_cast<InterpretedFrame*>(frame);
-      abstract_code = AbstractCode::cast(iframe->GetBytecodeArray());
-      code_offset = iframe->GetBytecodeOffset();
-    } else {
-      DCHECK(frame->is_java_script() || frame->is_wasm());
-      Code* code = frame->LookupCode();
-      abstract_code = AbstractCode::cast(code);
-      code_offset = static_cast<int>(frame->pc() - code->instruction_start());
-    }
-    int pos = abstract_code->SourcePosition(code_offset);
-    JavaScriptFrame* js_frame = JavaScriptFrame::cast(frame);
-    Handle<Object> pos_obj(Smi::FromInt(pos), this);
-    // Fetch function and receiver.
-    Handle<JSFunction> fun(js_frame->function(), this);
-    Handle<Object> recv(js_frame->receiver(), this);
-    // Advance to the next JavaScript frame and determine if the
-    // current frame is the top-level frame.
-    it.Advance();
-    Handle<Object> is_top_level = factory()->ToBoolean(it.done());
-    // Generate and print stack trace line.
-    Handle<String> line =
-        Execution::GetStackTraceLine(recv, fun, pos_obj, is_top_level);
+    JavaScriptFrame* frame = it.javascript_frame();
+
+    Handle<Object> receiver(frame->receiver(), this);
+    Handle<JSFunction> function(frame->function(), this);
+    Handle<AbstractCode> code(AbstractCode::cast(frame->LookupCode()), this);
+    const int offset =
+        static_cast<int>(frame->pc() - code->instruction_start());
+
+    JSStackFrame site(this, receiver, function, code, offset);
+    Handle<String> line = site.ToString().ToHandleChecked();
     if (line->length() > 0) {
       line->PrintOn(out);
       PrintF(out, "\n");
@@ -1522,22 +1499,25 @@
   if (!property->IsJSArray()) return false;
   Handle<JSArray> simple_stack_trace = Handle<JSArray>::cast(property);
 
-  Handle<FixedArray> elements(FixedArray::cast(simple_stack_trace->elements()));
-  int elements_limit = Smi::cast(simple_stack_trace->length())->value();
+  Handle<FrameArray> elements(FrameArray::cast(simple_stack_trace->elements()));
 
-  for (int i = 1; i < elements_limit; i += 4) {
-    Handle<Object> fun_obj = handle(elements->get(i + 1), this);
-    if (fun_obj->IsSmi()) {
+  const int frame_count = elements->FrameCount();
+  for (int i = 0; i < frame_count; i++) {
+    if (elements->IsWasmFrame(i)) {
       // TODO(clemensh): handle wasm frames
       return false;
     }
-    Handle<JSFunction> fun = Handle<JSFunction>::cast(fun_obj);
+
+    Handle<JSFunction> fun = handle(elements->Function(i), this);
     if (!fun->shared()->IsSubjectToDebugging()) continue;
 
     Object* script = fun->shared()->script();
     if (script->IsScript() &&
         !(Script::cast(script)->source()->IsUndefined(this))) {
-      int pos = PositionFromStackTrace(elements, i);
+      AbstractCode* abstract_code = elements->Code(i);
+      const int code_offset = elements->Offset(i)->value();
+      const int pos = abstract_code->SourcePosition(code_offset);
+
       Handle<Script> casted_script(Script::cast(script));
       *target = MessageLocation(casted_script, pos, pos + 1);
       return true;
@@ -1752,6 +1732,22 @@
   global_handles()->Destroy(global_promise.location());
 }
 
+bool Isolate::PromiseHasUserDefinedRejectHandler(Handle<Object> promise) {
+  Handle<JSFunction> fun = promise_has_user_defined_reject_handler();
+  Handle<Object> has_reject_handler;
+  // If we are, e.g., overflowing the stack, don't try to call out to JS
+  if (!AllowJavascriptExecution::IsAllowed(this)) return false;
+  // Call the registered function to check for a handler
+  if (Execution::TryCall(this, fun, promise, 0, NULL)
+          .ToHandle(&has_reject_handler)) {
+    return has_reject_handler->IsTrue(this);
+  }
+  // If an exception is thrown in the course of execution of this built-in
+  // function, it indicates either a bug, or a synthetic uncatchable
+  // exception in the shutdown path. In either case, it's OK to predict either
+  // way in DevTools.
+  return false;
+}
 
 Handle<Object> Isolate::GetPromiseOnStackOnThrow() {
   Handle<Object> undefined = factory()->undefined_value();
@@ -1762,18 +1758,49 @@
   if (prediction == NOT_CAUGHT || prediction == CAUGHT_BY_EXTERNAL) {
     return undefined;
   }
+  Handle<Object> retval = undefined;
+  PromiseOnStack* promise_on_stack = tltop->promise_on_stack_;
   for (JavaScriptFrameIterator it(this); !it.done(); it.Advance()) {
     switch (PredictException(it.frame())) {
       case HandlerTable::UNCAUGHT:
-        break;
+        continue;
       case HandlerTable::CAUGHT:
       case HandlerTable::DESUGARING:
-        return undefined;
+        if (retval->IsJSObject()) {
+          // Caught the result of an inner async/await invocation.
+          // Mark the inner promise as caught in the "synchronous case" so
+          // that Debug::OnException will see. In the synchronous case,
+          // namely in the code in an async function before the first
+          // await, the function which has this exception event has not yet
+          // returned, so the generated Promise has not yet been marked
+          // by AsyncFunctionAwaitCaught with promiseHandledHintSymbol.
+          Handle<Symbol> key = factory()->promise_handled_hint_symbol();
+          JSObject::SetProperty(Handle<JSObject>::cast(retval), key,
+                                factory()->true_value(), STRICT)
+              .Assert();
+        }
+        return retval;
       case HandlerTable::PROMISE:
-        return tltop->promise_on_stack_->promise();
+        return promise_on_stack
+                   ? Handle<Object>::cast(promise_on_stack->promise())
+                   : undefined;
+      case HandlerTable::ASYNC_AWAIT: {
+        // If in the initial portion of async/await, continue the loop to pop up
+        // successive async/await stack frames until an asynchronous one with
+        // dependents is found, or a non-async stack frame is encountered, in
+        // order to handle the synchronous async/await catch prediction case:
+        // assume that async function calls are awaited.
+        if (!promise_on_stack) return retval;
+        retval = promise_on_stack->promise();
+        if (PromiseHasUserDefinedRejectHandler(retval)) {
+          return retval;
+        }
+        promise_on_stack = promise_on_stack->prev();
+        continue;
+      }
     }
   }
-  return undefined;
+  return retval;
 }
 
 
@@ -1904,13 +1931,13 @@
 #define TRACE_ISOLATE(tag)
 #endif
 
-class VerboseAccountingAllocator : public base::AccountingAllocator {
+class VerboseAccountingAllocator : public AccountingAllocator {
  public:
   VerboseAccountingAllocator(Heap* heap, size_t sample_bytes)
       : heap_(heap), last_memory_usage_(0), sample_bytes_(sample_bytes) {}
 
-  void* Allocate(size_t size) override {
-    void* memory = base::AccountingAllocator::Allocate(size);
+  v8::internal::Segment* AllocateSegment(size_t size) override {
+    v8::internal::Segment* memory = AccountingAllocator::AllocateSegment(size);
     if (memory) {
       size_t current = GetCurrentMemoryUsage();
       if (last_memory_usage_.Value() + sample_bytes_ < current) {
@@ -1921,8 +1948,8 @@
     return memory;
   }
 
-  void Free(void* memory, size_t bytes) override {
-    base::AccountingAllocator::Free(memory, bytes);
+  void FreeSegment(v8::internal::Segment* memory) override {
+    AccountingAllocator::FreeSegment(memory);
     size_t current = GetCurrentMemoryUsage();
     if (current + sample_bytes_ < last_memory_usage_.Value()) {
       PrintJSON(current);
@@ -1977,9 +2004,8 @@
       unicode_cache_(NULL),
       allocator_(FLAG_trace_gc_object_stats
                      ? new VerboseAccountingAllocator(&heap_, 256 * KB)
-                     : new base::AccountingAllocator()),
+                     : new AccountingAllocator()),
       runtime_zone_(new Zone(allocator_)),
-      interface_descriptor_zone_(new Zone(allocator_)),
       inner_pointer_to_code_cache_(NULL),
       global_handles_(NULL),
       eternal_handles_(NULL),
@@ -2004,8 +2030,6 @@
       deferred_handles_head_(NULL),
       optimizing_compile_dispatcher_(NULL),
       stress_deopt_count_(0),
-      virtual_handler_register_(NULL),
-      virtual_slot_register_(NULL),
       next_optimization_id_(0),
       js_calls_from_api_counter_(0),
 #if TRACE_MAPS
@@ -2258,9 +2282,6 @@
   delete runtime_zone_;
   runtime_zone_ = nullptr;
 
-  delete interface_descriptor_zone_;
-  interface_descriptor_zone_ = nullptr;
-
   delete allocator_;
   allocator_ = nullptr;
 
@@ -2399,6 +2420,12 @@
     return false;
   }
 
+// Initialize the interface descriptors ahead of time.
+#define INTERFACE_DESCRIPTOR(V) \
+  { V##Descriptor(this); }
+  INTERFACE_DESCRIPTOR_LIST(INTERFACE_DESCRIPTOR)
+#undef INTERFACE_DESCRIPTOR
+
   deoptimizer_data_ = new DeoptimizerData(heap()->memory_allocator());
 
   const bool create_heap_objects = (des == NULL);
@@ -2436,13 +2463,19 @@
   runtime_profiler_ = new RuntimeProfiler(this);
 
   // If we are deserializing, read the state into the now-empty heap.
-  if (!create_heap_objects) {
-    des->Deserialize(this);
-  }
-  load_stub_cache_->Initialize();
-  store_stub_cache_->Initialize();
-  if (FLAG_ignition || serializer_enabled()) {
-    interpreter_->Initialize();
+  {
+    AlwaysAllocateScope always_allocate(this);
+
+    if (!create_heap_objects) {
+      des->Deserialize(this);
+    }
+    load_stub_cache_->Initialize();
+    store_stub_cache_->Initialize();
+    if (FLAG_ignition || serializer_enabled()) {
+      interpreter_->Initialize();
+    }
+
+    heap_.NotifyDeserializationComplete();
   }
 
   // Finish initialization of ThreadLocal after deserialization is done.
@@ -2473,8 +2506,6 @@
 
   time_millis_at_init_ = heap_.MonotonicallyIncreasingTimeInMs();
 
-  heap_.NotifyDeserializationComplete();
-
   if (!create_heap_objects) {
     // Now that the heap is consistent, it's OK to generate the code for the
     // deopt entry table that might have been referred to by optimized code in
@@ -2620,7 +2651,8 @@
   turbo_statistics_ = nullptr;
   delete hstatistics_;
   hstatistics_ = nullptr;
-  if (FLAG_runtime_call_stats) {
+  if (FLAG_runtime_call_stats &&
+      !TRACE_EVENT_RUNTIME_CALL_STATS_TRACING_ENABLED()) {
     OFStream os(stdout);
     counters()->runtime_call_stats()->Print(os);
     counters()->runtime_call_stats()->Reset();
@@ -2823,6 +2855,15 @@
   DCHECK(!IsArraySpeciesLookupChainIntact());
 }
 
+void Isolate::InvalidateStringLengthOverflowProtector() {
+  DCHECK(factory()->string_length_protector()->value()->IsSmi());
+  DCHECK(IsStringLengthOverflowIntact());
+  PropertyCell::SetValueWithInvalidation(
+      factory()->string_length_protector(),
+      handle(Smi::FromInt(kArrayProtectorInvalid), this));
+  DCHECK(!IsStringLengthOverflowIntact());
+}
+
 bool Isolate::IsAnyInitialArrayPrototype(Handle<JSArray> array) {
   DisallowHeapAllocation no_gc;
   return IsInAnyContext(*array, Context::INITIAL_ARRAY_PROTOTYPE_INDEX);
@@ -2964,9 +3005,44 @@
       v8::Utils::StackTraceToLocal(stack_trace)));
 }
 
+void Isolate::PromiseResolveThenableJob(Handle<PromiseContainer> container,
+                                        MaybeHandle<Object>* result,
+                                        MaybeHandle<Object>* maybe_exception) {
+  if (debug()->is_active()) {
+    Handle<Object> before_debug_event(container->before_debug_event(), this);
+    if (before_debug_event->IsJSObject()) {
+      debug()->OnAsyncTaskEvent(Handle<JSObject>::cast(before_debug_event));
+    }
+  }
+
+  Handle<JSReceiver> thenable(container->thenable(), this);
+  Handle<JSFunction> resolve(container->resolve(), this);
+  Handle<JSFunction> reject(container->reject(), this);
+  Handle<JSReceiver> then(container->then(), this);
+  Handle<Object> argv[] = {resolve, reject};
+  *result = Execution::TryCall(this, then, thenable, arraysize(argv), argv,
+                               maybe_exception);
+
+  Handle<Object> reason;
+  if (maybe_exception->ToHandle(&reason)) {
+    DCHECK(result->is_null());
+    Handle<Object> reason_arg[] = {reason};
+    *result =
+        Execution::TryCall(this, reject, factory()->undefined_value(),
+                           arraysize(reason_arg), reason_arg, maybe_exception);
+  }
+
+  if (debug()->is_active()) {
+    Handle<Object> after_debug_event(container->after_debug_event(), this);
+    if (after_debug_event->IsJSObject()) {
+      debug()->OnAsyncTaskEvent(Handle<JSObject>::cast(after_debug_event));
+    }
+  }
+}
 
 void Isolate::EnqueueMicrotask(Handle<Object> microtask) {
-  DCHECK(microtask->IsJSFunction() || microtask->IsCallHandlerInfo());
+  DCHECK(microtask->IsJSFunction() || microtask->IsCallHandlerInfo() ||
+         microtask->IsPromiseContainer());
   Handle<FixedArray> queue(heap()->microtask_queue(), this);
   int num_tasks = pending_microtask_count();
   DCHECK(num_tasks <= queue->length());
@@ -2995,6 +3071,8 @@
 
 
 void Isolate::RunMicrotasksInternal() {
+  if (!pending_microtask_count()) return;
+  TRACE_EVENT0("v8.execute", "RunMicrotasks");
   while (pending_microtask_count() > 0) {
     HandleScope scope(this);
     int num_tasks = pending_microtask_count();
@@ -3006,18 +3084,41 @@
     Isolate* isolate = this;
     FOR_WITH_HANDLE_SCOPE(isolate, int, i = 0, i, i < num_tasks, i++, {
       Handle<Object> microtask(queue->get(i), this);
-      if (microtask->IsJSFunction()) {
-        Handle<JSFunction> microtask_function =
-            Handle<JSFunction>::cast(microtask);
+
+      if (microtask->IsCallHandlerInfo()) {
+        Handle<CallHandlerInfo> callback_info =
+            Handle<CallHandlerInfo>::cast(microtask);
+        v8::MicrotaskCallback callback =
+            v8::ToCData<v8::MicrotaskCallback>(callback_info->callback());
+        void* data = v8::ToCData<void*>(callback_info->data());
+        callback(data);
+      } else {
         SaveContext save(this);
-        set_context(microtask_function->context()->native_context());
+        Context* context = microtask->IsJSFunction()
+                               ? Handle<JSFunction>::cast(microtask)->context()
+                               : Handle<PromiseContainer>::cast(microtask)
+                                     ->resolve()
+                                     ->context();
+        set_context(context->native_context());
         handle_scope_implementer_->EnterMicrotaskContext(
-            handle(microtask_function->context(), this));
+            Handle<Context>(context, this));
+
+        MaybeHandle<Object> result;
         MaybeHandle<Object> maybe_exception;
-        MaybeHandle<Object> result = Execution::TryCall(
-            this, microtask_function, factory()->undefined_value(), 0, NULL,
-            &maybe_exception);
+
+        if (microtask->IsJSFunction()) {
+          Handle<JSFunction> microtask_function =
+              Handle<JSFunction>::cast(microtask);
+          result = Execution::TryCall(this, microtask_function,
+                                      factory()->undefined_value(), 0, NULL,
+                                      &maybe_exception);
+        } else {
+          PromiseResolveThenableJob(Handle<PromiseContainer>::cast(microtask),
+                                    &result, &maybe_exception);
+        }
+
         handle_scope_implementer_->LeaveMicrotaskContext();
+
         // If execution is terminating, just bail out.
         if (result.is_null() && maybe_exception.is_null()) {
           // Clear out any remaining callbacks in the queue.
@@ -3025,13 +3126,6 @@
           set_pending_microtask_count(0);
           return;
         }
-      } else {
-        Handle<CallHandlerInfo> callback_info =
-            Handle<CallHandlerInfo>::cast(microtask);
-        v8::MicrotaskCallback callback =
-            v8::ToCData<v8::MicrotaskCallback>(callback_info->callback());
-        void* data = v8::ToCData<void*>(callback_info->data());
-        callback(data);
       }
     });
   }
@@ -3179,6 +3273,15 @@
   is_isolate_in_background_ = false;
 }
 
+void Isolate::PrintWithTimestamp(const char* format, ...) {
+  base::OS::Print("[%d:%p] %8.0f ms: ", base::OS::GetCurrentProcessId(),
+                  static_cast<void*>(this), time_millis_since_init());
+  va_list arguments;
+  va_start(arguments, format);
+  base::OS::VPrint(format, arguments);
+  va_end(arguments);
+}
+
 bool StackLimitCheck::JsHasOverflowed(uintptr_t gap) const {
   StackGuard* stack_guard = isolate_->stack_guard();
 #ifdef USE_SIMULATOR
@@ -3190,6 +3293,21 @@
   return GetCurrentStackPosition() - gap < stack_guard->real_climit();
 }
 
+SaveContext::SaveContext(Isolate* isolate)
+    : isolate_(isolate), prev_(isolate->save_context()) {
+  if (isolate->context() != NULL) {
+    context_ = Handle<Context>(isolate->context());
+  }
+  isolate->set_save_context(this);
+
+  c_entry_fp_ = isolate->c_entry_fp(isolate->thread_local_top());
+}
+
+SaveContext::~SaveContext() {
+  isolate_->set_context(context_.is_null() ? NULL : *context_);
+  isolate_->set_save_context(prev_);
+}
+
 #ifdef DEBUG
 AssertNoContextChange::AssertNoContextChange(Isolate* isolate)
     : isolate_(isolate), context_(isolate->context(), isolate) {}
diff --git a/src/isolate.h b/src/isolate.h
index eb1841d..8d0d3b4 100644
--- a/src/isolate.h
+++ b/src/isolate.h
@@ -23,13 +23,11 @@
 #include "src/messages.h"
 #include "src/regexp/regexp-stack.h"
 #include "src/runtime/runtime.h"
-#include "src/tracing/trace-event.h"
-#include "src/zone.h"
+#include "src/zone/zone.h"
 
 namespace v8 {
 
 namespace base {
-class AccountingAllocator;
 class RandomNumberGenerator;
 }
 
@@ -52,6 +50,7 @@
 class CpuFeatures;
 class CpuProfiler;
 class DeoptimizerData;
+class DescriptorLookupCache;
 class Deserializer;
 class EmptyStatement;
 class ExternalCallbackScope;
@@ -63,6 +62,7 @@
 class HTracer;
 class InlineRuntimeFunctionsTable;
 class InnerPointerToCodeCache;
+class KeyedLookupCache;
 class Logger;
 class MaterializedObjectStore;
 class OptimizingCompileDispatcher;
@@ -94,14 +94,6 @@
 class Interpreter;
 }
 
-// Static indirection table for handles to constants.  If a frame
-// element represents a constant, the data contains an index into
-// this table of handles to the actual constants.
-// Static indirection table for handles to constants.  If a Result
-// represents a constant, the data contains an index into this table
-// of handles to the actual constants.
-typedef ZoneList<Handle<Object> > ZoneObjectList;
-
 #define RETURN_FAILURE_IF_SCHEDULED_EXCEPTION(isolate)    \
   do {                                                    \
     Isolate* __isolate__ = (isolate);                     \
@@ -369,9 +361,9 @@
 
 #if USE_SIMULATOR
 
-#define ISOLATE_INIT_SIMULATOR_LIST(V)       \
-  V(bool, simulator_initialized, false)      \
-  V(base::HashMap*, simulator_i_cache, NULL) \
+#define ISOLATE_INIT_SIMULATOR_LIST(V)                    \
+  V(bool, simulator_initialized, false)                   \
+  V(base::CustomMatcherHashMap*, simulator_i_cache, NULL) \
   V(Redirection*, simulator_redirection, NULL)
 #else
 
@@ -629,6 +621,7 @@
   bool IsExternalHandlerOnTop(Object* exception);
 
   inline bool is_catchable_by_javascript(Object* exception);
+  inline bool is_catchable_by_wasm(Object* exception);
 
   // JS execution stack (see frames.h).
   static Address c_entry_fp(ThreadLocalTop* thread) {
@@ -672,8 +665,14 @@
   // Push and pop a promise and the current try-catch handler.
   void PushPromise(Handle<JSObject> promise);
   void PopPromise();
+
+  // Return the relevant Promise that a throw/rejection pertains to, based
+  // on the contents of the Promise stack
   Handle<Object> GetPromiseOnStackOnThrow();
 
+  // Heuristically guess whether a Promise is handled by user catch handler
+  bool PromiseHasUserDefinedRejectHandler(Handle<Object> promise);
+
   class ExceptionScope {
    public:
     // Scope currently can only be used for regular exceptions,
@@ -750,7 +749,9 @@
     NOT_CAUGHT,
     CAUGHT_BY_JAVASCRIPT,
     CAUGHT_BY_EXTERNAL,
-    CAUGHT_BY_DESUGARING
+    CAUGHT_BY_DESUGARING,
+    CAUGHT_BY_PROMISE,
+    CAUGHT_BY_ASYNC_AWAIT
   };
   CatchType PredictExceptionCatcher();
 
@@ -843,9 +844,6 @@
     DCHECK(counters_ != NULL);
     return counters_;
   }
-  tracing::TraceEventStatsTable* trace_event_stats_table() {
-    return &trace_event_stats_table_;
-  }
   RuntimeProfiler* runtime_profiler() { return runtime_profiler_; }
   CompilationCache* compilation_cache() { return compilation_cache_; }
   Logger* logger() {
@@ -889,7 +887,6 @@
     return handle_scope_implementer_;
   }
   Zone* runtime_zone() { return runtime_zone_; }
-  Zone* interface_descriptor_zone() { return interface_descriptor_zone_; }
 
   UnicodeCache* unicode_cache() {
     return unicode_cache_;
@@ -1005,6 +1002,7 @@
   inline bool IsHasInstanceLookupChainIntact();
   bool IsIsConcatSpreadableLookupChainIntact();
   bool IsIsConcatSpreadableLookupChainIntact(JSReceiver* receiver);
+  inline bool IsStringLengthOverflowIntact();
 
   // On intent to set an element in object, make sure that appropriate
   // notifications occur if the set is on the elements of the array or
@@ -1023,6 +1021,7 @@
   void InvalidateArraySpeciesProtector();
   void InvalidateHasInstanceProtector();
   void InvalidateIsConcatSpreadableProtector();
+  void InvalidateStringLengthOverflowProtector();
 
   // Returns true if array is the initial array prototype in any native context.
   bool IsAnyInitialArrayPrototype(Handle<JSArray> array);
@@ -1064,12 +1063,6 @@
 
   void* stress_deopt_count_address() { return &stress_deopt_count_; }
 
-  void* virtual_handler_register_address() {
-    return &virtual_handler_register_;
-  }
-
-  void* virtual_slot_register_address() { return &virtual_slot_register_; }
-
   base::RandomNumberGenerator* random_number_generator();
 
   // Given an address occupied by a live code object, return that object.
@@ -1108,6 +1101,9 @@
   void ReportPromiseReject(Handle<JSObject> promise, Handle<Object> value,
                            v8::PromiseRejectEvent event);
 
+  void PromiseResolveThenableJob(Handle<PromiseContainer> container,
+                                 MaybeHandle<Object>* result,
+                                 MaybeHandle<Object>* maybe_exception);
   void EnqueueMicrotask(Handle<Object> microtask);
   void RunMicrotasks();
   bool IsRunningMicrotasks() const { return is_running_microtasks_; }
@@ -1153,7 +1149,7 @@
 
   interpreter::Interpreter* interpreter() const { return interpreter_; }
 
-  base::AccountingAllocator* allocator() { return allocator_; }
+  AccountingAllocator* allocator() { return allocator_; }
 
   bool IsInAnyContext(Object* object, uint32_t index);
 
@@ -1165,6 +1161,12 @@
 
   bool IsIsolateInBackground() { return is_isolate_in_background_; }
 
+  PRINTF_FORMAT(2, 3) void PrintWithTimestamp(const char* format, ...);
+
+#ifdef USE_SIMULATOR
+  base::Mutex* simulator_i_cache_mutex() { return &simulator_i_cache_mutex_; }
+#endif
+
  protected:
   explicit Isolate(bool enable_serializer);
   bool IsArrayOrObjectPrototype(Object* object);
@@ -1303,7 +1305,6 @@
   RuntimeProfiler* runtime_profiler_;
   CompilationCache* compilation_cache_;
   Counters* counters_;
-  tracing::TraceEventStatsTable trace_event_stats_table_;
   base::RecursiveMutex break_access_;
   Logger* logger_;
   StackGuard stack_guard_;
@@ -1324,9 +1325,8 @@
   HandleScopeData handle_scope_data_;
   HandleScopeImplementer* handle_scope_implementer_;
   UnicodeCache* unicode_cache_;
-  base::AccountingAllocator* allocator_;
+  AccountingAllocator* allocator_;
   Zone* runtime_zone_;
-  Zone* interface_descriptor_zone_;
   InnerPointerToCodeCache* inner_pointer_to_code_cache_;
   GlobalHandles* global_handles_;
   EternalHandles* eternal_handles_;
@@ -1407,9 +1407,6 @@
   // Counts deopt points if deopt_every_n_times is enabled.
   unsigned int stress_deopt_count_;
 
-  Address virtual_handler_register_;
-  Address virtual_slot_register_;
-
   int next_optimization_id_;
 
   // Counts javascript calls from the API. Wraps around on overflow.
@@ -1443,6 +1440,10 @@
   v8::Isolate::AbortOnUncaughtExceptionCallback
       abort_on_uncaught_exception_callback_;
 
+#ifdef USE_SIMULATOR
+  base::Mutex simulator_i_cache_mutex_;
+#endif
+
   friend class ExecutionAccess;
   friend class HandleScopeImplementer;
   friend class OptimizingCompileDispatcher;
@@ -1485,8 +1486,8 @@
 // versions of GCC. See V8 issue 122 for details.
 class SaveContext BASE_EMBEDDED {
  public:
-  explicit inline SaveContext(Isolate* isolate);
-  inline ~SaveContext();
+  explicit SaveContext(Isolate* isolate);
+  ~SaveContext();
 
   Handle<Context> context() { return context_; }
   SaveContext* prev() { return prev_; }
@@ -1496,8 +1497,6 @@
     return (c_entry_fp_ == 0) || (c_entry_fp_ > frame->sp());
   }
 
-  Isolate* isolate() { return isolate_; }
-
  private:
   Isolate* const isolate_;
   Handle<Context> context_;
diff --git a/src/js/async-await.js b/src/js/async-await.js
new file mode 100644
index 0000000..b733f3d
--- /dev/null
+++ b/src/js/async-await.js
@@ -0,0 +1,180 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+(function(global, utils, extrasUtils) {
+
+"use strict";
+
+%CheckIsBootstrapping();
+
+// -------------------------------------------------------------------
+// Imports
+
+var AsyncFunctionNext;
+var AsyncFunctionThrow;
+var GlobalPromise;
+var IsPromise;
+var NewPromiseCapability;
+var PerformPromiseThen;
+var PromiseCreate;
+var PromiseNextMicrotaskID;
+var RejectPromise;
+var ResolvePromise;
+
+utils.Import(function(from) {
+  AsyncFunctionNext = from.AsyncFunctionNext;
+  AsyncFunctionThrow = from.AsyncFunctionThrow;
+  GlobalPromise = from.GlobalPromise;
+  IsPromise = from.IsPromise;
+  NewPromiseCapability = from.NewPromiseCapability;
+  PerformPromiseThen = from.PerformPromiseThen;
+  PromiseCreate = from.PromiseCreate;
+  PromiseNextMicrotaskID = from.PromiseNextMicrotaskID;
+  RejectPromise = from.RejectPromise;
+  ResolvePromise = from.ResolvePromise;
+});
+
+var promiseAsyncStackIDSymbol =
+    utils.ImportNow("promise_async_stack_id_symbol");
+var promiseHandledBySymbol =
+    utils.ImportNow("promise_handled_by_symbol");
+var promiseForwardingHandlerSymbol =
+    utils.ImportNow("promise_forwarding_handler_symbol");
+var promiseHandledHintSymbol =
+    utils.ImportNow("promise_handled_hint_symbol");
+var promiseHasHandlerSymbol =
+    utils.ImportNow("promise_has_handler_symbol");
+
+// -------------------------------------------------------------------
+
+function PromiseCastResolved(value) {
+  if (IsPromise(value)) {
+    return value;
+  } else {
+    var promise = PromiseCreate();
+    ResolvePromise(promise, value);
+    return promise;
+  }
+}
+
+// ES#abstract-ops-async-function-await
+// AsyncFunctionAwait ( value )
+// Shared logic for the core of await. The parser desugars
+//   await awaited
+// into
+//   yield AsyncFunctionAwait{Caught,Uncaught}(.generator, awaited, .promise)
+// The 'awaited' parameter is the value; the generator stands in
+// for the asyncContext, and .promise is the larger promise under
+// construction by the enclosing async function.
+function AsyncFunctionAwait(generator, awaited, outerPromise) {
+  // Promise.resolve(awaited).then(
+  //     value => AsyncFunctionNext(value),
+  //     error => AsyncFunctionThrow(error)
+  // );
+  var promise = PromiseCastResolved(awaited);
+
+  var onFulfilled = sentValue => {
+    %_Call(AsyncFunctionNext, generator, sentValue);
+    // The resulting Promise is a throwaway, so it doesn't matter what it
+    // resolves to. What is important is that we don't end up keeping the
+    // whole chain of intermediate Promises alive by returning the value
+    // of AsyncFunctionNext, as that would create a memory leak.
+    return;
+  };
+  var onRejected = sentError => {
+    %_Call(AsyncFunctionThrow, generator, sentError);
+    // Similarly, returning the huge Promise here would cause a long
+    // resolution chain to find what the exception to throw is, and
+    // create a similar memory leak, and it does not matter what
+    // sort of rejection this intermediate Promise becomes.
+    return;
+  }
+
+  // Just forwarding the exception, so no debugEvent for throwawayCapability
+  var throwawayCapability = NewPromiseCapability(GlobalPromise, false);
+
+  // The Promise will be thrown away and not handled, but it shouldn't trigger
+  // unhandled reject events as its work is done
+  SET_PRIVATE(throwawayCapability.promise, promiseHasHandlerSymbol, true);
+
+  if (DEBUG_IS_ACTIVE) {
+    if (IsPromise(awaited)) {
+      // Mark the reject handler callback to be a forwarding edge, rather
+      // than a meaningful catch handler
+      SET_PRIVATE(onRejected, promiseForwardingHandlerSymbol, true);
+    }
+
+    // Mark the dependency to outerPromise in case the throwaway Promise is
+    // found on the Promise stack
+    SET_PRIVATE(throwawayCapability.promise, promiseHandledBySymbol,
+                outerPromise);
+  }
+
+  PerformPromiseThen(promise, onFulfilled, onRejected, throwawayCapability);
+}
+
+// Called by the parser from the desugaring of 'await' when catch
+// prediction indicates no locally surrounding catch block
+function AsyncFunctionAwaitUncaught(generator, awaited, outerPromise) {
+  AsyncFunctionAwait(generator, awaited, outerPromise);
+}
+
+// Called by the parser from the desugaring of 'await' when catch
+// prediction indicates that there is a locally surrounding catch block
+function AsyncFunctionAwaitCaught(generator, awaited, outerPromise) {
+  if (DEBUG_IS_ACTIVE && IsPromise(awaited)) {
+    SET_PRIVATE(awaited, promiseHandledHintSymbol, true);
+  }
+  AsyncFunctionAwait(generator, awaited, outerPromise);
+}
+
+// How the parser rejects promises from async/await desugaring
+function RejectPromiseNoDebugEvent(promise, reason) {
+  return RejectPromise(promise, reason, false);
+}
+
+function AsyncFunctionPromiseCreate() {
+  var promise = PromiseCreate();
+  if (DEBUG_IS_ACTIVE) {
+    // Push the Promise under construction in an async function on
+    // the catch prediction stack to handle exceptions thrown before
+    // the first await.
+    %DebugPushPromise(promise);
+    // Assign ID and create a recurring task to save stack for future
+    // resumptions from await.
+    var id = PromiseNextMicrotaskID();
+    SET_PRIVATE(promise, promiseAsyncStackIDSymbol, id);
+    %DebugAsyncTaskEvent({
+      type: "enqueueRecurring",
+      id: id,
+      name: "async function",
+    });
+  }
+  return promise;
+}
+
+function AsyncFunctionPromiseRelease(promise) {
+  if (DEBUG_IS_ACTIVE) {
+    // Cancel
+    var id = GET_PRIVATE(promise, promiseAsyncStackIDSymbol);
+    %DebugAsyncTaskEvent({
+      type: "cancel",
+      id: id,
+      name: "async function",
+    });
+    // Pop the Promise under construction in an async function on
+    // from catch prediction stack.
+    %DebugPopPromise();
+  }
+}
+
+%InstallToContext([
+  "async_function_await_caught", AsyncFunctionAwaitCaught,
+  "async_function_await_uncaught", AsyncFunctionAwaitUncaught,
+  "reject_promise_no_debug_event", RejectPromiseNoDebugEvent,
+  "async_function_promise_create", AsyncFunctionPromiseCreate,
+  "async_function_promise_release", AsyncFunctionPromiseRelease,
+]);
+
+})
diff --git a/src/js/collection.js b/src/js/collection.js
index 83763af..6fe880d 100644
--- a/src/js/collection.js
+++ b/src/js/collection.js
@@ -16,7 +16,6 @@
 var hashCodeSymbol = utils.ImportNow("hash_code_symbol");
 var MathRandom;
 var MapIterator;
-var NumberIsNaN;
 var SetIterator;
 var speciesSymbol = utils.ImportNow("species_symbol");
 var toStringTagSymbol = utils.ImportNow("to_string_tag_symbol");
@@ -24,7 +23,6 @@
 utils.Import(function(from) {
   MathRandom = from.MathRandom;
   MapIterator = from.MapIterator;
-  NumberIsNaN = from.NumberIsNaN;
   SetIterator = from.SetIterator;
 });
 
@@ -42,9 +40,9 @@
   if (entry === NOT_FOUND) return entry;
   var candidate = ORDERED_HASH_SET_KEY_AT(table, entry, numBuckets);
   if (key === candidate) return entry;
-  var keyIsNaN = NumberIsNaN(key);
+  var keyIsNaN = NUMBER_IS_NAN(key);
   while (true) {
-    if (keyIsNaN && NumberIsNaN(candidate)) {
+    if (keyIsNaN && NUMBER_IS_NAN(candidate)) {
       return entry;
     }
     entry = ORDERED_HASH_SET_CHAIN_AT(table, entry, numBuckets);
@@ -62,9 +60,9 @@
   if (entry === NOT_FOUND) return entry;
   var candidate = ORDERED_HASH_MAP_KEY_AT(table, entry, numBuckets);
   if (key === candidate) return entry;
-  var keyIsNaN = NumberIsNaN(key);
+  var keyIsNaN = NUMBER_IS_NAN(key);
   while (true) {
-    if (keyIsNaN && NumberIsNaN(candidate)) {
+    if (keyIsNaN && NUMBER_IS_NAN(candidate)) {
       return entry;
     }
     entry = ORDERED_HASH_MAP_CHAIN_AT(table, entry, numBuckets);
diff --git a/src/js/datetime-format-to-parts.js b/src/js/datetime-format-to-parts.js
new file mode 100644
index 0000000..3194f50
--- /dev/null
+++ b/src/js/datetime-format-to-parts.js
@@ -0,0 +1,16 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+(function(global, utils) {
+"use strict";
+
+%CheckIsBootstrapping();
+
+var GlobalIntl = global.Intl;
+var FormatDateToParts = utils.ImportNow("FormatDateToParts");
+
+utils.InstallFunctions(GlobalIntl.DateTimeFormat.prototype,  DONT_ENUM, [
+    'formatToParts', FormatDateToParts
+]);
+})
diff --git a/src/js/harmony-async-await.js b/src/js/harmony-async-await.js
deleted file mode 100644
index 3a48d0c..0000000
--- a/src/js/harmony-async-await.js
+++ /dev/null
@@ -1,51 +0,0 @@
-// Copyright 2016 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-(function(global, utils, extrasUtils) {
-
-"use strict";
-
-%CheckIsBootstrapping();
-
-// -------------------------------------------------------------------
-// Imports
-
-var AsyncFunctionNext;
-var AsyncFunctionThrow;
-var GlobalPromise;
-var NewPromiseCapability;
-var PerformPromiseThen;
-var PromiseCastResolved;
-
-utils.Import(function(from) {
-  AsyncFunctionNext = from.AsyncFunctionNext;
-  AsyncFunctionThrow = from.AsyncFunctionThrow;
-  GlobalPromise = from.GlobalPromise;
-  NewPromiseCapability = from.NewPromiseCapability;
-  PromiseCastResolved = from.PromiseCastResolved;
-  PerformPromiseThen = from.PerformPromiseThen;
-});
-
-// -------------------------------------------------------------------
-
-function AsyncFunctionAwait(generator, value) {
-  // Promise.resolve(value).then(
-  //     value => AsyncFunctionNext(value),
-  //     error => AsyncFunctionThrow(error)
-  // );
-  var promise = PromiseCastResolved(value);
-
-  var onFulfilled =
-      (sentValue) => %_Call(AsyncFunctionNext, generator, sentValue);
-  var onRejected =
-      (sentError) => %_Call(AsyncFunctionThrow, generator, sentError);
-
-  var throwawayCapability = NewPromiseCapability(GlobalPromise);
-  return PerformPromiseThen(promise, onFulfilled, onRejected,
-                            throwawayCapability);
-}
-
-%InstallToContext([ "async_function_await", AsyncFunctionAwait ]);
-
-})
diff --git a/src/js/i18n.js b/src/js/i18n.js
index 6046a6f..a397849 100644
--- a/src/js/i18n.js
+++ b/src/js/i18n.js
@@ -19,7 +19,6 @@
 
 var ArrayJoin;
 var ArrayPush;
-var FLAG_intl_extra;
 var GlobalDate = global.Date;
 var GlobalNumber = global.Number;
 var GlobalRegExp = global.RegExp;
@@ -29,31 +28,21 @@
 var InternalArray = utils.InternalArray;
 var InternalRegExpMatch;
 var InternalRegExpReplace
-var IsNaN;
 var ObjectHasOwnProperty = utils.ImportNow("ObjectHasOwnProperty");
 var OverrideFunction = utils.OverrideFunction;
 var patternSymbol = utils.ImportNow("intl_pattern_symbol");
 var resolvedSymbol = utils.ImportNow("intl_resolved_symbol");
 var SetFunctionName = utils.SetFunctionName;
 var StringIndexOf;
-var StringLastIndexOf;
-var StringSubstr;
-var StringSubstring;
+var StringSubstr = GlobalString.prototype.substr;
+var StringSubstring = GlobalString.prototype.substring;
 
 utils.Import(function(from) {
   ArrayJoin = from.ArrayJoin;
   ArrayPush = from.ArrayPush;
-  IsNaN = from.IsNaN;
   InternalRegExpMatch = from.InternalRegExpMatch;
   InternalRegExpReplace = from.InternalRegExpReplace;
   StringIndexOf = from.StringIndexOf;
-  StringLastIndexOf = from.StringLastIndexOf;
-  StringSubstr = from.StringSubstr;
-  StringSubstring = from.StringSubstring;
-});
-
-utils.ImportFromExperimental(function(from) {
-  FLAG_intl_extra = from.FLAG_intl_extra;
 });
 
 // Utilities for definitions
@@ -318,7 +307,7 @@
         break;
       }
       // Truncate locale if possible, if not break.
-      var pos = %_Call(StringLastIndexOf, locale, '-');
+      var pos = %StringLastIndexOf(locale, '-');
       if (pos === -1) {
         break;
       }
@@ -441,7 +430,7 @@
         return {'locale': locale, 'extension': extension, 'position': i};
       }
       // Truncate locale if possible.
-      var pos = %_Call(StringLastIndexOf, locale, '-');
+      var pos = %StringLastIndexOf(locale, '-');
       if (pos === -1) {
         break;
       }
@@ -1038,9 +1027,6 @@
   // Writable, configurable and enumerable are set to false by default.
   %MarkAsInitializedIntlObjectOfType(collator, 'collator', internalCollator);
   collator[resolvedSymbol] = resolved;
-  if (FLAG_intl_extra) {
-    %object_define_property(collator, 'resolved', resolvedAccessor);
-  }
 
   return collator;
 }
@@ -1282,10 +1268,6 @@
 
   %MarkAsInitializedIntlObjectOfType(numberFormat, 'numberformat', formatter);
   numberFormat[resolvedSymbol] = resolved;
-  if (FLAG_intl_extra) {
-    %object_define_property(resolved, 'pattern', patternAccessor);
-    %object_define_property(numberFormat, 'resolved', resolvedAccessor);
-  }
 
   return numberFormat;
 }
@@ -1388,14 +1370,6 @@
 }
 
 
-/**
- * Returns a Number that represents string value that was passed in.
- */
-function IntlParseNumber(formatter, value) {
-  return %InternalNumberParse(%GetImplFromInitializedIntlObject(formatter),
-                              TO_STRING(value));
-}
-
 AddBoundMethod(Intl.NumberFormat, 'format', formatNumber, 1, 'numberformat');
 
 /**
@@ -1676,10 +1650,6 @@
 
   %MarkAsInitializedIntlObjectOfType(dateFormat, 'dateformat', formatter);
   dateFormat[resolvedSymbol] = resolved;
-  if (FLAG_intl_extra) {
-    %object_define_property(resolved, 'pattern', patternAccessor);
-    %object_define_property(dateFormat, 'resolved', resolvedAccessor);
-  }
 
   return dateFormat;
 }
@@ -1797,18 +1767,29 @@
                              new GlobalDate(dateMs));
 }
 
+function FormatDateToParts(dateValue) {
+  if (!IS_UNDEFINED(new.target)) {
+    throw %make_type_error(kOrdinaryFunctionCalledAsConstructor);
+  }
+  CHECK_OBJECT_COERCIBLE(this, "Intl.DateTimeFormat.prototype.formatToParts");
+  if (!IS_OBJECT(this)) {
+    throw %make_type_error(kCalledOnNonObject, this);
+  }
+  var dateMs;
+  if (IS_UNDEFINED(dateValue)) {
+    dateMs = %DateCurrentTime();
+  } else {
+    dateMs = TO_NUMBER(dateValue);
+  }
 
-/**
- * Returns a Date object representing the result of calling ToString(value)
- * according to the effective locale and the formatting options of this
- * DateTimeFormat.
- * Returns undefined if date string cannot be parsed.
- */
-function IntlParseDate(formatter, value) {
-  return %InternalDateParse(%GetImplFromInitializedIntlObject(formatter),
-                            TO_STRING(value));
+  if (!NUMBER_IS_FINITE(dateMs)) throw %make_range_error(kDateRange);
+
+  return %InternalDateFormatToParts(
+      %GetImplFromInitializedIntlObject(this), new GlobalDate(dateMs));
 }
 
+%FunctionSetLength(FormatDateToParts, 0);
+
 
 // 0 because date is optional argument.
 AddBoundMethod(Intl.DateTimeFormat, 'format', formatDate, 0, 'dateformat');
@@ -1889,9 +1870,6 @@
   %MarkAsInitializedIntlObjectOfType(iterator, 'breakiterator',
                                      internalIterator);
   iterator[resolvedSymbol] = resolved;
-  if (FLAG_intl_extra) {
-    %object_define_property(iterator, 'resolved', resolvedAccessor);
-  }
 
   return iterator;
 }
@@ -2227,7 +2205,8 @@
     throw %make_type_error(kMethodInvokedOnWrongType, "Date");
   }
 
-  if (IsNaN(date)) return 'Invalid Date';
+  var dateValue = TO_NUMBER(date);
+  if (NUMBER_IS_NAN(dateValue)) return 'Invalid Date';
 
   var internalOptions = toDateTimeOptions(options, required, defaults);
 
@@ -2291,10 +2270,10 @@
   }
 );
 
+%FunctionRemovePrototype(FormatDateToParts);
+
 utils.Export(function(to) {
-  to.AddBoundMethod = AddBoundMethod;
-  to.IntlParseDate = IntlParseDate;
-  to.IntlParseNumber = IntlParseNumber;
+  to.FormatDateToParts = FormatDateToParts;
 });
 
 })
diff --git a/src/js/intl-extra.js b/src/js/intl-extra.js
deleted file mode 100644
index a4d2256..0000000
--- a/src/js/intl-extra.js
+++ /dev/null
@@ -1,22 +0,0 @@
-// Copyright 2016 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-(function(global, utils) {
-
-"use strict";
-
-%CheckIsBootstrapping();
-
-var GlobalIntl = global.Intl;
-
-var AddBoundMethod = utils.ImportNow("AddBoundMethod");
-var IntlParseDate = utils.ImportNow("IntlParseDate");
-var IntlParseNumber = utils.ImportNow("IntlParseNumber");
-
-AddBoundMethod(GlobalIntl.DateTimeFormat, 'v8Parse', IntlParseDate, 1,
-               'dateformat');
-AddBoundMethod(GlobalIntl.NumberFormat, 'v8Parse', IntlParseNumber, 1,
-               'numberformat');
-
-})
diff --git a/src/js/iterator-prototype.js b/src/js/iterator-prototype.js
deleted file mode 100644
index 6f25019..0000000
--- a/src/js/iterator-prototype.js
+++ /dev/null
@@ -1,21 +0,0 @@
-// Copyright 2015 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-(function(global, utils) {
-  "use strict";
-  %CheckIsBootstrapping();
-
-  var GlobalObject = global.Object;
-  var IteratorPrototype = utils.ImportNow("IteratorPrototype");
-  var iteratorSymbol = utils.ImportNow("iterator_symbol");
-
-  // 25.1.2.1 %IteratorPrototype% [ @@iterator ] ( )
-  function IteratorPrototypeIterator() {
-    return this;
-  }
-
-  utils.SetFunctionName(IteratorPrototypeIterator, iteratorSymbol);
-  %AddNamedProperty(IteratorPrototype, iteratorSymbol,
-      IteratorPrototypeIterator, DONT_ENUM);
-})
diff --git a/src/js/prologue.js b/src/js/prologue.js
index bb81879..8a07a4c 100644
--- a/src/js/prologue.js
+++ b/src/js/prologue.js
@@ -120,7 +120,7 @@
   SetFunctionName(setter, name, "set");
   %FunctionRemovePrototype(getter);
   %FunctionRemovePrototype(setter);
-  %DefineAccessorPropertyUnchecked(object, name, getter, setter, DONT_ENUM);
+  %DefineAccessorPropertyUnchecked(object, name, getter, setter, attributes);
   %SetNativeFlag(getter);
   %SetNativeFlag(setter);
 }
@@ -181,32 +181,15 @@
 
   // Whitelist of exports from normal natives to experimental natives and debug.
   var expose_list = [
-    "AddBoundMethod",
     "ArrayToString",
-    "AsyncFunctionNext",
-    "AsyncFunctionThrow",
+    "FormatDateToParts",
     "GetIterator",
     "GetMethod",
-    "GlobalPromise",
-    "IntlParseDate",
-    "IntlParseNumber",
-    "IsNaN",
     "MapEntries",
     "MapIterator",
     "MapIteratorNext",
     "MaxSimple",
     "MinSimple",
-    "NewPromiseCapability",
-    "NumberIsInteger",
-    "PerformPromiseThen",
-    "PromiseCastResolved",
-    "PromiseThen",
-    "RegExpSubclassExecJS",
-    "RegExpSubclassMatch",
-    "RegExpSubclassReplace",
-    "RegExpSubclassSearch",
-    "RegExpSubclassSplit",
-    "RegExpSubclassTest",
     "SetIterator",
     "SetIteratorNext",
     "SetValues",
@@ -218,11 +201,11 @@
     // From runtime:
     "is_concat_spreadable_symbol",
     "iterator_symbol",
-    "promise_result_symbol",
-    "promise_state_symbol",
     "object_freeze",
     "object_is_frozen",
     "object_is_sealed",
+    "promise_result_symbol",
+    "promise_state_symbol",
     "reflect_apply",
     "reflect_construct",
     "regexp_flags_symbol",
diff --git a/src/js/promise.js b/src/js/promise.js
index b50fc80..793d60f 100644
--- a/src/js/promise.js
+++ b/src/js/promise.js
@@ -12,8 +12,12 @@
 // Imports
 
 var InternalArray = utils.InternalArray;
-var promiseCombinedDeferredSymbol =
-    utils.ImportNow("promise_combined_deferred_symbol");
+var promiseAsyncStackIDSymbol =
+    utils.ImportNow("promise_async_stack_id_symbol");
+var promiseHandledBySymbol =
+    utils.ImportNow("promise_handled_by_symbol");
+var promiseForwardingHandlerSymbol =
+    utils.ImportNow("promise_forwarding_handler_symbol");
 var promiseHasHandlerSymbol =
     utils.ImportNow("promise_has_handler_symbol");
 var promiseRejectReactionsSymbol =
@@ -22,14 +26,18 @@
     utils.ImportNow("promise_fulfill_reactions_symbol");
 var promiseDeferredReactionsSymbol =
     utils.ImportNow("promise_deferred_reactions_symbol");
+var promiseHandledHintSymbol =
+    utils.ImportNow("promise_handled_hint_symbol");
 var promiseRawSymbol = utils.ImportNow("promise_raw_symbol");
 var promiseStateSymbol = utils.ImportNow("promise_state_symbol");
 var promiseResultSymbol = utils.ImportNow("promise_result_symbol");
 var SpeciesConstructor;
 var speciesSymbol = utils.ImportNow("species_symbol");
 var toStringTagSymbol = utils.ImportNow("to_string_tag_symbol");
+var ObjectHasOwnProperty;
 
 utils.Import(function(from) {
+  ObjectHasOwnProperty = from.ObjectHasOwnProperty;
   SpeciesConstructor = from.SpeciesConstructor;
 });
 
@@ -42,9 +50,13 @@
 
 var lastMicrotaskId = 0;
 
+function PromiseNextMicrotaskID() {
+  return ++lastMicrotaskId;
+}
+
 // ES#sec-createresolvingfunctions
 // CreateResolvingFunctions ( promise )
-function CreateResolvingFunctions(promise) {
+function CreateResolvingFunctions(promise, debugEvent) {
   var alreadyResolved = false;
 
   // ES#sec-promise-resolve-functions
@@ -60,7 +72,7 @@
   var reject = reason => {
     if (alreadyResolved === true) return;
     alreadyResolved = true;
-    RejectPromise(promise, reason);
+    RejectPromise(promise, reason, debugEvent);
   };
 
   return {
@@ -83,7 +95,8 @@
   }
 
   var promise = PromiseInit(%_NewObject(GlobalPromise, new.target));
-  var callbacks = CreateResolvingFunctions(promise);
+  // Calling the reject function would be a new exception, so debugEvent = true
+  var callbacks = CreateResolvingFunctions(promise, true);
   var debug_is_active = DEBUG_IS_ACTIVE;
   try {
     if (debug_is_active) %DebugPushPromise(promise);
@@ -182,9 +195,24 @@
     }
   });
   if (instrumenting) {
-    id = ++lastMicrotaskId;
-    name = status === kFulfilled ? "Promise.resolve" : "Promise.reject";
-    %DebugAsyncTaskEvent({ type: "enqueue", id: id, name: name });
+    // In an async function, reuse the existing stack related to the outer
+    // Promise. Otherwise, e.g. in a direct call to then, save a new stack.
+    // Promises with multiple reactions with one or more of them being async
+    // functions will not get a good stack trace, as async functions require
+    // different stacks from direct Promise use, but we save and restore a
+    // stack once for all reactions. TODO(littledan): Improve this case.
+    if (!IS_UNDEFINED(deferreds) &&
+        HAS_PRIVATE(deferreds.promise, promiseHandledBySymbol) &&
+        HAS_PRIVATE(GET_PRIVATE(deferreds.promise, promiseHandledBySymbol),
+                    promiseAsyncStackIDSymbol)) {
+      id = GET_PRIVATE(GET_PRIVATE(deferreds.promise, promiseHandledBySymbol),
+                       promiseAsyncStackIDSymbol);
+      name = "async function";
+    } else {
+      id = PromiseNextMicrotaskID();
+      name = status === kFulfilled ? "Promise.resolve" : "Promise.reject";
+      %DebugAsyncTaskEvent({ type: "enqueue", id: id, name: name });
+    }
   }
 }
 
@@ -209,16 +237,16 @@
 
     SET_PRIVATE(promise, promiseFulfillReactionsSymbol, resolveCallbacks);
     SET_PRIVATE(promise, promiseRejectReactionsSymbol, rejectCallbacks);
+    SET_PRIVATE(promise, promiseDeferredReactionsSymbol, UNDEFINED);
   } else {
     maybeResolveCallbacks.push(onResolve, deferred);
     GET_PRIVATE(promise, promiseRejectReactionsSymbol).push(onReject, deferred);
   }
 }
 
-function PromiseIdResolveHandler(x) { return x }
-function PromiseIdRejectHandler(r) { throw r }
-
-function PromiseNopResolver() {}
+function PromiseIdResolveHandler(x) { return x; }
+function PromiseIdRejectHandler(r) { %_ReThrow(r); }
+SET_PRIVATE(PromiseIdRejectHandler, promiseForwardingHandlerSymbol, true);
 
 // -------------------------------------------------------------------
 // Define exported functions.
@@ -231,21 +259,23 @@
 }
 
 function PromiseCreate() {
-  return new GlobalPromise(PromiseNopResolver)
+  return PromiseInit(new GlobalPromise(promiseRawSymbol));
 }
 
 // ES#sec-promise-resolve-functions
 // Promise Resolve Functions, steps 6-13
 function ResolvePromise(promise, resolution) {
   if (resolution === promise) {
-    return RejectPromise(promise, %make_type_error(kPromiseCyclic, resolution));
+    return RejectPromise(promise,
+                         %make_type_error(kPromiseCyclic, resolution),
+                         true);
   }
   if (IS_RECEIVER(resolution)) {
     // 25.4.1.3.2 steps 8-12
     try {
       var then = resolution.then;
     } catch (e) {
-      return RejectPromise(promise, e);
+      return RejectPromise(promise, e, true);
     }
 
     // Resolution is a native promise and if it's already resolved or
@@ -268,63 +298,80 @@
           // Revoke previously triggered reject event.
           %PromiseRevokeReject(resolution);
         }
-        RejectPromise(promise, thenableValue);
+        // Don't cause a debug event as this case is forwarding a rejection
+        RejectPromise(promise, thenableValue, false);
         SET_PRIVATE(resolution, promiseHasHandlerSymbol, true);
         return;
       }
     }
 
     if (IS_CALLABLE(then)) {
-      // PromiseResolveThenableJob
-      var id;
-      var name = "PromiseResolveThenableJob";
+      var callbacks = CreateResolvingFunctions(promise, false);
+      var id, before_debug_event, after_debug_event;
       var instrumenting = DEBUG_IS_ACTIVE;
-      %EnqueueMicrotask(function() {
-        if (instrumenting) {
-          %DebugAsyncTaskEvent({ type: "willHandle", id: id, name: name });
-        }
-        var callbacks = CreateResolvingFunctions(promise);
-        try {
-          %_Call(then, resolution, callbacks.resolve, callbacks.reject);
-        } catch (e) {
-          %_Call(callbacks.reject, UNDEFINED, e);
-        }
-        if (instrumenting) {
-          %DebugAsyncTaskEvent({ type: "didHandle", id: id, name: name });
-        }
-      });
       if (instrumenting) {
-        id = ++lastMicrotaskId;
-        %DebugAsyncTaskEvent({ type: "enqueue", id: id, name: name });
+        if (IsPromise(resolution)) {
+          // Mark the dependency of the new promise on the resolution
+          SET_PRIVATE(resolution, promiseHandledBySymbol, promise);
+        }
+        id = PromiseNextMicrotaskID();
+        before_debug_event = {
+          type: "willHandle",
+          id: id,
+          name: "PromiseResolveThenableJob"
+        };
+        after_debug_event = {
+          type: "didHandle",
+          id: id,
+          name: "PromiseResolveThenableJob"
+        };
+        %DebugAsyncTaskEvent({
+          type: "enqueue",
+          id: id,
+          name: "PromiseResolveThenableJob"
+        });
       }
+      %EnqueuePromiseResolveThenableJob(
+          resolution, then, callbacks.resolve, callbacks.reject,
+          before_debug_event, after_debug_event);
       return;
     }
   }
-  FulfillPromise(promise, kFulfilled, resolution, promiseFulfillReactionsSymbol);
+  FulfillPromise(promise, kFulfilled, resolution,
+                 promiseFulfillReactionsSymbol);
 }
 
 // ES#sec-rejectpromise
 // RejectPromise ( promise, reason )
-function RejectPromise(promise, reason) {
+function RejectPromise(promise, reason, debugEvent) {
   // Check promise status to confirm that this reject has an effect.
   // Call runtime for callbacks to the debugger or for unhandled reject.
+  // The debugEvent parameter sets whether a debug ExceptionEvent should
+  // be triggered. It should be set to false when forwarding a rejection
+  // rather than creating a new one.
   if (GET_PRIVATE(promise, promiseStateSymbol) === kPending) {
-    var debug_is_active = DEBUG_IS_ACTIVE;
-    if (debug_is_active ||
+    // This check is redundant with checks in the runtime, but it may help
+    // avoid unnecessary runtime calls.
+    if ((debugEvent && DEBUG_IS_ACTIVE) ||
         !HAS_DEFINED_PRIVATE(promise, promiseHasHandlerSymbol)) {
-      %PromiseRejectEvent(promise, reason, debug_is_active);
+      %PromiseRejectEvent(promise, reason, debugEvent);
     }
   }
   FulfillPromise(promise, kRejected, reason, promiseRejectReactionsSymbol)
 }
 
+// Export to bindings
+function DoRejectPromise(promise, reason) {
+  return RejectPromise(promise, reason, true);
+}
+
 // ES#sec-newpromisecapability
 // NewPromiseCapability ( C )
-function NewPromiseCapability(C) {
+function NewPromiseCapability(C, debugEvent) {
   if (C === GlobalPromise) {
     // Optimized case, avoid extra closure.
-    var promise = PromiseInit(new GlobalPromise(promiseRawSymbol));
-    var callbacks = CreateResolvingFunctions(promise);
+    var promise = PromiseCreate();
+    var callbacks = CreateResolvingFunctions(promise, debugEvent);
     return {
       promise: promise,
       resolve: callbacks.resolve,
@@ -355,39 +402,17 @@
   if (this === GlobalPromise) {
     // Optimized case, avoid extra closure.
     var promise = PromiseCreateAndSet(kRejected, r);
-    // The debug event for this would always be an uncaught promise reject,
-    // which is usually simply noise. Do not trigger that debug event.
-    %PromiseRejectEvent(promise, r, false);
+    // Trigger debug events if the debugger is on, as Promise.reject is
+    // equivalent to throwing an exception directly.
+    %PromiseRejectEventFromStack(promise, r);
     return promise;
   } else {
-    var promiseCapability = NewPromiseCapability(this);
+    var promiseCapability = NewPromiseCapability(this, true);
     %_Call(promiseCapability.reject, UNDEFINED, r);
     return promiseCapability.promise;
   }
 }
 
-// Shortcut Promise.reject and Promise.resolve() implementations, used by
-// Async Functions implementation.
-function PromiseCreateRejected(r) {
-  return %_Call(PromiseReject, GlobalPromise, r);
-}
-
-function PromiseCreateResolved(value) {
-  var promise = PromiseInit(new GlobalPromise(promiseRawSymbol));
-  var resolveResult = ResolvePromise(promise, value);
-  return promise;
-}
-
-function PromiseCastResolved(value) {
-  if (IsPromise(value)) {
-    return value;
-  } else {
-    var promise = PromiseInit(new GlobalPromise(promiseRawSymbol));
-    var resolveResult = ResolvePromise(promise, value);
-    return promise;
-  }
-}
-
 function PerformPromiseThen(promise, onResolve, onReject, resultCapability) {
   if (!IS_CALLABLE(onResolve)) onResolve = PromiseIdResolveHandler;
   if (!IS_CALLABLE(onReject)) onReject = PromiseIdRejectHandler;
@@ -427,7 +452,9 @@
   }
 
   var constructor = SpeciesConstructor(this, GlobalPromise);
-  var resultCapability = NewPromiseCapability(constructor);
+  // Pass false for debugEvent so .then chaining does not trigger
+  // redundant ExceptionEvents.
+  var resultCapability = NewPromiseCapability(constructor, false);
   return PerformPromiseThen(this, onResolve, onReject, resultCapability);
 }
 
@@ -449,12 +476,13 @@
 
   // Avoid creating resolving functions.
   if (this === GlobalPromise) {
-    var promise = PromiseInit(new GlobalPromise(promiseRawSymbol));
+    var promise = PromiseCreate();
     var resolveResult = ResolvePromise(promise, x);
     return promise;
   }
 
-  var promiseCapability = NewPromiseCapability(this);
+  // debugEvent is not so meaningful here as it will be resolved
+  var promiseCapability = NewPromiseCapability(this, true);
   var resolveResult = %_Call(promiseCapability.resolve, UNDEFINED, x);
   return promiseCapability.promise;
 }
@@ -466,10 +494,19 @@
     throw %make_type_error(kCalledOnNonObject, "Promise.all");
   }
 
-  var deferred = NewPromiseCapability(this);
+  // false debugEvent so that forwarding the rejection through all does not
+  // trigger redundant ExceptionEvents
+  var deferred = NewPromiseCapability(this, false);
   var resolutions = new InternalArray();
   var count;
 
+  // For catch prediction, don't treat the .then calls as handling it;
+  // instead, recurse outwards.
+  var instrumenting = DEBUG_IS_ACTIVE;
+  if (instrumenting) {
+    SET_PRIVATE(deferred.reject, promiseForwardingHandlerSymbol, true);
+  }
+
   function CreateResolveElementFunction(index, values, promiseCapability) {
     var alreadyCalled = false;
     return (x) => {
@@ -490,10 +527,14 @@
     for (var value of iterable) {
       var nextPromise = this.resolve(value);
       ++count;
-      nextPromise.then(
+      var throwawayPromise = nextPromise.then(
           CreateResolveElementFunction(i, resolutions, deferred),
           deferred.reject);
-      SET_PRIVATE(deferred.reject, promiseCombinedDeferredSymbol, deferred);
+      // For catch prediction, mark that rejections here are semantically
+      // handled by the combined Promise.
+      if (instrumenting && IsPromise(throwawayPromise)) {
+        SET_PRIVATE(throwawayPromise, promiseHandledBySymbol, deferred.promise);
+      }
       ++i;
     }
 
@@ -517,11 +558,26 @@
     throw %make_type_error(kCalledOnNonObject, PromiseRace);
   }
 
-  var deferred = NewPromiseCapability(this);
+  // false debugEvent so that forwarding the rejection through race does not
+  // trigger redundant ExceptionEvents
+  var deferred = NewPromiseCapability(this, false);
+
+  // For catch prediction, don't treat the .then calls as handling it;
+  // instead, recurse outwards.
+  var instrumenting = DEBUG_IS_ACTIVE;
+  if (instrumenting) {
+    SET_PRIVATE(deferred.reject, promiseForwardingHandlerSymbol, true);
+  }
+
   try {
     for (var value of iterable) {
-      this.resolve(value).then(deferred.resolve, deferred.reject);
-      SET_PRIVATE(deferred.reject, promiseCombinedDeferredSymbol, deferred);
+      var throwawayPromise = this.resolve(value).then(deferred.resolve,
+                                                      deferred.reject);
+      // For catch prediction, mark that rejections here are semantically
+      // handled by the combined Promise.
+      if (instrumenting && IsPromise(throwawayPromise)) {
+        SET_PRIVATE(throwawayPromise, promiseHandledBySymbol, deferred.promise);
+      }
     }
   } catch (e) {
     deferred.reject(e)
@@ -533,29 +589,48 @@
 // Utility for debugger
 
 function PromiseHasUserDefinedRejectHandlerCheck(handler, deferred) {
-  if (handler !== PromiseIdRejectHandler) {
-    var combinedDeferred = GET_PRIVATE(handler, promiseCombinedDeferredSymbol);
-    if (IS_UNDEFINED(combinedDeferred)) return true;
-    if (PromiseHasUserDefinedRejectHandlerRecursive(combinedDeferred.promise)) {
-      return true;
-    }
-  } else if (PromiseHasUserDefinedRejectHandlerRecursive(deferred.promise)) {
-    return true;
+  // Recurse to the forwarding Promise, if any. This may be due to
+  //  - await reaction forwarding to the throwaway Promise, which has
+  //    a dependency edge to the outer Promise.
+  //  - PromiseIdResolveHandler forwarding to the output of .then
+  //  - Promise.all/Promise.race forwarding to a throwaway Promise, which
+  //    has a dependency edge to the generated outer Promise.
+  if (GET_PRIVATE(handler, promiseForwardingHandlerSymbol)) {
+    return PromiseHasUserDefinedRejectHandlerRecursive(deferred.promise);
   }
-  return false;
+
+  // Otherwise, this is a real reject handler for the Promise
+  return true;
 }
 
 function PromiseHasUserDefinedRejectHandlerRecursive(promise) {
+  // If this promise was marked as being handled by a catch block
+  // in an async function, then it has a user-defined reject handler.
+  if (GET_PRIVATE(promise, promiseHandledHintSymbol)) return true;
+
+  // If this Promise is subsumed by another Promise (a Promise resolved
+  // with another Promise, or an intermediate, hidden, throwaway Promise
+  // within async/await), then recurse on the outer Promise.
+  // In this case, the dependency is one possible way that the Promise
+  // could be resolved, so it does not subsume the other following cases.
+  var outerPromise = GET_PRIVATE(promise, promiseHandledBySymbol);
+  if (outerPromise &&
+      PromiseHasUserDefinedRejectHandlerRecursive(outerPromise)) {
+    return true;
+  }
+
   var queue = GET_PRIVATE(promise, promiseRejectReactionsSymbol);
   var deferreds = GET_PRIVATE(promise, promiseDeferredReactionsSymbol);
+
   if (IS_UNDEFINED(queue)) return false;
+
   if (!IS_ARRAY(queue)) {
     return PromiseHasUserDefinedRejectHandlerCheck(queue, deferreds);
-  } else {
-    for (var i = 0; i < queue.length; i += 2) {
-      if (PromiseHasUserDefinedRejectHandlerCheck(queue[i], queue[i + 1])) {
-        return true;
-      }
+  }
+
+  for (var i = 0; i < queue.length; i += 2) {
+    if (PromiseHasUserDefinedRejectHandlerCheck(queue[i], queue[i + 1])) {
+      return true;
     }
   }
   return false;
@@ -564,6 +639,8 @@
 // Return whether the promise will be handled by a user-defined reject
 // handler somewhere down the promise chain. For this, we do a depth-first
 // search for a reject handler that's not the default PromiseIdRejectHandler.
+// This function also traverses dependencies of one Promise on another,
+// set up through async/await and Promises resolved with Promises.
 function PromiseHasUserDefinedRejectHandler() {
   return PromiseHasUserDefinedRejectHandlerRecursive(this);
 };
@@ -598,11 +675,9 @@
   "promise_catch", PromiseCatch,
   "promise_create", PromiseCreate,
   "promise_has_user_defined_reject_handler", PromiseHasUserDefinedRejectHandler,
-  "promise_reject", RejectPromise,
+  "promise_reject", DoRejectPromise,
   "promise_resolve", ResolvePromise,
-  "promise_then", PromiseThen,
-  "promise_create_rejected", PromiseCreateRejected,
-  "promise_create_resolved", PromiseCreateResolved
+  "promise_then", PromiseThen
 ]);
 
 // This allows extras to create promises quickly without building extra
@@ -611,16 +686,20 @@
 utils.InstallFunctions(extrasUtils, 0, [
   "createPromise", PromiseCreate,
   "resolvePromise", ResolvePromise,
-  "rejectPromise", RejectPromise
+  "rejectPromise", DoRejectPromise
 ]);
 
 utils.Export(function(to) {
-  to.PromiseCastResolved = PromiseCastResolved;
+  to.IsPromise = IsPromise;
+  to.PromiseCreate = PromiseCreate;
   to.PromiseThen = PromiseThen;
+  to.PromiseNextMicrotaskID = PromiseNextMicrotaskID;
 
   to.GlobalPromise = GlobalPromise;
   to.NewPromiseCapability = NewPromiseCapability;
   to.PerformPromiseThen = PerformPromiseThen;
+  to.ResolvePromise = ResolvePromise;
+  to.RejectPromise = RejectPromise;
 });
 
 })
diff --git a/src/js/regexp.js b/src/js/regexp.js
index dbe4837..49da45b 100644
--- a/src/js/regexp.js
+++ b/src/js/regexp.js
@@ -4,20 +4,22 @@
 
 (function(global, utils) {
 
+'use strict';
+
 %CheckIsBootstrapping();
 
 // -------------------------------------------------------------------
 // Imports
 
-var ExpandReplacement;
 var GlobalArray = global.Array;
 var GlobalObject = global.Object;
 var GlobalRegExp = global.RegExp;
-var GlobalRegExpPrototype;
+var GlobalRegExpPrototype = GlobalRegExp.prototype;
 var InternalArray = utils.InternalArray;
 var InternalPackedArray = utils.InternalPackedArray;
 var MaxSimple;
 var MinSimple;
+var RegExpExecJS = GlobalRegExp.prototype.exec;
 var matchSymbol = utils.ImportNow("match_symbol");
 var replaceSymbol = utils.ImportNow("replace_symbol");
 var searchSymbol = utils.ImportNow("search_symbol");
@@ -26,7 +28,6 @@
 var SpeciesConstructor;
 
 utils.Import(function(from) {
-  ExpandReplacement = from.ExpandReplacement;
   MaxSimple = from.MaxSimple;
   MinSimple = from.MinSimple;
   SpeciesConstructor = from.SpeciesConstructor;
@@ -80,37 +81,6 @@
 }
 
 
-// ES#sec-regexp-pattern-flags
-// RegExp ( pattern, flags )
-function RegExpConstructor(pattern, flags) {
-  var newtarget = new.target;
-  var pattern_is_regexp = IsRegExp(pattern);
-
-  if (IS_UNDEFINED(newtarget)) {
-    newtarget = GlobalRegExp;
-
-    // ES6 section 21.2.3.1 step 3.b
-    if (pattern_is_regexp && IS_UNDEFINED(flags) &&
-        pattern.constructor === newtarget) {
-      return pattern;
-    }
-  }
-
-  if (IS_REGEXP(pattern)) {
-    if (IS_UNDEFINED(flags)) flags = PatternFlags(pattern);
-    pattern = REGEXP_SOURCE(pattern);
-
-  } else if (pattern_is_regexp) {
-    var input_pattern = pattern;
-    pattern = pattern.source;
-    if (IS_UNDEFINED(flags)) flags = input_pattern.flags;
-  }
-
-  var object = %_NewObject(GlobalRegExp, newtarget);
-  return RegExpInitialize(object, pattern, flags);
-}
-
-
 // ES#sec-regexp.prototype.compile RegExp.prototype.compile (pattern, flags)
 function RegExpCompileJS(pattern, flags) {
   if (!IS_REGEXP(this)) {
@@ -163,105 +133,6 @@
 endmacro
 
 
-function RegExpExecNoTests(regexp, string, start) {
-  // Must be called with RegExp, string and positive integer as arguments.
-  var matchInfo = %_RegExpExec(regexp, string, start, RegExpLastMatchInfo);
-  if (matchInfo !== null) {
-    // ES6 21.2.5.2.2 step 18.
-    if (REGEXP_STICKY(regexp)) regexp.lastIndex = matchInfo[CAPTURE1];
-    RETURN_NEW_RESULT_FROM_MATCH_INFO(matchInfo, string);
-  }
-  regexp.lastIndex = 0;
-  return null;
-}
-
-
-// ES#sec-regexp.prototype.exec
-// RegExp.prototype.exec ( string )
-function RegExpSubclassExecJS(string) {
-  if (!IS_REGEXP(this)) {
-    throw %make_type_error(kIncompatibleMethodReceiver,
-                        'RegExp.prototype.exec', this);
-  }
-
-  string = TO_STRING(string);
-  var lastIndex = this.lastIndex;
-
-  // Conversion is required by the ES2015 specification (RegExpBuiltinExec
-  // algorithm, step 4) even if the value is discarded for non-global RegExps.
-  var i = TO_LENGTH(lastIndex);
-
-  var global = TO_BOOLEAN(REGEXP_GLOBAL(this));
-  var sticky = TO_BOOLEAN(REGEXP_STICKY(this));
-  var updateLastIndex = global || sticky;
-  if (updateLastIndex) {
-    if (i > string.length) {
-      this.lastIndex = 0;
-      return null;
-    }
-  } else {
-    i = 0;
-  }
-
-  // matchIndices is either null or the RegExpLastMatchInfo array.
-  // TODO(littledan): Whether a RegExp is sticky is compiled into the RegExp
-  // itself, but ES2015 allows monkey-patching this property to differ from
-  // the internal flags. If it differs, recompile a different RegExp?
-  var matchIndices = %_RegExpExec(this, string, i, RegExpLastMatchInfo);
-
-  if (IS_NULL(matchIndices)) {
-    this.lastIndex = 0;
-    return null;
-  }
-
-  // Successful match.
-  if (updateLastIndex) {
-    this.lastIndex = RegExpLastMatchInfo[CAPTURE1];
-  }
-  RETURN_NEW_RESULT_FROM_MATCH_INFO(matchIndices, string);
-}
-%FunctionRemovePrototype(RegExpSubclassExecJS);
-
-
-// Legacy implementation of RegExp.prototype.exec
-function RegExpExecJS(string) {
-  if (!IS_REGEXP(this)) {
-    throw %make_type_error(kIncompatibleMethodReceiver,
-                        'RegExp.prototype.exec', this);
-  }
-
-  string = TO_STRING(string);
-  var lastIndex = this.lastIndex;
-
-  // Conversion is required by the ES2015 specification (RegExpBuiltinExec
-  // algorithm, step 4) even if the value is discarded for non-global RegExps.
-  var i = TO_LENGTH(lastIndex);
-
-  var updateLastIndex = REGEXP_GLOBAL(this) || REGEXP_STICKY(this);
-  if (updateLastIndex) {
-    if (i < 0 || i > string.length) {
-      this.lastIndex = 0;
-      return null;
-    }
-  } else {
-    i = 0;
-  }
-
-  // matchIndices is either null or the RegExpLastMatchInfo array.
-  var matchIndices = %_RegExpExec(this, string, i, RegExpLastMatchInfo);
-
-  if (IS_NULL(matchIndices)) {
-    this.lastIndex = 0;
-    return null;
-  }
-
-  // Successful match.
-  if (updateLastIndex) {
-    this.lastIndex = RegExpLastMatchInfo[CAPTURE1];
-  }
-  RETURN_NEW_RESULT_FROM_MATCH_INFO(matchIndices, string);
-}
-
 
 // ES#sec-regexpexec Runtime Semantics: RegExpExec ( R, S )
 // Also takes an optional exec method in case our caller
@@ -282,65 +153,6 @@
 %SetForceInlineFlag(RegExpSubclassExec);
 
 
-// One-element cache for the simplified test regexp.
-var regexp_key;
-var regexp_val;
-
-// Legacy implementation of RegExp.prototype.test
-// Section 15.10.6.3 doesn't actually make sense, but the intention seems to be
-// that test is defined in terms of String.prototype.exec. However, it probably
-// means the original value of String.prototype.exec, which is what everybody
-// else implements.
-function RegExpTest(string) {
-  if (!IS_REGEXP(this)) {
-    throw %make_type_error(kIncompatibleMethodReceiver,
-                        'RegExp.prototype.test', this);
-  }
-  string = TO_STRING(string);
-
-  var lastIndex = this.lastIndex;
-
-  // Conversion is required by the ES2015 specification (RegExpBuiltinExec
-  // algorithm, step 4) even if the value is discarded for non-global RegExps.
-  var i = TO_LENGTH(lastIndex);
-
-  if (REGEXP_GLOBAL(this) || REGEXP_STICKY(this)) {
-    if (i < 0 || i > string.length) {
-      this.lastIndex = 0;
-      return false;
-    }
-    // matchIndices is either null or the RegExpLastMatchInfo array.
-    var matchIndices = %_RegExpExec(this, string, i, RegExpLastMatchInfo);
-    if (IS_NULL(matchIndices)) {
-      this.lastIndex = 0;
-      return false;
-    }
-    this.lastIndex = RegExpLastMatchInfo[CAPTURE1];
-    return true;
-  } else {
-    // Non-global, non-sticky regexp.
-    // Remove irrelevant preceeding '.*' in a test regexp.  The expression
-    // checks whether this.source starts with '.*' and that the third char is
-    // not a '?'.  But see https://code.google.com/p/v8/issues/detail?id=3560
-    var regexp = this;
-    var source = REGEXP_SOURCE(regexp);
-    if (source.length >= 3 &&
-        %_StringCharCodeAt(source, 0) == 46 &&  // '.'
-        %_StringCharCodeAt(source, 1) == 42 &&  // '*'
-        %_StringCharCodeAt(source, 2) != 63) {  // '?'
-      regexp = TrimRegExp(regexp);
-    }
-    // matchIndices is either null or the RegExpLastMatchInfo array.
-    var matchIndices = %_RegExpExec(regexp, string, 0, RegExpLastMatchInfo);
-    if (IS_NULL(matchIndices)) {
-      this.lastIndex = 0;
-      return false;
-    }
-    return true;
-  }
-}
-
-
 // ES#sec-regexp.prototype.test RegExp.prototype.test ( S )
 function RegExpSubclassTest(string) {
   if (!IS_RECEIVER(this)) {
@@ -353,18 +165,6 @@
 }
 %FunctionRemovePrototype(RegExpSubclassTest);
 
-function TrimRegExp(regexp) {
-  if (regexp_key !== regexp) {
-    regexp_key = regexp;
-    regexp_val =
-      new GlobalRegExp(
-          %_SubString(REGEXP_SOURCE(regexp), 2, REGEXP_SOURCE(regexp).length),
-          (REGEXP_IGNORE_CASE(regexp) ? REGEXP_MULTILINE(regexp) ? "im" : "i"
-                                      : REGEXP_MULTILINE(regexp) ? "m" : ""));
-  }
-  return regexp_val;
-}
-
 
 function RegExpToString() {
   if (!IS_RECEIVER(this)) {
@@ -383,14 +183,13 @@
   var first = %_StringCharCodeAt(subject, index);
   if (first < 0xD800 || first > 0xDBFF) return false;
   var second = %_StringCharCodeAt(subject, index + 1);
-  return second >= 0xDC00 || second <= 0xDFFF;
+  return second >= 0xDC00 && second <= 0xDFFF;
 }
 
 
-// Legacy implementation of RegExp.prototype[Symbol.split] which
+// Fast path implementation of RegExp.prototype[Symbol.split] which
 // doesn't properly call the underlying exec, @@species methods
 function RegExpSplit(string, limit) {
-  // TODO(yangguo): allow non-regexp receivers.
   if (!IS_REGEXP(this)) {
     throw %make_type_error(kIncompatibleMethodReceiver,
                         "RegExp.prototype.@@split", this);
@@ -473,15 +272,11 @@
   var constructor = SpeciesConstructor(this, GlobalRegExp);
   var flags = TO_STRING(this.flags);
 
-  // TODO(adamk): this fast path is wrong with respect to this.global
-  // and this.sticky, but hopefully the spec will remove those gets
-  // and thus make the assumption of 'exec' having no side-effects
-  // more correct. Also, we doesn't ensure that 'exec' is actually
-  // a data property on RegExp.prototype.
-  var exec;
+  // TODO(adamk): this fast path is wrong as we don't ensure that 'exec'
+  // is actually a data property on RegExp.prototype.
   if (IS_REGEXP(this) && constructor === GlobalRegExp) {
-    exec = this.exec;
-    if (exec === RegExpSubclassExecJS) {
+    var exec = this.exec;
+    if (exec === RegExpExecJS) {
       return %_Call(RegExpSplit, this, string, limit);
     }
   }
@@ -505,9 +300,7 @@
   var stringIndex = prevStringIndex;
   while (stringIndex < size) {
     splitter.lastIndex = stringIndex;
-    result = RegExpSubclassExec(splitter, string, exec);
-    // Ensure exec will be read again on the next loop through.
-    exec = UNDEFINED;
+    result = RegExpSubclassExec(splitter, string);
     if (IS_NULL(result)) {
       stringIndex += AdvanceStringIndex(string, stringIndex, unicode);
     } else {
@@ -697,6 +490,31 @@
   return result + %_SubString(subject, endOfMatch, subject.length);
 }
 
+// Wraps access to matchInfo's captures into a format understood by
+// GetSubstitution.
+function MatchInfoCaptureWrapper(matches, subject) {
+  this.length = NUMBER_OF_CAPTURES(matches) >> 1;
+  this.match = matches;
+  this.subject = subject;
+}
+
+MatchInfoCaptureWrapper.prototype.at = function(ix) {
+  const match = this.match;
+  const start = match[CAPTURE(ix << 1)];
+  if (start < 0) return UNDEFINED;
+  return %_SubString(this.subject, start, match[CAPTURE((ix << 1) + 1)]);
+};
+%SetForceInlineFlag(MatchInfoCaptureWrapper.prototype.at);
+
+function ArrayCaptureWrapper(array) {
+  this.length = array.length;
+  this.array = array;
+}
+
+ArrayCaptureWrapper.prototype.at = function(ix) {
+  return this.array[ix];
+};
+%SetForceInlineFlag(ArrayCaptureWrapper.prototype.at);
 
 function RegExpReplace(string, replace) {
   if (!IS_REGEXP(this)) {
@@ -720,9 +538,17 @@
         return %_SubString(subject, 0, match[CAPTURE0]) +
                %_SubString(subject, match[CAPTURE1], subject.length)
       }
-      return ExpandReplacement(replace, subject, RegExpLastMatchInfo,
-                                 %_SubString(subject, 0, match[CAPTURE0])) +
-             %_SubString(subject, match[CAPTURE1], subject.length);
+      const captures = new MatchInfoCaptureWrapper(match, subject);
+      const start = match[CAPTURE0];
+      const end = match[CAPTURE1];
+
+      const prefix = %_SubString(subject, 0, start);
+      const matched = %_SubString(subject, start, end);
+      const suffix = %_SubString(subject, end, subject.length);
+
+      return prefix +
+             GetSubstitution(matched, subject, start, captures, replace) +
+             suffix;
     }
 
     // Global regexp search, string replace.
@@ -744,8 +570,6 @@
 // GetSubstitution(matched, str, position, captures, replacement)
 // Expand the $-expressions in the string and return a new string with
 // the result.
-// TODO(littledan): Call this function from String.prototype.replace instead
-// of the very similar ExpandReplacement in src/js/string.js
 function GetSubstitution(matched, string, position, captures, replacement) {
   var matchLength = matched.length;
   var stringLength = string.length;
@@ -794,7 +618,7 @@
           }
         }
         if (scaledIndex != 0 && scaledIndex < capturesLength) {
-          var capture = captures[scaledIndex];
+          var capture = captures.at(scaledIndex);
           if (!IS_UNDEFINED(capture)) result += capture;
           pos += advance;
         } else {
@@ -869,16 +693,12 @@
     this.lastIndex = 0;
   }
 
-  // TODO(adamk): this fast path is wrong with respect to this.global
-  // and this.sticky, but hopefully the spec will remove those gets
-  // and thus make the assumption of 'exec' having no side-effects
-  // more correct. Also, we doesn't ensure that 'exec' is actually
-  // a data property on RegExp.prototype, nor does the fast path
-  // correctly handle lastIndex setting.
+  // TODO(adamk): this fast path is wrong as we don't ensure that 'exec'
+  // is actually a data property on RegExp.prototype.
   var exec;
   if (IS_REGEXP(this)) {
     exec = this.exec;
-    if (exec === RegExpSubclassExecJS) {
+    if (exec === RegExpExecJS) {
       return %_Call(RegExpReplace, this, string, replace);
     }
   }
@@ -922,7 +742,8 @@
       replacement = %reflect_apply(replace, UNDEFINED, parameters, 0,
                                    parameters.length);
     } else {
-      replacement = GetSubstitution(matched, string, position, captures,
+      const capturesWrapper = new ArrayCaptureWrapper(captures);
+      replacement = GetSubstitution(matched, string, position, capturesWrapper,
                                     replace);
     }
     if (position >= nextSourcePosition) {
@@ -946,9 +767,10 @@
   }
   string = TO_STRING(string);
   var previousLastIndex = this.lastIndex;
-  this.lastIndex = 0;
+  if (previousLastIndex != 0) this.lastIndex = 0;
   var result = RegExpSubclassExec(this, string);
-  this.lastIndex = previousLastIndex;
+  var currentLastIndex = this.lastIndex;
+  if (currentLastIndex != previousLastIndex) this.lastIndex = previousLastIndex;
   if (IS_NULL(result)) return -1;
   return result.index;
 }
@@ -1035,7 +857,6 @@
 // ES6 21.2.5.4.
 function RegExpGetGlobal() {
   if (!IS_REGEXP(this)) {
-    // TODO(littledan): Remove this RegExp compat workaround
     if (this === GlobalRegExpPrototype) {
       %IncrementUseCounter(kRegExpPrototypeOldFlagGetter);
       return UNDEFINED;
@@ -1050,7 +871,6 @@
 // ES6 21.2.5.5.
 function RegExpGetIgnoreCase() {
   if (!IS_REGEXP(this)) {
-    // TODO(littledan): Remove this RegExp compat workaround
     if (this === GlobalRegExpPrototype) {
       %IncrementUseCounter(kRegExpPrototypeOldFlagGetter);
       return UNDEFINED;
@@ -1064,7 +884,6 @@
 // ES6 21.2.5.7.
 function RegExpGetMultiline() {
   if (!IS_REGEXP(this)) {
-    // TODO(littledan): Remove this RegExp compat workaround
     if (this === GlobalRegExpPrototype) {
       %IncrementUseCounter(kRegExpPrototypeOldFlagGetter);
       return UNDEFINED;
@@ -1078,7 +897,6 @@
 // ES6 21.2.5.10.
 function RegExpGetSource() {
   if (!IS_REGEXP(this)) {
-    // TODO(littledan): Remove this RegExp compat workaround
     if (this === GlobalRegExpPrototype) {
       %IncrementUseCounter(kRegExpPrototypeSourceGetter);
       return "(?:)";
@@ -1092,8 +910,6 @@
 // ES6 21.2.5.12.
 function RegExpGetSticky() {
   if (!IS_REGEXP(this)) {
-    // Compat fix: RegExp.prototype.sticky == undefined; UseCounter tracks it
-    // TODO(littledan): Remove this workaround or standardize it
     if (this === GlobalRegExpPrototype) {
       %IncrementUseCounter(kRegExpPrototypeStickyGetter);
       return UNDEFINED;
@@ -1108,7 +924,6 @@
 // ES6 21.2.5.15.
 function RegExpGetUnicode() {
   if (!IS_REGEXP(this)) {
-    // TODO(littledan): Remove this RegExp compat workaround
     if (this === GlobalRegExpPrototype) {
       %IncrementUseCounter(kRegExpPrototypeUnicodeGetter);
       return UNDEFINED;
@@ -1127,17 +942,9 @@
 
 // -------------------------------------------------------------------
 
-%FunctionSetInstanceClassName(GlobalRegExp, 'RegExp');
-GlobalRegExpPrototype = new GlobalObject();
-%FunctionSetPrototype(GlobalRegExp, GlobalRegExpPrototype);
-%AddNamedProperty(
-    GlobalRegExp.prototype, 'constructor', GlobalRegExp, DONT_ENUM);
-%SetCode(GlobalRegExp, RegExpConstructor);
-
 utils.InstallGetter(GlobalRegExp, speciesSymbol, RegExpSpecies);
 
 utils.InstallFunctions(GlobalRegExp.prototype, DONT_ENUM, [
-  "exec", RegExpSubclassExecJS,
   "test", RegExpSubclassTest,
   "toString", RegExpToString,
   "compile", RegExpCompileJS,
@@ -1166,11 +973,20 @@
   LAST_INPUT(RegExpLastMatchInfo) = TO_STRING(string);
 };
 
+// TODO(jgruber): All of these getters and setters were intended to be installed
+// with various attributes (e.g. DONT_ENUM | DONT_DELETE), but
+// InstallGetterSetter had a bug which ignored the passed attributes and
+// simply installed as DONT_ENUM instead. We might want to change back
+// to the intended attributes at some point.
+// On the other hand, installing attributes as DONT_ENUM matches the draft
+// specification at
+// https://github.com/claudepache/es-regexp-legacy-static-properties
+
 %OptimizeObjectForAddingMultipleProperties(GlobalRegExp, 22);
 utils.InstallGetterSetter(GlobalRegExp, 'input', RegExpGetInput, RegExpSetInput,
-                          DONT_DELETE);
+                          DONT_ENUM);
 utils.InstallGetterSetter(GlobalRegExp, '$_', RegExpGetInput, RegExpSetInput,
-                          DONT_ENUM | DONT_DELETE);
+                          DONT_ENUM);
 
 
 var NoOpSetter = function(ignored) {};
@@ -1178,28 +994,30 @@
 
 // Static properties set by a successful match.
 utils.InstallGetterSetter(GlobalRegExp, 'lastMatch', RegExpGetLastMatch,
-                          NoOpSetter, DONT_DELETE);
+                          NoOpSetter, DONT_ENUM);
 utils.InstallGetterSetter(GlobalRegExp, '$&', RegExpGetLastMatch, NoOpSetter,
-                          DONT_ENUM | DONT_DELETE);
+                          DONT_ENUM);
 utils.InstallGetterSetter(GlobalRegExp, 'lastParen', RegExpGetLastParen,
-                          NoOpSetter, DONT_DELETE);
+                          NoOpSetter, DONT_ENUM);
 utils.InstallGetterSetter(GlobalRegExp, '$+', RegExpGetLastParen, NoOpSetter,
-                          DONT_ENUM | DONT_DELETE);
+                          DONT_ENUM);
 utils.InstallGetterSetter(GlobalRegExp, 'leftContext', RegExpGetLeftContext,
-                          NoOpSetter, DONT_DELETE);
+                          NoOpSetter, DONT_ENUM);
 utils.InstallGetterSetter(GlobalRegExp, '$`', RegExpGetLeftContext, NoOpSetter,
-                          DONT_ENUM | DONT_DELETE);
+                          DONT_ENUM);
 utils.InstallGetterSetter(GlobalRegExp, 'rightContext', RegExpGetRightContext,
-                          NoOpSetter, DONT_DELETE);
+                          NoOpSetter, DONT_ENUM);
 utils.InstallGetterSetter(GlobalRegExp, "$'", RegExpGetRightContext, NoOpSetter,
-                          DONT_ENUM | DONT_DELETE);
+                          DONT_ENUM);
 
 for (var i = 1; i < 10; ++i) {
   utils.InstallGetterSetter(GlobalRegExp, '$' + i, RegExpMakeCaptureGetter(i),
-                            NoOpSetter, DONT_DELETE);
+                            NoOpSetter, DONT_ENUM);
 }
 %ToFastProperties(GlobalRegExp);
 
+%InstallToContext(["regexp_last_match_info", RegExpLastMatchInfo]);
+
 // -------------------------------------------------------------------
 // Internal
 
@@ -1228,13 +1046,13 @@
 // Exports
 
 utils.Export(function(to) {
+  to.GetSubstitution = GetSubstitution;
   to.InternalRegExpMatch = InternalRegExpMatch;
   to.InternalRegExpReplace = InternalRegExpReplace;
   to.IsRegExp = IsRegExp;
   to.RegExpExec = DoRegExpExec;
   to.RegExpInitialize = RegExpInitialize;
   to.RegExpLastMatchInfo = RegExpLastMatchInfo;
-  to.RegExpTest = RegExpTest;
 });
 
 })
diff --git a/src/js/string-iterator.js b/src/js/string-iterator.js
deleted file mode 100644
index 2319e5a..0000000
--- a/src/js/string-iterator.js
+++ /dev/null
@@ -1,98 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-(function(global, utils) {
-
-"use strict";
-
-%CheckIsBootstrapping();
-
-// -------------------------------------------------------------------
-// Imports
-
-var GlobalString = global.String;
-var IteratorPrototype = utils.ImportNow("IteratorPrototype");
-var iteratorSymbol = utils.ImportNow("iterator_symbol");
-var stringIteratorIteratedStringSymbol =
-    utils.ImportNow("string_iterator_iterated_string_symbol");
-var stringIteratorNextIndexSymbol =
-    utils.ImportNow("string_iterator_next_index_symbol");
-var toStringTagSymbol = utils.ImportNow("to_string_tag_symbol");
-
-// -------------------------------------------------------------------
-
-function StringIterator() {}
-
-
-// 21.1.5.1 CreateStringIterator Abstract Operation
-function CreateStringIterator(string) {
-  CHECK_OBJECT_COERCIBLE(string, 'String.prototype[Symbol.iterator]');
-  var s = TO_STRING(string);
-  var iterator = new StringIterator;
-  SET_PRIVATE(iterator, stringIteratorIteratedStringSymbol, s);
-  SET_PRIVATE(iterator, stringIteratorNextIndexSymbol, 0);
-  return iterator;
-}
-
-
-// ES6 section 21.1.5.2.1 %StringIteratorPrototype%.next ( )
-function StringIteratorNext() {
-  var iterator = this;
-  var value = UNDEFINED;
-  var done = true;
-
-  if (!IS_RECEIVER(iterator) ||
-      !HAS_DEFINED_PRIVATE(iterator, stringIteratorNextIndexSymbol)) {
-    throw %make_type_error(kIncompatibleMethodReceiver,
-                        'String Iterator.prototype.next');
-  }
-
-  var s = GET_PRIVATE(iterator, stringIteratorIteratedStringSymbol);
-  if (!IS_UNDEFINED(s)) {
-    var position = GET_PRIVATE(iterator, stringIteratorNextIndexSymbol);
-    var length = TO_UINT32(s.length);
-    if (position >= length) {
-      SET_PRIVATE(iterator, stringIteratorIteratedStringSymbol, UNDEFINED);
-    } else {
-      var first = %_StringCharCodeAt(s, position);
-      value = %_StringCharFromCode(first);
-      done = false;
-      position++;
-
-      if (first >= 0xD800 && first <= 0xDBFF && position < length) {
-        var second = %_StringCharCodeAt(s, position);
-        if (second >= 0xDC00 && second <= 0xDFFF) {
-          value += %_StringCharFromCode(second);
-          position++;
-        }
-      }
-
-      SET_PRIVATE(iterator, stringIteratorNextIndexSymbol, position);
-    }
-  }
-  return %_CreateIterResultObject(value, done);
-}
-
-
-// 21.1.3.27 String.prototype [ @@iterator ]( )
-function StringPrototypeIterator() {
-  return CreateStringIterator(this);
-}
-
-//-------------------------------------------------------------------
-
-%FunctionSetPrototype(StringIterator, {__proto__: IteratorPrototype});
-%FunctionSetInstanceClassName(StringIterator, 'String Iterator');
-
-utils.InstallFunctions(StringIterator.prototype, DONT_ENUM, [
-  'next', StringIteratorNext
-]);
-%AddNamedProperty(StringIterator.prototype, toStringTagSymbol,
-                  "String Iterator", READ_ONLY | DONT_ENUM);
-
-utils.SetFunctionName(StringPrototypeIterator, iteratorSymbol);
-%AddNamedProperty(GlobalString.prototype, iteratorSymbol,
-                  StringPrototypeIterator, DONT_ENUM);
-
-})
diff --git a/src/js/string.js b/src/js/string.js
index 38caab7..7c552a9 100644
--- a/src/js/string.js
+++ b/src/js/string.js
@@ -10,6 +10,7 @@
 // Imports
 
 var ArrayJoin;
+var GetSubstitution;
 var GlobalRegExp = global.RegExp;
 var GlobalString = global.String;
 var IsRegExp;
@@ -23,6 +24,7 @@
 
 utils.Import(function(from) {
   ArrayJoin = from.ArrayJoin;
+  GetSubstitution = from.GetSubstitution;
   IsRegExp = from.IsRegExp;
   MaxSimple = from.MaxSimple;
   MinSimple = from.MinSimple;
@@ -59,45 +61,6 @@
 %FunctionSetLength(StringIndexOf, 1);
 
 
-// ECMA-262 section 15.5.4.8
-function StringLastIndexOf(pat, pos) {  // length == 1
-  CHECK_OBJECT_COERCIBLE(this, "String.prototype.lastIndexOf");
-
-  var sub = TO_STRING(this);
-  var subLength = sub.length;
-  var pat = TO_STRING(pat);
-  var patLength = pat.length;
-  var index = subLength - patLength;
-  var position = TO_NUMBER(pos);
-  if (!NUMBER_IS_NAN(position)) {
-    position = TO_INTEGER(position);
-    if (position < 0) {
-      position = 0;
-    }
-    if (position + patLength < subLength) {
-      index = position;
-    }
-  }
-  if (index < 0) {
-    return -1;
-  }
-  return %StringLastIndexOf(sub, pat, index);
-}
-
-%FunctionSetLength(StringLastIndexOf, 1);
-
-
-// ECMA-262 section 15.5.4.9
-//
-// This function is implementation specific.  For now, we do not
-// do anything locale specific.
-function StringLocaleCompareJS(other) {
-  CHECK_OBJECT_COERCIBLE(this, "String.prototype.localeCompare");
-
-  return %StringLocaleCompare(TO_STRING(this), TO_STRING(other));
-}
-
-
 // ES6 21.1.3.11.
 function StringMatchJS(pattern) {
   CHECK_OBJECT_COERCIBLE(this, "String.prototype.match");
@@ -118,38 +81,6 @@
 }
 
 
-// ECMA-262 v6, section 21.1.3.12
-//
-// For now we do nothing, as proper normalization requires big tables.
-// If Intl is enabled, then i18n.js will override it and provide the the
-// proper functionality.
-function StringNormalize(formArg) {  // length == 0
-  CHECK_OBJECT_COERCIBLE(this, "String.prototype.normalize");
-  var s = TO_STRING(this);
-
-  var form = IS_UNDEFINED(formArg) ? 'NFC' : TO_STRING(formArg);
-
-  var NORMALIZATION_FORMS = ['NFC', 'NFD', 'NFKC', 'NFKD'];
-  var normalizationForm = %ArrayIndexOf(NORMALIZATION_FORMS, form, 0);
-  if (normalizationForm === -1) {
-    throw %make_range_error(kNormalizationForm,
-                         %_Call(ArrayJoin, NORMALIZATION_FORMS, ', '));
-  }
-
-  return s;
-}
-
-%FunctionSetLength(StringNormalize, 0);
-
-
-// This has the same size as the RegExpLastMatchInfo array, and can be used
-// for functions that expect that structure to be returned.  It is used when
-// the needle is a string rather than a regexp.  In this case we can't update
-// lastMatchArray without erroneously affecting the properties on the global
-// RegExp object.
-var reusableMatchInfo = [2, "", "", -1, -1];
-
-
 // ES6, section 21.1.3.14
 function StringReplace(search, replace) {
   CHECK_OBJECT_COERCIBLE(this, "String.prototype.replace");
@@ -201,101 +132,18 @@
   if (IS_CALLABLE(replace)) {
     result += replace(search, start, subject);
   } else {
-    reusableMatchInfo[CAPTURE0] = start;
-    reusableMatchInfo[CAPTURE1] = end;
-    result = ExpandReplacement(TO_STRING(replace),
-                               subject,
-                               reusableMatchInfo,
-                               result);
+    // In this case, we don't have any capture groups and can get away with
+    // faking the captures object by simply setting its length to 1.
+    const captures = { length: 1 };
+    const matched = %_SubString(subject, start, end);
+    result += GetSubstitution(matched, subject, start, captures,
+                              TO_STRING(replace));
   }
 
   return result + %_SubString(subject, end, subject.length);
 }
 
 
-// Expand the $-expressions in the string and return a new string with
-// the result.
-function ExpandReplacement(string, subject, matchInfo, result) {
-  var length = string.length;
-  var next = %StringIndexOf(string, '$', 0);
-  if (next < 0) {
-    if (length > 0) result += string;
-    return result;
-  }
-
-  if (next > 0) result += %_SubString(string, 0, next);
-
-  while (true) {
-    var expansion = '$';
-    var position = next + 1;
-    if (position < length) {
-      var peek = %_StringCharCodeAt(string, position);
-      if (peek == 36) {         // $$
-        ++position;
-        result += '$';
-      } else if (peek == 38) {  // $& - match
-        ++position;
-        result +=
-          %_SubString(subject, matchInfo[CAPTURE0], matchInfo[CAPTURE1]);
-      } else if (peek == 96) {  // $` - prefix
-        ++position;
-        result += %_SubString(subject, 0, matchInfo[CAPTURE0]);
-      } else if (peek == 39) {  // $' - suffix
-        ++position;
-        result += %_SubString(subject, matchInfo[CAPTURE1], subject.length);
-      } else if (peek >= 48 && peek <= 57) {
-        // Valid indices are $1 .. $9, $01 .. $09 and $10 .. $99
-        var scaled_index = (peek - 48) << 1;
-        var advance = 1;
-        var number_of_captures = NUMBER_OF_CAPTURES(matchInfo);
-        if (position + 1 < string.length) {
-          var next = %_StringCharCodeAt(string, position + 1);
-          if (next >= 48 && next <= 57) {
-            var new_scaled_index = scaled_index * 10 + ((next - 48) << 1);
-            if (new_scaled_index < number_of_captures) {
-              scaled_index = new_scaled_index;
-              advance = 2;
-            }
-          }
-        }
-        if (scaled_index != 0 && scaled_index < number_of_captures) {
-          var start = matchInfo[CAPTURE(scaled_index)];
-          if (start >= 0) {
-            result +=
-              %_SubString(subject, start, matchInfo[CAPTURE(scaled_index + 1)]);
-          }
-          position += advance;
-        } else {
-          result += '$';
-        }
-      } else {
-        result += '$';
-      }
-    } else {
-      result += '$';
-    }
-
-    // Go the the next $ in the string.
-    next = %StringIndexOf(string, '$', position);
-
-    // Return if there are no more $ characters in the string. If we
-    // haven't reached the end, we need to append the suffix.
-    if (next < 0) {
-      if (position < length) {
-        result += %_SubString(string, position, length);
-      }
-      return result;
-    }
-
-    // Append substring between the previous and the next $ character.
-    if (next > position) {
-      result += %_SubString(string, position, next);
-    }
-  }
-  return result;
-}
-
-
 // ES6 21.1.3.15.
 function StringSearch(pattern) {
   CHECK_OBJECT_COERCIBLE(this, "String.prototype.search");
@@ -390,55 +238,6 @@
 }
 
 
-// ECMA-262 section 15.5.4.15
-function StringSubstring(start, end) {
-  CHECK_OBJECT_COERCIBLE(this, "String.prototype.subString");
-
-  var s = TO_STRING(this);
-  var s_len = s.length;
-
-  var start_i = TO_INTEGER(start);
-  if (start_i < 0) {
-    start_i = 0;
-  } else if (start_i > s_len) {
-    start_i = s_len;
-  }
-
-  var end_i = s_len;
-  if (!IS_UNDEFINED(end)) {
-    end_i = TO_INTEGER(end);
-    if (end_i > s_len) {
-      end_i = s_len;
-    } else {
-      if (end_i < 0) end_i = 0;
-      if (start_i > end_i) {
-        var tmp = end_i;
-        end_i = start_i;
-        start_i = tmp;
-      }
-    }
-  }
-
-  return %_SubString(s, start_i, end_i);
-}
-
-
-// ecma262/#sec-string.prototype.substr
-function StringSubstr(start, length) {
-  CHECK_OBJECT_COERCIBLE(this, "String.prototype.substr");
-  var s = TO_STRING(this);
-  var size = s.length;
-  start = TO_INTEGER(start);
-  length = IS_UNDEFINED(length) ? size : TO_INTEGER(length);
-
-  if (start < 0) start = MaxSimple(size + start, 0);
-  length = MinSimple(MaxSimple(length, 0), size - start);
-
-  if (length <= 0) return '';
-  return %_SubString(s, start, start + length);
-}
-
-
 // ECMA-262, 15.5.4.16
 function StringToLowerCaseJS() {
   CHECK_OBJECT_COERCIBLE(this, "String.prototype.toLowerCase");
@@ -737,17 +536,12 @@
   "endsWith", StringEndsWith,
   "includes", StringIncludes,
   "indexOf", StringIndexOf,
-  "lastIndexOf", StringLastIndexOf,
-  "localeCompare", StringLocaleCompareJS,
   "match", StringMatchJS,
-  "normalize", StringNormalize,
   "repeat", StringRepeat,
   "replace", StringReplace,
   "search", StringSearch,
   "slice", StringSlice,
   "split", StringSplitJS,
-  "substring", StringSubstring,
-  "substr", StringSubstr,
   "startsWith", StringStartsWith,
   "toLowerCase", StringToLowerCaseJS,
   "toLocaleLowerCase", StringToLocaleLowerCase,
@@ -773,15 +567,11 @@
 // Exports
 
 utils.Export(function(to) {
-  to.ExpandReplacement = ExpandReplacement;
   to.StringIndexOf = StringIndexOf;
-  to.StringLastIndexOf = StringLastIndexOf;
   to.StringMatch = StringMatchJS;
   to.StringReplace = StringReplace;
   to.StringSlice = StringSlice;
   to.StringSplit = StringSplitJS;
-  to.StringSubstr = StringSubstr;
-  to.StringSubstring = StringSubstring;
 });
 
 })
diff --git a/src/js/typedarray.js b/src/js/typedarray.js
index b97a9c8..edb3b06 100644
--- a/src/js/typedarray.js
+++ b/src/js/typedarray.js
@@ -19,7 +19,6 @@
 var GlobalArray = global.Array;
 var GlobalArrayBuffer = global.ArrayBuffer;
 var GlobalArrayBufferPrototype = GlobalArrayBuffer.prototype;
-var GlobalDataView = global.DataView;
 var GlobalObject = global.Object;
 var InnerArrayCopyWithin;
 var InnerArrayEvery;
@@ -35,7 +34,6 @@
 var InnerArraySort;
 var InnerArrayToLocaleString;
 var InternalArray = utils.InternalArray;
-var IsNaN;
 var MaxSimple;
 var MinSimple;
 var PackedArrayReverse;
@@ -84,7 +82,6 @@
   InnerArraySome = from.InnerArraySome;
   InnerArraySort = from.InnerArraySort;
   InnerArrayToLocaleString = from.InnerArrayToLocaleString;
-  IsNaN = from.IsNaN;
   MaxSimple = from.MaxSimple;
   MinSimple = from.MinSimple;
   PackedArrayReverse = from.PackedArrayReverse;
@@ -545,9 +542,9 @@
     return -1;
   } else if (x > y) {
     return 1;
-  } else if (IsNaN(x) && IsNaN(y)) {
-    return IsNaN(y) ? 0 : 1;
-  } else if (IsNaN(x)) {
+  } else if (NUMBER_IS_NAN(x) && NUMBER_IS_NAN(y)) {
+    return NUMBER_IS_NAN(y) ? 0 : 1;
+  } else if (NUMBER_IS_NAN(x)) {
     return 1;
   }
   return 0;
@@ -915,68 +912,4 @@
 
 TYPED_ARRAYS(SETUP_TYPED_ARRAY)
 
-// --------------------------- DataView -----------------------------
-
-macro DATA_VIEW_TYPES(FUNCTION)
-  FUNCTION(Int8)
-  FUNCTION(Uint8)
-  FUNCTION(Int16)
-  FUNCTION(Uint16)
-  FUNCTION(Int32)
-  FUNCTION(Uint32)
-  FUNCTION(Float32)
-  FUNCTION(Float64)
-endmacro
-
-
-macro DATA_VIEW_GETTER_SETTER(TYPENAME)
-function DataViewGetTYPENAMEJS(offset, little_endian) {
-  if (!IS_DATAVIEW(this)) {
-    throw %make_type_error(kIncompatibleMethodReceiver,
-                        'DataView.getTYPENAME', this);
-  }
-  offset = IS_UNDEFINED(offset) ? 0 : ToIndex(offset, kInvalidDataViewAccessorOffset);
-  return %DataViewGetTYPENAME(this, offset, !!little_endian);
-}
-%FunctionSetLength(DataViewGetTYPENAMEJS, 1);
-
-function DataViewSetTYPENAMEJS(offset, value, little_endian) {
-  if (!IS_DATAVIEW(this)) {
-    throw %make_type_error(kIncompatibleMethodReceiver,
-                        'DataView.setTYPENAME', this);
-  }
-  offset = IS_UNDEFINED(offset) ? 0 : ToIndex(offset, kInvalidDataViewAccessorOffset);
-  %DataViewSetTYPENAME(this, offset, TO_NUMBER(value), !!little_endian);
-}
-%FunctionSetLength(DataViewSetTYPENAMEJS, 2);
-endmacro
-
-DATA_VIEW_TYPES(DATA_VIEW_GETTER_SETTER)
-
-utils.InstallFunctions(GlobalDataView.prototype, DONT_ENUM, [
-  "getInt8", DataViewGetInt8JS,
-  "setInt8", DataViewSetInt8JS,
-
-  "getUint8", DataViewGetUint8JS,
-  "setUint8", DataViewSetUint8JS,
-
-  "getInt16", DataViewGetInt16JS,
-  "setInt16", DataViewSetInt16JS,
-
-  "getUint16", DataViewGetUint16JS,
-  "setUint16", DataViewSetUint16JS,
-
-  "getInt32", DataViewGetInt32JS,
-  "setInt32", DataViewSetInt32JS,
-
-  "getUint32", DataViewGetUint32JS,
-  "setUint32", DataViewSetUint32JS,
-
-  "getFloat32", DataViewGetFloat32JS,
-  "setFloat32", DataViewSetFloat32JS,
-
-  "getFloat64", DataViewGetFloat64JS,
-  "setFloat64", DataViewSetFloat64JS
-]);
-
 })
diff --git a/src/js/v8natives.js b/src/js/v8natives.js
index 0c0a792..93636a0 100644
--- a/src/js/v8natives.js
+++ b/src/js/v8natives.js
@@ -18,20 +18,6 @@
 // ----------------------------------------------------------------------------
 
 
-// ES6 18.2.3 isNaN(number)
-function GlobalIsNaN(number) {
-  number = TO_NUMBER(number);
-  return NUMBER_IS_NAN(number);
-}
-
-
-// ES6 18.2.2 isFinite(number)
-function GlobalIsFinite(number) {
-  number = TO_NUMBER(number);
-  return NUMBER_IS_FINITE(number);
-}
-
-
 // ES6 18.2.5 parseInt(string, radix)
 function GlobalParseInt(string, radix) {
   if (IS_UNDEFINED(radix) || radix === 10 || radix === 0) {
@@ -91,8 +77,6 @@
 
 // Set up non-enumerable function on the global object.
 utils.InstallFunctions(global, DONT_ENUM, [
-  "isNaN", GlobalIsNaN,
-  "isFinite", GlobalIsFinite,
   "parseInt", GlobalParseInt,
   "parseFloat", GlobalParseFloat,
 ]);
@@ -207,38 +191,6 @@
 // ----------------------------------------------------------------------------
 // Number
 
-// Harmony isFinite.
-function NumberIsFinite(number) {
-  return IS_NUMBER(number) && NUMBER_IS_FINITE(number);
-}
-
-
-// Harmony isInteger
-function NumberIsInteger(number) {
-  return NumberIsFinite(number) && TO_INTEGER(number) == number;
-}
-
-
-// Harmony isNaN.
-function NumberIsNaN(number) {
-  return IS_NUMBER(number) && NUMBER_IS_NAN(number);
-}
-
-
-// Harmony isSafeInteger
-function NumberIsSafeInteger(number) {
-  if (NumberIsFinite(number)) {
-    var integral = TO_INTEGER(number);
-    if (integral == number) {
-      return -kMaxSafeInteger <= integral && integral <= kMaxSafeInteger;
-    }
-  }
-  return false;
-}
-
-
-// ----------------------------------------------------------------------------
-
 utils.InstallConstants(GlobalNumber, [
   // ECMA-262 section 15.7.3.1.
   "MAX_VALUE", 1.7976931348623157e+308,
@@ -260,15 +212,10 @@
 
 // Harmony Number constructor additions
 utils.InstallFunctions(GlobalNumber, DONT_ENUM, [
-  "isFinite", NumberIsFinite,
-  "isInteger", NumberIsInteger,
-  "isNaN", NumberIsNaN,
-  "isSafeInteger", NumberIsSafeInteger,
   "parseInt", GlobalParseInt,
   "parseFloat", GlobalParseFloat
 ]);
 
-%SetForceInlineFlag(NumberIsNaN);
 
 
 // ----------------------------------------------------------------------------
@@ -295,9 +242,6 @@
 utils.Export(function(to) {
   to.GetIterator = GetIterator;
   to.GetMethod = GetMethod;
-  to.IsNaN = GlobalIsNaN;
-  to.NumberIsNaN = NumberIsNaN;
-  to.NumberIsInteger = NumberIsInteger;
   to.ObjectHasOwnProperty = GlobalObject.prototype.hasOwnProperty;
 });
 
diff --git a/src/json-parser.cc b/src/json-parser.cc
index bf2fd0d..576100a 100644
--- a/src/json-parser.cc
+++ b/src/json-parser.cc
@@ -11,10 +11,10 @@
 #include "src/field-type.h"
 #include "src/messages.h"
 #include "src/objects-inl.h"
-#include "src/parsing/scanner.h"
 #include "src/parsing/token.h"
 #include "src/property-descriptor.h"
 #include "src/transitions.h"
+#include "src/unicode-cache.h"
 
 namespace v8 {
 namespace internal {
diff --git a/src/libplatform/default-platform.cc b/src/libplatform/default-platform.cc
index 2f81248..f64143e 100644
--- a/src/libplatform/default-platform.cc
+++ b/src/libplatform/default-platform.cc
@@ -39,9 +39,14 @@
 const int DefaultPlatform::kMaxThreadPoolSize = 8;
 
 DefaultPlatform::DefaultPlatform()
-    : initialized_(false), thread_pool_size_(0), tracing_controller_(NULL) {}
+    : initialized_(false), thread_pool_size_(0) {}
 
 DefaultPlatform::~DefaultPlatform() {
+  if (tracing_controller_) {
+    tracing_controller_->StopTracing();
+    tracing_controller_.reset();
+  }
+
   base::LockGuard<base::Mutex> guard(&lock_);
   queue_.Terminate();
   if (initialized_) {
@@ -63,11 +68,6 @@
       i->second.pop();
     }
   }
-
-  if (tracing_controller_) {
-    tracing_controller_->StopTracing();
-    delete tracing_controller_;
-  }
 }
 
 
@@ -178,16 +178,17 @@
          static_cast<double>(base::Time::kMicrosecondsPerSecond);
 }
 
-
 uint64_t DefaultPlatform::AddTraceEvent(
     char phase, const uint8_t* category_enabled_flag, const char* name,
     const char* scope, uint64_t id, uint64_t bind_id, int num_args,
     const char** arg_names, const uint8_t* arg_types,
-    const uint64_t* arg_values, unsigned int flags) {
+    const uint64_t* arg_values,
+    std::unique_ptr<v8::ConvertableToTraceFormat>* arg_convertables,
+    unsigned int flags) {
   if (tracing_controller_) {
     return tracing_controller_->AddTraceEvent(
         phase, category_enabled_flag, name, scope, id, bind_id, num_args,
-        arg_names, arg_types, arg_values, flags);
+        arg_names, arg_types, arg_values, arg_convertables, flags);
   }
 
   return 0;
@@ -218,12 +219,22 @@
 
 void DefaultPlatform::SetTracingController(
     tracing::TracingController* tracing_controller) {
-  tracing_controller_ = tracing_controller;
+  tracing_controller_.reset(tracing_controller);
 }
 
 size_t DefaultPlatform::NumberOfAvailableBackgroundThreads() {
   return static_cast<size_t>(thread_pool_size_);
 }
 
+void DefaultPlatform::AddTraceStateObserver(TraceStateObserver* observer) {
+  if (!tracing_controller_) return;
+  tracing_controller_->AddTraceStateObserver(observer);
+}
+
+void DefaultPlatform::RemoveTraceStateObserver(TraceStateObserver* observer) {
+  if (!tracing_controller_) return;
+  tracing_controller_->RemoveTraceStateObserver(observer);
+}
+
 }  // namespace platform
 }  // namespace v8
diff --git a/src/libplatform/default-platform.h b/src/libplatform/default-platform.h
index 0fd7e5a..e36234f 100644
--- a/src/libplatform/default-platform.h
+++ b/src/libplatform/default-platform.h
@@ -7,6 +7,7 @@
 
 #include <functional>
 #include <map>
+#include <memory>
 #include <queue>
 #include <vector>
 
@@ -51,16 +52,21 @@
   const uint8_t* GetCategoryGroupEnabled(const char* name) override;
   const char* GetCategoryGroupName(
       const uint8_t* category_enabled_flag) override;
-  uint64_t AddTraceEvent(char phase, const uint8_t* category_enabled_flag,
-                         const char* name, const char* scope, uint64_t id,
-                         uint64_t bind_id, int32_t num_args,
-                         const char** arg_names, const uint8_t* arg_types,
-                         const uint64_t* arg_values,
-                         unsigned int flags) override;
+  using Platform::AddTraceEvent;
+  uint64_t AddTraceEvent(
+      char phase, const uint8_t* category_enabled_flag, const char* name,
+      const char* scope, uint64_t id, uint64_t bind_id, int32_t num_args,
+      const char** arg_names, const uint8_t* arg_types,
+      const uint64_t* arg_values,
+      std::unique_ptr<v8::ConvertableToTraceFormat>* arg_convertables,
+      unsigned int flags) override;
   void UpdateTraceEventDuration(const uint8_t* category_enabled_flag,
                                 const char* name, uint64_t handle) override;
   void SetTracingController(tracing::TracingController* tracing_controller);
 
+  void AddTraceStateObserver(TraceStateObserver* observer) override;
+  void RemoveTraceStateObserver(TraceStateObserver* observer) override;
+
  private:
   static const int kMaxThreadPoolSize;
 
@@ -79,7 +85,7 @@
            std::priority_queue<DelayedEntry, std::vector<DelayedEntry>,
                                std::greater<DelayedEntry> > >
       main_thread_delayed_queue_;
-  tracing::TracingController* tracing_controller_;
+  std::unique_ptr<tracing::TracingController> tracing_controller_;
 
   DISALLOW_COPY_AND_ASSIGN(DefaultPlatform);
 };
diff --git a/src/libplatform/tracing/trace-object.cc b/src/libplatform/tracing/trace-object.cc
index 55be892..bb4bf71 100644
--- a/src/libplatform/tracing/trace-object.cc
+++ b/src/libplatform/tracing/trace-object.cc
@@ -5,6 +5,7 @@
 #include "include/libplatform/v8-tracing.h"
 
 #include "base/trace_event/common/trace_event_common.h"
+#include "include/v8-platform.h"
 #include "src/base/platform/platform.h"
 #include "src/base/platform/time.h"
 
@@ -30,11 +31,13 @@
   }
 }
 
-void TraceObject::Initialize(char phase, const uint8_t* category_enabled_flag,
-                             const char* name, const char* scope, uint64_t id,
-                             uint64_t bind_id, int num_args,
-                             const char** arg_names, const uint8_t* arg_types,
-                             const uint64_t* arg_values, unsigned int flags) {
+void TraceObject::Initialize(
+    char phase, const uint8_t* category_enabled_flag, const char* name,
+    const char* scope, uint64_t id, uint64_t bind_id, int num_args,
+    const char** arg_names, const uint8_t* arg_types,
+    const uint64_t* arg_values,
+    std::unique_ptr<v8::ConvertableToTraceFormat>* arg_convertables,
+    unsigned int flags) {
   pid_ = base::OS::GetCurrentProcessId();
   tid_ = base::OS::GetCurrentThreadId();
   phase_ = phase;
@@ -55,6 +58,8 @@
     arg_names_[i] = arg_names[i];
     arg_values_[i].as_uint = arg_values[i];
     arg_types_[i] = arg_types[i];
+    if (arg_types[i] == TRACE_VALUE_TYPE_CONVERTABLE)
+      arg_convertables_[i] = std::move(arg_convertables[i]);
   }
 
   bool copy = !!(flags & TRACE_EVENT_FLAG_COPY);
@@ -107,8 +112,10 @@
     char phase, const uint8_t* category_enabled_flag, const char* name,
     const char* scope, uint64_t id, uint64_t bind_id, int num_args,
     const char** arg_names, const uint8_t* arg_types,
-    const uint64_t* arg_values, unsigned int flags, int pid, int tid,
-    int64_t ts, int64_t tts, uint64_t duration, uint64_t cpu_duration) {
+    const uint64_t* arg_values,
+    std::unique_ptr<v8::ConvertableToTraceFormat>* arg_convertables,
+    unsigned int flags, int pid, int tid, int64_t ts, int64_t tts,
+    uint64_t duration, uint64_t cpu_duration) {
   pid_ = pid;
   tid_ = tid;
   phase_ = phase;
diff --git a/src/libplatform/tracing/trace-writer.cc b/src/libplatform/tracing/trace-writer.cc
index ec95527..7445087 100644
--- a/src/libplatform/tracing/trace-writer.cc
+++ b/src/libplatform/tracing/trace-writer.cc
@@ -7,6 +7,7 @@
 #include <cmath>
 
 #include "base/trace_event/common/trace_event_common.h"
+#include "include/v8-platform.h"
 #include "src/base/platform/platform.h"
 
 namespace v8 {
@@ -112,6 +113,12 @@
   }
 }
 
+void JSONTraceWriter::AppendArgValue(ConvertableToTraceFormat* value) {
+  std::string arg_stringified;
+  value->AppendAsTraceFormat(&arg_stringified);
+  stream_ << arg_stringified;
+}
+
 JSONTraceWriter::JSONTraceWriter(std::ostream& stream) : stream_(stream) {
   stream_ << "{\"traceEvents\":[";
 }
@@ -143,10 +150,16 @@
   const char** arg_names = trace_event->arg_names();
   const uint8_t* arg_types = trace_event->arg_types();
   TraceObject::ArgValue* arg_values = trace_event->arg_values();
+  std::unique_ptr<v8::ConvertableToTraceFormat>* arg_convertables =
+      trace_event->arg_convertables();
   for (int i = 0; i < trace_event->num_args(); ++i) {
     if (i > 0) stream_ << ",";
     stream_ << "\"" << arg_names[i] << "\":";
-    AppendArgValue(arg_types[i], arg_values[i]);
+    if (arg_types[i] == TRACE_VALUE_TYPE_CONVERTABLE) {
+      AppendArgValue(arg_convertables[i].get());
+    } else {
+      AppendArgValue(arg_types[i], arg_values[i]);
+    }
   }
   stream_ << "}}";
   // TODO(fmeawad): Add support for Flow Events.
diff --git a/src/libplatform/tracing/trace-writer.h b/src/libplatform/tracing/trace-writer.h
index 963fc6a..43d7cb6 100644
--- a/src/libplatform/tracing/trace-writer.h
+++ b/src/libplatform/tracing/trace-writer.h
@@ -20,6 +20,7 @@
 
  private:
   void AppendArgValue(uint8_t type, TraceObject::ArgValue value);
+  void AppendArgValue(v8::ConvertableToTraceFormat*);
 
   std::ostream& stream_;
   bool append_comma_ = false;
diff --git a/src/libplatform/tracing/tracing-controller.cc b/src/libplatform/tracing/tracing-controller.cc
index e9a2172..c1a4057 100644
--- a/src/libplatform/tracing/tracing-controller.cc
+++ b/src/libplatform/tracing/tracing-controller.cc
@@ -38,21 +38,28 @@
 // Skip default categories.
 v8::base::AtomicWord g_category_index = g_num_builtin_categories;
 
+TracingController::TracingController() {}
+
+TracingController::~TracingController() {}
+
 void TracingController::Initialize(TraceBuffer* trace_buffer) {
   trace_buffer_.reset(trace_buffer);
+  mutex_.reset(new base::Mutex());
 }
 
 uint64_t TracingController::AddTraceEvent(
     char phase, const uint8_t* category_enabled_flag, const char* name,
     const char* scope, uint64_t id, uint64_t bind_id, int num_args,
     const char** arg_names, const uint8_t* arg_types,
-    const uint64_t* arg_values, unsigned int flags) {
+    const uint64_t* arg_values,
+    std::unique_ptr<v8::ConvertableToTraceFormat>* arg_convertables,
+    unsigned int flags) {
   uint64_t handle;
   TraceObject* trace_object = trace_buffer_->AddTraceEvent(&handle);
   if (trace_object) {
     trace_object->Initialize(phase, category_enabled_flag, name, scope, id,
                              bind_id, num_args, arg_names, arg_types,
-                             arg_values, flags);
+                             arg_values, arg_convertables, flags);
   }
   return handle;
 }
@@ -91,13 +98,29 @@
 
 void TracingController::StartTracing(TraceConfig* trace_config) {
   trace_config_.reset(trace_config);
-  mode_ = RECORDING_MODE;
-  UpdateCategoryGroupEnabledFlags();
+  std::unordered_set<Platform::TraceStateObserver*> observers_copy;
+  {
+    base::LockGuard<base::Mutex> lock(mutex_.get());
+    mode_ = RECORDING_MODE;
+    UpdateCategoryGroupEnabledFlags();
+    observers_copy = observers_;
+  }
+  for (auto o : observers_copy) {
+    o->OnTraceEnabled();
+  }
 }
 
 void TracingController::StopTracing() {
   mode_ = DISABLED;
   UpdateCategoryGroupEnabledFlags();
+  std::unordered_set<Platform::TraceStateObserver*> observers_copy;
+  {
+    base::LockGuard<base::Mutex> lock(mutex_.get());
+    observers_copy = observers_;
+  }
+  for (auto o : observers_copy) {
+    o->OnTraceDisabled();
+  }
   trace_buffer_->Flush();
 }
 
@@ -172,6 +195,24 @@
   return category_group_enabled;
 }
 
+void TracingController::AddTraceStateObserver(
+    Platform::TraceStateObserver* observer) {
+  {
+    base::LockGuard<base::Mutex> lock(mutex_.get());
+    observers_.insert(observer);
+    if (mode_ != RECORDING_MODE) return;
+  }
+  // Fire the observer if recording is already in progress.
+  observer->OnTraceEnabled();
+}
+
+void TracingController::RemoveTraceStateObserver(
+    Platform::TraceStateObserver* observer) {
+  base::LockGuard<base::Mutex> lock(mutex_.get());
+  DCHECK(observers_.find(observer) != observers_.end());
+  observers_.erase(observer);
+}
+
 }  // namespace tracing
 }  // namespace platform
 }  // namespace v8
diff --git a/src/libsampler/sampler.cc b/src/libsampler/sampler.cc
index 71c667f..0b40972 100644
--- a/src/libsampler/sampler.cc
+++ b/src/libsampler/sampler.cc
@@ -217,7 +217,7 @@
 
 class SamplerManager {
  public:
-  SamplerManager() : sampler_map_(base::HashMap::PointersMatch) {}
+  SamplerManager() : sampler_map_() {}
 
   void AddSampler(Sampler* sampler) {
     AtomicGuard atomic_guard(&samplers_access_counter_);
diff --git a/src/lookup-cache-inl.h b/src/lookup-cache-inl.h
new file mode 100644
index 0000000..1998a9d
--- /dev/null
+++ b/src/lookup-cache-inl.h
@@ -0,0 +1,40 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/lookup-cache.h"
+
+#include "src/objects-inl.h"
+
+namespace v8 {
+namespace internal {
+
+// static
+int DescriptorLookupCache::Hash(Object* source, Name* name) {
+  DCHECK(name->IsUniqueName());
+  // Uses only lower 32 bits if pointers are larger.
+  uint32_t source_hash =
+      static_cast<uint32_t>(reinterpret_cast<uintptr_t>(source)) >>
+      kPointerSizeLog2;
+  uint32_t name_hash = name->hash_field();
+  return (source_hash ^ name_hash) % kLength;
+}
+
+int DescriptorLookupCache::Lookup(Map* source, Name* name) {
+  int index = Hash(source, name);
+  Key& key = keys_[index];
+  if ((key.source == source) && (key.name == name)) return results_[index];
+  return kAbsent;
+}
+
+void DescriptorLookupCache::Update(Map* source, Name* name, int result) {
+  DCHECK(result != kAbsent);
+  int index = Hash(source, name);
+  Key& key = keys_[index];
+  key.source = source;
+  key.name = name;
+  results_[index] = result;
+}
+
+}  // namespace internal
+}  // namespace v8
diff --git a/src/lookup-cache.cc b/src/lookup-cache.cc
new file mode 100644
index 0000000..18729d6
--- /dev/null
+++ b/src/lookup-cache.cc
@@ -0,0 +1,84 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/lookup-cache.h"
+
+#include "src/objects-inl.h"
+
+namespace v8 {
+namespace internal {
+
+void DescriptorLookupCache::Clear() {
+  for (int index = 0; index < kLength; index++) keys_[index].source = NULL;
+}
+
+int KeyedLookupCache::Hash(Handle<Map> map, Handle<Name> name) {
+  DisallowHeapAllocation no_gc;
+  // Uses only lower 32 bits if pointers are larger.
+  uintptr_t addr_hash =
+      static_cast<uint32_t>(reinterpret_cast<uintptr_t>(*map)) >> kMapHashShift;
+  return static_cast<uint32_t>((addr_hash ^ name->Hash()) & kCapacityMask);
+}
+
+int KeyedLookupCache::Lookup(Handle<Map> map, Handle<Name> name) {
+  DisallowHeapAllocation no_gc;
+  int index = (Hash(map, name) & kHashMask);
+  for (int i = 0; i < kEntriesPerBucket; i++) {
+    Key& key = keys_[index + i];
+    if ((key.map == *map) && key.name->Equals(*name)) {
+      return field_offsets_[index + i];
+    }
+  }
+  return kNotFound;
+}
+
+void KeyedLookupCache::Update(Handle<Map> map, Handle<Name> name,
+                              int field_offset) {
+  DisallowHeapAllocation no_gc;
+  if (!name->IsUniqueName()) {
+    if (!StringTable::InternalizeStringIfExists(name->GetIsolate(),
+                                                Handle<String>::cast(name))
+             .ToHandle(&name)) {
+      return;
+    }
+  }
+  // This cache is cleared only between mark compact passes, so we expect the
+  // cache to only contain old space names.
+  DCHECK(!map->GetIsolate()->heap()->InNewSpace(*name));
+
+  int index = (Hash(map, name) & kHashMask);
+  // After a GC there will be free slots, so we use them in order (this may
+  // help to get the most frequently used one in position 0).
+  for (int i = 0; i < kEntriesPerBucket; i++) {
+    Key& key = keys_[index];
+    Object* free_entry_indicator = NULL;
+    if (key.map == free_entry_indicator) {
+      key.map = *map;
+      key.name = *name;
+      field_offsets_[index + i] = field_offset;
+      return;
+    }
+  }
+  // No free entry found in this bucket, so we move them all down one and
+  // put the new entry at position zero.
+  for (int i = kEntriesPerBucket - 1; i > 0; i--) {
+    Key& key = keys_[index + i];
+    Key& key2 = keys_[index + i - 1];
+    key = key2;
+    field_offsets_[index + i] = field_offsets_[index + i - 1];
+  }
+
+  // Write the new first entry.
+  Key& key = keys_[index];
+  key.map = *map;
+  key.name = *name;
+  field_offsets_[index] = field_offset;
+}
+
+void KeyedLookupCache::Clear() {
+  for (int index = 0; index < kLength; index++) keys_[index].map = NULL;
+}
+
+}  // namespace internal
+}  // namespace v8
diff --git a/src/lookup-cache.h b/src/lookup-cache.h
new file mode 100644
index 0000000..6da5e5b
--- /dev/null
+++ b/src/lookup-cache.h
@@ -0,0 +1,117 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_LOOKUP_CACHE_H_
+#define V8_LOOKUP_CACHE_H_
+
+#include "src/objects.h"
+
+namespace v8 {
+namespace internal {
+
+// Cache for mapping (map, property name) into descriptor index.
+// The cache contains both positive and negative results.
+// Descriptor index equals kNotFound means the property is absent.
+// Cleared at startup and prior to any gc.
+class DescriptorLookupCache {
+ public:
+  // Lookup descriptor index for (map, name).
+  // If absent, kAbsent is returned.
+  inline int Lookup(Map* source, Name* name);
+
+  // Update an element in the cache.
+  inline void Update(Map* source, Name* name, int result);
+
+  // Clear the cache.
+  void Clear();
+
+  static const int kAbsent = -2;
+
+ private:
+  DescriptorLookupCache() {
+    for (int i = 0; i < kLength; ++i) {
+      keys_[i].source = NULL;
+      keys_[i].name = NULL;
+      results_[i] = kAbsent;
+    }
+  }
+
+  static inline int Hash(Object* source, Name* name);
+
+  static const int kLength = 64;
+  struct Key {
+    Map* source;
+    Name* name;
+  };
+
+  Key keys_[kLength];
+  int results_[kLength];
+
+  friend class Isolate;
+  DISALLOW_COPY_AND_ASSIGN(DescriptorLookupCache);
+};
+
+// Cache for mapping (map, property name) into field offset.
+// Cleared at startup and prior to mark sweep collection.
+class KeyedLookupCache {
+ public:
+  // Lookup field offset for (map, name). If absent, -1 is returned.
+  int Lookup(Handle<Map> map, Handle<Name> name);
+
+  // Update an element in the cache.
+  void Update(Handle<Map> map, Handle<Name> name, int field_offset);
+
+  // Clear the cache.
+  void Clear();
+
+  static const int kLength = 256;
+  static const int kCapacityMask = kLength - 1;
+  static const int kMapHashShift = 5;
+  static const int kHashMask = -4;  // Zero the last two bits.
+  static const int kEntriesPerBucket = 4;
+  static const int kEntryLength = 2;
+  static const int kMapIndex = 0;
+  static const int kKeyIndex = 1;
+  static const int kNotFound = -1;
+
+  // kEntriesPerBucket should be a power of 2.
+  STATIC_ASSERT((kEntriesPerBucket & (kEntriesPerBucket - 1)) == 0);
+  STATIC_ASSERT(kEntriesPerBucket == -kHashMask);
+
+ private:
+  KeyedLookupCache() {
+    for (int i = 0; i < kLength; ++i) {
+      keys_[i].map = NULL;
+      keys_[i].name = NULL;
+      field_offsets_[i] = kNotFound;
+    }
+  }
+
+  static inline int Hash(Handle<Map> map, Handle<Name> name);
+
+  // Get the address of the keys and field_offsets arrays.  Used in
+  // generated code to perform cache lookups.
+  Address keys_address() { return reinterpret_cast<Address>(&keys_); }
+
+  Address field_offsets_address() {
+    return reinterpret_cast<Address>(&field_offsets_);
+  }
+
+  struct Key {
+    Map* map;
+    Name* name;
+  };
+
+  Key keys_[kLength];
+  int field_offsets_[kLength];
+
+  friend class ExternalReference;
+  friend class Isolate;
+  DISALLOW_COPY_AND_ASSIGN(KeyedLookupCache);
+};
+
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_LOOKUP_CACHE_H_
diff --git a/src/lookup.cc b/src/lookup.cc
index 54015d4..b6c0b92 100644
--- a/src/lookup.cc
+++ b/src/lookup.cc
@@ -13,7 +13,6 @@
 namespace v8 {
 namespace internal {
 
-
 // static
 LookupIterator LookupIterator::PropertyOrElement(Isolate* isolate,
                                                  Handle<Object> receiver,
@@ -308,6 +307,11 @@
     PropertyAttributes attributes, Object::StoreFromKeyed store_mode) {
   DCHECK(receiver.is_identical_to(GetStoreTarget()));
   if (state_ == TRANSITION) return;
+
+  if (!IsElement() && name()->IsPrivate()) {
+    attributes = static_cast<PropertyAttributes>(attributes | DONT_ENUM);
+  }
+
   DCHECK(state_ != LookupIterator::ACCESSOR ||
          (GetAccessors()->IsAccessorInfo() &&
           AccessorInfo::cast(*GetAccessors())->is_special_data_property()));
@@ -416,11 +420,6 @@
         isolate_, is_prototype_map
                       ? &RuntimeCallStats::PrototypeObject_DeleteProperty
                       : &RuntimeCallStats::Object_DeleteProperty);
-    TRACE_EVENT_RUNTIME_CALL_STATS_TRACING_SCOPED(
-        isolate_,
-        (is_prototype_map
-             ? &tracing::TraceEventStatsTable::PrototypeObject_DeleteProperty
-             : &tracing::TraceEventStatsTable::Object_DeleteProperty));
 
     PropertyNormalizationMode mode =
         is_prototype_map ? KEEP_INOBJECT_PROPERTIES : CLEAR_INOBJECT_PROPERTIES;
@@ -447,6 +446,9 @@
   // handled via a trap. Adding properties to primitive values is not
   // observable.
   Handle<JSObject> receiver = GetStoreTarget();
+  if (!IsElement() && name()->IsPrivate()) {
+    attributes = static_cast<PropertyAttributes>(attributes | DONT_ENUM);
+  }
 
   if (!IsElement() && !receiver->map()->is_dictionary_map()) {
     Handle<Map> old_map(receiver->map(), isolate_);
diff --git a/src/lookup.h b/src/lookup.h
index ffc7904..687c677 100644
--- a/src/lookup.h
+++ b/src/lookup.h
@@ -43,30 +43,26 @@
 
   LookupIterator(Handle<Object> receiver, Handle<Name> name,
                  Configuration configuration = DEFAULT)
-      : configuration_(ComputeConfiguration(configuration, name)),
-        interceptor_state_(InterceptorState::kUninitialized),
-        property_details_(PropertyDetails::Empty()),
-        isolate_(name->GetIsolate()),
-        name_(isolate_->factory()->InternalizeName(name)),
-        receiver_(receiver),
-        initial_holder_(GetRoot(isolate_, receiver)),
-        // kMaxUInt32 isn't a valid index.
-        index_(kMaxUInt32),
-        number_(DescriptorArray::kNotFound) {
-#ifdef DEBUG
-    uint32_t index;  // Assert that the name is not an array index.
-    DCHECK(!name->AsArrayIndex(&index));
-#endif  // DEBUG
-    Start<false>();
-  }
+      : LookupIterator(name->GetIsolate(), receiver, name, configuration) {}
+
+  LookupIterator(Isolate* isolate, Handle<Object> receiver, Handle<Name> name,
+                 Configuration configuration = DEFAULT)
+      : LookupIterator(isolate, receiver, name, GetRoot(isolate, receiver),
+                       configuration) {}
 
   LookupIterator(Handle<Object> receiver, Handle<Name> name,
                  Handle<JSReceiver> holder,
                  Configuration configuration = DEFAULT)
+      : LookupIterator(name->GetIsolate(), receiver, name, holder,
+                       configuration) {}
+
+  LookupIterator(Isolate* isolate, Handle<Object> receiver, Handle<Name> name,
+                 Handle<JSReceiver> holder,
+                 Configuration configuration = DEFAULT)
       : configuration_(ComputeConfiguration(configuration, name)),
         interceptor_state_(InterceptorState::kUninitialized),
         property_details_(PropertyDetails::Empty()),
-        isolate_(name->GetIsolate()),
+        isolate_(isolate),
         name_(isolate_->factory()->InternalizeName(name)),
         receiver_(receiver),
         initial_holder_(holder),
@@ -82,18 +78,8 @@
 
   LookupIterator(Isolate* isolate, Handle<Object> receiver, uint32_t index,
                  Configuration configuration = DEFAULT)
-      : configuration_(configuration),
-        interceptor_state_(InterceptorState::kUninitialized),
-        property_details_(PropertyDetails::Empty()),
-        isolate_(isolate),
-        receiver_(receiver),
-        initial_holder_(GetRoot(isolate, receiver, index)),
-        index_(index),
-        number_(DescriptorArray::kNotFound) {
-    // kMaxUInt32 isn't a valid index.
-    DCHECK_NE(kMaxUInt32, index_);
-    Start<true>();
-  }
+      : LookupIterator(isolate, receiver, index,
+                       GetRoot(isolate, receiver, index), configuration) {}
 
   LookupIterator(Isolate* isolate, Handle<Object> receiver, uint32_t index,
                  Handle<JSReceiver> holder,
@@ -289,7 +275,7 @@
   MUST_USE_RESULT inline JSReceiver* NextHolder(Map* map);
 
   template <bool is_element>
-  void Start();
+  V8_EXPORT_PRIVATE void Start();
   template <bool is_element>
   void NextInternal(Map* map, JSReceiver* holder);
   template <bool is_element>
diff --git a/src/machine-type.h b/src/machine-type.h
index bcc85b3..e9605d7 100644
--- a/src/machine-type.h
+++ b/src/machine-type.h
@@ -10,7 +10,7 @@
 #include "src/base/bits.h"
 #include "src/globals.h"
 #include "src/signature.h"
-#include "src/zone.h"
+#include "src/zone/zone.h"
 
 namespace v8 {
 namespace internal {
@@ -22,12 +22,14 @@
   kWord16,
   kWord32,
   kWord64,
-  kFloat32,
-  kFloat64,  // must follow kFloat32
-  kSimd128,  // must follow kFloat64
   kTaggedSigned,
   kTaggedPointer,
-  kTagged
+  kTagged,
+  // FP representations must be last, and in order of increasing size.
+  kFloat32,
+  kFloat64,
+  kSimd128,
+  kFirstFPRepresentation = kFloat32
 };
 
 const char* MachineReprToString(MachineRepresentation);
@@ -62,6 +64,8 @@
   MachineRepresentation representation() const { return representation_; }
   MachineSemantic semantic() const { return semantic_; }
 
+  bool IsNone() { return representation() == MachineRepresentation::kNone; }
+
   bool IsSigned() {
     return semantic() == MachineSemantic::kInt32 ||
            semantic() == MachineSemantic::kInt64;
@@ -119,6 +123,14 @@
     return MachineType(MachineRepresentation::kWord64,
                        MachineSemantic::kUint64);
   }
+  static MachineType TaggedPointer() {
+    return MachineType(MachineRepresentation::kTaggedPointer,
+                       MachineSemantic::kAny);
+  }
+  static MachineType TaggedSigned() {
+    return MachineType(MachineRepresentation::kTaggedSigned,
+                       MachineSemantic::kInt32);
+  }
   static MachineType AnyTagged() {
     return MachineType(MachineRepresentation::kTagged, MachineSemantic::kAny);
   }
@@ -161,7 +173,7 @@
     return MachineType(MachineRepresentation::kBit, MachineSemantic::kNone);
   }
 
-  static MachineType TypeForRepresentation(MachineRepresentation& rep,
+  static MachineType TypeForRepresentation(const MachineRepresentation& rep,
                                            bool isSigned = true) {
     switch (rep) {
       case MachineRepresentation::kNone:
@@ -184,6 +196,10 @@
         return MachineType::Simd128();
       case MachineRepresentation::kTagged:
         return MachineType::AnyTagged();
+      case MachineRepresentation::kTaggedSigned:
+        return MachineType::TaggedSigned();
+      case MachineRepresentation::kTaggedPointer:
+        return MachineType::TaggedPointer();
       default:
         UNREACHABLE();
         return MachineType::None();
@@ -204,14 +220,22 @@
          static_cast<size_t>(type.semantic()) * 16;
 }
 
-std::ostream& operator<<(std::ostream& os, MachineRepresentation rep);
+V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream& os,
+                                           MachineRepresentation rep);
 std::ostream& operator<<(std::ostream& os, MachineSemantic type);
 std::ostream& operator<<(std::ostream& os, MachineType type);
 
 inline bool IsFloatingPoint(MachineRepresentation rep) {
-  return rep == MachineRepresentation::kFloat32 ||
-         rep == MachineRepresentation::kFloat64 ||
-         rep == MachineRepresentation::kSimd128;
+  return rep >= MachineRepresentation::kFirstFPRepresentation;
+}
+
+inline bool CanBeTaggedPointer(MachineRepresentation rep) {
+  return rep == MachineRepresentation::kTagged ||
+         rep == MachineRepresentation::kTaggedPointer;
+}
+
+inline bool IsAnyTagged(MachineRepresentation rep) {
+  return CanBeTaggedPointer(rep) || rep == MachineRepresentation::kTaggedSigned;
 }
 
 // Gets the log2 of the element size in bytes of the machine type.
diff --git a/src/messages.cc b/src/messages.cc
index 5d03318..cc6349d 100644
--- a/src/messages.cc
+++ b/src/messages.cc
@@ -164,71 +164,63 @@
   return GetMessage(isolate, data)->ToCString(DISALLOW_NULLS);
 }
 
+void JSStackFrame::FromFrameArray(Isolate* isolate, Handle<FrameArray> array,
+                                  int frame_ix) {
+  DCHECK(!array->IsWasmFrame(frame_ix));
+  isolate_ = isolate;
+  receiver_ = handle(array->Receiver(frame_ix), isolate);
+  function_ = handle(array->Function(frame_ix), isolate);
+  code_ = handle(array->Code(frame_ix), isolate);
+  offset_ = array->Offset(frame_ix)->value();
 
-CallSite::CallSite(Isolate* isolate, Handle<JSObject> call_site_obj)
-    : isolate_(isolate) {
-  Handle<Object> maybe_function = JSObject::GetDataProperty(
-      call_site_obj, isolate->factory()->call_site_function_symbol());
-  if (maybe_function->IsJSFunction()) {
-    // javascript
-    fun_ = Handle<JSFunction>::cast(maybe_function);
-    receiver_ = JSObject::GetDataProperty(
-        call_site_obj, isolate->factory()->call_site_receiver_symbol());
-  } else {
-    Handle<Object> maybe_wasm_func_index = JSObject::GetDataProperty(
-        call_site_obj, isolate->factory()->call_site_wasm_func_index_symbol());
-    if (!maybe_wasm_func_index->IsSmi()) {
-      // invalid: neither javascript nor wasm
-      return;
-    }
-    // wasm
-    wasm_obj_ = Handle<JSObject>::cast(JSObject::GetDataProperty(
-        call_site_obj, isolate->factory()->call_site_wasm_obj_symbol()));
-    wasm_func_index_ = Smi::cast(*maybe_wasm_func_index)->value();
-    DCHECK(static_cast<int>(wasm_func_index_) >= 0);
-  }
-
-  CHECK(JSObject::GetDataProperty(
-            call_site_obj, isolate->factory()->call_site_position_symbol())
-            ->ToInt32(&pos_));
+  const int flags = array->Flags(frame_ix)->value();
+  force_constructor_ = (flags & FrameArray::kForceConstructor) != 0;
+  is_strict_ = (flags & FrameArray::kIsStrict) != 0;
 }
 
+JSStackFrame::JSStackFrame(Isolate* isolate, Handle<Object> receiver,
+                           Handle<JSFunction> function,
+                           Handle<AbstractCode> code, int offset)
+    : isolate_(isolate),
+      receiver_(receiver),
+      function_(function),
+      code_(code),
+      offset_(offset),
+      force_constructor_(false),
+      is_strict_(false) {}
 
-Handle<Object> CallSite::GetFileName() {
-  if (!IsJavaScript()) return isolate_->factory()->null_value();
-  Object* script = fun_->shared()->script();
-  if (!script->IsScript()) return isolate_->factory()->null_value();
-  return Handle<Object>(Script::cast(script)->name(), isolate_);
+JSStackFrame::JSStackFrame() {}
+
+Handle<Object> JSStackFrame::GetFunction() const {
+  return Handle<Object>::cast(function_);
 }
 
+Handle<Object> JSStackFrame::GetFileName() {
+  if (!HasScript()) return isolate_->factory()->null_value();
+  return handle(GetScript()->name(), isolate_);
+}
 
-Handle<Object> CallSite::GetFunctionName() {
-  if (IsWasm()) {
-    return wasm::GetWasmFunctionNameOrNull(isolate_, wasm_obj_,
-                                           wasm_func_index_);
-  }
-  Handle<String> result = JSFunction::GetName(fun_);
+Handle<Object> JSStackFrame::GetFunctionName() {
+  Handle<String> result = JSFunction::GetName(function_);
   if (result->length() != 0) return result;
 
-  Handle<Object> script(fun_->shared()->script(), isolate_);
-  if (script->IsScript() &&
-      Handle<Script>::cast(script)->compilation_type() ==
-          Script::COMPILATION_TYPE_EVAL) {
+  if (HasScript() &&
+      GetScript()->compilation_type() == Script::COMPILATION_TYPE_EVAL) {
     return isolate_->factory()->eval_string();
   }
   return isolate_->factory()->null_value();
 }
 
-Handle<Object> CallSite::GetScriptNameOrSourceUrl() {
-  if (!IsJavaScript()) return isolate_->factory()->null_value();
-  Object* script_obj = fun_->shared()->script();
-  if (!script_obj->IsScript()) return isolate_->factory()->null_value();
-  Handle<Script> script(Script::cast(script_obj), isolate_);
+Handle<Object> JSStackFrame::GetScriptNameOrSourceUrl() {
+  if (!HasScript()) return isolate_->factory()->null_value();
+  Handle<Script> script = GetScript();
   Object* source_url = script->source_url();
-  if (source_url->IsString()) return Handle<Object>(source_url, isolate_);
-  return Handle<Object>(script->name(), isolate_);
+  return (source_url->IsString()) ? handle(source_url, isolate_)
+                                  : handle(script->name(), isolate_);
 }
 
+namespace {
+
 bool CheckMethodName(Isolate* isolate, Handle<JSObject> obj, Handle<Name> name,
                      Handle<JSFunction> fun,
                      LookupIterator::Configuration config) {
@@ -246,12 +238,13 @@
   return false;
 }
 
+}  // namespace
 
-Handle<Object> CallSite::GetMethodName() {
-  if (!IsJavaScript() || receiver_->IsNull(isolate_) ||
-      receiver_->IsUndefined(isolate_)) {
+Handle<Object> JSStackFrame::GetMethodName() {
+  if (receiver_->IsNull(isolate_) || receiver_->IsUndefined(isolate_)) {
     return isolate_->factory()->null_value();
   }
+
   Handle<JSReceiver> receiver =
       Object::ToObject(isolate_, receiver_).ToHandleChecked();
   if (!receiver->IsJSObject()) {
@@ -259,7 +252,7 @@
   }
 
   Handle<JSObject> obj = Handle<JSObject>::cast(receiver);
-  Handle<Object> function_name(fun_->shared()->name(), isolate_);
+  Handle<Object> function_name(function_->shared()->name(), isolate_);
   if (function_name->IsString()) {
     Handle<String> name = Handle<String>::cast(function_name);
     // ES2015 gives getters and setters name prefixes which must
@@ -268,7 +261,7 @@
         name->IsUtf8EqualTo(CStrVector("set "), true)) {
       name = isolate_->factory()->NewProperSubString(name, 4, name->length());
     }
-    if (CheckMethodName(isolate_, obj, name, fun_,
+    if (CheckMethodName(isolate_, obj, name, function_,
                         LookupIterator::PROTOTYPE_CHAIN_SKIP_INTERCEPTOR)) {
       return name;
     }
@@ -288,7 +281,7 @@
       HandleScope inner_scope(isolate_);
       if (!keys->get(i)->IsName()) continue;
       Handle<Name> name_key(Name::cast(keys->get(i)), isolate_);
-      if (!CheckMethodName(isolate_, current_obj, name_key, fun_,
+      if (!CheckMethodName(isolate_, current_obj, name_key, function_,
                            LookupIterator::OWN_SKIP_INTERCEPTOR))
         continue;
       // Return null in case of duplicates to avoid confusion.
@@ -301,20 +294,6 @@
   return isolate_->factory()->null_value();
 }
 
-Handle<Object> CallSite::GetTypeName() {
-  // TODO(jgruber): Check for strict/constructor here as in
-  // CallSitePrototypeGetThis.
-
-  if (receiver_->IsNull(isolate_) || receiver_->IsUndefined(isolate_))
-    return isolate_->factory()->null_value();
-
-  if (receiver_->IsJSProxy()) return isolate_->factory()->Proxy_string();
-
-  Handle<JSReceiver> receiver_object =
-      Object::ToObject(isolate_, receiver_).ToHandleChecked();
-  return JSReceiver::GetConstructorName(receiver_object);
-}
-
 namespace {
 
 Object* EvalFromFunctionName(Isolate* isolate, Handle<Script> script) {
@@ -414,126 +393,344 @@
 
 }  // namespace
 
-Handle<Object> CallSite::GetEvalOrigin() {
-  if (IsWasm()) return isolate_->factory()->undefined_value();
-  DCHECK(IsJavaScript());
+Handle<Object> JSStackFrame::GetTypeName() {
+  // TODO(jgruber): Check for strict/constructor here as in
+  // CallSitePrototypeGetThis.
 
-  Handle<Object> script = handle(fun_->shared()->script(), isolate_);
-  if (!script->IsScript()) return isolate_->factory()->undefined_value();
+  if (receiver_->IsNull(isolate_) || receiver_->IsUndefined(isolate_))
+    return isolate_->factory()->null_value();
 
-  return FormatEvalOrigin(isolate_, Handle<Script>::cast(script))
-      .ToHandleChecked();
+  if (receiver_->IsJSProxy()) return isolate_->factory()->Proxy_string();
+
+  Handle<JSReceiver> receiver_object =
+      Object::ToObject(isolate_, receiver_).ToHandleChecked();
+  return JSReceiver::GetConstructorName(receiver_object);
 }
 
-int CallSite::GetLineNumber() {
-  if (pos_ >= 0 && IsJavaScript()) {
-    Handle<Object> script_obj(fun_->shared()->script(), isolate_);
-    if (script_obj->IsScript()) {
-      Handle<Script> script = Handle<Script>::cast(script_obj);
-      return Script::GetLineNumber(script, pos_) + 1;
-    }
+Handle<Object> JSStackFrame::GetEvalOrigin() {
+  if (!HasScript()) return isolate_->factory()->undefined_value();
+  return FormatEvalOrigin(isolate_, GetScript()).ToHandleChecked();
+}
+
+int JSStackFrame::GetLineNumber() {
+  DCHECK_LE(0, GetPosition());
+  if (HasScript()) return Script::GetLineNumber(GetScript(), GetPosition()) + 1;
+  return -1;
+}
+
+int JSStackFrame::GetColumnNumber() {
+  DCHECK_LE(0, GetPosition());
+  if (HasScript()) {
+    return Script::GetColumnNumber(GetScript(), GetPosition()) + 1;
   }
   return -1;
 }
 
-
-int CallSite::GetColumnNumber() {
-  if (pos_ >= 0 && IsJavaScript()) {
-    Handle<Object> script_obj(fun_->shared()->script(), isolate_);
-    if (script_obj->IsScript()) {
-      Handle<Script> script = Handle<Script>::cast(script_obj);
-      return Script::GetColumnNumber(script, pos_) + 1;
-    }
-  }
-  return -1;
+bool JSStackFrame::IsNative() {
+  return HasScript() && GetScript()->type() == Script::TYPE_NATIVE;
 }
 
-
-bool CallSite::IsNative() {
-  if (!IsJavaScript()) return false;
-  Handle<Object> script(fun_->shared()->script(), isolate_);
-  return script->IsScript() &&
-         Handle<Script>::cast(script)->type() == Script::TYPE_NATIVE;
-}
-
-
-bool CallSite::IsToplevel() {
-  if (IsWasm()) return false;
+bool JSStackFrame::IsToplevel() {
   return receiver_->IsJSGlobalProxy() || receiver_->IsNull(isolate_) ||
          receiver_->IsUndefined(isolate_);
 }
 
-
-bool CallSite::IsEval() {
-  if (!IsJavaScript()) return false;
-  Handle<Object> script(fun_->shared()->script(), isolate_);
-  return script->IsScript() &&
-         Handle<Script>::cast(script)->compilation_type() ==
-             Script::COMPILATION_TYPE_EVAL;
+bool JSStackFrame::IsEval() {
+  return HasScript() &&
+         GetScript()->compilation_type() == Script::COMPILATION_TYPE_EVAL;
 }
 
-
-bool CallSite::IsConstructor() {
-  // Builtin exit frames mark constructors by passing a special symbol as the
-  // receiver.
-  Object* ctor_symbol = isolate_->heap()->call_site_constructor_symbol();
-  if (*receiver_ == ctor_symbol) return true;
-  if (!IsJavaScript() || !receiver_->IsJSObject()) return false;
+bool JSStackFrame::IsConstructor() {
+  if (force_constructor_) return true;
+  if (!receiver_->IsJSObject()) return false;
   Handle<Object> constructor =
       JSReceiver::GetDataProperty(Handle<JSObject>::cast(receiver_),
                                   isolate_->factory()->constructor_string());
-  return constructor.is_identical_to(fun_);
+  return constructor.is_identical_to(function_);
 }
 
 namespace {
 
-// Convert the raw frames as written by Isolate::CaptureSimpleStackTrace into
-// a vector of JS CallSite objects.
-MaybeHandle<FixedArray> GetStackFrames(Isolate* isolate,
-                                       Handle<Object> raw_stack) {
-  DCHECK(raw_stack->IsJSArray());
-  Handle<JSArray> raw_stack_array = Handle<JSArray>::cast(raw_stack);
+bool IsNonEmptyString(Handle<Object> object) {
+  return (object->IsString() && String::cast(*object)->length() > 0);
+}
 
-  DCHECK(raw_stack_array->elements()->IsFixedArray());
-  Handle<FixedArray> raw_stack_elements =
-      handle(FixedArray::cast(raw_stack_array->elements()), isolate);
-
-  const int raw_stack_len = raw_stack_elements->length();
-  DCHECK(raw_stack_len % 4 == 1);  // Multiples of 4 plus sloppy frames count.
-  const int frame_count = (raw_stack_len - 1) / 4;
-
-  Handle<Object> sloppy_frames_obj =
-      FixedArray::get(*raw_stack_elements, 0, isolate);
-  int sloppy_frames = Handle<Smi>::cast(sloppy_frames_obj)->value();
-
-  int dst_ix = 0;
-  Handle<FixedArray> frames = isolate->factory()->NewFixedArray(frame_count);
-  for (int i = 1; i < raw_stack_len; i += 4) {
-    Handle<Object> recv = FixedArray::get(*raw_stack_elements, i, isolate);
-    Handle<Object> fun = FixedArray::get(*raw_stack_elements, i + 1, isolate);
-    Handle<AbstractCode> code = Handle<AbstractCode>::cast(
-        FixedArray::get(*raw_stack_elements, i + 2, isolate));
-    Handle<Smi> pc =
-        Handle<Smi>::cast(FixedArray::get(*raw_stack_elements, i + 3, isolate));
-
-    Handle<Object> pos =
-        (fun->IsSmi() && pc->value() < 0)
-            ? handle(Smi::FromInt(-1 - pc->value()), isolate)
-            : handle(Smi::FromInt(code->SourcePosition(pc->value())), isolate);
-
-    sloppy_frames--;
-    Handle<Object> strict = isolate->factory()->ToBoolean(sloppy_frames < 0);
-
-    Handle<Object> callsite;
-    ASSIGN_RETURN_ON_EXCEPTION(
-        isolate, callsite,
-        CallSiteUtils::Construct(isolate, recv, fun, pos, strict), FixedArray);
-
-    frames->set(dst_ix++, *callsite);
+void AppendFileLocation(Isolate* isolate, JSStackFrame* call_site,
+                        IncrementalStringBuilder* builder) {
+  if (call_site->IsNative()) {
+    builder->AppendCString("native");
+    return;
   }
 
-  DCHECK_EQ(frame_count, dst_ix);
-  return frames;
+  Handle<Object> file_name = call_site->GetScriptNameOrSourceUrl();
+  if (!file_name->IsString() && call_site->IsEval()) {
+    Handle<Object> eval_origin = call_site->GetEvalOrigin();
+    DCHECK(eval_origin->IsString());
+    builder->AppendString(Handle<String>::cast(eval_origin));
+    builder->AppendCString(", ");  // Expecting source position to follow.
+  }
+
+  if (IsNonEmptyString(file_name)) {
+    builder->AppendString(Handle<String>::cast(file_name));
+  } else {
+    // Source code does not originate from a file and is not native, but we
+    // can still get the source position inside the source string, e.g. in
+    // an eval string.
+    builder->AppendCString("<anonymous>");
+  }
+
+  int line_number = call_site->GetLineNumber();
+  if (line_number != -1) {
+    builder->AppendCharacter(':');
+    Handle<String> line_string = isolate->factory()->NumberToString(
+        handle(Smi::FromInt(line_number), isolate), isolate);
+    builder->AppendString(line_string);
+
+    int column_number = call_site->GetColumnNumber();
+    if (column_number != -1) {
+      builder->AppendCharacter(':');
+      Handle<String> column_string = isolate->factory()->NumberToString(
+          handle(Smi::FromInt(column_number), isolate), isolate);
+      builder->AppendString(column_string);
+    }
+  }
+}
+
+int StringIndexOf(Isolate* isolate, Handle<String> subject,
+                  Handle<String> pattern) {
+  if (pattern->length() > subject->length()) return -1;
+  return String::IndexOf(isolate, subject, pattern, 0);
+}
+
+// Returns true iff
+// 1. the subject ends with '.' + pattern, or
+// 2. subject == pattern.
+bool StringEndsWithMethodName(Isolate* isolate, Handle<String> subject,
+                              Handle<String> pattern) {
+  if (String::Equals(subject, pattern)) return true;
+
+  FlatStringReader subject_reader(isolate, String::Flatten(subject));
+  FlatStringReader pattern_reader(isolate, String::Flatten(pattern));
+
+  int pattern_index = pattern_reader.length() - 1;
+  int subject_index = subject_reader.length() - 1;
+  for (int i = 0; i <= pattern_reader.length(); i++) {  // Iterate over len + 1.
+    if (subject_index < 0) {
+      return false;
+    }
+
+    const uc32 subject_char = subject_reader.Get(subject_index);
+    if (i == pattern_reader.length()) {
+      if (subject_char != '.') return false;
+    } else if (subject_char != pattern_reader.Get(pattern_index)) {
+      return false;
+    }
+
+    pattern_index--;
+    subject_index--;
+  }
+
+  return true;
+}
+
+void AppendMethodCall(Isolate* isolate, JSStackFrame* call_site,
+                      IncrementalStringBuilder* builder) {
+  Handle<Object> type_name = call_site->GetTypeName();
+  Handle<Object> method_name = call_site->GetMethodName();
+  Handle<Object> function_name = call_site->GetFunctionName();
+
+  if (IsNonEmptyString(function_name)) {
+    Handle<String> function_string = Handle<String>::cast(function_name);
+    if (IsNonEmptyString(type_name)) {
+      Handle<String> type_string = Handle<String>::cast(type_name);
+      bool starts_with_type_name =
+          (StringIndexOf(isolate, function_string, type_string) == 0);
+      if (!starts_with_type_name) {
+        builder->AppendString(type_string);
+        builder->AppendCharacter('.');
+      }
+    }
+    builder->AppendString(function_string);
+
+    if (IsNonEmptyString(method_name)) {
+      Handle<String> method_string = Handle<String>::cast(method_name);
+      if (!StringEndsWithMethodName(isolate, function_string, method_string)) {
+        builder->AppendCString(" [as ");
+        builder->AppendString(method_string);
+        builder->AppendCharacter(']');
+      }
+    }
+  } else {
+    builder->AppendString(Handle<String>::cast(type_name));
+    builder->AppendCharacter('.');
+    if (IsNonEmptyString(method_name)) {
+      builder->AppendString(Handle<String>::cast(method_name));
+    } else {
+      builder->AppendCString("<anonymous>");
+    }
+  }
+}
+
+}  // namespace
+
+MaybeHandle<String> JSStackFrame::ToString() {
+  IncrementalStringBuilder builder(isolate_);
+
+  Handle<Object> function_name = GetFunctionName();
+
+  const bool is_toplevel = IsToplevel();
+  const bool is_constructor = IsConstructor();
+  const bool is_method_call = !(is_toplevel || is_constructor);
+
+  if (is_method_call) {
+    AppendMethodCall(isolate_, this, &builder);
+  } else if (is_constructor) {
+    builder.AppendCString("new ");
+    if (IsNonEmptyString(function_name)) {
+      builder.AppendString(Handle<String>::cast(function_name));
+    } else {
+      builder.AppendCString("<anonymous>");
+    }
+  } else if (IsNonEmptyString(function_name)) {
+    builder.AppendString(Handle<String>::cast(function_name));
+  } else {
+    AppendFileLocation(isolate_, this, &builder);
+    RETURN_RESULT(isolate_, builder.Finish(), String);
+  }
+
+  builder.AppendCString(" (");
+  AppendFileLocation(isolate_, this, &builder);
+  builder.AppendCString(")");
+
+  RETURN_RESULT(isolate_, builder.Finish(), String);
+}
+
+int JSStackFrame::GetPosition() const { return code_->SourcePosition(offset_); }
+
+bool JSStackFrame::HasScript() const {
+  return function_->shared()->script()->IsScript();
+}
+
+Handle<Script> JSStackFrame::GetScript() const {
+  return handle(Script::cast(function_->shared()->script()), isolate_);
+}
+
+void WasmStackFrame::FromFrameArray(Isolate* isolate, Handle<FrameArray> array,
+                                    int frame_ix) {
+  DCHECK(array->IsWasmFrame(frame_ix));
+  isolate_ = isolate;
+  wasm_obj_ = handle(array->WasmObject(frame_ix), isolate);
+  wasm_func_index_ = array->WasmFunctionIndex(frame_ix)->value();
+  code_ = handle(array->Code(frame_ix), isolate);
+  offset_ = array->Offset(frame_ix)->value();
+}
+
+Handle<Object> WasmStackFrame::GetFunction() const {
+  Handle<Object> obj(Smi::FromInt(wasm_func_index_), isolate_);
+  return obj;
+}
+
+Handle<Object> WasmStackFrame::GetFunctionName() {
+  return wasm::GetWasmFunctionNameOrNull(isolate_, wasm_obj_, wasm_func_index_);
+}
+
+MaybeHandle<String> WasmStackFrame::ToString() {
+  IncrementalStringBuilder builder(isolate_);
+
+  Handle<Object> name = GetFunctionName();
+  if (name->IsNull(isolate_)) {
+    builder.AppendCString("<WASM UNNAMED>");
+  } else {
+    DCHECK(name->IsString());
+    builder.AppendString(Handle<String>::cast(name));
+  }
+
+  builder.AppendCString(" (<WASM>[");
+
+  Handle<Smi> ix(Smi::FromInt(wasm_func_index_), isolate_);
+  builder.AppendString(isolate_->factory()->NumberToString(ix));
+
+  builder.AppendCString("]+");
+
+  Handle<Object> pos(Smi::FromInt(GetPosition()), isolate_);
+  builder.AppendString(isolate_->factory()->NumberToString(pos));
+  builder.AppendCString(")");
+
+  return builder.Finish();
+}
+
+int WasmStackFrame::GetPosition() const {
+  return (offset_ < 0) ? (-1 - offset_) : code_->SourcePosition(offset_);
+}
+
+Handle<Object> WasmStackFrame::Null() const {
+  return isolate_->factory()->null_value();
+}
+
+FrameArrayIterator::FrameArrayIterator(Isolate* isolate,
+                                       Handle<FrameArray> array, int frame_ix)
+    : isolate_(isolate), array_(array), next_frame_ix_(frame_ix) {}
+
+bool FrameArrayIterator::HasNext() const {
+  return (next_frame_ix_ < array_->FrameCount());
+}
+
+void FrameArrayIterator::Next() { next_frame_ix_++; }
+
+StackFrameBase* FrameArrayIterator::Frame() {
+  DCHECK(HasNext());
+  const int flags = array_->Flags(next_frame_ix_)->value();
+  const bool is_js_frame = (flags & FrameArray::kIsWasmFrame) == 0;
+  if (is_js_frame) {
+    js_frame_.FromFrameArray(isolate_, array_, next_frame_ix_);
+    return &js_frame_;
+  } else {
+    wasm_frame_.FromFrameArray(isolate_, array_, next_frame_ix_);
+    return &wasm_frame_;
+  }
+}
+
+namespace {
+
+MaybeHandle<Object> ConstructCallSite(Isolate* isolate,
+                                      Handle<FrameArray> frame_array,
+                                      int frame_index) {
+  Handle<JSFunction> target =
+      handle(isolate->native_context()->callsite_function(), isolate);
+
+  Handle<JSObject> obj;
+  ASSIGN_RETURN_ON_EXCEPTION(isolate, obj, JSObject::New(target, target),
+                             Object);
+
+  Handle<Symbol> key = isolate->factory()->call_site_frame_array_symbol();
+  RETURN_ON_EXCEPTION(isolate, JSObject::SetOwnPropertyIgnoreAttributes(
+                                   obj, key, frame_array, DONT_ENUM),
+                      Object);
+
+  key = isolate->factory()->call_site_frame_index_symbol();
+  Handle<Object> value(Smi::FromInt(frame_index), isolate);
+  RETURN_ON_EXCEPTION(isolate, JSObject::SetOwnPropertyIgnoreAttributes(
+                                   obj, key, value, DONT_ENUM),
+                      Object);
+
+  return obj;
+}
+
+// Convert the raw frames as written by Isolate::CaptureSimpleStackTrace into
+// a JSArray of JSCallSite objects.
+MaybeHandle<JSArray> GetStackFrames(Isolate* isolate,
+                                    Handle<FrameArray> elems) {
+  const int frame_count = elems->FrameCount();
+
+  Handle<FixedArray> frames = isolate->factory()->NewFixedArray(frame_count);
+  for (int i = 0; i < frame_count; i++) {
+    Handle<Object> site;
+    ASSIGN_RETURN_ON_EXCEPTION(isolate, site,
+                               ConstructCallSite(isolate, elems, i), JSArray);
+    frames->set(i, *site);
+  }
+
+  return isolate->factory()->NewJSArrayWithElements(frames);
 }
 
 MaybeHandle<Object> AppendErrorString(Isolate* isolate, Handle<Object> error,
@@ -590,11 +787,11 @@
 MaybeHandle<Object> ErrorUtils::FormatStackTrace(Isolate* isolate,
                                                  Handle<JSObject> error,
                                                  Handle<Object> raw_stack) {
-  // Create JS CallSite objects from the raw stack frame array.
+  DCHECK(raw_stack->IsJSArray());
+  Handle<JSArray> raw_stack_array = Handle<JSArray>::cast(raw_stack);
 
-  Handle<FixedArray> frames;
-  ASSIGN_RETURN_ON_EXCEPTION(isolate, frames,
-                             GetStackFrames(isolate, raw_stack), Object);
+  DCHECK(raw_stack_array->elements()->IsFixedArray());
+  Handle<FrameArray> elems(FrameArray::cast(raw_stack_array->elements()));
 
   // If there's a user-specified "prepareStackFrames" function, call it on the
   // frames and use its result.
@@ -609,12 +806,16 @@
   const bool in_recursion = isolate->formatting_stack_trace();
   if (prepare_stack_trace->IsJSFunction() && !in_recursion) {
     PrepareStackTraceScope scope(isolate);
-    Handle<JSArray> array = isolate->factory()->NewJSArrayWithElements(frames);
+
+    Handle<JSArray> sites;
+    ASSIGN_RETURN_ON_EXCEPTION(isolate, sites, GetStackFrames(isolate, elems),
+                               Object);
 
     const int argc = 2;
     ScopedVector<Handle<Object>> argv(argc);
+
     argv[0] = error;
-    argv[1] = array;
+    argv[1] = sites;
 
     Handle<Object> result;
     ASSIGN_RETURN_ON_EXCEPTION(
@@ -625,17 +826,18 @@
     return result;
   }
 
+  // Otherwise, run our internal formatting logic.
+
   IncrementalStringBuilder builder(isolate);
 
   RETURN_ON_EXCEPTION(isolate, AppendErrorString(isolate, error, &builder),
                       Object);
 
-  for (int i = 0; i < frames->length(); i++) {
+  for (FrameArrayIterator it(isolate, elems); it.HasNext(); it.Next()) {
     builder.AppendCString("\n    at ");
 
-    Handle<Object> frame = FixedArray::get(*frames, i, isolate);
-    MaybeHandle<String> maybe_frame_string =
-        CallSiteUtils::ToString(isolate, frame);
+    StackFrameBase* frame = it.Frame();
+    MaybeHandle<String> maybe_frame_string = frame->ToString();
     if (maybe_frame_string.is_null()) {
       // CallSite.toString threw. Try to return a string representation of the
       // thrown exception instead.
@@ -902,290 +1104,5 @@
                                no_caller, false);
 }
 
-#define SET_CALLSITE_PROPERTY(target, key, value)                        \
-  RETURN_ON_EXCEPTION(                                                   \
-      isolate, JSObject::SetOwnPropertyIgnoreAttributes(                 \
-                   target, isolate->factory()->key(), value, DONT_ENUM), \
-      Object)
-
-MaybeHandle<Object> CallSiteUtils::Construct(Isolate* isolate,
-                                             Handle<Object> receiver,
-                                             Handle<Object> fun,
-                                             Handle<Object> pos,
-                                             Handle<Object> strict_mode) {
-  // Create the JS object.
-
-  Handle<JSFunction> target =
-      handle(isolate->native_context()->callsite_function(), isolate);
-
-  Handle<JSObject> obj;
-  ASSIGN_RETURN_ON_EXCEPTION(isolate, obj, JSObject::New(target, target),
-                             Object);
-
-  // For wasm frames, receiver is the wasm object and fun is the function index
-  // instead of an actual function.
-  const bool is_wasm_object =
-      receiver->IsJSObject() && wasm::IsWasmObject(JSObject::cast(*receiver));
-  if (!fun->IsJSFunction() && !is_wasm_object) {
-    THROW_NEW_ERROR(isolate,
-                    NewTypeError(MessageTemplate::kCallSiteExpectsFunction,
-                                 Object::TypeOf(isolate, receiver),
-                                 Object::TypeOf(isolate, fun)),
-                    Object);
-  }
-
-  if (is_wasm_object) {
-    DCHECK(fun->IsSmi());
-    DCHECK(wasm::GetNumberOfFunctions(JSObject::cast(*receiver)) >
-           Smi::cast(*fun)->value());
-
-    SET_CALLSITE_PROPERTY(obj, call_site_wasm_obj_symbol, receiver);
-    SET_CALLSITE_PROPERTY(obj, call_site_wasm_func_index_symbol, fun);
-  } else {
-    DCHECK(fun->IsJSFunction());
-    SET_CALLSITE_PROPERTY(obj, call_site_receiver_symbol, receiver);
-    SET_CALLSITE_PROPERTY(obj, call_site_function_symbol, fun);
-  }
-
-  DCHECK(pos->IsSmi());
-  SET_CALLSITE_PROPERTY(obj, call_site_position_symbol, pos);
-  SET_CALLSITE_PROPERTY(
-      obj, call_site_strict_symbol,
-      isolate->factory()->ToBoolean(strict_mode->BooleanValue()));
-
-  return obj;
-}
-
-#undef SET_CALLSITE_PROPERTY
-
-namespace {
-
-bool IsNonEmptyString(Handle<Object> object) {
-  return (object->IsString() && String::cast(*object)->length() > 0);
-}
-
-MaybeHandle<JSObject> AppendWasmToString(Isolate* isolate,
-                                         Handle<JSObject> recv,
-                                         CallSite* call_site,
-                                         IncrementalStringBuilder* builder) {
-  Handle<Object> name = call_site->GetFunctionName();
-  if (name->IsNull(isolate)) {
-    builder->AppendCString("<WASM UNNAMED>");
-  } else {
-    DCHECK(name->IsString());
-    builder->AppendString(Handle<String>::cast(name));
-  }
-
-  builder->AppendCString(" (<WASM>[");
-
-  Handle<String> ix = isolate->factory()->NumberToString(
-      handle(Smi::FromInt(call_site->wasm_func_index()), isolate));
-  builder->AppendString(ix);
-
-  builder->AppendCString("]+");
-
-  Handle<Object> pos;
-  ASSIGN_RETURN_ON_EXCEPTION(
-      isolate, pos, JSObject::GetProperty(
-                        recv, isolate->factory()->call_site_position_symbol()),
-      JSObject);
-  DCHECK(pos->IsNumber());
-  builder->AppendString(isolate->factory()->NumberToString(pos));
-  builder->AppendCString(")");
-
-  return recv;
-}
-
-MaybeHandle<JSObject> AppendFileLocation(Isolate* isolate,
-                                         Handle<JSObject> recv,
-                                         CallSite* call_site,
-                                         IncrementalStringBuilder* builder) {
-  if (call_site->IsNative()) {
-    builder->AppendCString("native");
-    return recv;
-  }
-
-  Handle<Object> file_name = call_site->GetScriptNameOrSourceUrl();
-  if (!file_name->IsString() && call_site->IsEval()) {
-    Handle<Object> eval_origin = call_site->GetEvalOrigin();
-    DCHECK(eval_origin->IsString());
-    builder->AppendString(Handle<String>::cast(eval_origin));
-    builder->AppendCString(", ");  // Expecting source position to follow.
-  }
-
-  if (IsNonEmptyString(file_name)) {
-    builder->AppendString(Handle<String>::cast(file_name));
-  } else {
-    // Source code does not originate from a file and is not native, but we
-    // can still get the source position inside the source string, e.g. in
-    // an eval string.
-    builder->AppendCString("<anonymous>");
-  }
-
-  int line_number = call_site->GetLineNumber();
-  if (line_number != -1) {
-    builder->AppendCharacter(':');
-    Handle<String> line_string = isolate->factory()->NumberToString(
-        handle(Smi::FromInt(line_number), isolate), isolate);
-    builder->AppendString(line_string);
-
-    int column_number = call_site->GetColumnNumber();
-    if (column_number != -1) {
-      builder->AppendCharacter(':');
-      Handle<String> column_string = isolate->factory()->NumberToString(
-          handle(Smi::FromInt(column_number), isolate), isolate);
-      builder->AppendString(column_string);
-    }
-  }
-
-  return recv;
-}
-
-int StringIndexOf(Isolate* isolate, Handle<String> subject,
-                  Handle<String> pattern) {
-  if (pattern->length() > subject->length()) return -1;
-  return String::IndexOf(isolate, subject, pattern, 0);
-}
-
-// Returns true iff
-// 1. the subject ends with '.' + pattern, or
-// 2. subject == pattern.
-bool StringEndsWithMethodName(Isolate* isolate, Handle<String> subject,
-                              Handle<String> pattern) {
-  if (String::Equals(subject, pattern)) return true;
-
-  FlatStringReader subject_reader(isolate, String::Flatten(subject));
-  FlatStringReader pattern_reader(isolate, String::Flatten(pattern));
-
-  int pattern_index = pattern_reader.length() - 1;
-  int subject_index = subject_reader.length() - 1;
-  for (int i = 0; i <= pattern_reader.length(); i++) {  // Iterate over len + 1.
-    if (subject_index < 0) {
-      return false;
-    }
-
-    const uc32 subject_char = subject_reader.Get(subject_index);
-    if (i == pattern_reader.length()) {
-      if (subject_char != '.') return false;
-    } else if (subject_char != pattern_reader.Get(pattern_index)) {
-      return false;
-    }
-
-    pattern_index--;
-    subject_index--;
-  }
-
-  return true;
-}
-
-MaybeHandle<JSObject> AppendMethodCall(Isolate* isolate, Handle<JSObject> recv,
-                                       CallSite* call_site,
-                                       IncrementalStringBuilder* builder) {
-  Handle<Object> type_name = call_site->GetTypeName();
-  Handle<Object> method_name = call_site->GetMethodName();
-  Handle<Object> function_name = call_site->GetFunctionName();
-
-  if (IsNonEmptyString(function_name)) {
-    Handle<String> function_string = Handle<String>::cast(function_name);
-    if (IsNonEmptyString(type_name)) {
-      Handle<String> type_string = Handle<String>::cast(type_name);
-      bool starts_with_type_name =
-          (StringIndexOf(isolate, function_string, type_string) == 0);
-      if (!starts_with_type_name) {
-        builder->AppendString(type_string);
-        builder->AppendCharacter('.');
-      }
-    }
-    builder->AppendString(function_string);
-
-    if (IsNonEmptyString(method_name)) {
-      Handle<String> method_string = Handle<String>::cast(method_name);
-      if (!StringEndsWithMethodName(isolate, function_string, method_string)) {
-        builder->AppendCString(" [as ");
-        builder->AppendString(method_string);
-        builder->AppendCharacter(']');
-      }
-    }
-  } else {
-    builder->AppendString(Handle<String>::cast(type_name));
-    builder->AppendCharacter('.');
-    if (IsNonEmptyString(method_name)) {
-      builder->AppendString(Handle<String>::cast(method_name));
-    } else {
-      builder->AppendCString("<anonymous>");
-    }
-  }
-
-  return recv;
-}
-
-}  // namespace
-
-MaybeHandle<String> CallSiteUtils::ToString(Isolate* isolate,
-                                            Handle<Object> receiver) {
-  if (!receiver->IsJSObject()) {
-    THROW_NEW_ERROR(
-        isolate,
-        NewTypeError(MessageTemplate::kIncompatibleMethodReceiver,
-                     isolate->factory()->NewStringFromAsciiChecked("toString"),
-                     receiver),
-        String);
-  }
-  Handle<JSObject> recv = Handle<JSObject>::cast(receiver);
-
-  if (!JSReceiver::HasOwnProperty(
-           recv, isolate->factory()->call_site_position_symbol())
-           .FromMaybe(false)) {
-    THROW_NEW_ERROR(
-        isolate,
-        NewTypeError(MessageTemplate::kCallSiteMethod,
-                     isolate->factory()->NewStringFromAsciiChecked("toString")),
-        String);
-  }
-
-  IncrementalStringBuilder builder(isolate);
-
-  CallSite call_site(isolate, recv);
-  if (call_site.IsWasm()) {
-    RETURN_ON_EXCEPTION(isolate,
-                        AppendWasmToString(isolate, recv, &call_site, &builder),
-                        String);
-    RETURN_RESULT(isolate, builder.Finish(), String);
-  }
-
-  DCHECK(!call_site.IsWasm());
-  Handle<Object> function_name = call_site.GetFunctionName();
-
-  const bool is_toplevel = call_site.IsToplevel();
-  const bool is_constructor = call_site.IsConstructor();
-  const bool is_method_call = !(is_toplevel || is_constructor);
-
-  if (is_method_call) {
-    RETURN_ON_EXCEPTION(
-        isolate, AppendMethodCall(isolate, recv, &call_site, &builder), String);
-  } else if (is_constructor) {
-    builder.AppendCString("new ");
-    if (IsNonEmptyString(function_name)) {
-      builder.AppendString(Handle<String>::cast(function_name));
-    } else {
-      builder.AppendCString("<anonymous>");
-    }
-  } else if (IsNonEmptyString(function_name)) {
-    builder.AppendString(Handle<String>::cast(function_name));
-  } else {
-    RETURN_ON_EXCEPTION(isolate,
-                        AppendFileLocation(isolate, recv, &call_site, &builder),
-                        String);
-    RETURN_RESULT(isolate, builder.Finish(), String);
-  }
-
-  builder.AppendCString(" (");
-  RETURN_ON_EXCEPTION(
-      isolate, AppendFileLocation(isolate, recv, &call_site, &builder), String);
-  builder.AppendCString(")");
-
-  RETURN_RESULT(isolate, builder.Finish(), String);
-}
-
 }  // namespace internal
 }  // namespace v8
diff --git a/src/messages.h b/src/messages.h
index cf49ac9..e7bbcc3 100644
--- a/src/messages.h
+++ b/src/messages.h
@@ -19,6 +19,8 @@
 namespace internal {
 
 // Forward declarations.
+class AbstractCode;
+class FrameArray;
 class JSMessageObject;
 class LookupIterator;
 class SourceInfo;
@@ -42,38 +44,142 @@
   Handle<JSFunction> function_;
 };
 
-
-class CallSite {
+class StackFrameBase {
  public:
-  CallSite(Isolate* isolate, Handle<JSObject> call_site_obj);
+  virtual ~StackFrameBase() {}
 
-  Handle<Object> GetFileName();
-  Handle<Object> GetFunctionName();
-  Handle<Object> GetScriptNameOrSourceUrl();
-  Handle<Object> GetMethodName();
-  Handle<Object> GetTypeName();
-  Handle<Object> GetEvalOrigin();
+  virtual Handle<Object> GetReceiver() const = 0;
+  virtual Handle<Object> GetFunction() const = 0;
+
+  virtual Handle<Object> GetFileName() = 0;
+  virtual Handle<Object> GetFunctionName() = 0;
+  virtual Handle<Object> GetScriptNameOrSourceUrl() = 0;
+  virtual Handle<Object> GetMethodName() = 0;
+  virtual Handle<Object> GetTypeName() = 0;
+  virtual Handle<Object> GetEvalOrigin() = 0;
+
+  virtual int GetPosition() const = 0;
   // Return 1-based line number, including line offset.
-  int GetLineNumber();
+  virtual int GetLineNumber() = 0;
   // Return 1-based column number, including column offset if first line.
-  int GetColumnNumber();
-  bool IsNative();
-  bool IsToplevel();
-  bool IsEval();
-  bool IsConstructor();
+  virtual int GetColumnNumber() = 0;
 
-  bool IsJavaScript() { return !fun_.is_null(); }
-  bool IsWasm() { return !wasm_obj_.is_null(); }
+  virtual bool IsNative() = 0;
+  virtual bool IsToplevel() = 0;
+  virtual bool IsEval() = 0;
+  virtual bool IsConstructor() = 0;
+  virtual bool IsStrict() const = 0;
 
-  int wasm_func_index() const { return wasm_func_index_; }
+  virtual MaybeHandle<String> ToString() = 0;
+};
+
+class JSStackFrame : public StackFrameBase {
+ public:
+  JSStackFrame(Isolate* isolate, Handle<Object> receiver,
+               Handle<JSFunction> function, Handle<AbstractCode> code,
+               int offset);
+  virtual ~JSStackFrame() {}
+
+  Handle<Object> GetReceiver() const override { return receiver_; }
+  Handle<Object> GetFunction() const override;
+
+  Handle<Object> GetFileName() override;
+  Handle<Object> GetFunctionName() override;
+  Handle<Object> GetScriptNameOrSourceUrl() override;
+  Handle<Object> GetMethodName() override;
+  Handle<Object> GetTypeName() override;
+  Handle<Object> GetEvalOrigin() override;
+
+  int GetPosition() const override;
+  int GetLineNumber() override;
+  int GetColumnNumber() override;
+
+  bool IsNative() override;
+  bool IsToplevel() override;
+  bool IsEval() override;
+  bool IsConstructor() override;
+  bool IsStrict() const override { return is_strict_; }
+
+  MaybeHandle<String> ToString() override;
+
+ private:
+  JSStackFrame();
+  void FromFrameArray(Isolate* isolate, Handle<FrameArray> array, int frame_ix);
+
+  bool HasScript() const;
+  Handle<Script> GetScript() const;
+
+  Isolate* isolate_;
+
+  Handle<Object> receiver_;
+  Handle<JSFunction> function_;
+  Handle<AbstractCode> code_;
+  int offset_;
+
+  bool force_constructor_;
+  bool is_strict_;
+
+  friend class FrameArrayIterator;
+};
+
+class WasmStackFrame : public StackFrameBase {
+ public:
+  virtual ~WasmStackFrame() {}
+
+  Handle<Object> GetReceiver() const override { return wasm_obj_; }
+  Handle<Object> GetFunction() const override;
+
+  Handle<Object> GetFileName() override { return Null(); }
+  Handle<Object> GetFunctionName() override;
+  Handle<Object> GetScriptNameOrSourceUrl() override { return Null(); }
+  Handle<Object> GetMethodName() override { return Null(); }
+  Handle<Object> GetTypeName() override { return Null(); }
+  Handle<Object> GetEvalOrigin() override { return Null(); }
+
+  int GetPosition() const override;
+  int GetLineNumber() override { return wasm_func_index_; }
+  int GetColumnNumber() override { return -1; }
+
+  bool IsNative() override { return false; }
+  bool IsToplevel() override { return false; }
+  bool IsEval() override { return false; }
+  bool IsConstructor() override { return false; }
+  bool IsStrict() const override { return false; }
+
+  MaybeHandle<String> ToString() override;
+
+ private:
+  void FromFrameArray(Isolate* isolate, Handle<FrameArray> array, int frame_ix);
+  Handle<Object> Null() const;
+
+  Isolate* isolate_;
+
+  Handle<Object> wasm_obj_;
+  uint32_t wasm_func_index_;
+  Handle<AbstractCode> code_;
+  int offset_;
+
+  friend class FrameArrayIterator;
+};
+
+class FrameArrayIterator {
+ public:
+  FrameArrayIterator(Isolate* isolate, Handle<FrameArray> array,
+                     int frame_ix = 0);
+
+  StackFrameBase* Frame();
+
+  bool HasNext() const;
+  void Next();
 
  private:
   Isolate* isolate_;
-  Handle<Object> receiver_;
-  Handle<JSFunction> fun_;
-  int32_t pos_ = -1;
-  Handle<JSObject> wasm_obj_;
-  uint32_t wasm_func_index_ = static_cast<uint32_t>(-1);
+
+  Handle<FrameArray> array_;
+  int next_frame_ix_;
+
+  WasmStackFrame wasm_frame_;
+  JSStackFrame js_frame_;
 };
 
 // Determines how stack trace collection skips frames.
@@ -107,16 +213,6 @@
                                               Handle<Object> stack_trace);
 };
 
-class CallSiteUtils : public AllStatic {
- public:
-  static MaybeHandle<Object> Construct(Isolate* isolate,
-                                       Handle<Object> receiver,
-                                       Handle<Object> fun, Handle<Object> pos,
-                                       Handle<Object> strict_mode);
-
-  static MaybeHandle<String> ToString(Isolate* isolate, Handle<Object> recv);
-};
-
 #define MESSAGE_TEMPLATES(T)                                                   \
   /* Error */                                                                  \
   T(None, "")                                                                  \
@@ -158,6 +254,7 @@
   T(ConstructorNotFunction, "Constructor % requires 'new'")                    \
   T(ConstructorNotReceiver, "The .constructor property is not an object")      \
   T(CurrencyCode, "Currency code is required with currency style.")            \
+  T(CyclicModuleDependency, "Detected cycle while resolving name '%'")         \
   T(DataViewNotArrayBuffer,                                                    \
     "First argument to DataView constructor must be an ArrayBuffer")           \
   T(DateType, "this is not a Date object.")                                    \
@@ -402,6 +499,7 @@
   T(UnsupportedTimeZone, "Unsupported time zone specified %")                  \
   T(ValueOutOfRange, "Value % out of range for % options property %")          \
   /* SyntaxError */                                                            \
+  T(AmbiguousExport, "Multiple star exports provide name '%'")                 \
   T(BadGetterArity, "Getter must not have any formal parameters.")             \
   T(BadSetterArity, "Setter must have exactly one formal parameter.")          \
   T(ConstructorIsAccessor, "Class constructor may not be an accessor")         \
@@ -454,8 +552,6 @@
   T(NoCatchOrFinally, "Missing catch or finally after try")                    \
   T(NotIsvar, "builtin %%IS_VAR: not a variable")                              \
   T(ParamAfterRest, "Rest parameter must be last formal parameter")            \
-  T(InvalidRestParameter,                                                      \
-    "Rest parameter must be an identifier or destructuring pattern")           \
   T(PushPastSafeLength,                                                        \
     "Pushing % elements on an array-like of length % "                         \
     "is disallowed, as the total surpasses 2**53-1")                           \
@@ -497,19 +593,10 @@
   T(UnexpectedEOS, "Unexpected end of input")                                  \
   T(UnexpectedFunctionSent,                                                    \
     "function.sent expression is not allowed outside a generator")             \
-  T(UnexpectedInsideTailCall, "Unexpected expression inside tail call")        \
   T(UnexpectedReserved, "Unexpected reserved word")                            \
   T(UnexpectedStrictReserved, "Unexpected strict mode reserved word")          \
   T(UnexpectedSuper, "'super' keyword unexpected here")                        \
-  T(UnexpectedSloppyTailCall,                                                  \
-    "Tail call expressions are not allowed in non-strict mode")                \
   T(UnexpectedNewTarget, "new.target expression is not allowed here")          \
-  T(UnexpectedTailCall, "Tail call expression is not allowed here")            \
-  T(UnexpectedTailCallInCatchBlock,                                            \
-    "Tail call expression in catch block when finally block is also present")  \
-  T(UnexpectedTailCallInForInOf, "Tail call expression in for-in/of body")     \
-  T(UnexpectedTailCallInTryBlock, "Tail call expression in try block")         \
-  T(UnexpectedTailCallOfEval, "Tail call of a direct eval is not allowed")     \
   T(UnexpectedTemplateString, "Unexpected template string")                    \
   T(UnexpectedToken, "Unexpected token %")                                     \
   T(UnexpectedTokenIdentifier, "Unexpected identifier")                        \
@@ -517,6 +604,7 @@
   T(UnexpectedTokenString, "Unexpected string")                                \
   T(UnexpectedTokenRegExp, "Unexpected regular expression")                    \
   T(UnknownLabel, "Undefined label '%'")                                       \
+  T(UnresolvableExport, "Module does not provide an export named '%'")         \
   T(UnterminatedArgList, "missing ) after argument list")                      \
   T(UnterminatedRegExp, "Invalid regular expression: missing /")               \
   T(UnterminatedTemplate, "Unterminated template literal")                     \
@@ -540,7 +628,18 @@
   T(WasmTrapFuncInvalid, "invalid function")                                   \
   T(WasmTrapFuncSigMismatch, "function signature mismatch")                    \
   T(WasmTrapInvalidIndex, "invalid index into function table")                 \
-  T(WasmTrapTypeError, "invalid type")
+  T(WasmTrapTypeError, "invalid type")                                         \
+  /* DataCloneError messages */                                                \
+  T(DataCloneError, "% could not be cloned.")                                  \
+  T(DataCloneErrorNeuteredArrayBuffer,                                         \
+    "An ArrayBuffer is neutered and could not be cloned.")                     \
+  T(DataCloneErrorSharedArrayBufferNotTransferred,                             \
+    "A SharedArrayBuffer could not be cloned. SharedArrayBuffer must be "      \
+    "transferred.")                                                            \
+  T(DataCloneDeserializationError, "Unable to deserialize cloned data.")       \
+  T(DataCloneDeserializationVersionError,                                      \
+    "Unable to deserialize cloned data due to invalid or unsupported "         \
+    "version.")
 
 class MessageTemplate {
  public:
diff --git a/src/mips/assembler-mips.cc b/src/mips/assembler-mips.cc
index 20a8a11..f5b235d 100644
--- a/src/mips/assembler-mips.cc
+++ b/src/mips/assembler-mips.cc
@@ -2453,6 +2453,11 @@
   GenInstrRegister(COP1, D, ft, fs, fd, MUL_D);
 }
 
+void Assembler::madd_s(FPURegister fd, FPURegister fr, FPURegister fs,
+                       FPURegister ft) {
+  DCHECK(IsMipsArchVariant(kMips32r2));
+  GenInstrRegister(COP1X, fr, ft, fs, fd, MADD_S);
+}
 
 void Assembler::madd_d(FPURegister fd, FPURegister fr, FPURegister fs,
     FPURegister ft) {
@@ -2460,6 +2465,37 @@
   GenInstrRegister(COP1X, fr, ft, fs, fd, MADD_D);
 }
 
+void Assembler::msub_s(FPURegister fd, FPURegister fr, FPURegister fs,
+                       FPURegister ft) {
+  DCHECK(IsMipsArchVariant(kMips32r2));
+  GenInstrRegister(COP1X, fr, ft, fs, fd, MSUB_S);
+}
+
+void Assembler::msub_d(FPURegister fd, FPURegister fr, FPURegister fs,
+                       FPURegister ft) {
+  DCHECK(IsMipsArchVariant(kMips32r2));
+  GenInstrRegister(COP1X, fr, ft, fs, fd, MSUB_D);
+}
+
+void Assembler::maddf_s(FPURegister fd, FPURegister fs, FPURegister ft) {
+  DCHECK(IsMipsArchVariant(kMips32r6));
+  GenInstrRegister(COP1, S, ft, fs, fd, MADDF_S);
+}
+
+void Assembler::maddf_d(FPURegister fd, FPURegister fs, FPURegister ft) {
+  DCHECK(IsMipsArchVariant(kMips32r6));
+  GenInstrRegister(COP1, D, ft, fs, fd, MADDF_D);
+}
+
+void Assembler::msubf_s(FPURegister fd, FPURegister fs, FPURegister ft) {
+  DCHECK(IsMipsArchVariant(kMips32r6));
+  GenInstrRegister(COP1, S, ft, fs, fd, MSUBF_S);
+}
+
+void Assembler::msubf_d(FPURegister fd, FPURegister fs, FPURegister ft) {
+  DCHECK(IsMipsArchVariant(kMips32r6));
+  GenInstrRegister(COP1, D, ft, fs, fd, MSUBF_D);
+}
 
 void Assembler::div_s(FPURegister fd, FPURegister fs, FPURegister ft) {
   GenInstrRegister(COP1, S, ft, fs, fd, DIV_S);
@@ -2492,13 +2528,11 @@
 
 
 void Assembler::neg_s(FPURegister fd, FPURegister fs) {
-  DCHECK(!IsMipsArchVariant(kMips32r6));
   GenInstrRegister(COP1, S, f0, fs, fd, NEG_S);
 }
 
 
 void Assembler::neg_d(FPURegister fd, FPURegister fs) {
-  DCHECK(!IsMipsArchVariant(kMips32r6));
   GenInstrRegister(COP1, D, f0, fs, fd, NEG_D);
 }
 
diff --git a/src/mips/assembler-mips.h b/src/mips/assembler-mips.h
index 0e41671..e58abd8 100644
--- a/src/mips/assembler-mips.h
+++ b/src/mips/assembler-mips.h
@@ -878,7 +878,14 @@
   void sub_d(FPURegister fd, FPURegister fs, FPURegister ft);
   void mul_s(FPURegister fd, FPURegister fs, FPURegister ft);
   void mul_d(FPURegister fd, FPURegister fs, FPURegister ft);
+  void madd_s(FPURegister fd, FPURegister fr, FPURegister fs, FPURegister ft);
   void madd_d(FPURegister fd, FPURegister fr, FPURegister fs, FPURegister ft);
+  void msub_s(FPURegister fd, FPURegister fr, FPURegister fs, FPURegister ft);
+  void msub_d(FPURegister fd, FPURegister fr, FPURegister fs, FPURegister ft);
+  void maddf_s(FPURegister fd, FPURegister fs, FPURegister ft);
+  void maddf_d(FPURegister fd, FPURegister fs, FPURegister ft);
+  void msubf_s(FPURegister fd, FPURegister fs, FPURegister ft);
+  void msubf_d(FPURegister fd, FPURegister fs, FPURegister ft);
   void div_s(FPURegister fd, FPURegister fs, FPURegister ft);
   void div_d(FPURegister fd, FPURegister fs, FPURegister ft);
   void abs_s(FPURegister fd, FPURegister fs);
diff --git a/src/mips/code-stubs-mips.cc b/src/mips/code-stubs-mips.cc
index 844958e..43e6735 100644
--- a/src/mips/code-stubs-mips.cc
+++ b/src/mips/code-stubs-mips.cc
@@ -1782,7 +1782,6 @@
   // a2 : feedback vector
   // a3 : slot in feedback vector (Smi)
   Label initialize, done, miss, megamorphic, not_array_function;
-  Label done_initialize_count, done_increment_count;
 
   DCHECK_EQ(*TypeFeedbackVector::MegamorphicSentinel(masm->isolate()),
             masm->isolate()->heap()->megamorphic_symbol());
@@ -1801,7 +1800,7 @@
   Register feedback_map = t1;
   Register weak_value = t4;
   __ lw(weak_value, FieldMemOperand(t2, WeakCell::kValueOffset));
-  __ Branch(&done_increment_count, eq, a1, Operand(weak_value));
+  __ Branch(&done, eq, a1, Operand(weak_value));
   __ LoadRoot(at, Heap::kmegamorphic_symbolRootIndex);
   __ Branch(&done, eq, t2, Operand(at));
   __ lw(feedback_map, FieldMemOperand(t2, HeapObject::kMapOffset));
@@ -1823,7 +1822,7 @@
   // Make sure the function is the Array() function
   __ LoadNativeContextSlot(Context::ARRAY_FUNCTION_INDEX, t2);
   __ Branch(&megamorphic, ne, a1, Operand(t2));
-  __ jmp(&done_increment_count);
+  __ jmp(&done);
 
   __ bind(&miss);
 
@@ -1850,28 +1849,19 @@
   // slot.
   CreateAllocationSiteStub create_stub(masm->isolate());
   CallStubInRecordCallTarget(masm, &create_stub);
-  __ Branch(&done_initialize_count);
+  __ Branch(&done);
 
   __ bind(&not_array_function);
   CreateWeakCellStub weak_cell_stub(masm->isolate());
   CallStubInRecordCallTarget(masm, &weak_cell_stub);
 
-  __ bind(&done_initialize_count);
-  // Initialize the call counter.
-  __ Lsa(at, a2, a3, kPointerSizeLog2 - kSmiTagSize);
-  __ li(t0, Operand(Smi::FromInt(1)));
-  __ Branch(USE_DELAY_SLOT, &done);
-  __ sw(t0, FieldMemOperand(at, FixedArray::kHeaderSize + kPointerSize));
+  __ bind(&done);
 
-  __ bind(&done_increment_count);
-
-  // Increment the call count for monomorphic function calls.
+  // Increment the call count for all function calls.
   __ Lsa(at, a2, a3, kPointerSizeLog2 - kSmiTagSize);
   __ lw(t0, FieldMemOperand(at, FixedArray::kHeaderSize + kPointerSize));
   __ Addu(t0, t0, Operand(Smi::FromInt(1)));
   __ sw(t0, FieldMemOperand(at, FixedArray::kHeaderSize + kPointerSize));
-
-  __ bind(&done);
 }
 
 
@@ -1917,6 +1907,14 @@
   __ Jump(isolate()->builtins()->Construct(), RelocInfo::CODE_TARGET);
 }
 
+// Note: feedback_vector and slot are clobbered after the call.
+static void IncrementCallCount(MacroAssembler* masm, Register feedback_vector,
+                               Register slot) {
+  __ Lsa(at, feedback_vector, slot, kPointerSizeLog2 - kSmiTagSize);
+  __ lw(slot, FieldMemOperand(at, FixedArray::kHeaderSize + kPointerSize));
+  __ Addu(slot, slot, Operand(Smi::FromInt(1)));
+  __ sw(slot, FieldMemOperand(at, FixedArray::kHeaderSize + kPointerSize));
+}
 
 void CallICStub::HandleArrayCase(MacroAssembler* masm, Label* miss) {
   // a1 - function
@@ -1929,10 +1927,7 @@
   __ li(a0, Operand(arg_count()));
 
   // Increment the call count for monomorphic function calls.
-  __ Lsa(at, a2, a3, kPointerSizeLog2 - kSmiTagSize);
-  __ lw(a3, FieldMemOperand(at, FixedArray::kHeaderSize + kPointerSize));
-  __ Addu(a3, a3, Operand(Smi::FromInt(1)));
-  __ sw(a3, FieldMemOperand(at, FixedArray::kHeaderSize + kPointerSize));
+  IncrementCallCount(masm, a2, a3);
 
   __ mov(a2, t0);
   __ mov(a3, a1);
@@ -1945,7 +1940,7 @@
   // a1 - function
   // a3 - slot id (Smi)
   // a2 - vector
-  Label extra_checks_or_miss, call, call_function;
+  Label extra_checks_or_miss, call, call_function, call_count_incremented;
   int argc = arg_count();
   ParameterCount actual(argc);
 
@@ -1974,13 +1969,11 @@
   // convincing us that we have a monomorphic JSFunction.
   __ JumpIfSmi(a1, &extra_checks_or_miss);
 
-  // Increment the call count for monomorphic function calls.
-  __ Lsa(at, a2, a3, kPointerSizeLog2 - kSmiTagSize);
-  __ lw(a3, FieldMemOperand(at, FixedArray::kHeaderSize + kPointerSize));
-  __ Addu(a3, a3, Operand(Smi::FromInt(1)));
-  __ sw(a3, FieldMemOperand(at, FixedArray::kHeaderSize + kPointerSize));
-
   __ bind(&call_function);
+
+  // Increment the call count for monomorphic function calls.
+  IncrementCallCount(masm, a2, a3);
+
   __ Jump(masm->isolate()->builtins()->CallFunction(convert_mode(),
                                                     tail_call_mode()),
           RelocInfo::CODE_TARGET, al, zero_reg, Operand(zero_reg),
@@ -2021,6 +2014,10 @@
   __ sw(at, FieldMemOperand(t0, FixedArray::kHeaderSize));
 
   __ bind(&call);
+  IncrementCallCount(masm, a2, a3);
+
+  __ bind(&call_count_incremented);
+
   __ Jump(masm->isolate()->builtins()->Call(convert_mode(), tail_call_mode()),
           RelocInfo::CODE_TARGET, al, zero_reg, Operand(zero_reg),
           USE_DELAY_SLOT);
@@ -2046,11 +2043,6 @@
   __ lw(t1, NativeContextMemOperand());
   __ Branch(&miss, ne, t0, Operand(t1));
 
-  // Initialize the call counter.
-  __ Lsa(at, a2, a3, kPointerSizeLog2 - kSmiTagSize);
-  __ li(t0, Operand(Smi::FromInt(1)));
-  __ sw(t0, FieldMemOperand(at, FixedArray::kHeaderSize + kPointerSize));
-
   // Store the function. Use a stub since we need a frame for allocation.
   // a2 - vector
   // a3 - slot
@@ -2058,9 +2050,11 @@
   {
     FrameScope scope(masm, StackFrame::INTERNAL);
     CreateWeakCellStub create_stub(masm->isolate());
+    __ Push(a2, a3);
     __ Push(cp, a1);
     __ CallStub(&create_stub);
     __ Pop(cp, a1);
+    __ Pop(a2, a3);
   }
 
   __ Branch(&call_function);
@@ -2070,7 +2064,7 @@
   __ bind(&miss);
   GenerateMiss(masm);
 
-  __ Branch(&call);
+  __ Branch(&call_count_incremented);
 }
 
 
@@ -2275,293 +2269,6 @@
 }
 
 
-void SubStringStub::Generate(MacroAssembler* masm) {
-  Label runtime;
-  // Stack frame on entry.
-  //  ra: return address
-  //  sp[0]: to
-  //  sp[4]: from
-  //  sp[8]: string
-
-  // This stub is called from the native-call %_SubString(...), so
-  // nothing can be assumed about the arguments. It is tested that:
-  //  "string" is a sequential string,
-  //  both "from" and "to" are smis, and
-  //  0 <= from <= to <= string.length.
-  // If any of these assumptions fail, we call the runtime system.
-
-  const int kToOffset = 0 * kPointerSize;
-  const int kFromOffset = 1 * kPointerSize;
-  const int kStringOffset = 2 * kPointerSize;
-
-  __ lw(a2, MemOperand(sp, kToOffset));
-  __ lw(a3, MemOperand(sp, kFromOffset));
-  STATIC_ASSERT(kFromOffset == kToOffset + 4);
-  STATIC_ASSERT(kSmiTag == 0);
-  STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1);
-
-  // Utilize delay slots. SmiUntag doesn't emit a jump, everything else is
-  // safe in this case.
-  __ UntagAndJumpIfNotSmi(a2, a2, &runtime);
-  __ UntagAndJumpIfNotSmi(a3, a3, &runtime);
-  // Both a2 and a3 are untagged integers.
-
-  __ Branch(&runtime, lt, a3, Operand(zero_reg));  // From < 0.
-
-  __ Branch(&runtime, gt, a3, Operand(a2));  // Fail if from > to.
-  __ Subu(a2, a2, a3);
-
-  // Make sure first argument is a string.
-  __ lw(v0, MemOperand(sp, kStringOffset));
-  __ JumpIfSmi(v0, &runtime);
-  __ lw(a1, FieldMemOperand(v0, HeapObject::kMapOffset));
-  __ lbu(a1, FieldMemOperand(a1, Map::kInstanceTypeOffset));
-  __ And(t0, a1, Operand(kIsNotStringMask));
-
-  __ Branch(&runtime, ne, t0, Operand(zero_reg));
-
-  Label single_char;
-  __ Branch(&single_char, eq, a2, Operand(1));
-
-  // Short-cut for the case of trivial substring.
-  Label return_v0;
-  // v0: original string
-  // a2: result string length
-  __ lw(t0, FieldMemOperand(v0, String::kLengthOffset));
-  __ sra(t0, t0, 1);
-  // Return original string.
-  __ Branch(&return_v0, eq, a2, Operand(t0));
-  // Longer than original string's length or negative: unsafe arguments.
-  __ Branch(&runtime, hi, a2, Operand(t0));
-  // Shorter than original string's length: an actual substring.
-
-  // Deal with different string types: update the index if necessary
-  // and put the underlying string into t1.
-  // v0: original string
-  // a1: instance type
-  // a2: length
-  // a3: from index (untagged)
-  Label underlying_unpacked, sliced_string, seq_or_external_string;
-  // If the string is not indirect, it can only be sequential or external.
-  STATIC_ASSERT(kIsIndirectStringMask == (kSlicedStringTag & kConsStringTag));
-  STATIC_ASSERT(kIsIndirectStringMask != 0);
-  __ And(t0, a1, Operand(kIsIndirectStringMask));
-  __ Branch(USE_DELAY_SLOT, &seq_or_external_string, eq, t0, Operand(zero_reg));
-  // t0 is used as a scratch register and can be overwritten in either case.
-  __ And(t0, a1, Operand(kSlicedNotConsMask));
-  __ Branch(&sliced_string, ne, t0, Operand(zero_reg));
-  // Cons string.  Check whether it is flat, then fetch first part.
-  __ lw(t1, FieldMemOperand(v0, ConsString::kSecondOffset));
-  __ LoadRoot(t0, Heap::kempty_stringRootIndex);
-  __ Branch(&runtime, ne, t1, Operand(t0));
-  __ lw(t1, FieldMemOperand(v0, ConsString::kFirstOffset));
-  // Update instance type.
-  __ lw(a1, FieldMemOperand(t1, HeapObject::kMapOffset));
-  __ lbu(a1, FieldMemOperand(a1, Map::kInstanceTypeOffset));
-  __ jmp(&underlying_unpacked);
-
-  __ bind(&sliced_string);
-  // Sliced string.  Fetch parent and correct start index by offset.
-  __ lw(t1, FieldMemOperand(v0, SlicedString::kParentOffset));
-  __ lw(t0, FieldMemOperand(v0, SlicedString::kOffsetOffset));
-  __ sra(t0, t0, 1);  // Add offset to index.
-  __ Addu(a3, a3, t0);
-  // Update instance type.
-  __ lw(a1, FieldMemOperand(t1, HeapObject::kMapOffset));
-  __ lbu(a1, FieldMemOperand(a1, Map::kInstanceTypeOffset));
-  __ jmp(&underlying_unpacked);
-
-  __ bind(&seq_or_external_string);
-  // Sequential or external string.  Just move string to the expected register.
-  __ mov(t1, v0);
-
-  __ bind(&underlying_unpacked);
-
-  if (FLAG_string_slices) {
-    Label copy_routine;
-    // t1: underlying subject string
-    // a1: instance type of underlying subject string
-    // a2: length
-    // a3: adjusted start index (untagged)
-    // Short slice.  Copy instead of slicing.
-    __ Branch(&copy_routine, lt, a2, Operand(SlicedString::kMinLength));
-    // Allocate new sliced string.  At this point we do not reload the instance
-    // type including the string encoding because we simply rely on the info
-    // provided by the original string.  It does not matter if the original
-    // string's encoding is wrong because we always have to recheck encoding of
-    // the newly created string's parent anyways due to externalized strings.
-    Label two_byte_slice, set_slice_header;
-    STATIC_ASSERT((kStringEncodingMask & kOneByteStringTag) != 0);
-    STATIC_ASSERT((kStringEncodingMask & kTwoByteStringTag) == 0);
-    __ And(t0, a1, Operand(kStringEncodingMask));
-    __ Branch(&two_byte_slice, eq, t0, Operand(zero_reg));
-    __ AllocateOneByteSlicedString(v0, a2, t2, t3, &runtime);
-    __ jmp(&set_slice_header);
-    __ bind(&two_byte_slice);
-    __ AllocateTwoByteSlicedString(v0, a2, t2, t3, &runtime);
-    __ bind(&set_slice_header);
-    __ sll(a3, a3, 1);
-    __ sw(t1, FieldMemOperand(v0, SlicedString::kParentOffset));
-    __ sw(a3, FieldMemOperand(v0, SlicedString::kOffsetOffset));
-    __ jmp(&return_v0);
-
-    __ bind(&copy_routine);
-  }
-
-  // t1: underlying subject string
-  // a1: instance type of underlying subject string
-  // a2: length
-  // a3: adjusted start index (untagged)
-  Label two_byte_sequential, sequential_string, allocate_result;
-  STATIC_ASSERT(kExternalStringTag != 0);
-  STATIC_ASSERT(kSeqStringTag == 0);
-  __ And(t0, a1, Operand(kExternalStringTag));
-  __ Branch(&sequential_string, eq, t0, Operand(zero_reg));
-
-  // Handle external string.
-  // Rule out short external strings.
-  STATIC_ASSERT(kShortExternalStringTag != 0);
-  __ And(t0, a1, Operand(kShortExternalStringTag));
-  __ Branch(&runtime, ne, t0, Operand(zero_reg));
-  __ lw(t1, FieldMemOperand(t1, ExternalString::kResourceDataOffset));
-  // t1 already points to the first character of underlying string.
-  __ jmp(&allocate_result);
-
-  __ bind(&sequential_string);
-  // Locate first character of underlying subject string.
-  STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqOneByteString::kHeaderSize);
-  __ Addu(t1, t1, Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
-
-  __ bind(&allocate_result);
-  // Sequential acii string.  Allocate the result.
-  STATIC_ASSERT((kOneByteStringTag & kStringEncodingMask) != 0);
-  __ And(t0, a1, Operand(kStringEncodingMask));
-  __ Branch(&two_byte_sequential, eq, t0, Operand(zero_reg));
-
-  // Allocate and copy the resulting ASCII string.
-  __ AllocateOneByteString(v0, a2, t0, t2, t3, &runtime);
-
-  // Locate first character of substring to copy.
-  __ Addu(t1, t1, a3);
-
-  // Locate first character of result.
-  __ Addu(a1, v0, Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
-
-  // v0: result string
-  // a1: first character of result string
-  // a2: result string length
-  // t1: first character of substring to copy
-  STATIC_ASSERT((SeqOneByteString::kHeaderSize & kObjectAlignmentMask) == 0);
-  StringHelper::GenerateCopyCharacters(
-      masm, a1, t1, a2, a3, String::ONE_BYTE_ENCODING);
-  __ jmp(&return_v0);
-
-  // Allocate and copy the resulting two-byte string.
-  __ bind(&two_byte_sequential);
-  __ AllocateTwoByteString(v0, a2, t0, t2, t3, &runtime);
-
-  // Locate first character of substring to copy.
-  STATIC_ASSERT(kSmiTagSize == 1 && kSmiTag == 0);
-  __ Lsa(t1, t1, a3, 1);
-  // Locate first character of result.
-  __ Addu(a1, v0, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
-
-  // v0: result string.
-  // a1: first character of result.
-  // a2: result length.
-  // t1: first character of substring to copy.
-  STATIC_ASSERT((SeqTwoByteString::kHeaderSize & kObjectAlignmentMask) == 0);
-  StringHelper::GenerateCopyCharacters(
-      masm, a1, t1, a2, a3, String::TWO_BYTE_ENCODING);
-
-  __ bind(&return_v0);
-  Counters* counters = isolate()->counters();
-  __ IncrementCounter(counters->sub_string_native(), 1, a3, t0);
-  __ DropAndRet(3);
-
-  // Just jump to runtime to create the sub string.
-  __ bind(&runtime);
-  __ TailCallRuntime(Runtime::kSubString);
-
-  __ bind(&single_char);
-  // v0: original string
-  // a1: instance type
-  // a2: length
-  // a3: from index (untagged)
-  __ SmiTag(a3, a3);
-  StringCharAtGenerator generator(v0, a3, a2, v0, &runtime, &runtime, &runtime,
-                                  RECEIVER_IS_STRING);
-  generator.GenerateFast(masm);
-  __ DropAndRet(3);
-  generator.SkipSlow(masm, &runtime);
-}
-
-
-void ToStringStub::Generate(MacroAssembler* masm) {
-  // The ToString stub takes on argument in a0.
-  Label is_number;
-  __ JumpIfSmi(a0, &is_number);
-
-  Label not_string;
-  __ GetObjectType(a0, a1, a1);
-  // a0: receiver
-  // a1: receiver instance type
-  __ Branch(&not_string, ge, a1, Operand(FIRST_NONSTRING_TYPE));
-  __ Ret(USE_DELAY_SLOT);
-  __ mov(v0, a0);
-  __ bind(&not_string);
-
-  Label not_heap_number;
-  __ Branch(&not_heap_number, ne, a1, Operand(HEAP_NUMBER_TYPE));
-  __ bind(&is_number);
-  NumberToStringStub stub(isolate());
-  __ TailCallStub(&stub);
-  __ bind(&not_heap_number);
-
-  Label not_oddball;
-  __ Branch(&not_oddball, ne, a1, Operand(ODDBALL_TYPE));
-  __ Ret(USE_DELAY_SLOT);
-  __ lw(v0, FieldMemOperand(a0, Oddball::kToStringOffset));
-  __ bind(&not_oddball);
-
-  __ push(a0);  // Push argument.
-  __ TailCallRuntime(Runtime::kToString);
-}
-
-
-void ToNameStub::Generate(MacroAssembler* masm) {
-  // The ToName stub takes on argument in a0.
-  Label is_number;
-  __ JumpIfSmi(a0, &is_number);
-
-  Label not_name;
-  STATIC_ASSERT(FIRST_NAME_TYPE == FIRST_TYPE);
-  __ GetObjectType(a0, a1, a1);
-  // a0: receiver
-  // a1: receiver instance type
-  __ Branch(&not_name, gt, a1, Operand(LAST_NAME_TYPE));
-  __ Ret(USE_DELAY_SLOT);
-  __ mov(v0, a0);
-  __ bind(&not_name);
-
-  Label not_heap_number;
-  __ Branch(&not_heap_number, ne, a1, Operand(HEAP_NUMBER_TYPE));
-  __ bind(&is_number);
-  NumberToStringStub stub(isolate());
-  __ TailCallStub(&stub);
-  __ bind(&not_heap_number);
-
-  Label not_oddball;
-  __ Branch(&not_oddball, ne, a1, Operand(ODDBALL_TYPE));
-  __ Ret(USE_DELAY_SLOT);
-  __ lw(v0, FieldMemOperand(a0, Oddball::kToStringOffset));
-  __ bind(&not_oddball);
-
-  __ push(a0);  // Push argument.
-  __ TailCallRuntime(Runtime::kToName);
-}
-
-
 void StringHelper::GenerateFlatOneByteStringEquals(
     MacroAssembler* masm, Register left, Register right, Register scratch1,
     Register scratch2, Register scratch3) {
@@ -3915,7 +3622,7 @@
   __ lw(receiver_map, MemOperand(pointer_reg, kPointerSize * 2));
 
   // Load the map into the correct register.
-  DCHECK(feedback.is(VectorStoreTransitionDescriptor::MapRegister()));
+  DCHECK(feedback.is(StoreTransitionDescriptor::MapRegister()));
   __ mov(feedback, too_far);
 
   __ Addu(t9, receiver_map, Operand(Code::kHeaderSize - kHeapObjectTag));
@@ -4624,7 +4331,7 @@
     Label too_big_for_new_space;
     __ bind(&allocate);
     __ Branch(&too_big_for_new_space, gt, t0,
-              Operand(Page::kMaxRegularHeapObjectSize));
+              Operand(kMaxRegularHeapObjectSize));
     {
       FrameScope scope(masm, StackFrame::INTERNAL);
       __ SmiTag(t0);
@@ -4968,8 +4675,7 @@
   // Fall back to %AllocateInNewSpace (if not too big).
   Label too_big_for_new_space;
   __ bind(&allocate);
-  __ Branch(&too_big_for_new_space, gt, t0,
-            Operand(Page::kMaxRegularHeapObjectSize));
+  __ Branch(&too_big_for_new_space, gt, t0, Operand(kMaxRegularHeapObjectSize));
   {
     FrameScope scope(masm, StackFrame::INTERNAL);
     __ SmiTag(t0);
diff --git a/src/mips/constants-mips.cc b/src/mips/constants-mips.cc
index 3afb881..ad97e41 100644
--- a/src/mips/constants-mips.cc
+++ b/src/mips/constants-mips.cc
@@ -123,116 +123,6 @@
 }
 
 
-// -----------------------------------------------------------------------------
-// Instructions.
-
-bool Instruction::IsForbiddenAfterBranchInstr(Instr instr) {
-  Opcode opcode = static_cast<Opcode>(instr & kOpcodeMask);
-  switch (opcode) {
-    case J:
-    case JAL:
-    case BEQ:
-    case BNE:
-    case BLEZ:  // POP06 bgeuc/bleuc, blezalc, bgezalc
-    case BGTZ:  // POP07 bltuc/bgtuc, bgtzalc, bltzalc
-    case BEQL:
-    case BNEL:
-    case BLEZL:  // POP26 bgezc, blezc, bgec/blec
-    case BGTZL:  // POP27 bgtzc, bltzc, bltc/bgtc
-    case BC:
-    case BALC:
-    case POP10:  // beqzalc, bovc, beqc
-    case POP30:  // bnezalc, bnvc, bnec
-    case POP66:  // beqzc, jic
-    case POP76:  // bnezc, jialc
-      return true;
-    case REGIMM:
-      switch (instr & kRtFieldMask) {
-        case BLTZ:
-        case BGEZ:
-        case BLTZAL:
-        case BGEZAL:
-          return true;
-        default:
-          return false;
-      }
-      break;
-    case SPECIAL:
-      switch (instr & kFunctionFieldMask) {
-        case JR:
-        case JALR:
-          return true;
-        default:
-          return false;
-      }
-      break;
-    case COP1:
-      switch (instr & kRsFieldMask) {
-        case BC1:
-        case BC1EQZ:
-        case BC1NEZ:
-          return true;
-          break;
-        default:
-          return false;
-      }
-      break;
-    default:
-      return false;
-  }
-}
-
-
-bool Instruction::IsLinkingInstruction() const {
-  switch (OpcodeFieldRaw()) {
-    case JAL:
-      return true;
-    case POP76:
-      if (RsFieldRawNoAssert() == JIALC)
-        return true;  // JIALC
-      else
-        return false;  // BNEZC
-    case REGIMM:
-      switch (RtFieldRaw()) {
-        case BGEZAL:
-        case BLTZAL:
-          return true;
-      default:
-        return false;
-      }
-    case SPECIAL:
-      switch (FunctionFieldRaw()) {
-        case JALR:
-          return true;
-        default:
-          return false;
-      }
-    default:
-      return false;
-  }
-}
-
-
-bool Instruction::IsTrap() const {
-  if (OpcodeFieldRaw() != SPECIAL) {
-    return false;
-  } else {
-    switch (FunctionFieldRaw()) {
-      case BREAK:
-      case TGE:
-      case TGEU:
-      case TLT:
-      case TLTU:
-      case TEQ:
-      case TNE:
-        return true;
-      default:
-        return false;
-    }
-  }
-}
-
-
 }  // namespace internal
 }  // namespace v8
 
diff --git a/src/mips/constants-mips.h b/src/mips/constants-mips.h
index 8301c5e..200939d 100644
--- a/src/mips/constants-mips.h
+++ b/src/mips/constants-mips.h
@@ -525,6 +525,8 @@
   FLOOR_W_S = ((1U << 3) + 7),
   RECIP_S = ((2U << 3) + 5),
   RSQRT_S = ((2U << 3) + 6),
+  MADDF_S = ((3U << 3) + 0),
+  MSUBF_S = ((3U << 3) + 1),
   CLASS_S = ((3U << 3) + 3),
   CVT_D_S = ((4U << 3) + 1),
   CVT_W_S = ((4U << 3) + 4),
@@ -550,6 +552,8 @@
   FLOOR_W_D = ((1U << 3) + 7),
   RECIP_D = ((2U << 3) + 5),
   RSQRT_D = ((2U << 3) + 6),
+  MADDF_D = ((3U << 3) + 0),
+  MSUBF_D = ((3U << 3) + 1),
   CLASS_D = ((3U << 3) + 3),
   MIN = ((3U << 3) + 4),
   MINA = ((3U << 3) + 5),
@@ -616,8 +620,12 @@
   MOVF = ((2U << 3) + 1),      // Function field for MOVT.fmt and MOVF.fmt
   SELNEZ_C = ((2U << 3) + 7),  // COP1 on FPR registers.
   // COP1 Encoding of Function Field When rs=PS.
+
   // COP1X Encoding of Function Field.
+  MADD_S = ((4U << 3) + 0),
   MADD_D = ((4U << 3) + 1),
+  MSUB_S = ((5U << 3) + 0),
+  MSUB_D = ((5U << 3) + 1),
 
   // PCREL Encoding of rt Field.
   ADDIUPC = ((0U << 2) + 0),
@@ -858,8 +866,7 @@
   return 1ULL << (static_cast<uint32_t>(opcode) >> kOpcodeShift);
 }
 
-
-class Instruction {
+class InstructionBase {
  public:
   enum {
     kInstrSize = 4,
@@ -869,6 +876,9 @@
     kPCReadOffset = 0
   };
 
+  // Instruction type.
+  enum Type { kRegisterType, kImmediateType, kJumpType, kUnsupported = -1 };
+
   // Get the raw instruction bits.
   inline Instr InstructionBits() const {
     return *reinterpret_cast<const Instr*>(this);
@@ -889,16 +899,6 @@
     return (InstructionBits() >> lo) & ((2U << (hi - lo)) - 1);
   }
 
-  // Instruction type.
-  enum Type {
-    kRegisterType,
-    kImmediateType,
-    kJumpType,
-    kUnsupported = -1
-  };
-
-  enum TypeChecks { NORMAL, EXTRA };
-
 
   static constexpr uint64_t kOpcodeImmediateTypeMask =
       OpcodeToBitNumber(REGIMM) | OpcodeToBitNumber(BEQ) |
@@ -943,82 +943,14 @@
       FunctionFieldToBitNumber(MOVCI) | FunctionFieldToBitNumber(SELEQZ_S) |
       FunctionFieldToBitNumber(SELNEZ_S) | FunctionFieldToBitNumber(SYNC);
 
-  // Get the encoding type of the instruction.
-  inline Type InstructionType(TypeChecks checks = NORMAL) const;
-
   // Accessors for the different named fields used in the MIPS encoding.
   inline Opcode OpcodeValue() const {
     return static_cast<Opcode>(
         Bits(kOpcodeShift + kOpcodeBits - 1, kOpcodeShift));
   }
 
-  inline int RsValue() const {
-    DCHECK(InstructionType() == kRegisterType ||
-           InstructionType() == kImmediateType);
-    return Bits(kRsShift + kRsBits - 1, kRsShift);
-  }
-
-  inline int RtValue() const {
-    DCHECK(InstructionType() == kRegisterType ||
-           InstructionType() == kImmediateType);
-    return Bits(kRtShift + kRtBits - 1, kRtShift);
-  }
-
-  inline int RdValue() const {
-    DCHECK(InstructionType() == kRegisterType);
-    return Bits(kRdShift + kRdBits - 1, kRdShift);
-  }
-
-  inline int SaValue() const {
-    DCHECK(InstructionType() == kRegisterType);
-    return Bits(kSaShift + kSaBits - 1, kSaShift);
-  }
-
-  inline int LsaSaValue() const {
-    DCHECK(InstructionType() == kRegisterType);
-    return Bits(kSaShift + kLsaSaBits - 1, kSaShift);
-  }
-
-  inline int FunctionValue() const {
-    DCHECK(InstructionType() == kRegisterType ||
-           InstructionType() == kImmediateType);
-    return Bits(kFunctionShift + kFunctionBits - 1, kFunctionShift);
-  }
-
-  inline int FdValue() const {
-    return Bits(kFdShift + kFdBits - 1, kFdShift);
-  }
-
-  inline int FsValue() const {
-    return Bits(kFsShift + kFsBits - 1, kFsShift);
-  }
-
-  inline int FtValue() const {
-    return Bits(kFtShift + kFtBits - 1, kFtShift);
-  }
-
-  inline int FrValue() const {
-    return Bits(kFrShift + kFrBits -1, kFrShift);
-  }
-
-  inline int Bp2Value() const {
-    DCHECK(InstructionType() == kRegisterType);
-    return Bits(kBp2Shift + kBp2Bits - 1, kBp2Shift);
-  }
-
-  // Float Compare condition code instruction bits.
-  inline int FCccValue() const {
-    return Bits(kFCccShift + kFCccBits - 1, kFCccShift);
-  }
-
-  // Float Branch condition code instruction bits.
-  inline int FBccValue() const {
-    return Bits(kFBccShift + kFBccBits - 1, kFBccShift);
-  }
-
-  // Float Branch true/false instruction bit.
-  inline int FBtrueValue() const {
-    return Bits(kFBtrueShift + kFBtrueBits - 1, kFBtrueShift);
+  inline int FunctionFieldRaw() const {
+    return InstructionBits() & kFunctionFieldMask;
   }
 
   // Return the fields at their original place in the instruction encoding.
@@ -1026,39 +958,125 @@
     return static_cast<Opcode>(InstructionBits() & kOpcodeMask);
   }
 
-  inline int RsFieldRaw() const {
-    DCHECK(InstructionType() == kRegisterType ||
-           InstructionType() == kImmediateType);
-    return InstructionBits() & kRsFieldMask;
-  }
-
-  // Same as above function, but safe to call within InstructionType().
+  // Safe to call within InstructionType().
   inline int RsFieldRawNoAssert() const {
     return InstructionBits() & kRsFieldMask;
   }
 
+  inline int SaFieldRaw() const { return InstructionBits() & kSaFieldMask; }
+
+  // Get the encoding type of the instruction.
+  inline Type InstructionType() const;
+
+ protected:
+  InstructionBase() {}
+};
+
+template <class T>
+class InstructionGetters : public T {
+ public:
+  inline int RsValue() const {
+    DCHECK(this->InstructionType() == InstructionBase::kRegisterType ||
+           this->InstructionType() == InstructionBase::kImmediateType);
+    return InstructionBase::Bits(kRsShift + kRsBits - 1, kRsShift);
+  }
+
+  inline int RtValue() const {
+    DCHECK(this->InstructionType() == InstructionBase::kRegisterType ||
+           this->InstructionType() == InstructionBase::kImmediateType);
+    return this->Bits(kRtShift + kRtBits - 1, kRtShift);
+  }
+
+  inline int RdValue() const {
+    DCHECK(this->InstructionType() == InstructionBase::kRegisterType);
+    return this->Bits(kRdShift + kRdBits - 1, kRdShift);
+  }
+
+  inline int SaValue() const {
+    DCHECK(this->InstructionType() == InstructionBase::kRegisterType);
+    return this->Bits(kSaShift + kSaBits - 1, kSaShift);
+  }
+
+  inline int LsaSaValue() const {
+    DCHECK(this->InstructionType() == InstructionBase::kRegisterType);
+    return this->Bits(kSaShift + kLsaSaBits - 1, kSaShift);
+  }
+
+  inline int FunctionValue() const {
+    DCHECK(this->InstructionType() == InstructionBase::kRegisterType ||
+           this->InstructionType() == InstructionBase::kImmediateType);
+    return this->Bits(kFunctionShift + kFunctionBits - 1, kFunctionShift);
+  }
+
+  inline int FdValue() const {
+    return this->Bits(kFdShift + kFdBits - 1, kFdShift);
+  }
+
+  inline int FsValue() const {
+    return this->Bits(kFsShift + kFsBits - 1, kFsShift);
+  }
+
+  inline int FtValue() const {
+    return this->Bits(kFtShift + kFtBits - 1, kFtShift);
+  }
+
+  inline int FrValue() const {
+    return this->Bits(kFrShift + kFrBits - 1, kFrShift);
+  }
+
+  inline int Bp2Value() const {
+    DCHECK(this->InstructionType() == InstructionBase::kRegisterType);
+    return this->Bits(kBp2Shift + kBp2Bits - 1, kBp2Shift);
+  }
+
+  // Float Compare condition code instruction bits.
+  inline int FCccValue() const {
+    return this->Bits(kFCccShift + kFCccBits - 1, kFCccShift);
+  }
+
+  // Float Branch condition code instruction bits.
+  inline int FBccValue() const {
+    return this->Bits(kFBccShift + kFBccBits - 1, kFBccShift);
+  }
+
+  // Float Branch true/false instruction bit.
+  inline int FBtrueValue() const {
+    return this->Bits(kFBtrueShift + kFBtrueBits - 1, kFBtrueShift);
+  }
+
+  // Return the fields at their original place in the instruction encoding.
+  inline Opcode OpcodeFieldRaw() const {
+    return static_cast<Opcode>(this->InstructionBits() & kOpcodeMask);
+  }
+
+  inline int RsFieldRaw() const {
+    DCHECK(this->InstructionType() == InstructionBase::kRegisterType ||
+           this->InstructionType() == InstructionBase::kImmediateType);
+    return this->InstructionBits() & kRsFieldMask;
+  }
+
   inline int RtFieldRaw() const {
-    DCHECK(InstructionType() == kRegisterType ||
-           InstructionType() == kImmediateType);
-    return InstructionBits() & kRtFieldMask;
+    DCHECK(this->InstructionType() == InstructionBase::kRegisterType ||
+           this->InstructionType() == InstructionBase::kImmediateType);
+    return this->InstructionBits() & kRtFieldMask;
   }
 
   inline int RdFieldRaw() const {
-    DCHECK(InstructionType() == kRegisterType);
-    return InstructionBits() & kRdFieldMask;
+    DCHECK(this->InstructionType() == InstructionBase::kRegisterType);
+    return this->InstructionBits() & kRdFieldMask;
   }
 
   inline int SaFieldRaw() const {
-    return InstructionBits() & kSaFieldMask;
+    return this->InstructionBits() & kSaFieldMask;
   }
 
   inline int FunctionFieldRaw() const {
-    return InstructionBits() & kFunctionFieldMask;
+    return this->InstructionBits() & kFunctionFieldMask;
   }
 
   // Get the secondary field according to the opcode.
   inline int SecondaryValue() const {
-    Opcode op = OpcodeFieldRaw();
+    Opcode op = this->OpcodeFieldRaw();
     switch (op) {
       case SPECIAL:
       case SPECIAL2:
@@ -1073,34 +1091,34 @@
   }
 
   inline int32_t ImmValue(int bits) const {
-    DCHECK(InstructionType() == kImmediateType);
-    return Bits(bits - 1, 0);
+    DCHECK(this->InstructionType() == InstructionBase::kImmediateType);
+    return this->Bits(bits - 1, 0);
   }
 
   inline int32_t Imm16Value() const {
-    DCHECK(InstructionType() == kImmediateType);
-    return Bits(kImm16Shift + kImm16Bits - 1, kImm16Shift);
+    DCHECK(this->InstructionType() == InstructionBase::kImmediateType);
+    return this->Bits(kImm16Shift + kImm16Bits - 1, kImm16Shift);
   }
 
   inline int32_t Imm18Value() const {
-    DCHECK(InstructionType() == kImmediateType);
-    return Bits(kImm18Shift + kImm18Bits - 1, kImm18Shift);
+    DCHECK(this->InstructionType() == InstructionBase::kImmediateType);
+    return this->Bits(kImm18Shift + kImm18Bits - 1, kImm18Shift);
   }
 
   inline int32_t Imm19Value() const {
-    DCHECK(InstructionType() == kImmediateType);
-    return Bits(kImm19Shift + kImm19Bits - 1, kImm19Shift);
+    DCHECK(this->InstructionType() == InstructionBase::kImmediateType);
+    return this->Bits(kImm19Shift + kImm19Bits - 1, kImm19Shift);
   }
 
   inline int32_t Imm21Value() const {
-    DCHECK(InstructionType() == kImmediateType);
-    return Bits(kImm21Shift + kImm21Bits - 1, kImm21Shift);
+    DCHECK(this->InstructionType() == InstructionBase::kImmediateType);
+    return this->Bits(kImm21Shift + kImm21Bits - 1, kImm21Shift);
   }
 
   inline int32_t Imm26Value() const {
-    DCHECK((InstructionType() == kJumpType) ||
-           (InstructionType() == kImmediateType));
-    return Bits(kImm26Shift + kImm26Bits - 1, kImm26Shift);
+    DCHECK((this->InstructionType() == InstructionBase::kJumpType) ||
+           (this->InstructionType() == InstructionBase::kImmediateType));
+    return this->Bits(kImm26Shift + kImm26Bits - 1, kImm26Shift);
   }
 
   static bool IsForbiddenAfterBranchInstr(Instr instr);
@@ -1108,7 +1126,7 @@
   // Say if the instruction should not be used in a branch delay slot or
   // immediately after a compact branch.
   inline bool IsForbiddenAfterBranch() const {
-    return IsForbiddenAfterBranchInstr(InstructionBits());
+    return IsForbiddenAfterBranchInstr(this->InstructionBits());
   }
 
   inline bool IsForbiddenInBranchDelay() const {
@@ -1119,7 +1137,10 @@
   bool IsLinkingInstruction() const;
   // Say if the instruction is a break or a trap.
   bool IsTrap() const;
+};
 
+class Instruction : public InstructionGetters<InstructionBase> {
+ public:
   // Instructions are read of out a code stream. The only way to get a
   // reference to an instruction is to convert a pointer. There is no way
   // to allocate or create instances of class Instruction.
@@ -1148,26 +1169,14 @@
 
 const int kBranchReturnOffset = 2 * Instruction::kInstrSize;
 
-
-Instruction::Type Instruction::InstructionType(TypeChecks checks) const {
-  if (checks == EXTRA) {
-    if (OpcodeToBitNumber(OpcodeFieldRaw()) & kOpcodeImmediateTypeMask) {
-      return kImmediateType;
-    }
-  }
+InstructionBase::Type InstructionBase::InstructionType() const {
   switch (OpcodeFieldRaw()) {
     case SPECIAL:
-      if (checks == EXTRA) {
-        if (FunctionFieldToBitNumber(FunctionFieldRaw()) &
-            kFunctionFieldRegisterTypeMask) {
-          return kRegisterType;
-        } else {
-          return kUnsupported;
-        }
-      } else {
+      if (FunctionFieldToBitNumber(FunctionFieldRaw()) &
+          kFunctionFieldRegisterTypeMask) {
         return kRegisterType;
       }
-      break;
+      return kUnsupported;
     case SPECIAL2:
       switch (FunctionFieldRaw()) {
         case MUL:
@@ -1222,16 +1231,124 @@
       return kJumpType;
 
     default:
-      if (checks == NORMAL) {
         return kImmediateType;
-      } else {
-        return kUnsupported;
-      }
   }
 }
 
 #undef OpcodeToBitNumber
 #undef FunctionFieldToBitNumber
+
+// -----------------------------------------------------------------------------
+// Instructions.
+
+template <class P>
+bool InstructionGetters<P>::IsLinkingInstruction() const {
+  uint32_t op = this->OpcodeFieldRaw();
+  switch (op) {
+    case JAL:
+      return true;
+    case POP76:
+      if (this->RsFieldRawNoAssert() == JIALC)
+        return true;  // JIALC
+      else
+        return false;  // BNEZC
+    case REGIMM:
+      switch (this->RtFieldRaw()) {
+        case BGEZAL:
+        case BLTZAL:
+          return true;
+        default:
+          return false;
+      }
+    case SPECIAL:
+      switch (this->FunctionFieldRaw()) {
+        case JALR:
+          return true;
+        default:
+          return false;
+      }
+    default:
+      return false;
+  }
+}
+
+template <class P>
+bool InstructionGetters<P>::IsTrap() const {
+  if (this->OpcodeFieldRaw() != SPECIAL) {
+    return false;
+  } else {
+    switch (this->FunctionFieldRaw()) {
+      case BREAK:
+      case TGE:
+      case TGEU:
+      case TLT:
+      case TLTU:
+      case TEQ:
+      case TNE:
+        return true;
+      default:
+        return false;
+    }
+  }
+}
+
+// static
+template <class T>
+bool InstructionGetters<T>::IsForbiddenAfterBranchInstr(Instr instr) {
+  Opcode opcode = static_cast<Opcode>(instr & kOpcodeMask);
+  switch (opcode) {
+    case J:
+    case JAL:
+    case BEQ:
+    case BNE:
+    case BLEZ:  // POP06 bgeuc/bleuc, blezalc, bgezalc
+    case BGTZ:  // POP07 bltuc/bgtuc, bgtzalc, bltzalc
+    case BEQL:
+    case BNEL:
+    case BLEZL:  // POP26 bgezc, blezc, bgec/blec
+    case BGTZL:  // POP27 bgtzc, bltzc, bltc/bgtc
+    case BC:
+    case BALC:
+    case POP10:  // beqzalc, bovc, beqc
+    case POP30:  // bnezalc, bnvc, bnec
+    case POP66:  // beqzc, jic
+    case POP76:  // bnezc, jialc
+      return true;
+    case REGIMM:
+      switch (instr & kRtFieldMask) {
+        case BLTZ:
+        case BGEZ:
+        case BLTZAL:
+        case BGEZAL:
+          return true;
+        default:
+          return false;
+      }
+      break;
+    case SPECIAL:
+      switch (instr & kFunctionFieldMask) {
+        case JR:
+        case JALR:
+          return true;
+        default:
+          return false;
+      }
+      break;
+    case COP1:
+      switch (instr & kRsFieldMask) {
+        case BC1:
+        case BC1EQZ:
+        case BC1NEZ:
+          return true;
+          break;
+        default:
+          return false;
+      }
+      break;
+    default:
+      return false;
+  }
+}
 }  // namespace internal
 }  // namespace v8
 
diff --git a/src/mips/disasm-mips.cc b/src/mips/disasm-mips.cc
index bd07874..f541e91 100644
--- a/src/mips/disasm-mips.cc
+++ b/src/mips/disasm-mips.cc
@@ -918,6 +918,12 @@
       case CVT_D_S:
         Format(instr, "cvt.d.'t 'fd, 'fs");
         break;
+      case MADDF_S:
+        Format(instr, "maddf.s  'fd, 'fs, 'ft");
+        break;
+      case MSUBF_S:
+        Format(instr, "msubf.s  'fd, 'fs, 'ft");
+        break;
       default:
         Format(instr, "unknown.cop1.'t");
         break;
@@ -928,7 +934,17 @@
 
 void Decoder::DecodeTypeRegisterDRsType(Instruction* instr) {
   if (!DecodeTypeRegisterRsType(instr)) {
-    Format(instr, "unknown.cop1.'t");
+    switch (instr->FunctionFieldRaw()) {
+      case MADDF_D:
+        Format(instr, "maddf.d  'fd, 'fs, 'ft");
+        break;
+      case MSUBF_D:
+        Format(instr, "msubf.d  'fd, 'fs, 'ft");
+        break;
+      default:
+        Format(instr, "unknown.cop1.'t");
+        break;
+    }
   }
 }
 
@@ -1360,9 +1376,18 @@
       break;
     case COP1X:
       switch (instr->FunctionFieldRaw()) {
+        case MADD_S:
+          Format(instr, "madd.s  'fd, 'fr, 'fs, 'ft");
+          break;
         case MADD_D:
           Format(instr, "madd.d  'fd, 'fr, 'fs, 'ft");
           break;
+        case MSUB_S:
+          Format(instr, "msub.s  'fd, 'fr, 'fs, 'ft");
+          break;
+        case MSUB_D:
+          Format(instr, "msub.d  'fd, 'fr, 'fs, 'ft");
+          break;
         default:
           UNREACHABLE();
       }
@@ -1687,7 +1712,7 @@
   out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
                                    "%08x       ",
                                    instr->InstructionBits());
-  switch (instr->InstructionType(Instruction::EXTRA)) {
+  switch (instr->InstructionType()) {
     case Instruction::kRegisterType: {
       DecodeTypeRegister(instr);
       break;
diff --git a/src/mips/interface-descriptors-mips.cc b/src/mips/interface-descriptors-mips.cc
index bafe0b6..aed4142 100644
--- a/src/mips/interface-descriptors-mips.cc
+++ b/src/mips/interface-descriptors-mips.cc
@@ -40,13 +40,9 @@
 
 const Register StoreWithVectorDescriptor::VectorRegister() { return a3; }
 
-const Register VectorStoreTransitionDescriptor::SlotRegister() { return t0; }
-const Register VectorStoreTransitionDescriptor::VectorRegister() { return a3; }
-const Register VectorStoreTransitionDescriptor::MapRegister() { return t1; }
-
-
-const Register StoreTransitionDescriptor::MapRegister() { return a3; }
-
+const Register StoreTransitionDescriptor::SlotRegister() { return t0; }
+const Register StoreTransitionDescriptor::VectorRegister() { return a3; }
+const Register StoreTransitionDescriptor::MapRegister() { return t1; }
 
 const Register StoreGlobalViaContextDescriptor::SlotRegister() { return a2; }
 const Register StoreGlobalViaContextDescriptor::ValueRegister() { return a0; }
@@ -357,7 +353,7 @@
   data->InitializePlatformSpecific(arraysize(registers), registers);
 }
 
-void ApiCallbackDescriptorBase::InitializePlatformSpecific(
+void ApiCallbackDescriptor::InitializePlatformSpecific(
     CallInterfaceDescriptorData* data) {
   Register registers[] = {
       a0,  // callee
@@ -392,7 +388,19 @@
       a0,  // argument count (not including receiver)
       a3,  // new target
       a1,  // constructor to call
-      a2   // address of the first argument
+      a2,  // allocation site feedback if available, undefined otherwise.
+      t4   // address of the first argument
+  };
+  data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
+void InterpreterPushArgsAndConstructArrayDescriptor::InitializePlatformSpecific(
+    CallInterfaceDescriptorData* data) {
+  Register registers[] = {
+      a0,  // argument count (not including receiver)
+      a1,  // the target to call verified to be Array function
+      a2,  // allocation site feedback
+      a3,  // address of first argument
   };
   data->InitializePlatformSpecific(arraysize(registers), registers);
 }
diff --git a/src/mips/macro-assembler-mips.cc b/src/mips/macro-assembler-mips.cc
index dba1fae..d61717d 100644
--- a/src/mips/macro-assembler-mips.cc
+++ b/src/mips/macro-assembler-mips.cc
@@ -184,9 +184,7 @@
                                 Condition cc,
                                 Label* branch) {
   DCHECK(cc == eq || cc == ne);
-  const int mask =
-      1 << MemoryChunk::IN_FROM_SPACE | 1 << MemoryChunk::IN_TO_SPACE;
-  CheckPageFlag(object, scratch, mask, cc, branch);
+  CheckPageFlag(object, scratch, MemoryChunk::kIsInNewSpaceMask, cc, branch);
 }
 
 
@@ -1126,8 +1124,13 @@
   if (rt.is_reg()) {
     sltu(rd, rs, rt.rm());
   } else {
-    if (is_uint16(rt.imm32_) && !MustUseReg(rt.rmode_)) {
+    const uint32_t int16_min = std::numeric_limits<int16_t>::min();
+    if (is_uint15(rt.imm32_) && !MustUseReg(rt.rmode_)) {
+      // Imm range is: [0, 32767].
       sltiu(rd, rs, rt.imm32_);
+    } else if (is_uint15(rt.imm32_ - int16_min) && !MustUseReg(rt.rmode_)) {
+      // Imm range is: [max_unsigned-32767,max_unsigned].
+      sltiu(rd, rs, static_cast<uint16_t>(rt.imm32_));
     } else {
       // li handles the relocation.
       DCHECK(!rs.is(at));
@@ -1915,9 +1918,12 @@
 }
 
 void MacroAssembler::Neg_s(FPURegister fd, FPURegister fs) {
-  Register scratch1 = t8;
-  Register scratch2 = t9;
-  if (IsMipsArchVariant(kMips32r2)) {
+  if (IsMipsArchVariant(kMips32r6)) {
+    // r6 neg_s changes the sign for NaN-like operands as well.
+    neg_s(fd, fs);
+  } else {
+    DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r1) ||
+           IsMipsArchVariant(kLoongson));
     Label is_nan, done;
     Register scratch1 = t8;
     Register scratch2 = t9;
@@ -1926,7 +1932,6 @@
     // For NaN input, neg_s will return the same NaN value,
     // while the sign has to be changed separately.
     neg_s(fd, fs);  // In delay slot.
-
     bind(&is_nan);
     mfc1(scratch1, fs);
     And(scratch2, scratch1, Operand(~kBinary32SignMask));
@@ -1935,27 +1940,24 @@
     Or(scratch2, scratch2, scratch1);
     mtc1(scratch2, fd);
     bind(&done);
-  } else {
-    mfc1(scratch1, fs);
-    And(scratch2, scratch1, Operand(~kBinary32SignMask));
-    And(scratch1, scratch1, Operand(kBinary32SignMask));
-    Xor(scratch1, scratch1, Operand(kBinary32SignMask));
-    Or(scratch2, scratch2, scratch1);
-    mtc1(scratch2, fd);
   }
 }
 
 void MacroAssembler::Neg_d(FPURegister fd, FPURegister fs) {
-  Register scratch1 = t8;
-  Register scratch2 = t9;
-  if (IsMipsArchVariant(kMips32r2)) {
+  if (IsMipsArchVariant(kMips32r6)) {
+    // r6 neg_d changes the sign for NaN-like operands as well.
+    neg_d(fd, fs);
+  } else {
+    DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r1) ||
+           IsMipsArchVariant(kLoongson));
     Label is_nan, done;
+    Register scratch1 = t8;
+    Register scratch2 = t9;
     BranchF64(nullptr, &is_nan, eq, fs, fs);
     Branch(USE_DELAY_SLOT, &done);
     // For NaN input, neg_d will return the same NaN value,
     // while the sign has to be changed separately.
     neg_d(fd, fs);  // In delay slot.
-
     bind(&is_nan);
     Mfhc1(scratch1, fs);
     And(scratch2, scratch1, Operand(~HeapNumber::kSignMask));
@@ -1964,14 +1966,6 @@
     Or(scratch2, scratch2, scratch1);
     Mthc1(scratch2, fd);
     bind(&done);
-  } else {
-    Move_d(fd, fs);
-    Mfhc1(scratch1, fs);
-    And(scratch2, scratch1, Operand(~HeapNumber::kSignMask));
-    And(scratch1, scratch1, Operand(HeapNumber::kSignMask));
-    Xor(scratch1, scratch1, Operand(HeapNumber::kSignMask));
-    Or(scratch2, scratch2, scratch1);
-    Mthc1(scratch2, fd);
   }
 }
 
@@ -2170,7 +2164,7 @@
     // Check for unordered (NaN) cases.
     if (nan) {
       bool long_branch =
-          nan->is_bound() ? is_near(nan) : is_trampoline_emitted();
+          nan->is_bound() ? !is_near(nan) : is_trampoline_emitted();
       if (!IsMipsArchVariant(kMips32r6)) {
         if (long_branch) {
           Label skip;
@@ -2209,7 +2203,7 @@
 
     if (target) {
       bool long_branch =
-          target->is_bound() ? is_near(target) : is_trampoline_emitted();
+          target->is_bound() ? !is_near(target) : is_trampoline_emitted();
       if (long_branch) {
         Label skip;
         Condition neg_cond = NegateFpuCondition(cond);
@@ -4220,7 +4214,7 @@
                               Register scratch2,
                               Label* gc_required,
                               AllocationFlags flags) {
-  DCHECK(object_size <= Page::kMaxRegularHeapObjectSize);
+  DCHECK(object_size <= kMaxRegularHeapObjectSize);
   DCHECK((flags & ALLOCATION_FOLDED) == 0);
   if (!FLAG_inline_new) {
     if (emit_debug_code()) {
@@ -4402,7 +4396,7 @@
 void MacroAssembler::FastAllocate(int object_size, Register result,
                                   Register scratch1, Register scratch2,
                                   AllocationFlags flags) {
-  DCHECK(object_size <= Page::kMaxRegularHeapObjectSize);
+  DCHECK(object_size <= kMaxRegularHeapObjectSize);
   DCHECK(!AreAliased(result, scratch1, scratch2, t9, at));
 
   // Make object size into bytes.
diff --git a/src/mips/macro-assembler-mips.h b/src/mips/macro-assembler-mips.h
index aa5b0f9..4024e52 100644
--- a/src/mips/macro-assembler-mips.h
+++ b/src/mips/macro-assembler-mips.h
@@ -215,6 +215,18 @@
                            Func GetLabelFunction);
 #undef COND_ARGS
 
+  // Emit code that loads |parameter_index|'th parameter from the stack to
+  // the register according to the CallInterfaceDescriptor definition.
+  // |sp_to_caller_sp_offset_in_words| specifies the number of words pushed
+  // below the caller's sp.
+  template <class Descriptor>
+  void LoadParameterFromStack(
+      Register reg, typename Descriptor::ParameterIndices parameter_index,
+      int sp_to_ra_offset_in_words = 0) {
+    DCHECK(Descriptor::kPassLastArgsOnStack);
+    UNIMPLEMENTED();
+  }
+
   // Emit code to discard a non-negative number of pointer-sized elements
   // from the stack, clobbering only the sp register.
   void Drop(int count,
diff --git a/src/mips/simulator-mips.cc b/src/mips/simulator-mips.cc
index 59dc300..bd42399 100644
--- a/src/mips/simulator-mips.cc
+++ b/src/mips/simulator-mips.cc
@@ -808,8 +808,8 @@
   last_debugger_input_ = input;
 }
 
-void Simulator::FlushICache(base::HashMap* i_cache, void* start_addr,
-                            size_t size) {
+void Simulator::FlushICache(base::CustomMatcherHashMap* i_cache,
+                            void* start_addr, size_t size) {
   intptr_t start = reinterpret_cast<intptr_t>(start_addr);
   int intra_line = (start & CachePage::kLineMask);
   start -= intra_line;
@@ -829,8 +829,10 @@
   }
 }
 
-CachePage* Simulator::GetCachePage(base::HashMap* i_cache, void* page) {
-  base::HashMap::Entry* entry = i_cache->LookupOrInsert(page, ICacheHash(page));
+CachePage* Simulator::GetCachePage(base::CustomMatcherHashMap* i_cache,
+                                   void* page) {
+  base::CustomMatcherHashMap::Entry* entry =
+      i_cache->LookupOrInsert(page, ICacheHash(page));
   if (entry->value == NULL) {
     CachePage* new_page = new CachePage();
     entry->value = new_page;
@@ -840,7 +842,8 @@
 
 
 // Flush from start up to and not including start + size.
-void Simulator::FlushOnePage(base::HashMap* i_cache, intptr_t start, int size) {
+void Simulator::FlushOnePage(base::CustomMatcherHashMap* i_cache,
+                             intptr_t start, int size) {
   DCHECK(size <= CachePage::kPageSize);
   DCHECK(AllOnOnePage(start, size - 1));
   DCHECK((start & CachePage::kLineMask) == 0);
@@ -852,7 +855,8 @@
   memset(valid_bytemap, CachePage::LINE_INVALID, size >> CachePage::kLineShift);
 }
 
-void Simulator::CheckICache(base::HashMap* i_cache, Instruction* instr) {
+void Simulator::CheckICache(base::CustomMatcherHashMap* i_cache,
+                            Instruction* instr) {
   intptr_t address = reinterpret_cast<intptr_t>(instr);
   void* page = reinterpret_cast<void*>(address & (~CachePage::kPageMask));
   void* line = reinterpret_cast<void*>(address & (~CachePage::kLineMask));
@@ -885,7 +889,7 @@
 Simulator::Simulator(Isolate* isolate) : isolate_(isolate) {
   i_cache_ = isolate_->simulator_i_cache();
   if (i_cache_ == NULL) {
-    i_cache_ = new base::HashMap(&ICacheMatch);
+    i_cache_ = new base::CustomMatcherHashMap(&ICacheMatch);
     isolate_->set_simulator_i_cache(i_cache_);
   }
   Initialize(isolate);
@@ -997,11 +1001,12 @@
 
 
 // static
-void Simulator::TearDown(base::HashMap* i_cache, Redirection* first) {
+void Simulator::TearDown(base::CustomMatcherHashMap* i_cache,
+                         Redirection* first) {
   Redirection::DeleteChain(first);
   if (i_cache != nullptr) {
-    for (base::HashMap::Entry* entry = i_cache->Start(); entry != nullptr;
-         entry = i_cache->Next(entry)) {
+    for (base::CustomMatcherHashMap::Entry* entry = i_cache->Start();
+         entry != nullptr; entry = i_cache->Next(entry)) {
       delete static_cast<CachePage*>(entry->value);
     }
     delete i_cache;
@@ -1929,16 +1934,16 @@
 
 // Software interrupt instructions are used by the simulator to call into the
 // C-based V8 runtime. They are also used for debugging with simulator.
-void Simulator::SoftwareInterrupt(Instruction* instr) {
+void Simulator::SoftwareInterrupt() {
   // There are several instructions that could get us here,
   // the break_ instruction, or several variants of traps. All
   // Are "SPECIAL" class opcode, and are distinuished by function.
-  int32_t func = instr->FunctionFieldRaw();
-  uint32_t code = (func == BREAK) ? instr->Bits(25, 6) : -1;
+  int32_t func = instr_.FunctionFieldRaw();
+  uint32_t code = (func == BREAK) ? instr_.Bits(25, 6) : -1;
 
   // We first check if we met a call_rt_redirected.
-  if (instr->InstructionBits() == rtCallRedirInstr) {
-    Redirection* redirection = Redirection::FromSwiInstruction(instr);
+  if (instr_.InstructionBits() == rtCallRedirInstr) {
+    Redirection* redirection = Redirection::FromSwiInstruction(instr_.instr());
     int32_t arg0 = get_register(a0);
     int32_t arg1 = get_register(a1);
     int32_t arg2 = get_register(a2);
@@ -2173,7 +2178,7 @@
       PrintWatchpoint(code);
     } else {
       IncreaseStopCounter(code);
-      HandleStop(code, instr);
+      HandleStop(code, instr_.instr());
     }
   } else {
     // All remaining break_ codes, and all traps are handled here.
@@ -2366,6 +2371,49 @@
   return result;
 }
 
+enum class KeepSign : bool { no = false, yes };  // yes: canonical NaN keeps |result|'s sign
+
+template <typename T, typename std::enable_if<std::is_floating_point<T>::value,
+                                              int>::type = 0>
+T FPUCanonalizeNaNArg(T result, T arg, KeepSign keepSign = KeepSign::no) {  // map a NaN operand to a canonical quiet NaN
+  DCHECK(std::isnan(arg));
+  T qNaN = std::numeric_limits<T>::quiet_NaN();
+  if (keepSign == KeepSign::yes) {
+    return std::copysign(qNaN, result);  // preserve only the sign bit of |result|
+  }
+  return qNaN;
+}
+
+template <typename T>
+T FPUCanonalizeNaNArgs(T result, KeepSign keepSign, T first) {  // base case: last operand to check
+  if (std::isnan(first)) {
+    return FPUCanonalizeNaNArg(result, first, keepSign);
+  }
+  return result;
+}
+
+template <typename T, typename... Args>
+T FPUCanonalizeNaNArgs(T result, KeepSign keepSign, T first, Args... args) {  // first NaN operand wins
+  if (std::isnan(first)) {
+    return FPUCanonalizeNaNArg(result, first, keepSign);
+  }
+  return FPUCanonalizeNaNArgs(result, keepSign, args...);
+}
+
+template <typename Func, typename T, typename... Args>
+T FPUCanonalizeOperation(Func f, T first, Args... args) {  // convenience overload: sign not kept
+  return FPUCanonalizeOperation(f, KeepSign::no, first, args...);
+}
+
+template <typename Func, typename T, typename... Args>
+T FPUCanonalizeOperation(Func f, KeepSign keepSign, T first, Args... args) {  // run f; canonicalize a NaN result from its NaN inputs
+  T result = f(first, args...);
+  if (std::isnan(result)) {
+    result = FPUCanonalizeNaNArgs(result, keepSign, first, args...);
+  }
+  return result;
+}
+
 // Handle execution based on instruction types.
 
 void Simulator::DecodeTypeRegisterDRsType() {
@@ -2373,15 +2421,14 @@
   uint32_t cc, fcsr_cc;
   int64_t i64;
   fs = get_fpu_register_double(fs_reg());
-  ft = (get_instr()->FunctionFieldRaw() != MOVF)
-           ? get_fpu_register_double(ft_reg())
-           : 0.0;
+  ft = (instr_.FunctionFieldRaw() != MOVF) ? get_fpu_register_double(ft_reg())
+                                           : 0.0;
   fd = get_fpu_register_double(fd_reg());
   int64_t ft_int = bit_cast<int64_t>(ft);
   int64_t fd_int = bit_cast<int64_t>(fd);
-  cc = get_instr()->FCccValue();
+  cc = instr_.FCccValue();
   fcsr_cc = get_fcsr_condition_bit(cc);
-  switch (get_instr()->FunctionFieldRaw()) {
+  switch (instr_.FunctionFieldRaw()) {
     case RINT: {
       DCHECK(IsMipsArchVariant(kMips32r6));
       double result, temp, temp_result;
@@ -2440,7 +2487,7 @@
     }
     case MOVN_C: {
       DCHECK(IsMipsArchVariant(kMips32r2));
-      int32_t rt_reg = get_instr()->RtValue();
+      int32_t rt_reg = instr_.RtValue();
       int32_t rt = get_register(rt_reg);
       if (rt != 0) {
         set_fpu_register_double(fd_reg(), fs);
@@ -2451,7 +2498,7 @@
       // Same function field for MOVT.D and MOVF.D
       uint32_t ft_cc = (ft_reg() >> 2) & 0x7;
       ft_cc = get_fcsr_condition_bit(ft_cc);
-      if (get_instr()->Bit(16)) {  // Read Tf bit.
+      if (instr_.Bit(16)) {  // Read Tf bit.
         // MOVT.D
         if (test_fcsr_bit(ft_cc)) set_fpu_register_double(fd_reg(), fs);
       } else {
@@ -2477,43 +2524,65 @@
       set_fpu_register_double(fd_reg(), FPUMaxA(ft, fs));
       break;
     case ADD_D:
-      set_fpu_register_double(fd_reg(), fs + ft);
+      set_fpu_register_double(
+          fd_reg(),
+          FPUCanonalizeOperation(
+              [](double lhs, double rhs) { return lhs + rhs; }, fs, ft));
       break;
     case SUB_D:
-      set_fpu_register_double(fd_reg(), fs - ft);
+      set_fpu_register_double(
+          fd_reg(),
+          FPUCanonalizeOperation(
+              [](double lhs, double rhs) { return lhs - rhs; }, fs, ft));
+      break;
+    case MADDF_D:
+      DCHECK(IsMipsArchVariant(kMips32r6));
+      set_fpu_register_double(fd_reg(), fd + (fs * ft));
+      break;
+    case MSUBF_D:
+      DCHECK(IsMipsArchVariant(kMips32r6));
+      set_fpu_register_double(fd_reg(), fd - (fs * ft));
       break;
     case MUL_D:
-      set_fpu_register_double(fd_reg(), fs * ft);
+      set_fpu_register_double(
+          fd_reg(),
+          FPUCanonalizeOperation(
+              [](double lhs, double rhs) { return lhs * rhs; }, fs, ft));
       break;
     case DIV_D:
-      set_fpu_register_double(fd_reg(), fs / ft);
+      set_fpu_register_double(
+          fd_reg(),
+          FPUCanonalizeOperation(
+              [](double lhs, double rhs) { return lhs / rhs; }, fs, ft));
       break;
     case ABS_D:
-      set_fpu_register_double(fd_reg(), fabs(fs));
+      set_fpu_register_double(
+          fd_reg(),
+          FPUCanonalizeOperation([](double fs) { return FPAbs(fs); }, fs));
       break;
     case MOV_D:
       set_fpu_register_double(fd_reg(), fs);
       break;
     case NEG_D:
-      set_fpu_register_double(fd_reg(), -fs);
+      set_fpu_register_double(
+          fd_reg(), FPUCanonalizeOperation([](double src) { return -src; },
+                                           KeepSign::yes, fs));
       break;
     case SQRT_D:
-      lazily_initialize_fast_sqrt(isolate_);
-      set_fpu_register_double(fd_reg(), fast_sqrt(fs, isolate_));
+      set_fpu_register_double(
+          fd_reg(),
+          FPUCanonalizeOperation([](double fs) { return std::sqrt(fs); }, fs));
       break;
-    case RSQRT_D: {
-      DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6));
-      lazily_initialize_fast_sqrt(isolate_);
-      double result = 1.0 / fast_sqrt(fs, isolate_);
-      set_fpu_register_double(fd_reg(), result);
+    case RSQRT_D:
+      set_fpu_register_double(
+          fd_reg(), FPUCanonalizeOperation(
+                        [](double fs) { return 1.0 / std::sqrt(fs); }, fs));
       break;
-    }
-    case RECIP_D: {
-      DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6));
-      double result = 1.0 / fs;
-      set_fpu_register_double(fd_reg(), result);
+    case RECIP_D:
+      set_fpu_register_double(
+          fd_reg(),
+          FPUCanonalizeOperation([](double fs) { return 1.0 / fs; }, fs));
       break;
-    }
     case C_UN_D:
       set_fcsr_bit(fcsr_cc, std::isnan(fs) || std::isnan(ft));
       break;
@@ -2744,7 +2813,7 @@
   float fs = get_fpu_register_float(fs_reg());
   float ft = get_fpu_register_float(ft_reg());
   int32_t alu_out = 0x12345678;
-  switch (get_instr()->FunctionFieldRaw()) {
+  switch (instr_.FunctionFieldRaw()) {
     case CVT_S_W:  // Convert word to float (single).
       alu_out = get_fpu_register_signed_word(fs_reg());
       set_fpu_register_float(fd_reg(), static_cast<float>(alu_out));
@@ -2840,9 +2909,9 @@
   int32_t ft_int = bit_cast<int32_t>(ft);
   int32_t fd_int = bit_cast<int32_t>(fd);
   uint32_t cc, fcsr_cc;
-  cc = get_instr()->FCccValue();
+  cc = instr_.FCccValue();
   fcsr_cc = get_fcsr_condition_bit(cc);
-  switch (get_instr()->FunctionFieldRaw()) {
+  switch (instr_.FunctionFieldRaw()) {
     case RINT: {
       DCHECK(IsMipsArchVariant(kMips32r6));
       float result, temp_result;
@@ -2882,43 +2951,65 @@
       break;
     }
     case ADD_S:
-      set_fpu_register_float(fd_reg(), fs + ft);
+      set_fpu_register_float(
+          fd_reg(),
+          FPUCanonalizeOperation([](float lhs, float rhs) { return lhs + rhs; },
+                                 fs, ft));
       break;
     case SUB_S:
-      set_fpu_register_float(fd_reg(), fs - ft);
+      set_fpu_register_float(
+          fd_reg(),
+          FPUCanonalizeOperation([](float lhs, float rhs) { return lhs - rhs; },
+                                 fs, ft));
+      break;
+    case MADDF_S:
+      DCHECK(IsMipsArchVariant(kMips32r6));
+      set_fpu_register_float(fd_reg(), fd + (fs * ft));
+      break;
+    case MSUBF_S:
+      DCHECK(IsMipsArchVariant(kMips32r6));
+      set_fpu_register_float(fd_reg(), fd - (fs * ft));
       break;
     case MUL_S:
-      set_fpu_register_float(fd_reg(), fs * ft);
+      set_fpu_register_float(
+          fd_reg(),
+          FPUCanonalizeOperation([](float lhs, float rhs) { return lhs * rhs; },
+                                 fs, ft));
       break;
     case DIV_S:
-      set_fpu_register_float(fd_reg(), fs / ft);
+      set_fpu_register_float(
+          fd_reg(),
+          FPUCanonalizeOperation([](float lhs, float rhs) { return lhs / rhs; },
+                                 fs, ft));
       break;
     case ABS_S:
-      set_fpu_register_float(fd_reg(), fabs(fs));
+      set_fpu_register_float(
+          fd_reg(),
+          FPUCanonalizeOperation([](float fs) { return FPAbs(fs); }, fs));
       break;
     case MOV_S:
       set_fpu_register_float(fd_reg(), fs);
       break;
     case NEG_S:
-      set_fpu_register_float(fd_reg(), -fs);
+      set_fpu_register_float(
+          fd_reg(), FPUCanonalizeOperation([](float src) { return -src; },
+                                           KeepSign::yes, fs));
       break;
     case SQRT_S:
-      lazily_initialize_fast_sqrt(isolate_);
-      set_fpu_register_float(fd_reg(), fast_sqrt(fs, isolate_));
+      set_fpu_register_float(
+          fd_reg(),
+          FPUCanonalizeOperation([](float src) { return std::sqrt(src); }, fs));
       break;
-    case RSQRT_S: {
-      DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6));
-      lazily_initialize_fast_sqrt(isolate_);
-      float result = 1.0 / fast_sqrt(fs, isolate_);
-      set_fpu_register_float(fd_reg(), result);
+    case RSQRT_S:
+      set_fpu_register_float(
+          fd_reg(), FPUCanonalizeOperation(
+                        [](float src) { return 1.0 / std::sqrt(src); }, fs));
       break;
-    }
-    case RECIP_S: {
-      DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6));
-      float result = 1.0 / fs;
-      set_fpu_register_float(fd_reg(), result);
+    case RECIP_S:
+      set_fpu_register_float(
+          fd_reg(),
+          FPUCanonalizeOperation([](float src) { return 1.0 / src; }, fs));
       break;
-    }
     case C_F_D:
       set_fcsr_bit(fcsr_cc, false);
       break;
@@ -3047,7 +3138,7 @@
       uint32_t ft_cc = (ft_reg() >> 2) & 0x7;
       ft_cc = get_fcsr_condition_bit(ft_cc);
 
-      if (get_instr()->Bit(16)) {  // Read Tf bit.
+      if (instr_.Bit(16)) {  // Read Tf bit.
         // MOVT.D
         if (test_fcsr_bit(ft_cc)) set_fpu_register_float(fd_reg(), fs);
       } else {
@@ -3209,7 +3300,7 @@
 void Simulator::DecodeTypeRegisterLRsType() {
   double fs = get_fpu_register_double(fs_reg());
   double ft = get_fpu_register_double(ft_reg());
-  switch (get_instr()->FunctionFieldRaw()) {
+  switch (instr_.FunctionFieldRaw()) {
     case CVT_D_L:  // Mips32r2 instruction.
       // Watch the signs here, we want 2 32-bit vals
       // to make a sign-64.
@@ -3311,7 +3402,7 @@
 
 
 void Simulator::DecodeTypeRegisterCOP1() {
-  switch (get_instr()->RsFieldRaw()) {
+  switch (instr_.RsFieldRaw()) {
     case CFC1:
       // At the moment only FCSR is supported.
       DCHECK(fs_reg() == kFCSRRegister);
@@ -3374,14 +3465,43 @@
 
 
 void Simulator::DecodeTypeRegisterCOP1X() {
-  switch (get_instr()->FunctionFieldRaw()) {
-    case MADD_D:
+  switch (instr_.FunctionFieldRaw()) {
+    case MADD_S: {
+      DCHECK(IsMipsArchVariant(kMips32r2));
+      float fr, ft, fs;
+      fr = get_fpu_register_float(fr_reg());
+      fs = get_fpu_register_float(fs_reg());
+      ft = get_fpu_register_float(ft_reg());
+      set_fpu_register_float(fd_reg(), fs * ft + fr);
+      break;
+    }
+    case MSUB_S: {
+      DCHECK(IsMipsArchVariant(kMips32r2));
+      float fr, ft, fs;
+      fr = get_fpu_register_float(fr_reg());
+      fs = get_fpu_register_float(fs_reg());
+      ft = get_fpu_register_float(ft_reg());
+      set_fpu_register_float(fd_reg(), fs * ft - fr);
+      break;
+    }
+    case MADD_D: {
+      DCHECK(IsMipsArchVariant(kMips32r2));
       double fr, ft, fs;
       fr = get_fpu_register_double(fr_reg());
       fs = get_fpu_register_double(fs_reg());
       ft = get_fpu_register_double(ft_reg());
       set_fpu_register_double(fd_reg(), fs * ft + fr);
       break;
+    }
+    case MSUB_D: {
+      DCHECK(IsMipsArchVariant(kMips32r2));
+      double fr, ft, fs;
+      fr = get_fpu_register_double(fr_reg());
+      fs = get_fpu_register_double(fs_reg());
+      ft = get_fpu_register_double(ft_reg());
+      set_fpu_register_double(fd_reg(), fs * ft - fr);
+      break;
+    }
     default:
       UNREACHABLE();
   }
@@ -3394,7 +3514,7 @@
   uint64_t u64hilo = 0;
   bool do_interrupt = false;
 
-  switch (get_instr()->FunctionFieldRaw()) {
+  switch (instr_.FunctionFieldRaw()) {
     case SELEQZ_S:
       DCHECK(IsMipsArchVariant(kMips32r6));
       set_register(rd_reg(), rt() == 0 ? rs() : 0);
@@ -3534,7 +3654,7 @@
       break;
     case DIV:
       if (IsMipsArchVariant(kMips32r6)) {
-        switch (get_instr()->SaValue()) {
+        switch (sa()) {
           case DIV_OP:
             if (rs() == INT_MIN && rt() == -1) {
               set_register(rd_reg(), INT_MIN);
@@ -3569,7 +3689,7 @@
       break;
     case DIVU:
       if (IsMipsArchVariant(kMips32r6)) {
-        switch (get_instr()->SaValue()) {
+        switch (sa()) {
           case DIV_OP:
             if (rt_u() != 0) {
               set_register(rd_reg(), rs_u() / rt_u());
@@ -3676,9 +3796,9 @@
       }
       break;
     case MOVCI: {
-      uint32_t cc = get_instr()->FBccValue();
+      uint32_t cc = instr_.FBccValue();
       uint32_t fcsr_cc = get_fcsr_condition_bit(cc);
-      if (get_instr()->Bit(16)) {  // Read Tf bit.
+      if (instr_.Bit(16)) {  // Read Tf bit.
         if (test_fcsr_bit(fcsr_cc)) set_register(rd_reg(), rs());
       } else {
         if (!test_fcsr_bit(fcsr_cc)) set_register(rd_reg(), rs());
@@ -3695,14 +3815,14 @@
       UNREACHABLE();
   }
   if (do_interrupt) {
-    SoftwareInterrupt(get_instr());
+    SoftwareInterrupt();
   }
 }
 
 
 void Simulator::DecodeTypeRegisterSPECIAL2() {
   int32_t alu_out;
-  switch (get_instr()->FunctionFieldRaw()) {
+  switch (instr_.FunctionFieldRaw()) {
     case MUL:
       // Only the lower 32 bits are kept.
       alu_out = rs_u() * rt_u();
@@ -3725,7 +3845,7 @@
 
 void Simulator::DecodeTypeRegisterSPECIAL3() {
   int32_t alu_out;
-  switch (get_instr()->FunctionFieldRaw()) {
+  switch (instr_.FunctionFieldRaw()) {
     case INS: {  // Mips32r2 instruction.
       // Interpret rd field as 5-bit msb of insert.
       uint16_t msb = rd_reg();
@@ -3750,7 +3870,7 @@
       break;
     }
     case BSHFL: {
-      int sa = get_instr()->SaFieldRaw() >> kSaShift;
+      int sa = instr_.SaFieldRaw() >> kSaShift;
       switch (sa) {
         case BITSWAP: {
           uint32_t input = static_cast<uint32_t>(rt());
@@ -3822,7 +3942,7 @@
           break;
         }
         default: {
-          const uint8_t bp = get_instr()->Bp2Value();
+          const uint8_t bp = instr_.Bp2Value();
           sa >>= kBp2Bits;
           switch (sa) {
             case ALIGN: {
@@ -3850,16 +3970,9 @@
   }
 }
 
-
-void Simulator::DecodeTypeRegister(Instruction* instr) {
-  const Opcode op = instr->OpcodeFieldRaw();
-
-  // Set up the variables if needed before executing the instruction.
-  //  ConfigureTypeRegister(instr);
-  set_instr(instr);
-
+void Simulator::DecodeTypeRegister() {
   // ---------- Execution.
-  switch (op) {
+  switch (instr_.OpcodeFieldRaw()) {
     case COP1:
       DecodeTypeRegisterCOP1();
       break;
@@ -3882,17 +3995,17 @@
 
 
 // Type 2: instructions using a 16, 21 or 26 bits immediate. (e.g. beq, beqc).
-void Simulator::DecodeTypeImmediate(Instruction* instr) {
+void Simulator::DecodeTypeImmediate() {
   // Instruction fields.
-  Opcode op = instr->OpcodeFieldRaw();
-  int32_t rs_reg = instr->RsValue();
-  int32_t rs = get_register(instr->RsValue());
+  Opcode op = instr_.OpcodeFieldRaw();
+  int32_t rs_reg = instr_.RsValue();
+  int32_t rs = get_register(instr_.RsValue());
   uint32_t rs_u = static_cast<uint32_t>(rs);
-  int32_t rt_reg = instr->RtValue();  // Destination register.
+  int32_t rt_reg = instr_.RtValue();  // Destination register.
   int32_t rt = get_register(rt_reg);
-  int16_t imm16 = instr->Imm16Value();
+  int16_t imm16 = instr_.Imm16Value();
 
-  int32_t ft_reg = instr->FtValue();  // Destination register.
+  int32_t ft_reg = instr_.FtValue();  // Destination register.
 
   // Zero extended immediate.
   uint32_t oe_imm16 = 0xffff & imm16;
@@ -3912,38 +4025,36 @@
   int32_t addr = 0x0;
 
   // Branch instructions common part.
-  auto BranchAndLinkHelper = [this, instr, &next_pc,
-                              &execute_branch_delay_instruction](
-      bool do_branch) {
-    execute_branch_delay_instruction = true;
-    int32_t current_pc = get_pc();
-    if (do_branch) {
-      int16_t imm16 = instr->Imm16Value();
-      next_pc = current_pc + (imm16 << 2) + Instruction::kInstrSize;
-      set_register(31, current_pc + 2 * Instruction::kInstrSize);
-    } else {
-      next_pc = current_pc + 2 * Instruction::kInstrSize;
-    }
-  };
+  auto BranchAndLinkHelper =
+      [this, &next_pc, &execute_branch_delay_instruction](bool do_branch) {
+        execute_branch_delay_instruction = true;
+        int32_t current_pc = get_pc();
+        if (do_branch) {
+          int16_t imm16 = this->instr_.Imm16Value();
+          next_pc = current_pc + (imm16 << 2) + Instruction::kInstrSize;
+          set_register(31, current_pc + 2 * Instruction::kInstrSize);
+        } else {
+          next_pc = current_pc + 2 * Instruction::kInstrSize;
+        }
+      };
 
-  auto BranchHelper = [this, instr, &next_pc,
+  auto BranchHelper = [this, &next_pc,
                        &execute_branch_delay_instruction](bool do_branch) {
     execute_branch_delay_instruction = true;
     int32_t current_pc = get_pc();
     if (do_branch) {
-      int16_t imm16 = instr->Imm16Value();
+      int16_t imm16 = this->instr_.Imm16Value();
       next_pc = current_pc + (imm16 << 2) + Instruction::kInstrSize;
     } else {
       next_pc = current_pc + 2 * Instruction::kInstrSize;
     }
   };
 
-  auto BranchAndLinkCompactHelper = [this, instr, &next_pc](bool do_branch,
-                                                            int bits) {
+  auto BranchAndLinkCompactHelper = [this, &next_pc](bool do_branch, int bits) {
     int32_t current_pc = get_pc();
     CheckForbiddenSlot(current_pc);
     if (do_branch) {
-      int32_t imm = instr->ImmValue(bits);
+      int32_t imm = this->instr_.ImmValue(bits);
       imm <<= 32 - bits;
       imm >>= 32 - bits;
       next_pc = current_pc + (imm << 2) + Instruction::kInstrSize;
@@ -3951,28 +4062,27 @@
     }
   };
 
-  auto BranchCompactHelper = [&next_pc, this, instr](bool do_branch, int bits) {
+  auto BranchCompactHelper = [this, &next_pc](bool do_branch, int bits) {
     int32_t current_pc = get_pc();
     CheckForbiddenSlot(current_pc);
     if (do_branch) {
-      int32_t imm = instr->ImmValue(bits);
+      int32_t imm = this->instr_.ImmValue(bits);
       imm <<= 32 - bits;
       imm >>= 32 - bits;
       next_pc = get_pc() + (imm << 2) + Instruction::kInstrSize;
     }
   };
 
-
   switch (op) {
     // ------------- COP1. Coprocessor instructions.
     case COP1:
-      switch (instr->RsFieldRaw()) {
+      switch (instr_.RsFieldRaw()) {
         case BC1: {  // Branch on coprocessor condition.
           // Floating point.
-          uint32_t cc = instr->FBccValue();
+          uint32_t cc = instr_.FBccValue();
           uint32_t fcsr_cc = get_fcsr_condition_bit(cc);
           uint32_t cc_value = test_fcsr_bit(fcsr_cc);
-          bool do_branch = (instr->FBtrueValue()) ? cc_value : !cc_value;
+          bool do_branch = (instr_.FBtrueValue()) ? cc_value : !cc_value;
           BranchHelper(do_branch);
           break;
         }
@@ -3988,7 +4098,7 @@
       break;
     // ------------- REGIMM class.
     case REGIMM:
-      switch (instr->RtFieldRaw()) {
+      switch (instr_.RtFieldRaw()) {
         case BLTZ:
           BranchHelper(rs < 0);
           break;
@@ -4196,7 +4306,7 @@
       set_register(rt_reg, ReadB(rs + se_imm16));
       break;
     case LH:
-      set_register(rt_reg, ReadH(rs + se_imm16, instr));
+      set_register(rt_reg, ReadH(rs + se_imm16, instr_.instr()));
       break;
     case LWL: {
       // al_offset is offset of the effective address within an aligned word.
@@ -4204,20 +4314,20 @@
       uint8_t byte_shift = kPointerAlignmentMask - al_offset;
       uint32_t mask = (1 << byte_shift * 8) - 1;
       addr = rs + se_imm16 - al_offset;
-      alu_out = ReadW(addr, instr);
+      alu_out = ReadW(addr, instr_.instr());
       alu_out <<= byte_shift * 8;
       alu_out |= rt & mask;
       set_register(rt_reg, alu_out);
       break;
     }
     case LW:
-      set_register(rt_reg, ReadW(rs + se_imm16, instr));
+      set_register(rt_reg, ReadW(rs + se_imm16, instr_.instr()));
       break;
     case LBU:
       set_register(rt_reg, ReadBU(rs + se_imm16));
       break;
     case LHU:
-      set_register(rt_reg, ReadHU(rs + se_imm16, instr));
+      set_register(rt_reg, ReadHU(rs + se_imm16, instr_.instr()));
       break;
     case LWR: {
       // al_offset is offset of the effective address within an aligned word.
@@ -4225,7 +4335,7 @@
       uint8_t byte_shift = kPointerAlignmentMask - al_offset;
       uint32_t mask = al_offset ? (~0 << (byte_shift + 1) * 8) : 0;
       addr = rs + se_imm16 - al_offset;
-      alu_out = ReadW(addr, instr);
+      alu_out = ReadW(addr, instr_.instr());
       alu_out = static_cast<uint32_t> (alu_out) >> al_offset * 8;
       alu_out |= rt & mask;
       set_register(rt_reg, alu_out);
@@ -4235,7 +4345,7 @@
       WriteB(rs + se_imm16, static_cast<int8_t>(rt));
       break;
     case SH:
-      WriteH(rs + se_imm16, static_cast<uint16_t>(rt), instr);
+      WriteH(rs + se_imm16, static_cast<uint16_t>(rt), instr_.instr());
       break;
     case SWL: {
       uint8_t al_offset = (rs + se_imm16) & kPointerAlignmentMask;
@@ -4243,40 +4353,40 @@
       uint32_t mask = byte_shift ? (~0 << (al_offset + 1) * 8) : 0;
       addr = rs + se_imm16 - al_offset;
       // Value to be written in memory.
-      uint32_t mem_value = ReadW(addr, instr) & mask;
+      uint32_t mem_value = ReadW(addr, instr_.instr()) & mask;
       mem_value |= static_cast<uint32_t>(rt) >> byte_shift * 8;
-      WriteW(addr, mem_value, instr);
+      WriteW(addr, mem_value, instr_.instr());
       break;
     }
     case SW:
-      WriteW(rs + se_imm16, rt, instr);
+      WriteW(rs + se_imm16, rt, instr_.instr());
       break;
     case SWR: {
       uint8_t al_offset = (rs + se_imm16) & kPointerAlignmentMask;
       uint32_t mask = (1 << al_offset * 8) - 1;
       addr = rs + se_imm16 - al_offset;
-      uint32_t mem_value = ReadW(addr, instr);
+      uint32_t mem_value = ReadW(addr, instr_.instr());
       mem_value = (rt << al_offset * 8) | (mem_value & mask);
-      WriteW(addr, mem_value, instr);
+      WriteW(addr, mem_value, instr_.instr());
       break;
     }
     case LWC1:
       set_fpu_register_hi_word(ft_reg, 0);
-      set_fpu_register_word(ft_reg, ReadW(rs + se_imm16, instr));
+      set_fpu_register_word(ft_reg, ReadW(rs + se_imm16, instr_.instr()));
       break;
     case LDC1:
-      set_fpu_register_double(ft_reg, ReadD(rs + se_imm16, instr));
+      set_fpu_register_double(ft_reg, ReadD(rs + se_imm16, instr_.instr()));
       break;
     case SWC1:
-      WriteW(rs + se_imm16, get_fpu_register_word(ft_reg), instr);
+      WriteW(rs + se_imm16, get_fpu_register_word(ft_reg), instr_.instr());
       break;
     case SDC1:
-      WriteD(rs + se_imm16, get_fpu_register_double(ft_reg), instr);
+      WriteD(rs + se_imm16, get_fpu_register_double(ft_reg), instr_.instr());
       break;
     // ------------- PC-Relative instructions.
     case PCREL: {
       // rt field: checking 5-bits.
-      int32_t imm21 = instr->Imm21Value();
+      int32_t imm21 = instr_.Imm21Value();
       int32_t current_pc = get_pc();
       uint8_t rt = (imm21 >> kImm16Bits);
       switch (rt) {
@@ -4288,7 +4398,7 @@
           alu_out = current_pc + (se_imm16 << 16);
           break;
         default: {
-          int32_t imm19 = instr->Imm19Value();
+          int32_t imm19 = instr_.Imm19Value();
           // rt field: checking the most significant 2-bits.
           rt = (imm21 >> kImm19Bits);
           switch (rt) {
@@ -4336,13 +4446,15 @@
 
 
 // Type 3: instructions using a 26 bytes immediate. (e.g. j, jal).
-void Simulator::DecodeTypeJump(Instruction* instr) {
+void Simulator::DecodeTypeJump() {
+  SimInstruction simInstr = instr_;
   // Get current pc.
   int32_t current_pc = get_pc();
   // Get unchanged bits of pc.
   int32_t pc_high_bits = current_pc & 0xf0000000;
   // Next pc.
-  int32_t next_pc = pc_high_bits | (instr->Imm26Value() << 2);
+
+  int32_t next_pc = pc_high_bits | (simInstr.Imm26Value() << 2);
 
   // Execute branch delay slot.
   // We don't check for end_sim_pc. First it should not be met as the current pc
@@ -4353,7 +4465,7 @@
 
   // Update pc and ra if necessary.
   // Do this after the branch delay execution.
-  if (instr->IsLinkingInstruction()) {
+  if (simInstr.IsLinkingInstruction()) {
     set_register(31, current_pc + 2 * Instruction::kInstrSize);
   }
   set_pc(next_pc);
@@ -4375,15 +4487,16 @@
     dasm.InstructionDecode(buffer, reinterpret_cast<byte*>(instr));
   }
 
-  switch (instr->InstructionType(Instruction::TypeChecks::EXTRA)) {
+  instr_ = instr;
+  switch (instr_.InstructionType()) {
     case Instruction::kRegisterType:
-      DecodeTypeRegister(instr);
+      DecodeTypeRegister();
       break;
     case Instruction::kImmediateType:
-      DecodeTypeImmediate(instr);
+      DecodeTypeImmediate();
       break;
     case Instruction::kJumpType:
-      DecodeTypeJump(instr);
+      DecodeTypeJump();
       break;
     default:
       UNSUPPORTED();
diff --git a/src/mips/simulator-mips.h b/src/mips/simulator-mips.h
index 5c77756..3795eec 100644
--- a/src/mips/simulator-mips.h
+++ b/src/mips/simulator-mips.h
@@ -113,6 +113,39 @@
   char validity_map_[kValidityMapSize];  // One byte per line.
 };
 
+class SimInstructionBase : public InstructionBase {  // storage for a decoded copy of the current instruction
+ public:
+  Type InstructionType() const { return type_; }  // type cached at assignment time, not re-decoded
+  inline Instruction* instr() const { return instr_; }  // original instruction address
+  inline int32_t operand() const { return operand_; }  // raw 32-bit instruction word
+
+ protected:
+  SimInstructionBase() : operand_(-1), instr_(nullptr), type_(kUnsupported) {}
+  explicit SimInstructionBase(Instruction* instr) {}  // NOTE(review): |instr| ignored; fields left uninitialized — confirm callers always assign via SimInstruction::operator=
+
+  int32_t operand_;  // copy of the instruction bits; getters decode from this
+  Instruction* instr_;
+  Type type_;
+
+ private:
+  DISALLOW_ASSIGN(SimInstructionBase);
+};
+
+class SimInstruction : public InstructionGetters<SimInstructionBase> {  // value-type snapshot of an instruction for the simulator
+ public:
+  SimInstruction() {}
+
+  explicit SimInstruction(Instruction* instr) { *this = instr; }
+
+  SimInstruction& operator=(Instruction* instr) {  // snapshot |instr|: copy its raw bits and cache its type
+    operand_ = *reinterpret_cast<const int32_t*>(instr);  // copy the 32-bit instruction word
+    instr_ = instr;
+    type_ = InstructionBase::InstructionType();
+    DCHECK(reinterpret_cast<void*>(&operand_) == this);  // getters decode bits at |this|, so operand_ must be the first field
+    return *this;
+  }
+};
+
 class Simulator {
  public:
   friend class MipsDebugger;
@@ -216,7 +249,7 @@
   // Call on program start.
   static void Initialize(Isolate* isolate);
 
-  static void TearDown(base::HashMap* i_cache, Redirection* first);
+  static void TearDown(base::CustomMatcherHashMap* i_cache, Redirection* first);
 
   // V8 generally calls into generated JS code with 5 parameters and into
   // generated RegExp code with 7 parameters. This is a convenience function,
@@ -236,7 +269,8 @@
   char* last_debugger_input() { return last_debugger_input_; }
 
   // ICache checking.
-  static void FlushICache(base::HashMap* i_cache, void* start, size_t size);
+  static void FlushICache(base::CustomMatcherHashMap* i_cache, void* start,
+                          size_t size);
 
   // Returns true if pc register contains one of the 'special_values' defined
   // below (bad_ra, end_sim_pc).
@@ -299,8 +333,10 @@
   inline int32_t SetDoubleHIW(double* addr);
   inline int32_t SetDoubleLOW(double* addr);
 
+  SimInstruction instr_;
+
   // Executing is handled based on the instruction type.
-  void DecodeTypeRegister(Instruction* instr);
+  void DecodeTypeRegister();
 
   // Functions called from DecodeTypeRegister.
   void DecodeTypeRegisterCOP1();
@@ -322,39 +358,34 @@
 
   void DecodeTypeRegisterLRsType();
 
-  Instruction* currentInstr_;
-
-  inline Instruction* get_instr() const { return currentInstr_; }
-  inline void set_instr(Instruction* instr) { currentInstr_ = instr; }
-
-  inline int32_t rs_reg() const { return currentInstr_->RsValue(); }
+  inline int32_t rs_reg() const { return instr_.RsValue(); }
   inline int32_t rs() const { return get_register(rs_reg()); }
   inline uint32_t rs_u() const {
     return static_cast<uint32_t>(get_register(rs_reg()));
   }
-  inline int32_t rt_reg() const { return currentInstr_->RtValue(); }
+  inline int32_t rt_reg() const { return instr_.RtValue(); }
   inline int32_t rt() const { return get_register(rt_reg()); }
   inline uint32_t rt_u() const {
     return static_cast<uint32_t>(get_register(rt_reg()));
   }
-  inline int32_t rd_reg() const { return currentInstr_->RdValue(); }
-  inline int32_t fr_reg() const { return currentInstr_->FrValue(); }
-  inline int32_t fs_reg() const { return currentInstr_->FsValue(); }
-  inline int32_t ft_reg() const { return currentInstr_->FtValue(); }
-  inline int32_t fd_reg() const { return currentInstr_->FdValue(); }
-  inline int32_t sa() const { return currentInstr_->SaValue(); }
-  inline int32_t lsa_sa() const { return currentInstr_->LsaSaValue(); }
+  inline int32_t rd_reg() const { return instr_.RdValue(); }
+  inline int32_t fr_reg() const { return instr_.FrValue(); }
+  inline int32_t fs_reg() const { return instr_.FsValue(); }
+  inline int32_t ft_reg() const { return instr_.FtValue(); }
+  inline int32_t fd_reg() const { return instr_.FdValue(); }
+  inline int32_t sa() const { return instr_.SaValue(); }
+  inline int32_t lsa_sa() const { return instr_.LsaSaValue(); }
 
   inline void SetResult(int32_t rd_reg, int32_t alu_out) {
     set_register(rd_reg, alu_out);
     TraceRegWr(alu_out);
   }
 
-  void DecodeTypeImmediate(Instruction* instr);
-  void DecodeTypeJump(Instruction* instr);
+  void DecodeTypeImmediate();
+  void DecodeTypeJump();
 
   // Used for breakpoints and traps.
-  void SoftwareInterrupt(Instruction* instr);
+  void SoftwareInterrupt();
 
   // Compact branch guard.
   void CheckForbiddenSlot(int32_t current_pc) {
@@ -400,9 +431,12 @@
   }
 
   // ICache.
-  static void CheckICache(base::HashMap* i_cache, Instruction* instr);
-  static void FlushOnePage(base::HashMap* i_cache, intptr_t start, int size);
-  static CachePage* GetCachePage(base::HashMap* i_cache, void* page);
+  static void CheckICache(base::CustomMatcherHashMap* i_cache,
+                          Instruction* instr);
+  static void FlushOnePage(base::CustomMatcherHashMap* i_cache, intptr_t start,
+                           int size);
+  static CachePage* GetCachePage(base::CustomMatcherHashMap* i_cache,
+                                 void* page);
 
   enum Exception {
     none,
@@ -448,7 +482,7 @@
   char* last_debugger_input_;
 
   // Icache simulation.
-  base::HashMap* i_cache_;
+  base::CustomMatcherHashMap* i_cache_;
 
   v8::internal::Isolate* isolate_;
 
diff --git a/src/mips64/assembler-mips64.cc b/src/mips64/assembler-mips64.cc
index 21a2434..b35b166 100644
--- a/src/mips64/assembler-mips64.cc
+++ b/src/mips64/assembler-mips64.cc
@@ -2780,12 +2780,49 @@
   GenInstrRegister(COP1, D, ft, fs, fd, MUL_D);
 }
 
+void Assembler::madd_s(FPURegister fd, FPURegister fr, FPURegister fs,
+                       FPURegister ft) {
+  DCHECK(kArchVariant == kMips64r2);
+  GenInstrRegister(COP1X, fr, ft, fs, fd, MADD_S);
+}
 
 void Assembler::madd_d(FPURegister fd, FPURegister fr, FPURegister fs,
     FPURegister ft) {
+  DCHECK(kArchVariant == kMips64r2);
   GenInstrRegister(COP1X, fr, ft, fs, fd, MADD_D);
 }
 
+void Assembler::msub_s(FPURegister fd, FPURegister fr, FPURegister fs,
+                       FPURegister ft) {
+  DCHECK(kArchVariant == kMips64r2);
+  GenInstrRegister(COP1X, fr, ft, fs, fd, MSUB_S);
+}
+
+void Assembler::msub_d(FPURegister fd, FPURegister fr, FPURegister fs,
+                       FPURegister ft) {
+  DCHECK(kArchVariant == kMips64r2);
+  GenInstrRegister(COP1X, fr, ft, fs, fd, MSUB_D);
+}
+
+void Assembler::maddf_s(FPURegister fd, FPURegister fs, FPURegister ft) {
+  DCHECK(kArchVariant == kMips64r6);
+  GenInstrRegister(COP1, S, ft, fs, fd, MADDF_S);
+}
+
+void Assembler::maddf_d(FPURegister fd, FPURegister fs, FPURegister ft) {
+  DCHECK(kArchVariant == kMips64r6);
+  GenInstrRegister(COP1, D, ft, fs, fd, MADDF_D);
+}
+
+void Assembler::msubf_s(FPURegister fd, FPURegister fs, FPURegister ft) {
+  DCHECK(kArchVariant == kMips64r6);
+  GenInstrRegister(COP1, S, ft, fs, fd, MSUBF_S);
+}
+
+void Assembler::msubf_d(FPURegister fd, FPURegister fs, FPURegister ft) {
+  DCHECK(kArchVariant == kMips64r6);
+  GenInstrRegister(COP1, D, ft, fs, fd, MSUBF_D);
+}
 
 void Assembler::div_s(FPURegister fd, FPURegister fs, FPURegister ft) {
   GenInstrRegister(COP1, S, ft, fs, fd, DIV_D);
@@ -2818,13 +2855,11 @@
 
 
 void Assembler::neg_s(FPURegister fd, FPURegister fs) {
-  DCHECK(kArchVariant == kMips64r2);
   GenInstrRegister(COP1, S, f0, fs, fd, NEG_D);
 }
 
 
 void Assembler::neg_d(FPURegister fd, FPURegister fs) {
-  DCHECK(kArchVariant == kMips64r2);
   GenInstrRegister(COP1, D, f0, fs, fd, NEG_D);
 }
 
diff --git a/src/mips64/assembler-mips64.h b/src/mips64/assembler-mips64.h
index e269acf..dc3198c 100644
--- a/src/mips64/assembler-mips64.h
+++ b/src/mips64/assembler-mips64.h
@@ -939,7 +939,14 @@
   void sub_d(FPURegister fd, FPURegister fs, FPURegister ft);
   void mul_s(FPURegister fd, FPURegister fs, FPURegister ft);
   void mul_d(FPURegister fd, FPURegister fs, FPURegister ft);
+  void madd_s(FPURegister fd, FPURegister fr, FPURegister fs, FPURegister ft);
   void madd_d(FPURegister fd, FPURegister fr, FPURegister fs, FPURegister ft);
+  void msub_s(FPURegister fd, FPURegister fr, FPURegister fs, FPURegister ft);
+  void msub_d(FPURegister fd, FPURegister fr, FPURegister fs, FPURegister ft);
+  void maddf_s(FPURegister fd, FPURegister fs, FPURegister ft);
+  void maddf_d(FPURegister fd, FPURegister fs, FPURegister ft);
+  void msubf_s(FPURegister fd, FPURegister fs, FPURegister ft);
+  void msubf_d(FPURegister fd, FPURegister fs, FPURegister ft);
   void div_s(FPURegister fd, FPURegister fs, FPURegister ft);
   void div_d(FPURegister fd, FPURegister fs, FPURegister ft);
   void abs_s(FPURegister fd, FPURegister fs);
diff --git a/src/mips64/code-stubs-mips64.cc b/src/mips64/code-stubs-mips64.cc
index 4d9f120..e089b54 100644
--- a/src/mips64/code-stubs-mips64.cc
+++ b/src/mips64/code-stubs-mips64.cc
@@ -1783,7 +1783,6 @@
   // a2 : feedback vector
   // a3 : slot in feedback vector (Smi)
   Label initialize, done, miss, megamorphic, not_array_function;
-  Label done_initialize_count, done_increment_count;
 
   DCHECK_EQ(*TypeFeedbackVector::MegamorphicSentinel(masm->isolate()),
             masm->isolate()->heap()->megamorphic_symbol());
@@ -1803,7 +1802,7 @@
   Register feedback_map = a6;
   Register weak_value = t0;
   __ ld(weak_value, FieldMemOperand(a5, WeakCell::kValueOffset));
-  __ Branch(&done_increment_count, eq, a1, Operand(weak_value));
+  __ Branch(&done, eq, a1, Operand(weak_value));
   __ LoadRoot(at, Heap::kmegamorphic_symbolRootIndex);
   __ Branch(&done, eq, a5, Operand(at));
   __ ld(feedback_map, FieldMemOperand(a5, HeapObject::kMapOffset));
@@ -1825,7 +1824,7 @@
   // Make sure the function is the Array() function
   __ LoadNativeContextSlot(Context::ARRAY_FUNCTION_INDEX, a5);
   __ Branch(&megamorphic, ne, a1, Operand(a5));
-  __ jmp(&done_increment_count);
+  __ jmp(&done);
 
   __ bind(&miss);
 
@@ -1853,32 +1852,21 @@
   // slot.
   CreateAllocationSiteStub create_stub(masm->isolate());
   CallStubInRecordCallTarget(masm, &create_stub);
-  __ Branch(&done_initialize_count);
+  __ Branch(&done);
 
   __ bind(&not_array_function);
 
   CreateWeakCellStub weak_cell_stub(masm->isolate());
   CallStubInRecordCallTarget(masm, &weak_cell_stub);
 
-  __ bind(&done_initialize_count);
-  // Initialize the call counter.
+  __ bind(&done);
 
-  __ SmiScale(a4, a3, kPointerSizeLog2);
-  __ Daddu(a4, a2, Operand(a4));
-  __ li(a5, Operand(Smi::FromInt(1)));
-  __ Branch(USE_DELAY_SLOT, &done);
-  __ sd(a5, FieldMemOperand(a4, FixedArray::kHeaderSize + kPointerSize));
-
-  __ bind(&done_increment_count);
-
-  // Increment the call count for monomorphic function calls.
+  // Increment the call count for all function calls.
   __ SmiScale(a4, a3, kPointerSizeLog2);
   __ Daddu(a5, a2, Operand(a4));
   __ ld(a4, FieldMemOperand(a5, FixedArray::kHeaderSize + kPointerSize));
   __ Daddu(a4, a4, Operand(Smi::FromInt(1)));
   __ sd(a4, FieldMemOperand(a5, FixedArray::kHeaderSize + kPointerSize));
-
-  __ bind(&done);
 }
 
 
@@ -1965,6 +1953,15 @@
   __ bind(&exit_);
 }
 
+// Note: feedback_vector and slot are clobbered after the call.
+static void IncrementCallCount(MacroAssembler* masm, Register feedback_vector,
+                               Register slot) {
+  __ dsrl(t0, slot, 32 - kPointerSizeLog2);
+  __ Daddu(slot, feedback_vector, Operand(t0));
+  __ ld(t0, FieldMemOperand(slot, FixedArray::kHeaderSize + kPointerSize));
+  __ Daddu(t0, t0, Operand(Smi::FromInt(1)));
+  __ sd(t0, FieldMemOperand(slot, FixedArray::kHeaderSize + kPointerSize));
+}
 
 void CallICStub::HandleArrayCase(MacroAssembler* masm, Label* miss) {
   // a1 - function
@@ -1977,11 +1974,7 @@
   __ li(a0, Operand(arg_count()));
 
   // Increment the call count for monomorphic function calls.
-  __ dsrl(t0, a3, 32 - kPointerSizeLog2);
-  __ Daddu(a3, a2, Operand(t0));
-  __ ld(t0, FieldMemOperand(a3, FixedArray::kHeaderSize + kPointerSize));
-  __ Daddu(t0, t0, Operand(Smi::FromInt(1)));
-  __ sd(t0, FieldMemOperand(a3, FixedArray::kHeaderSize + kPointerSize));
+  IncrementCallCount(masm, a2, a3);
 
   __ mov(a2, a4);
   __ mov(a3, a1);
@@ -1994,7 +1987,7 @@
   // a1 - function
   // a3 - slot id (Smi)
   // a2 - vector
-  Label extra_checks_or_miss, call, call_function;
+  Label extra_checks_or_miss, call, call_function, call_count_incremented;
   int argc = arg_count();
   ParameterCount actual(argc);
 
@@ -2024,14 +2017,10 @@
   // convincing us that we have a monomorphic JSFunction.
   __ JumpIfSmi(a1, &extra_checks_or_miss);
 
-  // Increment the call count for monomorphic function calls.
-  __ dsrl(t0, a3, 32 - kPointerSizeLog2);
-  __ Daddu(a3, a2, Operand(t0));
-  __ ld(t0, FieldMemOperand(a3, FixedArray::kHeaderSize + kPointerSize));
-  __ Daddu(t0, t0, Operand(Smi::FromInt(1)));
-  __ sd(t0, FieldMemOperand(a3, FixedArray::kHeaderSize + kPointerSize));
-
   __ bind(&call_function);
+  // Increment the call count for monomorphic function calls.
+  IncrementCallCount(masm, a2, a3);
+
   __ Jump(masm->isolate()->builtins()->CallFunction(convert_mode(),
                                                     tail_call_mode()),
           RelocInfo::CODE_TARGET, al, zero_reg, Operand(zero_reg),
@@ -2073,6 +2062,10 @@
   __ sd(at, FieldMemOperand(a4, FixedArray::kHeaderSize));
 
   __ bind(&call);
+  IncrementCallCount(masm, a2, a3);
+
+  __ bind(&call_count_incremented);
+
   __ Jump(masm->isolate()->builtins()->Call(convert_mode(), tail_call_mode()),
           RelocInfo::CODE_TARGET, al, zero_reg, Operand(zero_reg),
           USE_DELAY_SLOT);
@@ -2098,12 +2091,6 @@
   __ ld(t1, NativeContextMemOperand());
   __ Branch(&miss, ne, t0, Operand(t1));
 
-  // Initialize the call counter.
-  __ dsrl(at, a3, 32 - kPointerSizeLog2);
-  __ Daddu(at, a2, Operand(at));
-  __ li(t0, Operand(Smi::FromInt(1)));
-  __ sd(t0, FieldMemOperand(at, FixedArray::kHeaderSize + kPointerSize));
-
   // Store the function. Use a stub since we need a frame for allocation.
   // a2 - vector
   // a3 - slot
@@ -2111,9 +2098,11 @@
   {
     FrameScope scope(masm, StackFrame::INTERNAL);
     CreateWeakCellStub create_stub(masm->isolate());
+    __ Push(a2, a3);
     __ Push(cp, a1);
     __ CallStub(&create_stub);
     __ Pop(cp, a1);
+    __ Pop(a2, a3);
   }
 
   __ Branch(&call_function);
@@ -2123,7 +2112,7 @@
   __ bind(&miss);
   GenerateMiss(masm);
 
-  __ Branch(&call);
+  __ Branch(&call_count_incremented);
 }
 
 
@@ -2283,293 +2272,6 @@
 }
 
 
-void SubStringStub::Generate(MacroAssembler* masm) {
-  Label runtime;
-  // Stack frame on entry.
-  //  ra: return address
-  //  sp[0]: to
-  //  sp[4]: from
-  //  sp[8]: string
-
-  // This stub is called from the native-call %_SubString(...), so
-  // nothing can be assumed about the arguments. It is tested that:
-  //  "string" is a sequential string,
-  //  both "from" and "to" are smis, and
-  //  0 <= from <= to <= string.length.
-  // If any of these assumptions fail, we call the runtime system.
-
-  const int kToOffset = 0 * kPointerSize;
-  const int kFromOffset = 1 * kPointerSize;
-  const int kStringOffset = 2 * kPointerSize;
-
-  __ ld(a2, MemOperand(sp, kToOffset));
-  __ ld(a3, MemOperand(sp, kFromOffset));
-
-  STATIC_ASSERT(kSmiTag == 0);
-
-  // Utilize delay slots. SmiUntag doesn't emit a jump, everything else is
-  // safe in this case.
-  __ JumpIfNotSmi(a2, &runtime);
-  __ JumpIfNotSmi(a3, &runtime);
-  // Both a2 and a3 are untagged integers.
-
-  __ SmiUntag(a2, a2);
-  __ SmiUntag(a3, a3);
-  __ Branch(&runtime, lt, a3, Operand(zero_reg));  // From < 0.
-
-  __ Branch(&runtime, gt, a3, Operand(a2));  // Fail if from > to.
-  __ Dsubu(a2, a2, a3);
-
-  // Make sure first argument is a string.
-  __ ld(v0, MemOperand(sp, kStringOffset));
-  __ JumpIfSmi(v0, &runtime);
-  __ ld(a1, FieldMemOperand(v0, HeapObject::kMapOffset));
-  __ lbu(a1, FieldMemOperand(a1, Map::kInstanceTypeOffset));
-  __ And(a4, a1, Operand(kIsNotStringMask));
-
-  __ Branch(&runtime, ne, a4, Operand(zero_reg));
-
-  Label single_char;
-  __ Branch(&single_char, eq, a2, Operand(1));
-
-  // Short-cut for the case of trivial substring.
-  Label return_v0;
-  // v0: original string
-  // a2: result string length
-  __ ld(a4, FieldMemOperand(v0, String::kLengthOffset));
-  __ SmiUntag(a4);
-  // Return original string.
-  __ Branch(&return_v0, eq, a2, Operand(a4));
-  // Longer than original string's length or negative: unsafe arguments.
-  __ Branch(&runtime, hi, a2, Operand(a4));
-  // Shorter than original string's length: an actual substring.
-
-  // Deal with different string types: update the index if necessary
-  // and put the underlying string into a5.
-  // v0: original string
-  // a1: instance type
-  // a2: length
-  // a3: from index (untagged)
-  Label underlying_unpacked, sliced_string, seq_or_external_string;
-  // If the string is not indirect, it can only be sequential or external.
-  STATIC_ASSERT(kIsIndirectStringMask == (kSlicedStringTag & kConsStringTag));
-  STATIC_ASSERT(kIsIndirectStringMask != 0);
-  __ And(a4, a1, Operand(kIsIndirectStringMask));
-  __ Branch(USE_DELAY_SLOT, &seq_or_external_string, eq, a4, Operand(zero_reg));
-  // a4 is used as a scratch register and can be overwritten in either case.
-  __ And(a4, a1, Operand(kSlicedNotConsMask));
-  __ Branch(&sliced_string, ne, a4, Operand(zero_reg));
-  // Cons string.  Check whether it is flat, then fetch first part.
-  __ ld(a5, FieldMemOperand(v0, ConsString::kSecondOffset));
-  __ LoadRoot(a4, Heap::kempty_stringRootIndex);
-  __ Branch(&runtime, ne, a5, Operand(a4));
-  __ ld(a5, FieldMemOperand(v0, ConsString::kFirstOffset));
-  // Update instance type.
-  __ ld(a1, FieldMemOperand(a5, HeapObject::kMapOffset));
-  __ lbu(a1, FieldMemOperand(a1, Map::kInstanceTypeOffset));
-  __ jmp(&underlying_unpacked);
-
-  __ bind(&sliced_string);
-  // Sliced string.  Fetch parent and correct start index by offset.
-  __ ld(a5, FieldMemOperand(v0, SlicedString::kParentOffset));
-  __ ld(a4, FieldMemOperand(v0, SlicedString::kOffsetOffset));
-  __ SmiUntag(a4);  // Add offset to index.
-  __ Daddu(a3, a3, a4);
-  // Update instance type.
-  __ ld(a1, FieldMemOperand(a5, HeapObject::kMapOffset));
-  __ lbu(a1, FieldMemOperand(a1, Map::kInstanceTypeOffset));
-  __ jmp(&underlying_unpacked);
-
-  __ bind(&seq_or_external_string);
-  // Sequential or external string.  Just move string to the expected register.
-  __ mov(a5, v0);
-
-  __ bind(&underlying_unpacked);
-
-  if (FLAG_string_slices) {
-    Label copy_routine;
-    // a5: underlying subject string
-    // a1: instance type of underlying subject string
-    // a2: length
-    // a3: adjusted start index (untagged)
-    // Short slice.  Copy instead of slicing.
-    __ Branch(&copy_routine, lt, a2, Operand(SlicedString::kMinLength));
-    // Allocate new sliced string.  At this point we do not reload the instance
-    // type including the string encoding because we simply rely on the info
-    // provided by the original string.  It does not matter if the original
-    // string's encoding is wrong because we always have to recheck encoding of
-    // the newly created string's parent anyways due to externalized strings.
-    Label two_byte_slice, set_slice_header;
-    STATIC_ASSERT((kStringEncodingMask & kOneByteStringTag) != 0);
-    STATIC_ASSERT((kStringEncodingMask & kTwoByteStringTag) == 0);
-    __ And(a4, a1, Operand(kStringEncodingMask));
-    __ Branch(&two_byte_slice, eq, a4, Operand(zero_reg));
-    __ AllocateOneByteSlicedString(v0, a2, a6, a7, &runtime);
-    __ jmp(&set_slice_header);
-    __ bind(&two_byte_slice);
-    __ AllocateTwoByteSlicedString(v0, a2, a6, a7, &runtime);
-    __ bind(&set_slice_header);
-    __ SmiTag(a3);
-    __ sd(a5, FieldMemOperand(v0, SlicedString::kParentOffset));
-    __ sd(a3, FieldMemOperand(v0, SlicedString::kOffsetOffset));
-    __ jmp(&return_v0);
-
-    __ bind(&copy_routine);
-  }
-
-  // a5: underlying subject string
-  // a1: instance type of underlying subject string
-  // a2: length
-  // a3: adjusted start index (untagged)
-  Label two_byte_sequential, sequential_string, allocate_result;
-  STATIC_ASSERT(kExternalStringTag != 0);
-  STATIC_ASSERT(kSeqStringTag == 0);
-  __ And(a4, a1, Operand(kExternalStringTag));
-  __ Branch(&sequential_string, eq, a4, Operand(zero_reg));
-
-  // Handle external string.
-  // Rule out short external strings.
-  STATIC_ASSERT(kShortExternalStringTag != 0);
-  __ And(a4, a1, Operand(kShortExternalStringTag));
-  __ Branch(&runtime, ne, a4, Operand(zero_reg));
-  __ ld(a5, FieldMemOperand(a5, ExternalString::kResourceDataOffset));
-  // a5 already points to the first character of underlying string.
-  __ jmp(&allocate_result);
-
-  __ bind(&sequential_string);
-  // Locate first character of underlying subject string.
-  STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqOneByteString::kHeaderSize);
-  __ Daddu(a5, a5, Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
-
-  __ bind(&allocate_result);
-  // Sequential acii string.  Allocate the result.
-  STATIC_ASSERT((kOneByteStringTag & kStringEncodingMask) != 0);
-  __ And(a4, a1, Operand(kStringEncodingMask));
-  __ Branch(&two_byte_sequential, eq, a4, Operand(zero_reg));
-
-  // Allocate and copy the resulting one_byte string.
-  __ AllocateOneByteString(v0, a2, a4, a6, a7, &runtime);
-
-  // Locate first character of substring to copy.
-  __ Daddu(a5, a5, a3);
-
-  // Locate first character of result.
-  __ Daddu(a1, v0, Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
-
-  // v0: result string
-  // a1: first character of result string
-  // a2: result string length
-  // a5: first character of substring to copy
-  STATIC_ASSERT((SeqOneByteString::kHeaderSize & kObjectAlignmentMask) == 0);
-  StringHelper::GenerateCopyCharacters(
-      masm, a1, a5, a2, a3, String::ONE_BYTE_ENCODING);
-  __ jmp(&return_v0);
-
-  // Allocate and copy the resulting two-byte string.
-  __ bind(&two_byte_sequential);
-  __ AllocateTwoByteString(v0, a2, a4, a6, a7, &runtime);
-
-  // Locate first character of substring to copy.
-  STATIC_ASSERT(kSmiTagSize == 1 && kSmiTag == 0);
-  __ Dlsa(a5, a5, a3, 1);
-  // Locate first character of result.
-  __ Daddu(a1, v0, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
-
-  // v0: result string.
-  // a1: first character of result.
-  // a2: result length.
-  // a5: first character of substring to copy.
-  STATIC_ASSERT((SeqTwoByteString::kHeaderSize & kObjectAlignmentMask) == 0);
-  StringHelper::GenerateCopyCharacters(
-      masm, a1, a5, a2, a3, String::TWO_BYTE_ENCODING);
-
-  __ bind(&return_v0);
-  Counters* counters = isolate()->counters();
-  __ IncrementCounter(counters->sub_string_native(), 1, a3, a4);
-  __ DropAndRet(3);
-
-  // Just jump to runtime to create the sub string.
-  __ bind(&runtime);
-  __ TailCallRuntime(Runtime::kSubString);
-
-  __ bind(&single_char);
-  // v0: original string
-  // a1: instance type
-  // a2: length
-  // a3: from index (untagged)
-  __ SmiTag(a3);
-  StringCharAtGenerator generator(v0, a3, a2, v0, &runtime, &runtime, &runtime,
-                                  RECEIVER_IS_STRING);
-  generator.GenerateFast(masm);
-  __ DropAndRet(3);
-  generator.SkipSlow(masm, &runtime);
-}
-
-void ToStringStub::Generate(MacroAssembler* masm) {
-  // The ToString stub takes on argument in a0.
-  Label is_number;
-  __ JumpIfSmi(a0, &is_number);
-
-  Label not_string;
-  __ GetObjectType(a0, a1, a1);
-  // a0: receiver
-  // a1: receiver instance type
-  __ Branch(&not_string, ge, a1, Operand(FIRST_NONSTRING_TYPE));
-  __ Ret(USE_DELAY_SLOT);
-  __ mov(v0, a0);
-  __ bind(&not_string);
-
-  Label not_heap_number;
-  __ Branch(&not_heap_number, ne, a1, Operand(HEAP_NUMBER_TYPE));
-  __ bind(&is_number);
-  NumberToStringStub stub(isolate());
-  __ TailCallStub(&stub);
-  __ bind(&not_heap_number);
-
-  Label not_oddball;
-  __ Branch(&not_oddball, ne, a1, Operand(ODDBALL_TYPE));
-  __ Ret(USE_DELAY_SLOT);
-  __ ld(v0, FieldMemOperand(a0, Oddball::kToStringOffset));
-  __ bind(&not_oddball);
-
-  __ push(a0);  // Push argument.
-  __ TailCallRuntime(Runtime::kToString);
-}
-
-
-void ToNameStub::Generate(MacroAssembler* masm) {
-  // The ToName stub takes on argument in a0.
-  Label is_number;
-  __ JumpIfSmi(a0, &is_number);
-
-  Label not_name;
-  STATIC_ASSERT(FIRST_NAME_TYPE == FIRST_TYPE);
-  __ GetObjectType(a0, a1, a1);
-  // a0: receiver
-  // a1: receiver instance type
-  __ Branch(&not_name, gt, a1, Operand(LAST_NAME_TYPE));
-  __ Ret(USE_DELAY_SLOT);
-  __ mov(v0, a0);
-  __ bind(&not_name);
-
-  Label not_heap_number;
-  __ Branch(&not_heap_number, ne, a1, Operand(HEAP_NUMBER_TYPE));
-  __ bind(&is_number);
-  NumberToStringStub stub(isolate());
-  __ TailCallStub(&stub);
-  __ bind(&not_heap_number);
-
-  Label not_oddball;
-  __ Branch(&not_oddball, ne, a1, Operand(ODDBALL_TYPE));
-  __ Ret(USE_DELAY_SLOT);
-  __ ld(v0, FieldMemOperand(a0, Oddball::kToStringOffset));
-  __ bind(&not_oddball);
-
-  __ push(a0);  // Push argument.
-  __ TailCallRuntime(Runtime::kToName);
-}
-
-
 void StringHelper::GenerateFlatOneByteStringEquals(
     MacroAssembler* masm, Register left, Register right, Register scratch1,
     Register scratch2, Register scratch3) {
@@ -3927,7 +3629,7 @@
 
   __ ld(receiver_map, MemOperand(pointer_reg, kPointerSize * 2));
   // Load the map into the correct register.
-  DCHECK(feedback.is(VectorStoreTransitionDescriptor::MapRegister()));
+  DCHECK(feedback.is(StoreTransitionDescriptor::MapRegister()));
   __ Move(feedback, too_far);
   __ Daddu(t9, receiver_map, Operand(Code::kHeaderSize - kHeapObjectTag));
   __ Jump(t9);
@@ -4638,7 +4340,7 @@
     Label too_big_for_new_space;
     __ bind(&allocate);
     __ Branch(&too_big_for_new_space, gt, a5,
-              Operand(Page::kMaxRegularHeapObjectSize));
+              Operand(kMaxRegularHeapObjectSize));
     {
       FrameScope scope(masm, StackFrame::INTERNAL);
       __ SmiTag(a0);
@@ -4993,8 +4695,7 @@
   // Fall back to %AllocateInNewSpace (if not too big).
   Label too_big_for_new_space;
   __ bind(&allocate);
-  __ Branch(&too_big_for_new_space, gt, a5,
-            Operand(Page::kMaxRegularHeapObjectSize));
+  __ Branch(&too_big_for_new_space, gt, a5, Operand(kMaxRegularHeapObjectSize));
   {
     FrameScope scope(masm, StackFrame::INTERNAL);
     __ SmiTag(a0);
diff --git a/src/mips64/constants-mips64.cc b/src/mips64/constants-mips64.cc
index c0e98eb..11ae242 100644
--- a/src/mips64/constants-mips64.cc
+++ b/src/mips64/constants-mips64.cc
@@ -121,118 +121,6 @@
   // No Cregister with the reguested name found.
   return kInvalidFPURegister;
 }
-
-
-// -----------------------------------------------------------------------------
-// Instructions.
-
-bool Instruction::IsForbiddenAfterBranchInstr(Instr instr) {
-  Opcode opcode = static_cast<Opcode>(instr & kOpcodeMask);
-  switch (opcode) {
-    case J:
-    case JAL:
-    case BEQ:
-    case BNE:
-    case BLEZ:  // POP06 bgeuc/bleuc, blezalc, bgezalc
-    case BGTZ:  // POP07 bltuc/bgtuc, bgtzalc, bltzalc
-    case BEQL:
-    case BNEL:
-    case BLEZL:  // POP26 bgezc, blezc, bgec/blec
-    case BGTZL:  // POP27 bgtzc, bltzc, bltc/bgtc
-    case BC:
-    case BALC:
-    case POP10:  // beqzalc, bovc, beqc
-    case POP30:  // bnezalc, bnvc, bnec
-    case POP66:  // beqzc, jic
-    case POP76:  // bnezc, jialc
-      return true;
-    case REGIMM:
-      switch (instr & kRtFieldMask) {
-        case BLTZ:
-        case BGEZ:
-        case BLTZAL:
-        case BGEZAL:
-          return true;
-        default:
-          return false;
-      }
-      break;
-    case SPECIAL:
-      switch (instr & kFunctionFieldMask) {
-        case JR:
-        case JALR:
-          return true;
-        default:
-          return false;
-      }
-      break;
-    case COP1:
-      switch (instr & kRsFieldMask) {
-        case BC1:
-        case BC1EQZ:
-        case BC1NEZ:
-          return true;
-          break;
-        default:
-          return false;
-      }
-      break;
-    default:
-      return false;
-  }
-}
-
-
-bool Instruction::IsLinkingInstruction() const {
-  switch (OpcodeFieldRaw()) {
-    case JAL:
-      return true;
-    case POP76:
-      if (RsFieldRawNoAssert() == JIALC)
-        return true;  // JIALC
-      else
-        return false;  // BNEZC
-    case REGIMM:
-      switch (RtFieldRaw()) {
-        case BGEZAL:
-        case BLTZAL:
-          return true;
-      default:
-        return false;
-      }
-    case SPECIAL:
-      switch (FunctionFieldRaw()) {
-        case JALR:
-          return true;
-        default:
-          return false;
-      }
-    default:
-      return false;
-  }
-}
-
-
-bool Instruction::IsTrap() const {
-  if (OpcodeFieldRaw() != SPECIAL) {
-    return false;
-  } else {
-    switch (FunctionFieldRaw()) {
-      case BREAK:
-      case TGE:
-      case TGEU:
-      case TLT:
-      case TLTU:
-      case TEQ:
-      case TNE:
-        return true;
-      default:
-        return false;
-    }
-  }
-}
-
-
 }  // namespace internal
 }  // namespace v8
 
diff --git a/src/mips64/constants-mips64.h b/src/mips64/constants-mips64.h
index d2b1e92..f96ea23 100644
--- a/src/mips64/constants-mips64.h
+++ b/src/mips64/constants-mips64.h
@@ -555,6 +555,8 @@
   FLOOR_W_S = ((1U << 3) + 7),
   RECIP_S = ((2U << 3) + 5),
   RSQRT_S = ((2U << 3) + 6),
+  MADDF_S = ((3U << 3) + 0),
+  MSUBF_S = ((3U << 3) + 1),
   CLASS_S = ((3U << 3) + 3),
   CVT_D_S = ((4U << 3) + 1),
   CVT_W_S = ((4U << 3) + 4),
@@ -579,6 +581,8 @@
   FLOOR_W_D = ((1U << 3) + 7),
   RECIP_D = ((2U << 3) + 5),
   RSQRT_D = ((2U << 3) + 6),
+  MADDF_D = ((3U << 3) + 0),
+  MSUBF_D = ((3U << 3) + 1),
   CLASS_D = ((3U << 3) + 3),
   MIN = ((3U << 3) + 4),
   MINA = ((3U << 3) + 5),
@@ -646,8 +650,12 @@
   SELNEZ_C = ((2U << 3) + 7),  // COP1 on FPR registers.
 
   // COP1 Encoding of Function Field When rs=PS.
+
   // COP1X Encoding of Function Field.
+  MADD_S = ((4U << 3) + 0),
   MADD_D = ((4U << 3) + 1),
+  MSUB_S = ((5U << 3) + 0),
+  MSUB_D = ((5U << 3) + 1),
 
   // PCREL Encoding of rt Field.
   ADDIUPC = ((0U << 2) + 0),
@@ -891,8 +899,7 @@
   return 1ULL << (static_cast<uint32_t>(opcode) >> kOpcodeShift);
 }
 
-
-class Instruction {
+class InstructionBase {
  public:
   enum {
     kInstrSize = 4,
@@ -902,6 +909,9 @@
     kPCReadOffset = 0
   };
 
+  // Instruction type.
+  enum Type { kRegisterType, kImmediateType, kJumpType, kUnsupported = -1 };
+
   // Get the raw instruction bits.
   inline Instr InstructionBits() const {
     return *reinterpret_cast<const Instr*>(this);
@@ -922,16 +932,6 @@
     return (InstructionBits() >> lo) & ((2U << (hi - lo)) - 1);
   }
 
-  // Instruction type.
-  enum Type {
-    kRegisterType,
-    kImmediateType,
-    kJumpType,
-    kUnsupported = -1
-  };
-
-  enum TypeChecks { NORMAL, EXTRA };
-
   static constexpr uint64_t kOpcodeImmediateTypeMask =
       OpcodeToBitNumber(REGIMM) | OpcodeToBitNumber(BEQ) |
       OpcodeToBitNumber(BNE) | OpcodeToBitNumber(BLEZ) |
@@ -988,9 +988,6 @@
       FunctionFieldToBitNumber(MOVCI) | FunctionFieldToBitNumber(SELEQZ_S) |
       FunctionFieldToBitNumber(SELNEZ_S) | FunctionFieldToBitNumber(SYNC);
 
-  // Get the encoding type of the instruction.
-  inline Type InstructionType(TypeChecks checks = NORMAL) const;
-
 
   // Accessors for the different named fields used in the MIPS encoding.
   inline Opcode OpcodeValue() const {
@@ -998,78 +995,8 @@
         Bits(kOpcodeShift + kOpcodeBits - 1, kOpcodeShift));
   }
 
-  inline int RsValue() const {
-    DCHECK(InstructionType() == kRegisterType ||
-           InstructionType() == kImmediateType);
-    return Bits(kRsShift + kRsBits - 1, kRsShift);
-  }
-
-  inline int RtValue() const {
-    DCHECK(InstructionType() == kRegisterType ||
-           InstructionType() == kImmediateType);
-    return Bits(kRtShift + kRtBits - 1, kRtShift);
-  }
-
-  inline int RdValue() const {
-    DCHECK(InstructionType() == kRegisterType);
-    return Bits(kRdShift + kRdBits - 1, kRdShift);
-  }
-
-  inline int SaValue() const {
-    DCHECK(InstructionType() == kRegisterType);
-    return Bits(kSaShift + kSaBits - 1, kSaShift);
-  }
-
-  inline int LsaSaValue() const {
-    DCHECK(InstructionType() == kRegisterType);
-    return Bits(kSaShift + kLsaSaBits - 1, kSaShift);
-  }
-
-  inline int FunctionValue() const {
-    DCHECK(InstructionType() == kRegisterType ||
-           InstructionType() == kImmediateType);
-    return Bits(kFunctionShift + kFunctionBits - 1, kFunctionShift);
-  }
-
-  inline int FdValue() const {
-    return Bits(kFdShift + kFdBits - 1, kFdShift);
-  }
-
-  inline int FsValue() const {
-    return Bits(kFsShift + kFsBits - 1, kFsShift);
-  }
-
-  inline int FtValue() const {
-    return Bits(kFtShift + kFtBits - 1, kFtShift);
-  }
-
-  inline int FrValue() const {
-    return Bits(kFrShift + kFrBits -1, kFrShift);
-  }
-
-  inline int Bp2Value() const {
-    DCHECK(InstructionType() == kRegisterType);
-    return Bits(kBp2Shift + kBp2Bits - 1, kBp2Shift);
-  }
-
-  inline int Bp3Value() const {
-    DCHECK(InstructionType() == kRegisterType);
-    return Bits(kBp3Shift + kBp3Bits - 1, kBp3Shift);
-  }
-
-  // Float Compare condition code instruction bits.
-  inline int FCccValue() const {
-    return Bits(kFCccShift + kFCccBits - 1, kFCccShift);
-  }
-
-  // Float Branch condition code instruction bits.
-  inline int FBccValue() const {
-    return Bits(kFBccShift + kFBccBits - 1, kFBccShift);
-  }
-
-  // Float Branch true/false instruction bit.
-  inline int FBtrueValue() const {
-    return Bits(kFBtrueShift + kFBtrueBits - 1, kFBtrueShift);
+  inline int FunctionFieldRaw() const {
+    return InstructionBits() & kFunctionFieldMask;
   }
 
   // Return the fields at their original place in the instruction encoding.
@@ -1077,39 +1004,135 @@
     return static_cast<Opcode>(InstructionBits() & kOpcodeMask);
   }
 
-  inline int RsFieldRaw() const {
-    DCHECK(InstructionType() == kRegisterType ||
-           InstructionType() == kImmediateType);
-    return InstructionBits() & kRsFieldMask;
-  }
-
-  // Same as above function, but safe to call within InstructionType().
+  // Safe to call within InstructionType().
   inline int RsFieldRawNoAssert() const {
     return InstructionBits() & kRsFieldMask;
   }
 
+  inline int SaFieldRaw() const { return InstructionBits() & kSaFieldMask; }
+
+  // Get the encoding type of the instruction.
+  inline Type InstructionType() const;
+
+ protected:
+  InstructionBase() {}
+};
+
+template <class T>
+class InstructionGetters : public T {
+ public:
+  inline int RsValue() const {
+    DCHECK(this->InstructionType() == InstructionBase::kRegisterType ||
+           this->InstructionType() == InstructionBase::kImmediateType);
+    return this->Bits(kRsShift + kRsBits - 1, kRsShift);
+  }
+
+  inline int RtValue() const {
+    DCHECK(this->InstructionType() == InstructionBase::kRegisterType ||
+           this->InstructionType() == InstructionBase::kImmediateType);
+    return this->Bits(kRtShift + kRtBits - 1, kRtShift);
+  }
+
+  inline int RdValue() const {
+    DCHECK(this->InstructionType() == InstructionBase::kRegisterType);
+    return this->Bits(kRdShift + kRdBits - 1, kRdShift);
+  }
+
+  inline int SaValue() const {
+    DCHECK(this->InstructionType() == InstructionBase::kRegisterType);
+    return this->Bits(kSaShift + kSaBits - 1, kSaShift);
+  }
+
+  inline int LsaSaValue() const {
+    DCHECK(this->InstructionType() == InstructionBase::kRegisterType);
+    return this->Bits(kSaShift + kLsaSaBits - 1, kSaShift);
+  }
+
+  inline int FunctionValue() const {
+    DCHECK(this->InstructionType() == InstructionBase::kRegisterType ||
+           this->InstructionType() == InstructionBase::kImmediateType);
+    return this->Bits(kFunctionShift + kFunctionBits - 1, kFunctionShift);
+  }
+
+  inline int FdValue() const {
+    return this->Bits(kFdShift + kFdBits - 1, kFdShift);
+  }
+
+  inline int FsValue() const {
+    return this->Bits(kFsShift + kFsBits - 1, kFsShift);
+  }
+
+  inline int FtValue() const {
+    return this->Bits(kFtShift + kFtBits - 1, kFtShift);
+  }
+
+  inline int FrValue() const {
+    return this->Bits(kFrShift + kFrBits - 1, kFrShift);
+  }
+
+  inline int Bp2Value() const {
+    DCHECK(this->InstructionType() == InstructionBase::kRegisterType);
+    return this->Bits(kBp2Shift + kBp2Bits - 1, kBp2Shift);
+  }
+
+  inline int Bp3Value() const {
+    DCHECK(this->InstructionType() == InstructionBase::kRegisterType);
+    return this->Bits(kBp3Shift + kBp3Bits - 1, kBp3Shift);
+  }
+
+  // Float Compare condition code instruction bits.
+  inline int FCccValue() const {
+    return this->Bits(kFCccShift + kFCccBits - 1, kFCccShift);
+  }
+
+  // Float Branch condition code instruction bits.
+  inline int FBccValue() const {
+    return this->Bits(kFBccShift + kFBccBits - 1, kFBccShift);
+  }
+
+  // Float Branch true/false instruction bit.
+  inline int FBtrueValue() const {
+    return this->Bits(kFBtrueShift + kFBtrueBits - 1, kFBtrueShift);
+  }
+
+  // Return the fields at their original place in the instruction encoding.
+  inline Opcode OpcodeFieldRaw() const {
+    return static_cast<Opcode>(this->InstructionBits() & kOpcodeMask);
+  }
+
+  inline int RsFieldRaw() const {
+    DCHECK(this->InstructionType() == InstructionBase::kRegisterType ||
+           this->InstructionType() == InstructionBase::kImmediateType);
+    return this->InstructionBits() & kRsFieldMask;
+  }
+
+  // Same as above function, but safe to call within InstructionType().
+  inline int RsFieldRawNoAssert() const {
+    return this->InstructionBits() & kRsFieldMask;
+  }
+
   inline int RtFieldRaw() const {
-    DCHECK(InstructionType() == kRegisterType ||
-           InstructionType() == kImmediateType);
-    return InstructionBits() & kRtFieldMask;
+    DCHECK(this->InstructionType() == InstructionBase::kRegisterType ||
+           this->InstructionType() == InstructionBase::kImmediateType);
+    return this->InstructionBits() & kRtFieldMask;
   }
 
   inline int RdFieldRaw() const {
-    DCHECK(InstructionType() == kRegisterType);
-    return InstructionBits() & kRdFieldMask;
+    DCHECK(this->InstructionType() == InstructionBase::kRegisterType);
+    return this->InstructionBits() & kRdFieldMask;
   }
 
   inline int SaFieldRaw() const {
-    return InstructionBits() & kSaFieldMask;
+    return this->InstructionBits() & kSaFieldMask;
   }
 
   inline int FunctionFieldRaw() const {
-    return InstructionBits() & kFunctionFieldMask;
+    return this->InstructionBits() & kFunctionFieldMask;
   }
 
   // Get the secondary field according to the opcode.
   inline int SecondaryValue() const {
-    Opcode op = OpcodeFieldRaw();
+    Opcode op = this->OpcodeFieldRaw();
     switch (op) {
       case SPECIAL:
       case SPECIAL2:
@@ -1124,34 +1147,34 @@
   }
 
   inline int32_t ImmValue(int bits) const {
-    DCHECK(InstructionType() == kImmediateType);
-    return Bits(bits - 1, 0);
+    DCHECK(this->InstructionType() == InstructionBase::kImmediateType);
+    return this->Bits(bits - 1, 0);
   }
 
   inline int32_t Imm16Value() const {
-    DCHECK(InstructionType() == kImmediateType);
-    return Bits(kImm16Shift + kImm16Bits - 1, kImm16Shift);
+    DCHECK(this->InstructionType() == InstructionBase::kImmediateType);
+    return this->Bits(kImm16Shift + kImm16Bits - 1, kImm16Shift);
   }
 
   inline int32_t Imm18Value() const {
-    DCHECK(InstructionType() == kImmediateType);
-    return Bits(kImm18Shift + kImm18Bits - 1, kImm18Shift);
+    DCHECK(this->InstructionType() == InstructionBase::kImmediateType);
+    return this->Bits(kImm18Shift + kImm18Bits - 1, kImm18Shift);
   }
 
   inline int32_t Imm19Value() const {
-    DCHECK(InstructionType() == kImmediateType);
-    return Bits(kImm19Shift + kImm19Bits - 1, kImm19Shift);
+    DCHECK(this->InstructionType() == InstructionBase::kImmediateType);
+    return this->Bits(kImm19Shift + kImm19Bits - 1, kImm19Shift);
   }
 
   inline int32_t Imm21Value() const {
-    DCHECK(InstructionType() == kImmediateType);
-    return Bits(kImm21Shift + kImm21Bits - 1, kImm21Shift);
+    DCHECK(this->InstructionType() == InstructionBase::kImmediateType);
+    return this->Bits(kImm21Shift + kImm21Bits - 1, kImm21Shift);
   }
 
   inline int32_t Imm26Value() const {
-    DCHECK((InstructionType() == kJumpType) ||
-           (InstructionType() == kImmediateType));
-    return Bits(kImm26Shift + kImm26Bits - 1, kImm26Shift);
+    DCHECK((this->InstructionType() == InstructionBase::kJumpType) ||
+           (this->InstructionType() == InstructionBase::kImmediateType));
+    return this->Bits(kImm26Shift + kImm26Bits - 1, kImm26Shift);
   }
 
   static bool IsForbiddenAfterBranchInstr(Instr instr);
@@ -1159,14 +1182,21 @@
   // Say if the instruction should not be used in a branch delay slot or
   // immediately after a compact branch.
   inline bool IsForbiddenAfterBranch() const {
-    return IsForbiddenAfterBranchInstr(InstructionBits());
+    return IsForbiddenAfterBranchInstr(this->InstructionBits());
+  }
+
+  inline bool IsForbiddenInBranchDelay() const {
+    return IsForbiddenAfterBranch();
   }
 
   // Say if the instruction 'links'. e.g. jal, bal.
   bool IsLinkingInstruction() const;
   // Say if the instruction is a break or a trap.
   bool IsTrap() const;
+};
 
+class Instruction : public InstructionGetters<InstructionBase> {
+ public:
   // Instructions are read of out a code stream. The only way to get a
   // reference to an instruction is to convert a pointer. There is no way
   // to allocate or create instances of class Instruction.
@@ -1194,26 +1224,14 @@
 const int kInvalidStackOffset = -1;
 const int kBranchReturnOffset = 2 * Instruction::kInstrSize;
 
-
-Instruction::Type Instruction::InstructionType(TypeChecks checks) const {
-  if (checks == EXTRA) {
-    if (OpcodeToBitNumber(OpcodeFieldRaw()) & kOpcodeImmediateTypeMask) {
-      return kImmediateType;
-    }
-  }
+InstructionBase::Type InstructionBase::InstructionType() const {
   switch (OpcodeFieldRaw()) {
     case SPECIAL:
-      if (checks == EXTRA) {
-        if (FunctionFieldToBitNumber(FunctionFieldRaw()) &
-            kFunctionFieldRegisterTypeMask) {
-          return kRegisterType;
-        } else {
-          return kUnsupported;
-        }
-      } else {
+      if (FunctionFieldToBitNumber(FunctionFieldRaw()) &
+          kFunctionFieldRegisterTypeMask) {
         return kRegisterType;
       }
-      break;
+      return kUnsupported;
     case SPECIAL2:
       switch (FunctionFieldRaw()) {
         case MUL:
@@ -1290,17 +1308,123 @@
       return kJumpType;
 
     default:
-      if (checks == NORMAL) {
-        return kImmediateType;
-      } else {
-        return kUnsupported;
-      }
+      return kImmediateType;
   }
   return kUnsupported;
 }
-
 #undef OpcodeToBitNumber
 #undef FunctionFieldToBitNumber
+
+// -----------------------------------------------------------------------------
+// Instructions.
+
+template <class P>
+bool InstructionGetters<P>::IsLinkingInstruction() const {
+  switch (OpcodeFieldRaw()) {
+    case JAL:
+      return true;
+    case POP76:
+      if (RsFieldRawNoAssert() == JIALC)
+        return true;  // JIALC
+      else
+        return false;  // BNEZC
+    case REGIMM:
+      switch (RtFieldRaw()) {
+        case BGEZAL:
+        case BLTZAL:
+          return true;
+        default:
+          return false;
+      }
+    case SPECIAL:
+      switch (FunctionFieldRaw()) {
+        case JALR:
+          return true;
+        default:
+          return false;
+      }
+    default:
+      return false;
+  }
+}
+
+template <class P>
+bool InstructionGetters<P>::IsTrap() const {
+  if (OpcodeFieldRaw() != SPECIAL) {
+    return false;
+  } else {
+    switch (FunctionFieldRaw()) {
+      case BREAK:
+      case TGE:
+      case TGEU:
+      case TLT:
+      case TLTU:
+      case TEQ:
+      case TNE:
+        return true;
+      default:
+        return false;
+    }
+  }
+}
+
+// static
+template <class T>
+bool InstructionGetters<T>::IsForbiddenAfterBranchInstr(Instr instr) {
+  Opcode opcode = static_cast<Opcode>(instr & kOpcodeMask);
+  switch (opcode) {
+    case J:
+    case JAL:
+    case BEQ:
+    case BNE:
+    case BLEZ:  // POP06 bgeuc/bleuc, blezalc, bgezalc
+    case BGTZ:  // POP07 bltuc/bgtuc, bgtzalc, bltzalc
+    case BEQL:
+    case BNEL:
+    case BLEZL:  // POP26 bgezc, blezc, bgec/blec
+    case BGTZL:  // POP27 bgtzc, bltzc, bltc/bgtc
+    case BC:
+    case BALC:
+    case POP10:  // beqzalc, bovc, beqc
+    case POP30:  // bnezalc, bnvc, bnec
+    case POP66:  // beqzc, jic
+    case POP76:  // bnezc, jialc
+      return true;
+    case REGIMM:
+      switch (instr & kRtFieldMask) {
+        case BLTZ:
+        case BGEZ:
+        case BLTZAL:
+        case BGEZAL:
+          return true;
+        default:
+          return false;
+      }
+      break;
+    case SPECIAL:
+      switch (instr & kFunctionFieldMask) {
+        case JR:
+        case JALR:
+          return true;
+        default:
+          return false;
+      }
+      break;
+    case COP1:
+      switch (instr & kRsFieldMask) {
+        case BC1:
+        case BC1EQZ:
+        case BC1NEZ:
+          return true;
+          break;
+        default:
+          return false;
+      }
+      break;
+    default:
+      return false;
+  }
+}
 }  // namespace internal
 }  // namespace v8
 
diff --git a/src/mips64/disasm-mips64.cc b/src/mips64/disasm-mips64.cc
index 5485f3e..d73f22a 100644
--- a/src/mips64/disasm-mips64.cc
+++ b/src/mips64/disasm-mips64.cc
@@ -959,6 +959,12 @@
       case CVT_D_S:
         Format(instr, "cvt.d.'t 'fd, 'fs");
         break;
+      case MADDF_S:
+        Format(instr, "maddf.s  'fd, 'fs, 'ft");
+        break;
+      case MSUBF_S:
+        Format(instr, "msubf.s  'fd, 'fs, 'ft");
+        break;
       default:
         Format(instr, "unknown.cop1.'t");
         break;
@@ -969,7 +975,17 @@
 
 void Decoder::DecodeTypeRegisterDRsType(Instruction* instr) {
   if (!DecodeTypeRegisterRsType(instr)) {
-    Format(instr, "unknown.cop1.'t");
+    switch (instr->FunctionFieldRaw()) {
+      case MADDF_D:
+        Format(instr, "maddf.d  'fd, 'fs, 'ft");
+        break;
+      case MSUBF_D:
+        Format(instr, "msubf.d  'fd, 'fs, 'ft");
+        break;
+      default:
+        Format(instr, "unknown.cop1.'t");
+        break;
+    }
   }
 }
 
@@ -1115,9 +1131,18 @@
 
 void Decoder::DecodeTypeRegisterCOP1X(Instruction* instr) {
   switch (instr->FunctionFieldRaw()) {
+    case MADD_S:
+      Format(instr, "madd.s  'fd, 'fr, 'fs, 'ft");
+      break;
     case MADD_D:
       Format(instr, "madd.d  'fd, 'fr, 'fs, 'ft");
       break;
+    case MSUB_S:
+      Format(instr, "msub.s  'fd, 'fr, 'fs, 'ft");
+      break;
+    case MSUB_D:
+      Format(instr, "msub.d  'fd, 'fr, 'fs, 'ft");
+      break;
     default:
       UNREACHABLE();
   }
@@ -1483,6 +1508,10 @@
       }
       break;
     }
+    case DINS: {
+      Format(instr, "dins    'rt, 'rs, 'sa, 'ss2");
+      break;
+    }
     case DBSHFL: {
       int sa = instr->SaFieldRaw() >> kSaShift;
       switch (sa) {
@@ -1917,7 +1946,7 @@
   out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
                               "%08x       ",
                               instr->InstructionBits());
-  switch (instr->InstructionType(Instruction::TypeChecks::EXTRA)) {
+  switch (instr->InstructionType()) {
     case Instruction::kRegisterType: {
       return DecodeTypeRegister(instr);
     }
diff --git a/src/mips64/interface-descriptors-mips64.cc b/src/mips64/interface-descriptors-mips64.cc
index 77c71aa..e5b9c2e 100644
--- a/src/mips64/interface-descriptors-mips64.cc
+++ b/src/mips64/interface-descriptors-mips64.cc
@@ -40,13 +40,9 @@
 
 const Register StoreWithVectorDescriptor::VectorRegister() { return a3; }
 
-const Register VectorStoreTransitionDescriptor::SlotRegister() { return a4; }
-const Register VectorStoreTransitionDescriptor::VectorRegister() { return a3; }
-const Register VectorStoreTransitionDescriptor::MapRegister() { return a5; }
-
-
-const Register StoreTransitionDescriptor::MapRegister() { return a3; }
-
+const Register StoreTransitionDescriptor::SlotRegister() { return a4; }
+const Register StoreTransitionDescriptor::VectorRegister() { return a3; }
+const Register StoreTransitionDescriptor::MapRegister() { return a5; }
 
 const Register StoreGlobalViaContextDescriptor::SlotRegister() { return a2; }
 const Register StoreGlobalViaContextDescriptor::ValueRegister() { return a0; }
@@ -356,7 +352,7 @@
   data->InitializePlatformSpecific(arraysize(registers), registers);
 }
 
-void ApiCallbackDescriptorBase::InitializePlatformSpecific(
+void ApiCallbackDescriptor::InitializePlatformSpecific(
     CallInterfaceDescriptorData* data) {
   Register registers[] = {
       a0,  // callee
@@ -391,7 +387,19 @@
       a0,  // argument count (not including receiver)
       a3,  // new target
       a1,  // constructor to call
-      a2   // address of the first argument
+      a2,  // allocation site feedback if available, undefined otherwise.
+      a4   // address of the first argument
+  };
+  data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
+void InterpreterPushArgsAndConstructArrayDescriptor::InitializePlatformSpecific(
+    CallInterfaceDescriptorData* data) {
+  Register registers[] = {
+      a0,  // argument count (not including receiver)
+      a1,  // the target to call verified to be Array function
+      a2,  // allocation site feedback
+      a3,  // address of first argument
   };
   data->InitializePlatformSpecific(arraysize(registers), registers);
 }
diff --git a/src/mips64/macro-assembler-mips64.cc b/src/mips64/macro-assembler-mips64.cc
index aa0de26..dd12f9b 100644
--- a/src/mips64/macro-assembler-mips64.cc
+++ b/src/mips64/macro-assembler-mips64.cc
@@ -200,9 +200,7 @@
                                 Condition cc,
                                 Label* branch) {
   DCHECK(cc == eq || cc == ne);
-  const int mask =
-      1 << MemoryChunk::IN_FROM_SPACE | 1 << MemoryChunk::IN_TO_SPACE;
-  CheckPageFlag(object, scratch, mask, cc, branch);
+  CheckPageFlag(object, scratch, MemoryChunk::kIsInNewSpaceMask, cc, branch);
 }
 
 
@@ -1260,8 +1258,13 @@
   if (rt.is_reg()) {
     sltu(rd, rs, rt.rm());
   } else {
-    if (is_int16(rt.imm64_) && !MustUseReg(rt.rmode_)) {
+    const uint64_t int16_min = std::numeric_limits<int16_t>::min();
+    if (is_uint15(rt.imm64_) && !MustUseReg(rt.rmode_)) {
+      // Imm range is: [0, 32767].
       sltiu(rd, rs, static_cast<int32_t>(rt.imm64_));
+    } else if (is_uint15(rt.imm64_ - int16_min) && !MustUseReg(rt.rmode_)) {
+      // Imm range is: [max_unsigned-32767,max_unsigned].
+      sltiu(rd, rs, static_cast<uint16_t>(rt.imm64_));
     } else {
       // li handles the relocation.
       DCHECK(!rs.is(at));
@@ -1960,10 +1963,14 @@
 }
 
 void MacroAssembler::Neg_s(FPURegister fd, FPURegister fs) {
-  Register scratch1 = t8;
-  Register scratch2 = t9;
-  if (kArchVariant == kMips64r2) {
+  if (kArchVariant == kMips64r6) {
+    // r6 neg_s changes the sign for NaN-like operands as well.
+    neg_s(fd, fs);
+  } else {
+    DCHECK(kArchVariant == kMips64r2);
     Label is_nan, done;
+    Register scratch1 = t8;
+    Register scratch2 = t9;
     BranchF32(nullptr, &is_nan, eq, fs, fs);
     Branch(USE_DELAY_SLOT, &done);
     // For NaN input, neg_s will return the same NaN value,
@@ -1977,21 +1984,18 @@
     Or(scratch2, scratch2, scratch1);
     mtc1(scratch2, fd);
     bind(&done);
-  } else {
-    mfc1(scratch1, fs);
-    And(scratch2, scratch1, Operand(~kBinary32SignMask));
-    And(scratch1, scratch1, Operand(kBinary32SignMask));
-    Xor(scratch1, scratch1, Operand(kBinary32SignMask));
-    Or(scratch2, scratch2, scratch1);
-    mtc1(scratch2, fd);
   }
 }
 
 void MacroAssembler::Neg_d(FPURegister fd, FPURegister fs) {
-  Register scratch1 = t8;
-  Register scratch2 = t9;
-  if (kArchVariant == kMips64r2) {
+  if (kArchVariant == kMips64r6) {
+    // r6 neg_d changes the sign for NaN-like operands as well.
+    neg_d(fd, fs);
+  } else {
+    DCHECK(kArchVariant == kMips64r2);
     Label is_nan, done;
+    Register scratch1 = t8;
+    Register scratch2 = t9;
     BranchF64(nullptr, &is_nan, eq, fs, fs);
     Branch(USE_DELAY_SLOT, &done);
     // For NaN input, neg_d will return the same NaN value,
@@ -2005,13 +2009,6 @@
     Or(scratch2, scratch2, scratch1);
     dmtc1(scratch2, fd);
     bind(&done);
-  } else {
-    dmfc1(scratch1, fs);
-    And(scratch2, scratch1, Operand(~Double::kSignMask));
-    And(scratch1, scratch1, Operand(Double::kSignMask));
-    Xor(scratch1, scratch1, Operand(Double::kSignMask));
-    Or(scratch2, scratch2, scratch1);
-    dmtc1(scratch2, fd);
   }
 }
 
@@ -2387,7 +2384,8 @@
   DCHECK(nan || target);
   // Check for unordered (NaN) cases.
   if (nan) {
-    bool long_branch = nan->is_bound() ? is_near(nan) : is_trampoline_emitted();
+    bool long_branch =
+        nan->is_bound() ? !is_near(nan) : is_trampoline_emitted();
     if (kArchVariant != kMips64r6) {
       if (long_branch) {
         Label skip;
@@ -2427,7 +2425,7 @@
 
   if (target) {
     bool long_branch =
-        target->is_bound() ? is_near(target) : is_trampoline_emitted();
+        target->is_bound() ? !is_near(target) : is_trampoline_emitted();
     if (long_branch) {
       Label skip;
       Condition neg_cond = NegateFpuCondition(cond);
@@ -4379,7 +4377,7 @@
                               Register scratch2,
                               Label* gc_required,
                               AllocationFlags flags) {
-  DCHECK(object_size <= Page::kMaxRegularHeapObjectSize);
+  DCHECK(object_size <= kMaxRegularHeapObjectSize);
   if (!FLAG_inline_new) {
     if (emit_debug_code()) {
       // Trash the registers to simulate an allocation failure.
@@ -4543,7 +4541,7 @@
 void MacroAssembler::FastAllocate(int object_size, Register result,
                                   Register scratch1, Register scratch2,
                                   AllocationFlags flags) {
-  DCHECK(object_size <= Page::kMaxRegularHeapObjectSize);
+  DCHECK(object_size <= kMaxRegularHeapObjectSize);
   DCHECK(!AreAliased(result, scratch1, scratch2, at));
 
   // Make object size into bytes.
diff --git a/src/mips64/macro-assembler-mips64.h b/src/mips64/macro-assembler-mips64.h
index c96525c..4f67d70 100644
--- a/src/mips64/macro-assembler-mips64.h
+++ b/src/mips64/macro-assembler-mips64.h
@@ -243,6 +243,18 @@
                            Func GetLabelFunction);
 #undef COND_ARGS
 
+  // Emit code that loads |parameter_index|'th parameter from the stack to
+  // the register according to the CallInterfaceDescriptor definition.
+  // |sp_to_caller_sp_offset_in_words| specifies the number of words pushed
+  // below the caller's sp.
+  template <class Descriptor>
+  void LoadParameterFromStack(
+      Register reg, typename Descriptor::ParameterIndices parameter_index,
+      int sp_to_ra_offset_in_words = 0) {
+    DCHECK(Descriptor::kPassLastArgsOnStack);
+    UNIMPLEMENTED();
+  }
+
   // Emit code to discard a non-negative number of pointer-sized elements
   // from the stack, clobbering only the sp register.
   void Drop(int count,
diff --git a/src/mips64/simulator-mips64.cc b/src/mips64/simulator-mips64.cc
index 780c90c..02387d0 100644
--- a/src/mips64/simulator-mips64.cc
+++ b/src/mips64/simulator-mips64.cc
@@ -96,7 +96,7 @@
   void RedoBreakpoints();
 };
 
-#define UNSUPPORTED() printf("Sim: Unsupported instruction.\n");
+inline void UNSUPPORTED() { printf("Sim: Unsupported instruction.\n"); }
 
 void MipsDebugger::Stop(Instruction* instr) {
   // Get the stop code.
@@ -741,8 +741,8 @@
   last_debugger_input_ = input;
 }
 
-void Simulator::FlushICache(base::HashMap* i_cache, void* start_addr,
-                            size_t size) {
+void Simulator::FlushICache(base::CustomMatcherHashMap* i_cache,
+                            void* start_addr, size_t size) {
   int64_t start = reinterpret_cast<int64_t>(start_addr);
   int64_t intra_line = (start & CachePage::kLineMask);
   start -= intra_line;
@@ -762,7 +762,8 @@
   }
 }
 
-CachePage* Simulator::GetCachePage(base::HashMap* i_cache, void* page) {
+CachePage* Simulator::GetCachePage(base::CustomMatcherHashMap* i_cache,
+                                   void* page) {
   base::HashMap::Entry* entry = i_cache->LookupOrInsert(page, ICacheHash(page));
   if (entry->value == NULL) {
     CachePage* new_page = new CachePage();
@@ -773,8 +774,8 @@
 
 
 // Flush from start up to and not including start + size.
-void Simulator::FlushOnePage(base::HashMap* i_cache, intptr_t start,
-                             size_t size) {
+void Simulator::FlushOnePage(base::CustomMatcherHashMap* i_cache,
+                             intptr_t start, size_t size) {
   DCHECK(size <= CachePage::kPageSize);
   DCHECK(AllOnOnePage(start, size - 1));
   DCHECK((start & CachePage::kLineMask) == 0);
@@ -786,7 +787,8 @@
   memset(valid_bytemap, CachePage::LINE_INVALID, size >> CachePage::kLineShift);
 }
 
-void Simulator::CheckICache(base::HashMap* i_cache, Instruction* instr) {
+void Simulator::CheckICache(base::CustomMatcherHashMap* i_cache,
+                            Instruction* instr) {
   int64_t address = reinterpret_cast<int64_t>(instr);
   void* page = reinterpret_cast<void*>(address & (~CachePage::kPageMask));
   void* line = reinterpret_cast<void*>(address & (~CachePage::kLineMask));
@@ -819,7 +821,7 @@
 Simulator::Simulator(Isolate* isolate) : isolate_(isolate) {
   i_cache_ = isolate_->simulator_i_cache();
   if (i_cache_ == NULL) {
-    i_cache_ = new base::HashMap(&ICacheMatch);
+    i_cache_ = new base::CustomMatcherHashMap(&ICacheMatch);
     isolate_->set_simulator_i_cache(i_cache_);
   }
   Initialize(isolate);
@@ -933,7 +935,8 @@
 
 
 // static
-void Simulator::TearDown(base::HashMap* i_cache, Redirection* first) {
+void Simulator::TearDown(base::CustomMatcherHashMap* i_cache,
+                         Redirection* first) {
   Redirection::DeleteChain(first);
   if (i_cache != nullptr) {
     for (base::HashMap::Entry* entry = i_cache->Start(); entry != nullptr;
@@ -1935,15 +1938,15 @@
 
 // Software interrupt instructions are used by the simulator to call into the
 // C-based V8 runtime. They are also used for debugging with simulator.
-void Simulator::SoftwareInterrupt(Instruction* instr) {
+void Simulator::SoftwareInterrupt() {
   // There are several instructions that could get us here,
   // the break_ instruction, or several variants of traps. All
   // Are "SPECIAL" class opcode, and are distinuished by function.
-  int32_t func = instr->FunctionFieldRaw();
-  uint32_t code = (func == BREAK) ? instr->Bits(25, 6) : -1;
+  int32_t func = instr_.FunctionFieldRaw();
+  uint32_t code = (func == BREAK) ? instr_.Bits(25, 6) : -1;
   // We first check if we met a call_rt_redirected.
-  if (instr->InstructionBits() == rtCallRedirInstr) {
-    Redirection* redirection = Redirection::FromSwiInstruction(instr);
+  if (instr_.InstructionBits() == rtCallRedirInstr) {
+    Redirection* redirection = Redirection::FromSwiInstruction(instr_.instr());
     int64_t arg0 = get_register(a0);
     int64_t arg1 = get_register(a1);
     int64_t arg2 = get_register(a2);
@@ -2169,7 +2172,7 @@
       PrintWatchpoint(code);
     } else {
       IncreaseStopCounter(code);
-      HandleStop(code, instr);
+      HandleStop(code, instr_.instr());
     }
   } else {
     // All remaining break_ codes, and all traps are handled here.
@@ -2364,6 +2367,49 @@
   return result;
 }
 
+enum class KeepSign : bool { no = false, yes };
+
+template <typename T, typename std::enable_if<std::is_floating_point<T>::value,
+                                              int>::type = 0>
+T FPUCanonalizeNaNArg(T result, T arg, KeepSign keepSign = KeepSign::no) {
+  DCHECK(std::isnan(arg));
+  T qNaN = std::numeric_limits<T>::quiet_NaN();
+  if (keepSign == KeepSign::yes) {
+    return std::copysign(qNaN, result);
+  }
+  return qNaN;
+}
+
+template <typename T>
+T FPUCanonalizeNaNArgs(T result, KeepSign keepSign, T first) {
+  if (std::isnan(first)) {
+    return FPUCanonalizeNaNArg(result, first, keepSign);
+  }
+  return result;
+}
+
+template <typename T, typename... Args>
+T FPUCanonalizeNaNArgs(T result, KeepSign keepSign, T first, Args... args) {
+  if (std::isnan(first)) {
+    return FPUCanonalizeNaNArg(result, first, keepSign);
+  }
+  return FPUCanonalizeNaNArgs(result, keepSign, args...);
+}
+
+template <typename Func, typename T, typename... Args>
+T FPUCanonalizeOperation(Func f, T first, Args... args) {
+  return FPUCanonalizeOperation(f, KeepSign::no, first, args...);
+}
+
+template <typename Func, typename T, typename... Args>
+T FPUCanonalizeOperation(Func f, KeepSign keepSign, T first, Args... args) {
+  T result = f(first, args...);
+  if (std::isnan(result)) {
+    result = FPUCanonalizeNaNArgs(result, keepSign, first, args...);
+  }
+  return result;
+}
+
 // Handle execution based on instruction types.
 
 void Simulator::DecodeTypeRegisterSRsType() {
@@ -2374,9 +2420,9 @@
   int32_t ft_int = bit_cast<int32_t>(ft);
   int32_t fd_int = bit_cast<int32_t>(fd);
   uint32_t cc, fcsr_cc;
-  cc = get_instr()->FCccValue();
+  cc = instr_.FCccValue();
   fcsr_cc = get_fcsr_condition_bit(cc);
-  switch (get_instr()->FunctionFieldRaw()) {
+  switch (instr_.FunctionFieldRaw()) {
     case RINT: {
       DCHECK(kArchVariant == kMips64r6);
       float result, temp_result;
@@ -2416,41 +2462,65 @@
       break;
     }
     case ADD_S:
-      set_fpu_register_float(fd_reg(), fs + ft);
+      set_fpu_register_float(
+          fd_reg(),
+          FPUCanonalizeOperation([](float lhs, float rhs) { return lhs + rhs; },
+                                 fs, ft));
       break;
     case SUB_S:
-      set_fpu_register_float(fd_reg(), fs - ft);
+      set_fpu_register_float(
+          fd_reg(),
+          FPUCanonalizeOperation([](float lhs, float rhs) { return lhs - rhs; },
+                                 fs, ft));
+      break;
+    case MADDF_S:
+      DCHECK(kArchVariant == kMips64r6);
+      set_fpu_register_float(fd_reg(), fd + (fs * ft));
+      break;
+    case MSUBF_S:
+      DCHECK(kArchVariant == kMips64r6);
+      set_fpu_register_float(fd_reg(), fd - (fs * ft));
       break;
     case MUL_S:
-      set_fpu_register_float(fd_reg(), fs * ft);
+      set_fpu_register_float(
+          fd_reg(),
+          FPUCanonalizeOperation([](float lhs, float rhs) { return lhs * rhs; },
+                                 fs, ft));
       break;
     case DIV_S:
-      set_fpu_register_float(fd_reg(), fs / ft);
+      set_fpu_register_float(
+          fd_reg(),
+          FPUCanonalizeOperation([](float lhs, float rhs) { return lhs / rhs; },
+                                 fs, ft));
       break;
     case ABS_S:
-      set_fpu_register_float(fd_reg(), fabs(fs));
+      set_fpu_register_float(
+          fd_reg(),
+          FPUCanonalizeOperation([](float fs) { return FPAbs(fs); }, fs));
       break;
     case MOV_S:
       set_fpu_register_float(fd_reg(), fs);
       break;
     case NEG_S:
-      set_fpu_register_float(fd_reg(), -fs);
+      set_fpu_register_float(
+          fd_reg(), FPUCanonalizeOperation([](float src) { return -src; },
+                                           KeepSign::yes, fs));
       break;
     case SQRT_S:
-      lazily_initialize_fast_sqrt(isolate_);
-      set_fpu_register_float(fd_reg(), fast_sqrt(fs, isolate_));
+      set_fpu_register_float(
+          fd_reg(),
+          FPUCanonalizeOperation([](float src) { return std::sqrt(src); }, fs));
       break;
-    case RSQRT_S: {
-      lazily_initialize_fast_sqrt(isolate_);
-      float result = 1.0 / fast_sqrt(fs, isolate_);
-      set_fpu_register_float(fd_reg(), result);
+    case RSQRT_S:
+      set_fpu_register_float(
+          fd_reg(), FPUCanonalizeOperation(
+                        [](float src) { return 1.0 / std::sqrt(src); }, fs));
       break;
-    }
-    case RECIP_S: {
-      float result = 1.0 / fs;
-      set_fpu_register_float(fd_reg(), result);
+    case RECIP_S:
+      set_fpu_register_float(
+          fd_reg(),
+          FPUCanonalizeOperation([](float src) { return 1.0 / src; }, fs));
       break;
-    }
     case C_F_D:
       set_fcsr_bit(fcsr_cc, false);
       break;
@@ -2696,7 +2766,7 @@
       uint32_t ft_cc = (ft_reg() >> 2) & 0x7;
       ft_cc = get_fcsr_condition_bit(ft_cc);
 
-      if (get_instr()->Bit(16)) {  // Read Tf bit.
+      if (instr_.Bit(16)) {  // Read Tf bit.
         // MOVT.D
         if (test_fcsr_bit(ft_cc)) set_fpu_register_float(fd_reg(), fs);
       } else {
@@ -2717,15 +2787,14 @@
   double ft, fs, fd;
   uint32_t cc, fcsr_cc;
   fs = get_fpu_register_double(fs_reg());
-  ft = (get_instr()->FunctionFieldRaw() != MOVF)
-           ? get_fpu_register_double(ft_reg())
-           : 0.0;
+  ft = (instr_.FunctionFieldRaw() != MOVF) ? get_fpu_register_double(ft_reg())
+                                           : 0.0;
   fd = get_fpu_register_double(fd_reg());
-  cc = get_instr()->FCccValue();
+  cc = instr_.FCccValue();
   fcsr_cc = get_fcsr_condition_bit(cc);
   int64_t ft_int = bit_cast<int64_t>(ft);
   int64_t fd_int = bit_cast<int64_t>(fd);
-  switch (get_instr()->FunctionFieldRaw()) {
+  switch (instr_.FunctionFieldRaw()) {
     case RINT: {
       DCHECK(kArchVariant == kMips64r6);
       double result, temp, temp_result;
@@ -2793,7 +2862,7 @@
       // Same function field for MOVT.D and MOVF.D
       uint32_t ft_cc = (ft_reg() >> 2) & 0x7;
       ft_cc = get_fcsr_condition_bit(ft_cc);
-      if (get_instr()->Bit(16)) {  // Read Tf bit.
+      if (instr_.Bit(16)) {  // Read Tf bit.
         // MOVT.D
         if (test_fcsr_bit(ft_cc)) set_fpu_register_double(fd_reg(), fs);
       } else {
@@ -2819,41 +2888,65 @@
       set_fpu_register_double(fd_reg(), FPUMax(ft, fs));
       break;
     case ADD_D:
-      set_fpu_register_double(fd_reg(), fs + ft);
+      set_fpu_register_double(
+          fd_reg(),
+          FPUCanonalizeOperation(
+              [](double lhs, double rhs) { return lhs + rhs; }, fs, ft));
       break;
     case SUB_D:
-      set_fpu_register_double(fd_reg(), fs - ft);
+      set_fpu_register_double(
+          fd_reg(),
+          FPUCanonalizeOperation(
+              [](double lhs, double rhs) { return lhs - rhs; }, fs, ft));
+      break;
+    case MADDF_D:
+      DCHECK(kArchVariant == kMips64r6);
+      set_fpu_register_double(fd_reg(), fd + (fs * ft));
+      break;
+    case MSUBF_D:
+      DCHECK(kArchVariant == kMips64r6);
+      set_fpu_register_double(fd_reg(), fd - (fs * ft));
       break;
     case MUL_D:
-      set_fpu_register_double(fd_reg(), fs * ft);
+      set_fpu_register_double(
+          fd_reg(),
+          FPUCanonalizeOperation(
+              [](double lhs, double rhs) { return lhs * rhs; }, fs, ft));
       break;
     case DIV_D:
-      set_fpu_register_double(fd_reg(), fs / ft);
+      set_fpu_register_double(
+          fd_reg(),
+          FPUCanonalizeOperation(
+              [](double lhs, double rhs) { return lhs / rhs; }, fs, ft));
       break;
     case ABS_D:
-      set_fpu_register_double(fd_reg(), fabs(fs));
+      set_fpu_register_double(
+          fd_reg(),
+          FPUCanonalizeOperation([](double fs) { return FPAbs(fs); }, fs));
       break;
     case MOV_D:
       set_fpu_register_double(fd_reg(), fs);
       break;
     case NEG_D:
-      set_fpu_register_double(fd_reg(), -fs);
+      set_fpu_register_double(
+          fd_reg(), FPUCanonalizeOperation([](double src) { return -src; },
+                                           KeepSign::yes, fs));
       break;
     case SQRT_D:
-      lazily_initialize_fast_sqrt(isolate_);
-      set_fpu_register_double(fd_reg(), fast_sqrt(fs, isolate_));
+      set_fpu_register_double(
+          fd_reg(),
+          FPUCanonalizeOperation([](double fs) { return std::sqrt(fs); }, fs));
       break;
-    case RSQRT_D: {
-      lazily_initialize_fast_sqrt(isolate_);
-      double result = 1.0 / fast_sqrt(fs, isolate_);
-      set_fpu_register_double(fd_reg(), result);
+    case RSQRT_D:
+      set_fpu_register_double(
+          fd_reg(), FPUCanonalizeOperation(
+                        [](double fs) { return 1.0 / std::sqrt(fs); }, fs));
       break;
-    }
-    case RECIP_D: {
-      double result = 1.0 / fs;
-      set_fpu_register_double(fd_reg(), result);
+    case RECIP_D:
+      set_fpu_register_double(
+          fd_reg(),
+          FPUCanonalizeOperation([](double fs) { return 1.0 / fs; }, fs));
       break;
-    }
     case C_UN_D:
       set_fcsr_bit(fcsr_cc, std::isnan(fs) || std::isnan(ft));
       break;
@@ -3060,7 +3153,7 @@
   float fs = get_fpu_register_float(fs_reg());
   float ft = get_fpu_register_float(ft_reg());
   int64_t alu_out = 0x12345678;
-  switch (get_instr()->FunctionFieldRaw()) {
+  switch (instr_.FunctionFieldRaw()) {
     case CVT_S_W:  // Convert word to float (single).
       alu_out = get_fpu_register_signed_word(fs_reg());
       set_fpu_register_float(fd_reg(), static_cast<float>(alu_out));
@@ -3152,7 +3245,7 @@
   double fs = get_fpu_register_double(fs_reg());
   double ft = get_fpu_register_double(ft_reg());
   int64_t i64;
-  switch (get_instr()->FunctionFieldRaw()) {
+  switch (instr_.FunctionFieldRaw()) {
     case CVT_D_L:  // Mips32r2 instruction.
       i64 = get_fpu_register(fs_reg());
       set_fpu_register_double(fd_reg(), static_cast<double>(i64));
@@ -3241,7 +3334,7 @@
 
 
 void Simulator::DecodeTypeRegisterCOP1() {
-  switch (get_instr()->RsFieldRaw()) {
+  switch (instr_.RsFieldRaw()) {
     case BC1:  // Branch on coprocessor condition.
     case BC1EQZ:
     case BC1NEZ:
@@ -3304,14 +3397,43 @@
 
 
 void Simulator::DecodeTypeRegisterCOP1X() {
-  switch (get_instr()->FunctionFieldRaw()) {
-    case MADD_D:
+  switch (instr_.FunctionFieldRaw()) {
+    case MADD_S: {
+      DCHECK(kArchVariant == kMips64r2);
+      float fr, ft, fs;
+      fr = get_fpu_register_float(fr_reg());
+      fs = get_fpu_register_float(fs_reg());
+      ft = get_fpu_register_float(ft_reg());
+      set_fpu_register_float(fd_reg(), fs * ft + fr);
+      break;
+    }
+    case MSUB_S: {
+      DCHECK(kArchVariant == kMips64r2);
+      float fr, ft, fs;
+      fr = get_fpu_register_float(fr_reg());
+      fs = get_fpu_register_float(fs_reg());
+      ft = get_fpu_register_float(ft_reg());
+      set_fpu_register_float(fd_reg(), fs * ft - fr);
+      break;
+    }
+    case MADD_D: {
+      DCHECK(kArchVariant == kMips64r2);
       double fr, ft, fs;
       fr = get_fpu_register_double(fr_reg());
       fs = get_fpu_register_double(fs_reg());
       ft = get_fpu_register_double(ft_reg());
       set_fpu_register_double(fd_reg(), fs * ft + fr);
       break;
+    }
+    case MSUB_D: {
+      DCHECK(kArchVariant == kMips64r2);
+      double fr, ft, fs;
+      fr = get_fpu_register_double(fr_reg());
+      fs = get_fpu_register_double(fs_reg());
+      ft = get_fpu_register_double(ft_reg());
+      set_fpu_register_double(fd_reg(), fs * ft - fr);
+      break;
+    }
     default:
       UNREACHABLE();
   }
@@ -3324,7 +3446,7 @@
   int64_t alu_out;
   bool do_interrupt = false;
 
-  switch (get_instr()->FunctionFieldRaw()) {
+  switch (instr_.FunctionFieldRaw()) {
     case SELEQZ_S:
       DCHECK(kArchVariant == kMips64r6);
       set_register(rd_reg(), rt() == 0 ? rs() : 0);
@@ -3570,7 +3692,7 @@
     case DIV:
     case DDIV: {
       const int64_t int_min_value =
-          get_instr()->FunctionFieldRaw() == DIV ? INT_MIN : LONG_MIN;
+          instr_.FunctionFieldRaw() == DIV ? INT_MIN : LONG_MIN;
       switch (kArchVariant) {
         case kMips64r2:
           // Divide by zero and overflow was not checked in the
@@ -3616,7 +3738,7 @@
         case kMips64r6: {
           uint32_t rt_u_32 = static_cast<uint32_t>(rt_u());
           uint32_t rs_u_32 = static_cast<uint32_t>(rs_u());
-          switch (get_instr()->SaValue()) {
+          switch (sa()) {
             case DIV_OP:
               if (rt_u_32 != 0) {
                 set_register(rd_reg(), rs_u_32 / rt_u_32);
@@ -3645,7 +3767,7 @@
     case DDIVU:
       switch (kArchVariant) {
         case kMips64r6: {
-          switch (get_instr()->SaValue()) {
+          switch (instr_.SaValue()) {
             case DIV_OP:
               if (rt_u() != 0) {
                 set_register(rd_reg(), rs_u() / rt_u());
@@ -3767,9 +3889,9 @@
       }
       break;
     case MOVCI: {
-      uint32_t cc = get_instr()->FBccValue();
+      uint32_t cc = instr_.FBccValue();
       uint32_t fcsr_cc = get_fcsr_condition_bit(cc);
-      if (get_instr()->Bit(16)) {  // Read Tf bit.
+      if (instr_.Bit(16)) {  // Read Tf bit.
         if (test_fcsr_bit(fcsr_cc)) set_register(rd_reg(), rs());
       } else {
         if (!test_fcsr_bit(fcsr_cc)) set_register(rd_reg(), rs());
@@ -3785,14 +3907,14 @@
       UNREACHABLE();
   }
   if (do_interrupt) {
-    SoftwareInterrupt(get_instr());
+    SoftwareInterrupt();
   }
 }
 
 
 void Simulator::DecodeTypeRegisterSPECIAL2() {
   int64_t alu_out;
-  switch (get_instr()->FunctionFieldRaw()) {
+  switch (instr_.FunctionFieldRaw()) {
     case MUL:
       alu_out = static_cast<int32_t>(rs_u()) * static_cast<int32_t>(rt_u());
       SetResult(rd_reg(), alu_out);
@@ -3821,7 +3943,7 @@
 
 void Simulator::DecodeTypeRegisterSPECIAL3() {
   int64_t alu_out;
-  switch (get_instr()->FunctionFieldRaw()) {
+  switch (instr_.FunctionFieldRaw()) {
     case INS: {  // Mips64r2 instruction.
       // Interpret rd field as 5-bit msb of insert.
       uint16_t msb = rd_reg();
@@ -3890,7 +4012,7 @@
       break;
     }
     case BSHFL: {
-      int32_t sa = get_instr()->SaFieldRaw() >> kSaShift;
+      int32_t sa = instr_.SaFieldRaw() >> kSaShift;
       switch (sa) {
         case BITSWAP: {
           uint32_t input = static_cast<uint32_t>(rt());
@@ -3968,7 +4090,7 @@
           break;
         }
         default: {
-          const uint8_t bp2 = get_instr()->Bp2Value();
+          const uint8_t bp2 = instr_.Bp2Value();
           sa >>= kBp2Bits;
           switch (sa) {
             case ALIGN: {
@@ -3993,7 +4115,7 @@
       break;
     }
     case DBSHFL: {
-      int32_t sa = get_instr()->SaFieldRaw() >> kSaShift;
+      int32_t sa = instr_.SaFieldRaw() >> kSaShift;
       switch (sa) {
         case DBITSWAP: {
           switch (sa) {
@@ -4067,7 +4189,7 @@
           break;
         }
         default: {
-          const uint8_t bp3 = get_instr()->Bp3Value();
+          const uint8_t bp3 = instr_.Bp3Value();
           sa >>= kBp3Bits;
           switch (sa) {
             case DALIGN: {
@@ -4096,12 +4218,9 @@
   }
 }
 
-
-void Simulator::DecodeTypeRegister(Instruction* instr) {
-  set_instr(instr);
-
+void Simulator::DecodeTypeRegister() {
   // ---------- Execution.
-  switch (instr->OpcodeFieldRaw()) {
+  switch (instr_.OpcodeFieldRaw()) {
     case COP1:
       DecodeTypeRegisterCOP1();
       break;
@@ -4127,18 +4246,18 @@
 
 
 // Type 2: instructions using a 16, 21 or 26 bits immediate. (e.g. beq, beqc).
-void Simulator::DecodeTypeImmediate(Instruction* instr) {
+void Simulator::DecodeTypeImmediate() {
   // Instruction fields.
-  Opcode op = instr->OpcodeFieldRaw();
-  int32_t rs_reg = instr->RsValue();
-  int64_t rs = get_register(instr->RsValue());
+  Opcode op = instr_.OpcodeFieldRaw();
+  int32_t rs_reg = instr_.RsValue();
+  int64_t rs = get_register(instr_.RsValue());
   uint64_t rs_u = static_cast<uint64_t>(rs);
-  int32_t rt_reg = instr->RtValue();  // Destination register.
+  int32_t rt_reg = instr_.RtValue();  // Destination register.
   int64_t rt = get_register(rt_reg);
-  int16_t imm16 = instr->Imm16Value();
-  int32_t imm18 = instr->Imm18Value();
+  int16_t imm16 = instr_.Imm16Value();
+  int32_t imm18 = instr_.Imm18Value();
 
-  int32_t ft_reg = instr->FtValue();  // Destination register.
+  int32_t ft_reg = instr_.FtValue();  // Destination register.
 
   // Zero extended immediate.
   uint64_t oe_imm16 = 0xffff & imm16;
@@ -4163,38 +4282,36 @@
   const int kInt64AlignmentMask = sizeof(uint64_t) - 1;
 
   // Branch instructions common part.
-  auto BranchAndLinkHelper = [this, instr, &next_pc,
-                              &execute_branch_delay_instruction](
-      bool do_branch) {
-    execute_branch_delay_instruction = true;
-    int64_t current_pc = get_pc();
-    if (do_branch) {
-      int16_t imm16 = instr->Imm16Value();
-      next_pc = current_pc + (imm16 << 2) + Instruction::kInstrSize;
-      set_register(31, current_pc + 2 * Instruction::kInstrSize);
-    } else {
-      next_pc = current_pc + 2 * Instruction::kInstrSize;
-    }
-  };
+  auto BranchAndLinkHelper =
+      [this, &next_pc, &execute_branch_delay_instruction](bool do_branch) {
+        execute_branch_delay_instruction = true;
+        int64_t current_pc = get_pc();
+        if (do_branch) {
+          int16_t imm16 = instr_.Imm16Value();
+          next_pc = current_pc + (imm16 << 2) + Instruction::kInstrSize;
+          set_register(31, current_pc + 2 * Instruction::kInstrSize);
+        } else {
+          next_pc = current_pc + 2 * Instruction::kInstrSize;
+        }
+      };
 
-  auto BranchHelper = [this, instr, &next_pc,
+  auto BranchHelper = [this, &next_pc,
                        &execute_branch_delay_instruction](bool do_branch) {
     execute_branch_delay_instruction = true;
     int64_t current_pc = get_pc();
     if (do_branch) {
-      int16_t imm16 = instr->Imm16Value();
+      int16_t imm16 = instr_.Imm16Value();
       next_pc = current_pc + (imm16 << 2) + Instruction::kInstrSize;
     } else {
       next_pc = current_pc + 2 * Instruction::kInstrSize;
     }
   };
 
-  auto BranchAndLinkCompactHelper = [this, instr, &next_pc](bool do_branch,
-                                                            int bits) {
+  auto BranchAndLinkCompactHelper = [this, &next_pc](bool do_branch, int bits) {
     int64_t current_pc = get_pc();
     CheckForbiddenSlot(current_pc);
     if (do_branch) {
-      int32_t imm = instr->ImmValue(bits);
+      int32_t imm = instr_.ImmValue(bits);
       imm <<= 32 - bits;
       imm >>= 32 - bits;
       next_pc = current_pc + (imm << 2) + Instruction::kInstrSize;
@@ -4202,11 +4319,11 @@
     }
   };
 
-  auto BranchCompactHelper = [&next_pc, this, instr](bool do_branch, int bits) {
+  auto BranchCompactHelper = [this, &next_pc](bool do_branch, int bits) {
     int64_t current_pc = get_pc();
     CheckForbiddenSlot(current_pc);
     if (do_branch) {
-      int32_t imm = instr->ImmValue(bits);
+      int32_t imm = instr_.ImmValue(bits);
       imm <<= 32 - bits;
       imm >>= 32 - bits;
       next_pc = get_pc() + (imm << 2) + Instruction::kInstrSize;
@@ -4216,12 +4333,12 @@
   switch (op) {
     // ------------- COP1. Coprocessor instructions.
     case COP1:
-      switch (instr->RsFieldRaw()) {
+      switch (instr_.RsFieldRaw()) {
         case BC1: {  // Branch on coprocessor condition.
-          uint32_t cc = instr->FBccValue();
+          uint32_t cc = instr_.FBccValue();
           uint32_t fcsr_cc = get_fcsr_condition_bit(cc);
           uint32_t cc_value = test_fcsr_bit(fcsr_cc);
-          bool do_branch = (instr->FBtrueValue()) ? cc_value : !cc_value;
+          bool do_branch = (instr_.FBtrueValue()) ? cc_value : !cc_value;
           BranchHelper(do_branch);
           break;
         }
@@ -4237,7 +4354,7 @@
       break;
     // ------------- REGIMM class.
     case REGIMM:
-      switch (instr->RtFieldRaw()) {
+      switch (instr_.RtFieldRaw()) {
         case BLTZ:
           BranchHelper(rs < 0);
           break;
@@ -4455,7 +4572,7 @@
       set_register(rt_reg, ReadB(rs + se_imm16));
       break;
     case LH:
-      set_register(rt_reg, ReadH(rs + se_imm16, instr));
+      set_register(rt_reg, ReadH(rs + se_imm16, instr_.instr()));
       break;
     case LWL: {
       // al_offset is offset of the effective address within an aligned word.
@@ -4463,26 +4580,26 @@
       uint8_t byte_shift = kInt32AlignmentMask - al_offset;
       uint32_t mask = (1 << byte_shift * 8) - 1;
       addr = rs + se_imm16 - al_offset;
-      int32_t val = ReadW(addr, instr);
+      int32_t val = ReadW(addr, instr_.instr());
       val <<= byte_shift * 8;
       val |= rt & mask;
       set_register(rt_reg, static_cast<int64_t>(val));
       break;
     }
     case LW:
-      set_register(rt_reg, ReadW(rs + se_imm16, instr));
+      set_register(rt_reg, ReadW(rs + se_imm16, instr_.instr()));
       break;
     case LWU:
-      set_register(rt_reg, ReadWU(rs + se_imm16, instr));
+      set_register(rt_reg, ReadWU(rs + se_imm16, instr_.instr()));
       break;
     case LD:
-      set_register(rt_reg, Read2W(rs + se_imm16, instr));
+      set_register(rt_reg, Read2W(rs + se_imm16, instr_.instr()));
       break;
     case LBU:
       set_register(rt_reg, ReadBU(rs + se_imm16));
       break;
     case LHU:
-      set_register(rt_reg, ReadHU(rs + se_imm16, instr));
+      set_register(rt_reg, ReadHU(rs + se_imm16, instr_.instr()));
       break;
     case LWR: {
       // al_offset is offset of the effective address within an aligned word.
@@ -4490,7 +4607,7 @@
       uint8_t byte_shift = kInt32AlignmentMask - al_offset;
       uint32_t mask = al_offset ? (~0 << (byte_shift + 1) * 8) : 0;
       addr = rs + se_imm16 - al_offset;
-      alu_out = ReadW(addr, instr);
+      alu_out = ReadW(addr, instr_.instr());
       alu_out = static_cast<uint32_t> (alu_out) >> al_offset * 8;
       alu_out |= rt & mask;
       set_register(rt_reg, alu_out);
@@ -4502,7 +4619,7 @@
       uint8_t byte_shift = kInt64AlignmentMask - al_offset;
       uint64_t mask = (1UL << byte_shift * 8) - 1;
       addr = rs + se_imm16 - al_offset;
-      alu_out = Read2W(addr, instr);
+      alu_out = Read2W(addr, instr_.instr());
       alu_out <<= byte_shift * 8;
       alu_out |= rt & mask;
       set_register(rt_reg, alu_out);
@@ -4514,7 +4631,7 @@
       uint8_t byte_shift = kInt64AlignmentMask - al_offset;
       uint64_t mask = al_offset ? (~0UL << (byte_shift + 1) * 8) : 0UL;
       addr = rs + se_imm16 - al_offset;
-      alu_out = Read2W(addr, instr);
+      alu_out = Read2W(addr, instr_.instr());
       alu_out = alu_out >> al_offset * 8;
       alu_out |= rt & mask;
       set_register(rt_reg, alu_out);
@@ -4524,31 +4641,31 @@
       WriteB(rs + se_imm16, static_cast<int8_t>(rt));
       break;
     case SH:
-      WriteH(rs + se_imm16, static_cast<uint16_t>(rt), instr);
+      WriteH(rs + se_imm16, static_cast<uint16_t>(rt), instr_.instr());
       break;
     case SWL: {
       uint8_t al_offset = (rs + se_imm16) & kInt32AlignmentMask;
       uint8_t byte_shift = kInt32AlignmentMask - al_offset;
       uint32_t mask = byte_shift ? (~0 << (al_offset + 1) * 8) : 0;
       addr = rs + se_imm16 - al_offset;
-      uint64_t mem_value = ReadW(addr, instr) & mask;
+      uint64_t mem_value = ReadW(addr, instr_.instr()) & mask;
       mem_value |= static_cast<uint32_t>(rt) >> byte_shift * 8;
-      WriteW(addr, static_cast<int32_t>(mem_value), instr);
+      WriteW(addr, static_cast<int32_t>(mem_value), instr_.instr());
       break;
     }
     case SW:
-      WriteW(rs + se_imm16, static_cast<int32_t>(rt), instr);
+      WriteW(rs + se_imm16, static_cast<int32_t>(rt), instr_.instr());
       break;
     case SD:
-      Write2W(rs + se_imm16, rt, instr);
+      Write2W(rs + se_imm16, rt, instr_.instr());
       break;
     case SWR: {
       uint8_t al_offset = (rs + se_imm16) & kInt32AlignmentMask;
       uint32_t mask = (1 << al_offset * 8) - 1;
       addr = rs + se_imm16 - al_offset;
-      uint64_t mem_value = ReadW(addr, instr);
+      uint64_t mem_value = ReadW(addr, instr_.instr());
       mem_value = (rt << al_offset * 8) | (mem_value & mask);
-      WriteW(addr, static_cast<int32_t>(mem_value), instr);
+      WriteW(addr, static_cast<int32_t>(mem_value), instr_.instr());
       break;
     }
     case SDL: {
@@ -4556,39 +4673,39 @@
       uint8_t byte_shift = kInt64AlignmentMask - al_offset;
       uint64_t mask = byte_shift ? (~0UL << (al_offset + 1) * 8) : 0;
       addr = rs + se_imm16 - al_offset;
-      uint64_t mem_value = Read2W(addr, instr) & mask;
+      uint64_t mem_value = Read2W(addr, instr_.instr()) & mask;
       mem_value |= rt >> byte_shift * 8;
-      Write2W(addr, mem_value, instr);
+      Write2W(addr, mem_value, instr_.instr());
       break;
     }
     case SDR: {
       uint8_t al_offset = (rs + se_imm16) & kInt64AlignmentMask;
       uint64_t mask = (1UL << al_offset * 8) - 1;
       addr = rs + se_imm16 - al_offset;
-      uint64_t mem_value = Read2W(addr, instr);
+      uint64_t mem_value = Read2W(addr, instr_.instr());
       mem_value = (rt << al_offset * 8) | (mem_value & mask);
-      Write2W(addr, mem_value, instr);
+      Write2W(addr, mem_value, instr_.instr());
       break;
     }
     case LWC1:
       set_fpu_register(ft_reg, kFPUInvalidResult);  // Trash upper 32 bits.
-      set_fpu_register_word(ft_reg, ReadW(rs + se_imm16, instr));
+      set_fpu_register_word(ft_reg, ReadW(rs + se_imm16, instr_.instr()));
       break;
     case LDC1:
-      set_fpu_register_double(ft_reg, ReadD(rs + se_imm16, instr));
+      set_fpu_register_double(ft_reg, ReadD(rs + se_imm16, instr_.instr()));
       break;
     case SWC1: {
       int32_t alu_out_32 = static_cast<int32_t>(get_fpu_register(ft_reg));
-      WriteW(rs + se_imm16, alu_out_32, instr);
+      WriteW(rs + se_imm16, alu_out_32, instr_.instr());
       break;
     }
     case SDC1:
-      WriteD(rs + se_imm16, get_fpu_register_double(ft_reg), instr);
+      WriteD(rs + se_imm16, get_fpu_register_double(ft_reg), instr_.instr());
       break;
     // ------------- PC-Relative instructions.
     case PCREL: {
       // rt field: checking 5-bits.
-      int32_t imm21 = instr->Imm21Value();
+      int32_t imm21 = instr_.Imm21Value();
       int64_t current_pc = get_pc();
       uint8_t rt = (imm21 >> kImm16Bits);
       switch (rt) {
@@ -4600,14 +4717,14 @@
           alu_out = current_pc + (se_imm16 << 16);
           break;
         default: {
-          int32_t imm19 = instr->Imm19Value();
+          int32_t imm19 = instr_.Imm19Value();
           // rt field: checking the most significant 3-bits.
           rt = (imm21 >> kImm18Bits);
           switch (rt) {
             case LDPC:
               addr =
                   (current_pc & static_cast<int64_t>(~0x7)) + (se_imm18 << 3);
-              alu_out = Read2W(addr, instr);
+              alu_out = Read2W(addr, instr_.instr());
               break;
             default: {
               // rt field: checking the most significant 2-bits.
@@ -4671,13 +4788,14 @@
 
 
 // Type 3: instructions using a 26 bytes immediate. (e.g. j, jal).
-void Simulator::DecodeTypeJump(Instruction* instr) {
+void Simulator::DecodeTypeJump() {
+  SimInstruction simInstr = instr_;
   // Get current pc.
   int64_t current_pc = get_pc();
   // Get unchanged bits of pc.
   int64_t pc_high_bits = current_pc & 0xfffffffff0000000;
   // Next pc.
-  int64_t next_pc = pc_high_bits | (instr->Imm26Value() << 2);
+  int64_t next_pc = pc_high_bits | (simInstr.Imm26Value() << 2);
 
   // Execute branch delay slot.
   // We don't check for end_sim_pc. First it should not be met as the current pc
@@ -4688,7 +4806,7 @@
 
   // Update pc and ra if necessary.
   // Do this after the branch delay execution.
-  if (instr->IsLinkingInstruction()) {
+  if (simInstr.IsLinkingInstruction()) {
     set_register(31, current_pc + 2 * Instruction::kInstrSize);
   }
   set_pc(next_pc);
@@ -4713,15 +4831,16 @@
     dasm.InstructionDecode(buffer, reinterpret_cast<byte*>(instr));
   }
 
-  switch (instr->InstructionType(Instruction::TypeChecks::EXTRA)) {
+  instr_ = instr;
+  switch (instr_.InstructionType()) {
     case Instruction::kRegisterType:
-      DecodeTypeRegister(instr);
+      DecodeTypeRegister();
       break;
     case Instruction::kImmediateType:
-      DecodeTypeImmediate(instr);
+      DecodeTypeImmediate();
       break;
     case Instruction::kJumpType:
-      DecodeTypeJump(instr);
+      DecodeTypeJump();
       break;
     default:
       UNSUPPORTED();
diff --git a/src/mips64/simulator-mips64.h b/src/mips64/simulator-mips64.h
index cd606e2..df98465 100644
--- a/src/mips64/simulator-mips64.h
+++ b/src/mips64/simulator-mips64.h
@@ -122,6 +122,39 @@
   char validity_map_[kValidityMapSize];  // One byte per line.
 };
 
+class SimInstructionBase : public InstructionBase {
+ public:
+  Type InstructionType() const { return type_; }
+  inline Instruction* instr() const { return instr_; }
+  inline int32_t operand() const { return operand_; }
+
+ protected:
+  SimInstructionBase() : operand_(-1), instr_(nullptr), type_(kUnsupported) {}
+  explicit SimInstructionBase(Instruction* instr) {}
+
+  int32_t operand_;
+  Instruction* instr_;
+  Type type_;
+
+ private:
+  DISALLOW_ASSIGN(SimInstructionBase);
+};
+
+class SimInstruction : public InstructionGetters<SimInstructionBase> {
+ public:
+  SimInstruction() {}
+
+  explicit SimInstruction(Instruction* instr) { *this = instr; }
+
+  SimInstruction& operator=(Instruction* instr) {
+    operand_ = *reinterpret_cast<const int32_t*>(instr);
+    instr_ = instr;
+    type_ = InstructionBase::InstructionType();
+    DCHECK(reinterpret_cast<void*>(&operand_) == this);
+    return *this;
+  }
+};
+
 class Simulator {
  public:
   friend class MipsDebugger;
@@ -226,7 +259,7 @@
   // Call on program start.
   static void Initialize(Isolate* isolate);
 
-  static void TearDown(base::HashMap* i_cache, Redirection* first);
+  static void TearDown(base::CustomMatcherHashMap* i_cache, Redirection* first);
 
   // V8 generally calls into generated JS code with 5 parameters and into
   // generated RegExp code with 7 parameters. This is a convenience function,
@@ -246,7 +279,8 @@
   char* last_debugger_input() { return last_debugger_input_; }
 
   // ICache checking.
-  static void FlushICache(base::HashMap* i_cache, void* start, size_t size);
+  static void FlushICache(base::CustomMatcherHashMap* i_cache, void* start,
+                          size_t size);
 
   // Returns true if pc register contains one of the 'special_values' defined
   // below (bad_ra, end_sim_pc).
@@ -314,6 +348,8 @@
   inline int32_t SetDoubleHIW(double* addr);
   inline int32_t SetDoubleLOW(double* addr);
 
+  SimInstruction instr_;
+
   // functions called from DecodeTypeRegister.
   void DecodeTypeRegisterCOP1();
 
@@ -335,40 +371,36 @@
   void DecodeTypeRegisterLRsType();
 
   // Executing is handled based on the instruction type.
-  void DecodeTypeRegister(Instruction* instr);
+  void DecodeTypeRegister();
 
-  Instruction* currentInstr_;
-  inline Instruction* get_instr() const { return currentInstr_; }
-  inline void set_instr(Instruction* instr) { currentInstr_ = instr; }
-
-  inline int32_t rs_reg() const { return currentInstr_->RsValue(); }
+  inline int32_t rs_reg() const { return instr_.RsValue(); }
   inline int64_t rs() const { return get_register(rs_reg()); }
   inline uint64_t rs_u() const {
     return static_cast<uint64_t>(get_register(rs_reg()));
   }
-  inline int32_t rt_reg() const { return currentInstr_->RtValue(); }
+  inline int32_t rt_reg() const { return instr_.RtValue(); }
   inline int64_t rt() const { return get_register(rt_reg()); }
   inline uint64_t rt_u() const {
     return static_cast<uint64_t>(get_register(rt_reg()));
   }
-  inline int32_t rd_reg() const { return currentInstr_->RdValue(); }
-  inline int32_t fr_reg() const { return currentInstr_->FrValue(); }
-  inline int32_t fs_reg() const { return currentInstr_->FsValue(); }
-  inline int32_t ft_reg() const { return currentInstr_->FtValue(); }
-  inline int32_t fd_reg() const { return currentInstr_->FdValue(); }
-  inline int32_t sa() const { return currentInstr_->SaValue(); }
-  inline int32_t lsa_sa() const { return currentInstr_->LsaSaValue(); }
+  inline int32_t rd_reg() const { return instr_.RdValue(); }
+  inline int32_t fr_reg() const { return instr_.FrValue(); }
+  inline int32_t fs_reg() const { return instr_.FsValue(); }
+  inline int32_t ft_reg() const { return instr_.FtValue(); }
+  inline int32_t fd_reg() const { return instr_.FdValue(); }
+  inline int32_t sa() const { return instr_.SaValue(); }
+  inline int32_t lsa_sa() const { return instr_.LsaSaValue(); }
 
   inline void SetResult(const int32_t rd_reg, const int64_t alu_out) {
     set_register(rd_reg, alu_out);
     TraceRegWr(alu_out);
   }
 
-  void DecodeTypeImmediate(Instruction* instr);
-  void DecodeTypeJump(Instruction* instr);
+  void DecodeTypeImmediate();
+  void DecodeTypeJump();
 
   // Used for breakpoints and traps.
-  void SoftwareInterrupt(Instruction* instr);
+  void SoftwareInterrupt();
 
   // Compact branch guard.
   void CheckForbiddenSlot(int64_t current_pc) {
@@ -414,9 +446,12 @@
   }
 
   // ICache.
-  static void CheckICache(base::HashMap* i_cache, Instruction* instr);
-  static void FlushOnePage(base::HashMap* i_cache, intptr_t start, size_t size);
-  static CachePage* GetCachePage(base::HashMap* i_cache, void* page);
+  static void CheckICache(base::CustomMatcherHashMap* i_cache,
+                          Instruction* instr);
+  static void FlushOnePage(base::CustomMatcherHashMap* i_cache, intptr_t start,
+                           size_t size);
+  static CachePage* GetCachePage(base::CustomMatcherHashMap* i_cache,
+                                 void* page);
 
   enum Exception {
     none,
@@ -461,7 +496,7 @@
   char* last_debugger_input_;
 
   // Icache simulation.
-  base::HashMap* i_cache_;
+  base::CustomMatcherHashMap* i_cache_;
 
   v8::internal::Isolate* isolate_;
 
diff --git a/src/objects-body-descriptors-inl.h b/src/objects-body-descriptors-inl.h
index ccee37b..0252b64 100644
--- a/src/objects-body-descriptors-inl.h
+++ b/src/objects-body-descriptors-inl.h
@@ -465,7 +465,6 @@
     case JS_PROMISE_TYPE:
     case JS_CONTEXT_EXTENSION_OBJECT_TYPE:
     case JS_GENERATOR_OBJECT_TYPE:
-    case JS_MODULE_TYPE:
     case JS_VALUE_TYPE:
     case JS_DATE_TYPE:
     case JS_ARRAY_TYPE:
@@ -475,6 +474,7 @@
     case JS_MAP_TYPE:
     case JS_SET_ITERATOR_TYPE:
     case JS_MAP_ITERATOR_TYPE:
+    case JS_STRING_ITERATOR_TYPE:
     case JS_REGEXP_TYPE:
     case JS_GLOBAL_PROXY_TYPE:
     case JS_GLOBAL_OBJECT_TYPE:
diff --git a/src/objects-debug.cc b/src/objects-debug.cc
index 7d426a0..3c43f23 100644
--- a/src/objects-debug.cc
+++ b/src/objects-debug.cc
@@ -152,6 +152,9 @@
     case JS_MAP_ITERATOR_TYPE:
       JSMapIterator::cast(this)->JSMapIteratorVerify();
       break;
+    case JS_STRING_ITERATOR_TYPE:
+      JSStringIterator::cast(this)->JSStringIteratorVerify();
+      break;
     case JS_WEAK_MAP_TYPE:
       JSWeakMap::cast(this)->JSWeakMapVerify();
       break;
@@ -562,6 +565,7 @@
   VerifyObjectField(kOptimizedCodeMapOffset);
   VerifyObjectField(kFeedbackMetadataOffset);
   VerifyObjectField(kScopeInfoOffset);
+  VerifyObjectField(kOuterScopeInfoOffset);
   VerifyObjectField(kInstanceClassNameOffset);
   CHECK(function_data()->IsUndefined(GetIsolate()) || IsApiFunction() ||
         HasBytecodeArray() || HasAsmWasmData());
@@ -778,6 +782,14 @@
   CHECK(table()->IsHashTable() || table()->IsUndefined(GetIsolate()));
 }
 
+void JSStringIterator::JSStringIteratorVerify() {
+  CHECK(IsJSStringIterator());
+  JSObjectVerify();
+  CHECK(string()->IsString());
+
+  CHECK_GE(index(), 0);
+  CHECK_LE(index(), String::kMaxLength);
+}
 
 void JSWeakSet::JSWeakSetVerify() {
   CHECK(IsJSWeakSet());
@@ -831,7 +843,6 @@
   }
 }
 
-
 void JSProxy::JSProxyVerify() {
   CHECK(IsJSProxy());
   VerifyPointer(target());
@@ -877,9 +888,7 @@
   CHECK(IsJSTypedArray());
   JSArrayBufferViewVerify();
   VerifyPointer(raw_length());
-  CHECK(raw_length()->IsSmi() || raw_length()->IsHeapNumber() ||
-        raw_length()->IsUndefined(GetIsolate()));
-
+  CHECK(raw_length()->IsSmi() || raw_length()->IsUndefined(GetIsolate()));
   VerifyPointer(elements());
 }
 
@@ -900,6 +909,27 @@
   value()->ObjectVerify();
 }
 
+void PromiseContainer::PromiseContainerVerify() {
+  CHECK(IsPromiseContainer());
+  thenable()->ObjectVerify();
+  then()->ObjectVerify();
+  resolve()->ObjectVerify();
+  reject()->ObjectVerify();
+  before_debug_event()->ObjectVerify();
+  after_debug_event()->ObjectVerify();
+}
+
+void Module::ModuleVerify() {
+  CHECK(IsModule());
+  CHECK(code()->IsSharedFunctionInfo() || code()->IsJSFunction());
+  code()->ObjectVerify();
+  exports()->ObjectVerify();
+  requested_modules()->ObjectVerify();
+  VerifySmiField(kFlagsOffset);
+  embedder_data()->ObjectVerify();
+  CHECK(shared()->name()->IsSymbol());
+  // TODO(neis): Check more.
+}
 
 void PrototypeInfo::PrototypeInfoVerify() {
   CHECK(IsPrototypeInfo());
@@ -911,10 +941,8 @@
   CHECK(validity_cell()->IsCell() || validity_cell()->IsSmi());
 }
 
-
-void SloppyBlockWithEvalContextExtension::
-    SloppyBlockWithEvalContextExtensionVerify() {
-  CHECK(IsSloppyBlockWithEvalContextExtension());
+void ContextExtension::ContextExtensionVerify() {
+  CHECK(IsContextExtension());
   VerifyObjectField(kScopeInfoOffset);
   VerifyObjectField(kExtensionOffset);
 }
diff --git a/src/objects-inl.h b/src/objects-inl.h
index 3d82bf8..af12615 100644
--- a/src/objects-inl.h
+++ b/src/objects-inl.h
@@ -27,6 +27,7 @@
 #include "src/isolate.h"
 #include "src/keys.h"
 #include "src/layout-descriptor-inl.h"
+#include "src/lookup-cache-inl.h"
 #include "src/lookup.h"
 #include "src/objects.h"
 #include "src/property.h"
@@ -700,6 +701,7 @@
 TYPE_CHECKER(FixedDoubleArray, FIXED_DOUBLE_ARRAY_TYPE)
 TYPE_CHECKER(WeakFixedArray, FIXED_ARRAY_TYPE)
 TYPE_CHECKER(TransitionArray, TRANSITION_ARRAY_TYPE)
+TYPE_CHECKER(JSStringIterator, JS_STRING_ITERATOR_TYPE)
 
 bool HeapObject::IsJSWeakCollection() const {
   return IsJSWeakMap() || IsJSWeakSet();
@@ -709,6 +711,8 @@
 
 bool HeapObject::IsDescriptorArray() const { return IsFixedArray(); }
 
+bool HeapObject::IsFrameArray() const { return IsFixedArray(); }
+
 bool HeapObject::IsArrayList() const { return IsFixedArray(); }
 
 bool Object::IsLayoutDescriptor() const {
@@ -790,6 +794,13 @@
   return map() == GetHeap()->scope_info_map();
 }
 
+bool HeapObject::IsModuleInfoEntry() const {
+  return map() == GetHeap()->module_info_entry_map();
+}
+
+bool HeapObject::IsModuleInfo() const {
+  return map() == GetHeap()->module_info_map();
+}
 
 TYPE_CHECKER(JSBoundFunction, JS_BOUND_FUNCTION_TYPE)
 TYPE_CHECKER(JSFunction, JS_FUNCTION_TYPE)
@@ -2103,6 +2114,8 @@
       return JSArgumentsObject::kHeaderSize;
     case JS_ERROR_TYPE:
       return JSObject::kHeaderSize;
+    case JS_STRING_ITERATOR_TYPE:
+      return JSStringIterator::kSize;
     default:
       UNREACHABLE();
       return 0;
@@ -2610,6 +2623,29 @@
   return HeapObject::RawField(this, OffsetOfElementAt(index));
 }
 
+#define DEFINE_FRAME_ARRAY_ACCESSORS(name, type)                              \
+  type* FrameArray::name(int frame_ix) const {                                \
+    Object* obj =                                                             \
+        get(kFirstIndex + frame_ix * kElementsPerFrame + k##name##Offset);    \
+    return type::cast(obj);                                                   \
+  }                                                                           \
+                                                                              \
+  void FrameArray::Set##name(int frame_ix, type* value) {                     \
+    set(kFirstIndex + frame_ix * kElementsPerFrame + k##name##Offset, value); \
+  }
+FRAME_ARRAY_FIELD_LIST(DEFINE_FRAME_ARRAY_ACCESSORS)
+#undef DEFINE_FRAME_ARRAY_ACCESSORS
+
+bool FrameArray::IsWasmFrame(int frame_ix) const {
+  const int flags = Flags(frame_ix)->value();
+  return (flags & kIsWasmFrame) != 0;
+}
+
+int FrameArray::FrameCount() const {
+  const int frame_count = Smi::cast(get(kFrameCountIndex))->value();
+  DCHECK_LE(0, frame_count);
+  return frame_count;
+}
 
 bool DescriptorArray::IsEmpty() {
   DCHECK(length() >= kFirstIndex ||
@@ -3223,6 +3259,7 @@
 CAST_ACCESSOR(FixedTypedArrayBase)
 CAST_ACCESSOR(Float32x4)
 CAST_ACCESSOR(Foreign)
+CAST_ACCESSOR(FrameArray)
 CAST_ACCESSOR(GlobalDictionary)
 CAST_ACCESSOR(HandlerTable)
 CAST_ACCESSOR(HeapObject)
@@ -3248,6 +3285,7 @@
 CAST_ACCESSOR(JSRegExp)
 CAST_ACCESSOR(JSSet)
 CAST_ACCESSOR(JSSetIterator)
+CAST_ACCESSOR(JSStringIterator)
 CAST_ACCESSOR(JSTypedArray)
 CAST_ACCESSOR(JSValue)
 CAST_ACCESSOR(JSWeakCollection)
@@ -3255,6 +3293,8 @@
 CAST_ACCESSOR(JSWeakSet)
 CAST_ACCESSOR(LayoutDescriptor)
 CAST_ACCESSOR(Map)
+CAST_ACCESSOR(ModuleInfoEntry)
+CAST_ACCESSOR(ModuleInfo)
 CAST_ACCESSOR(Name)
 CAST_ACCESSOR(NameDictionary)
 CAST_ACCESSOR(NormalizedMapCache)
@@ -5614,6 +5654,13 @@
 
 ACCESSORS(Box, value, Object, kValueOffset)
 
+ACCESSORS(PromiseContainer, thenable, JSReceiver, kThenableOffset)
+ACCESSORS(PromiseContainer, then, JSReceiver, kThenOffset)
+ACCESSORS(PromiseContainer, resolve, JSFunction, kResolveOffset)
+ACCESSORS(PromiseContainer, reject, JSFunction, kRejectOffset)
+ACCESSORS(PromiseContainer, before_debug_event, Object, kBeforeDebugEventOffset)
+ACCESSORS(PromiseContainer, after_debug_event, Object, kAfterDebugEventOffset)
+
 Map* PrototypeInfo::ObjectCreateMap() {
   return Map::cast(WeakCell::cast(object_create_map())->value());
 }
@@ -5662,10 +5709,26 @@
 SMI_ACCESSORS(PrototypeInfo, bit_field, kBitFieldOffset)
 BOOL_ACCESSORS(PrototypeInfo, bit_field, should_be_fast_map, kShouldBeFastBit)
 
-ACCESSORS(SloppyBlockWithEvalContextExtension, scope_info, ScopeInfo,
-          kScopeInfoOffset)
-ACCESSORS(SloppyBlockWithEvalContextExtension, extension, JSObject,
-          kExtensionOffset)
+ACCESSORS(ContextExtension, scope_info, ScopeInfo, kScopeInfoOffset)
+ACCESSORS(ContextExtension, extension, Object, kExtensionOffset)
+
+ACCESSORS(Module, code, Object, kCodeOffset)
+ACCESSORS(Module, exports, ObjectHashTable, kExportsOffset)
+ACCESSORS(Module, requested_modules, FixedArray, kRequestedModulesOffset)
+SMI_ACCESSORS(Module, flags, kFlagsOffset)
+BOOL_ACCESSORS(Module, flags, evaluated, kEvaluatedBit)
+ACCESSORS(Module, embedder_data, Object, kEmbedderDataOffset)
+
+SharedFunctionInfo* Module::shared() const {
+  return code()->IsSharedFunctionInfo() ? SharedFunctionInfo::cast(code())
+                                        : JSFunction::cast(code())->shared();
+}
+
+ModuleInfo* Module::info() const {
+  return shared()->scope_info()->ModuleDescriptorInfo();
+}
+
+uint32_t Module::Hash() const { return Symbol::cast(shared()->name())->Hash(); }
 
 ACCESSORS(AccessorPair, getter, Object, kGetterOffset)
 ACCESSORS(AccessorPair, setter, Object, kSetterOffset)
@@ -5679,8 +5742,10 @@
 ACCESSORS(InterceptorInfo, getter, Object, kGetterOffset)
 ACCESSORS(InterceptorInfo, setter, Object, kSetterOffset)
 ACCESSORS(InterceptorInfo, query, Object, kQueryOffset)
+ACCESSORS(InterceptorInfo, descriptor, Object, kDescriptorOffset)
 ACCESSORS(InterceptorInfo, deleter, Object, kDeleterOffset)
 ACCESSORS(InterceptorInfo, enumerator, Object, kEnumeratorOffset)
+ACCESSORS(InterceptorInfo, definer, Object, kDefinerOffset)
 ACCESSORS(InterceptorInfo, data, Object, kDataOffset)
 SMI_ACCESSORS(InterceptorInfo, flags, kFlagsOffset)
 BOOL_ACCESSORS(InterceptorInfo, flags, can_intercept_symbols,
@@ -6031,8 +6096,7 @@
   set_compiler_hints(hints);
 }
 
-
-FunctionKind SharedFunctionInfo::kind() {
+FunctionKind SharedFunctionInfo::kind() const {
   return FunctionKindBits::decode(compiler_hints());
 }
 
@@ -6057,23 +6121,12 @@
 BOOL_ACCESSORS(SharedFunctionInfo, compiler_hints, dont_crankshaft,
                kDontCrankshaft)
 BOOL_ACCESSORS(SharedFunctionInfo, compiler_hints, dont_flush, kDontFlush)
-BOOL_ACCESSORS(SharedFunctionInfo, compiler_hints, is_arrow, kIsArrow)
-BOOL_ACCESSORS(SharedFunctionInfo, compiler_hints, is_generator, kIsGenerator)
-BOOL_ACCESSORS(SharedFunctionInfo, compiler_hints, is_async, kIsAsyncFunction)
-BOOL_ACCESSORS(SharedFunctionInfo, compiler_hints, is_concise_method,
-               kIsConciseMethod)
-BOOL_ACCESSORS(SharedFunctionInfo, compiler_hints, is_getter_function,
-               kIsGetterFunction)
-BOOL_ACCESSORS(SharedFunctionInfo, compiler_hints, is_setter_function,
-               kIsSetterFunction)
-BOOL_ACCESSORS(SharedFunctionInfo, compiler_hints, is_default_constructor,
-               kIsDefaultConstructor)
 BOOL_ACCESSORS(SharedFunctionInfo, compiler_hints, is_asm_wasm_broken,
                kIsAsmWasmBroken)
-
-inline bool SharedFunctionInfo::is_resumable() const {
-  return is_generator() || is_async();
-}
+BOOL_ACCESSORS(SharedFunctionInfo, compiler_hints, requires_class_field_init,
+               kRequiresClassFieldInit)
+BOOL_ACCESSORS(SharedFunctionInfo, compiler_hints, is_class_field_initializer,
+               kIsClassFieldInitializer)
 
 bool Script::HasValidSource() {
   Object* src = this->source();
@@ -6155,6 +6208,9 @@
                             mode);
 }
 
+ACCESSORS(SharedFunctionInfo, outer_scope_info, HeapObject,
+          kOuterScopeInfoOffset)
+
 bool SharedFunctionInfo::is_compiled() const {
   Builtins* builtins = GetIsolate()->builtins();
   DCHECK(code() != builtins->builtin(Builtins::kCompileOptimizedConcurrent));
@@ -7890,6 +7946,44 @@
 FOR_EACH_SCOPE_INFO_NUMERIC_FIELD(SCOPE_INFO_FIELD_ACCESSORS)
 #undef SCOPE_INFO_FIELD_ACCESSORS
 
+Object* ModuleInfoEntry::export_name() const { return get(kExportNameIndex); }
+
+Object* ModuleInfoEntry::local_name() const { return get(kLocalNameIndex); }
+
+Object* ModuleInfoEntry::import_name() const { return get(kImportNameIndex); }
+
+Object* ModuleInfoEntry::module_request() const {
+  return get(kModuleRequestIndex);
+}
+
+FixedArray* ModuleInfo::module_requests() const {
+  return FixedArray::cast(get(kModuleRequestsIndex));
+}
+
+FixedArray* ModuleInfo::special_exports() const {
+  return FixedArray::cast(get(kSpecialExportsIndex));
+}
+
+FixedArray* ModuleInfo::regular_exports() const {
+  return FixedArray::cast(get(kRegularExportsIndex));
+}
+
+FixedArray* ModuleInfo::regular_imports() const {
+  return FixedArray::cast(get(kRegularImportsIndex));
+}
+
+FixedArray* ModuleInfo::namespace_imports() const {
+  return FixedArray::cast(get(kNamespaceImportsIndex));
+}
+
+#ifdef DEBUG
+bool ModuleInfo::Equals(ModuleInfo* other) const {
+  return regular_exports() == other->regular_exports() &&
+         regular_imports() == other->regular_imports() &&
+         special_exports() == other->special_exports() &&
+         namespace_imports() == other->namespace_imports();
+}
+#endif
 
 void Map::ClearCodeCache(Heap* heap) {
   // No write barrier is needed since empty_fixed_array is not in new space.
@@ -8176,6 +8270,12 @@
                                                     FAST_ELEMENTS, 2);
 }
 
+ACCESSORS(JSIteratorResult, value, Object, kValueOffset)
+ACCESSORS(JSIteratorResult, done, Object, kDoneOffset)
+
+ACCESSORS(JSStringIterator, string, String, kStringOffset)
+SMI_ACCESSORS(JSStringIterator, index, kNextIndexOffset)
+
 #undef TYPE_CHECKER
 #undef CAST_ACCESSOR
 #undef INT_ACCESSORS
diff --git a/src/objects-printer.cc b/src/objects-printer.cc
index 6f1f746..9054371 100644
--- a/src/objects-printer.cc
+++ b/src/objects-printer.cc
@@ -28,7 +28,8 @@
 
 void Object::Print(std::ostream& os) {  // NOLINT
   if (IsSmi()) {
-    Smi::cast(this)->SmiPrint(os);
+    os << "Smi: " << std::hex << "0x" << Smi::cast(this)->value();
+    os << std::dec << " (" << Smi::cast(this)->value() << ")\n";
   } else {
     HeapObject::cast(this)->HeapObjectPrint(os);
   }
@@ -52,6 +53,7 @@
   HandleScope scope(GetIsolate());
   if (instance_type < FIRST_NONSTRING_TYPE) {
     String::cast(this)->StringPrint(os);
+    os << "\n";
     return;
   }
 
@@ -318,18 +320,37 @@
   }
 }
 
+namespace {
+
+template <class T>
+double GetScalarElement(T* array, int index) {
+  return array->get_scalar(index);
+}
+
+double GetScalarElement(FixedDoubleArray* array, int index) {
+  if (array->is_the_hole(index)) return bit_cast<double>(kHoleNanInt64);
+  return array->get_scalar(index);
+}
+
+bool is_the_hole(double maybe_hole) {
+  return bit_cast<uint64_t>(maybe_hole) == kHoleNanInt64;
+}
+
+}  // namespace
+
 template <class T, bool print_the_hole>
 static void DoPrintElements(std::ostream& os, Object* object) {  // NOLINT
   T* array = T::cast(object);
   if (array->length() == 0) return;
   int previous_index = 0;
-  double previous_value = array->get_scalar(0);
+  double previous_value = GetScalarElement(array, 0);
   double value = 0.0;
   int i;
   for (i = 1; i <= array->length(); i++) {
-    if (i < array->length()) value = array->get_scalar(i);
+    if (i < array->length()) value = GetScalarElement(array, i);
     bool values_are_nan = std::isnan(previous_value) && std::isnan(value);
-    if ((previous_value == value || values_are_nan) && i != array->length()) {
+    if (i != array->length() && (previous_value == value || values_are_nan) &&
+        is_the_hole(previous_value) == is_the_hole(value)) {
       continue;
     }
     os << "\n";
@@ -339,8 +360,7 @@
       ss << '-' << (i - 1);
     }
     os << std::setw(12) << ss.str() << ": ";
-    if (print_the_hole &&
-        FixedDoubleArray::cast(object)->is_the_hole(previous_index)) {
+    if (print_the_hole && is_the_hole(previous_value)) {
       os << "<the_hole>";
     } else {
       os << previous_value;
@@ -390,22 +410,12 @@
       break;
     }
 
-#define PRINT_ELEMENTS(Kind, Type)                \
-  case Kind: {                                    \
-    DoPrintElements<Type, false>(os, elements()); \
-    break;                                        \
+#define PRINT_ELEMENTS(Type, type, TYPE, elementType, size)     \
+  case TYPE##_ELEMENTS: {                                       \
+    DoPrintElements<Fixed##Type##Array, false>(os, elements()); \
+    break;                                                      \
   }
-
-      PRINT_ELEMENTS(UINT8_ELEMENTS, FixedUint8Array)
-      PRINT_ELEMENTS(UINT8_CLAMPED_ELEMENTS, FixedUint8ClampedArray)
-      PRINT_ELEMENTS(INT8_ELEMENTS, FixedInt8Array)
-      PRINT_ELEMENTS(UINT16_ELEMENTS, FixedUint16Array)
-      PRINT_ELEMENTS(INT16_ELEMENTS, FixedInt16Array)
-      PRINT_ELEMENTS(UINT32_ELEMENTS, FixedUint32Array)
-      PRINT_ELEMENTS(INT32_ELEMENTS, FixedInt32Array)
-      PRINT_ELEMENTS(FLOAT32_ELEMENTS, FixedFloat32Array)
-      PRINT_ELEMENTS(FLOAT64_ELEMENTS, FixedFloat64Array)
-
+      TYPED_ARRAYS(PRINT_ELEMENTS)
 #undef PRINT_ELEMENTS
 
     case DICTIONARY_ELEMENTS:
@@ -732,6 +742,16 @@
         os << Code::ICState2String(nexus.StateFromFeedback());
         break;
       }
+      case FeedbackVectorSlotKind::INTERPRETER_BINARYOP_IC: {
+        BinaryOpICNexus nexus(this, slot);
+        os << Code::ICState2String(nexus.StateFromFeedback());
+        break;
+      }
+      case FeedbackVectorSlotKind::INTERPRETER_COMPARE_IC: {
+        CompareICNexus nexus(this, slot);
+        os << Code::ICState2String(nexus.StateFromFeedback());
+        break;
+      }
       case FeedbackVectorSlotKind::GENERAL:
         break;
       case FeedbackVectorSlotKind::INVALID:
@@ -911,7 +931,7 @@
   JSObjectPrintHeader(os, this, "JSArrayBuffer");
   os << "\n - backing_store = " << backing_store();
   os << "\n - byte_length = " << Brief(byte_length());
-  if (was_neutered()) os << " - neutered\n";
+  if (was_neutered()) os << "\n - neutered";
   JSObjectPrintBody(os, this, !was_neutered());
 }
 
@@ -922,7 +942,7 @@
   os << "\n - byte_offset = " << Brief(byte_offset());
   os << "\n - byte_length = " << Brief(byte_length());
   os << "\n - length = " << Brief(length());
-  if (WasNeutered()) os << " - neutered\n";
+  if (WasNeutered()) os << "\n - neutered";
   JSObjectPrintBody(os, this, !WasNeutered());
 }
 
@@ -932,7 +952,7 @@
   os << "\n - buffer =" << Brief(buffer());
   os << "\n - byte_offset = " << Brief(byte_offset());
   os << "\n - byte_length = " << Brief(byte_length());
-  if (WasNeutered()) os << " - neutered\n";
+  if (WasNeutered()) os << "\n - neutered";
   JSObjectPrintBody(os, this, !WasNeutered());
 }
 
@@ -954,9 +974,9 @@
   os << "\n - name = " << Brief(shared()->name());
   os << "\n - formal_parameter_count = "
      << shared()->internal_formal_parameter_count();
-  if (shared()->is_generator()) {
+  if (IsGeneratorFunction(shared()->kind())) {
     os << "\n   - generator";
-  } else if (shared()->is_async()) {
+  } else if (IsAsyncFunction(shared()->kind())) {
     os << "\n   - async";
   }
   os << "\n - context = " << Brief(context());
@@ -1127,6 +1147,26 @@
   os << "\n";
 }
 
+void PromiseContainer::PromiseContainerPrint(std::ostream& os) {  // NOLINT
+  HeapObject::PrintHeader(os, "PromiseContainer");
+  os << "\n - thenable: " << Brief(thenable());
+  os << "\n - then: " << Brief(then());
+  os << "\n - resolve: " << Brief(resolve());
+  os << "\n - reject: " << Brief(reject());
+  os << "\n - before debug event: " << Brief(before_debug_event());
+  os << "\n - after debug event: " << Brief(after_debug_event());
+  os << "\n";
+}
+
+void Module::ModulePrint(std::ostream& os) {  // NOLINT
+  HeapObject::PrintHeader(os, "Module");
+  os << "\n - code: " << Brief(code());
+  os << "\n - exports: " << Brief(exports());
+  os << "\n - requested_modules: " << Brief(requested_modules());
+  os << "\n - evaluated: " << evaluated();
+  os << "\n - embedder_data: " << Brief(embedder_data());
+  os << "\n";
+}
 
 void PrototypeInfo::PrototypeInfoPrint(std::ostream& os) {  // NOLINT
   HeapObject::PrintHeader(os, "PrototypeInfo");
@@ -1136,10 +1176,8 @@
   os << "\n";
 }
 
-
-void SloppyBlockWithEvalContextExtension::
-    SloppyBlockWithEvalContextExtensionPrint(std::ostream& os) {  // NOLINT
-  HeapObject::PrintHeader(os, "SloppyBlockWithEvalContextExtension");
+void ContextExtension::ContextExtensionPrint(std::ostream& os) {  // NOLINT
+  HeapObject::PrintHeader(os, "ContextExtension");
   os << "\n - scope_info: " << Brief(scope_info());
   os << "\n - extension: " << Brief(extension());
   os << "\n";
diff --git a/src/objects.cc b/src/objects.cc
index 00721c2..44271db 100644
--- a/src/objects.cc
+++ b/src/objects.cc
@@ -8,6 +8,8 @@
 #include <iomanip>
 #include <memory>
 #include <sstream>
+#include <unordered_map>
+#include <unordered_set>
 
 #include "src/objects-inl.h"
 
@@ -60,7 +62,7 @@
 #include "src/string-stream.h"
 #include "src/utils.h"
 #include "src/wasm/wasm-module.h"
-#include "src/zone.h"
+#include "src/zone/zone.h"
 
 #ifdef ENABLE_DISASSEMBLER
 #include "src/disasm.h"
@@ -995,12 +997,12 @@
       case LookupIterator::ACCESSOR:
         return GetPropertyWithAccessor(it);
       case LookupIterator::INTEGER_INDEXED_EXOTIC:
-        return ReadAbsentProperty(it);
+        return it->isolate()->factory()->undefined_value();
       case LookupIterator::DATA:
         return it->GetDataValue();
     }
   }
-  return ReadAbsentProperty(it);
+  return it->isolate()->factory()->undefined_value();
 }
 
 
@@ -1349,7 +1351,7 @@
                                    Object::DONT_THROW);
     Handle<Object> result = args.Call(call_fun, name);
     RETURN_EXCEPTION_IF_SCHEDULED_EXCEPTION(isolate, Object);
-    if (result.is_null()) return ReadAbsentProperty(isolate, receiver, name);
+    if (result.is_null()) return isolate->factory()->undefined_value();
     // Rebox handle before return.
     return handle(*result, isolate);
   }
@@ -1366,7 +1368,7 @@
         receiver, Handle<JSReceiver>::cast(getter));
   }
   // Getter is not a function.
-  return ReadAbsentProperty(isolate, receiver, it->GetName());
+  return isolate->factory()->undefined_value();
 }
 
 // static
@@ -1677,6 +1679,71 @@
   return Just(result);
 }
 
+Maybe<bool> DefinePropertyWithInterceptorInternal(
+    LookupIterator* it, Handle<InterceptorInfo> interceptor,
+    Object::ShouldThrow should_throw, PropertyDescriptor& desc) {
+  Isolate* isolate = it->isolate();
+  // Make sure that the top context does not change when doing callbacks or
+  // interceptor calls.
+  AssertNoContextChange ncc(isolate);
+
+  if (interceptor->definer()->IsUndefined(isolate)) return Just(false);
+
+  Handle<JSObject> holder = it->GetHolder<JSObject>();
+  bool result;
+  Handle<Object> receiver = it->GetReceiver();
+  if (!receiver->IsJSReceiver()) {
+    ASSIGN_RETURN_ON_EXCEPTION_VALUE(isolate, receiver,
+                                     Object::ConvertReceiver(isolate, receiver),
+                                     Nothing<bool>());
+  }
+  PropertyCallbackArguments args(isolate, interceptor->data(), *receiver,
+                                 *holder, should_throw);
+
+  std::unique_ptr<v8::PropertyDescriptor> descriptor(
+      new v8::PropertyDescriptor());
+  if (PropertyDescriptor::IsAccessorDescriptor(&desc)) {
+    descriptor.reset(new v8::PropertyDescriptor(
+        v8::Utils::ToLocal(desc.get()), v8::Utils::ToLocal(desc.set())));
+  } else if (PropertyDescriptor::IsDataDescriptor(&desc)) {
+    if (desc.has_writable()) {
+      descriptor.reset(new v8::PropertyDescriptor(
+          v8::Utils::ToLocal(desc.value()), desc.writable()));
+    } else {
+      descriptor.reset(
+          new v8::PropertyDescriptor(v8::Utils::ToLocal(desc.value())));
+    }
+  }
+  if (desc.has_enumerable()) {
+    descriptor->set_enumerable(desc.enumerable());
+  }
+  if (desc.has_configurable()) {
+    descriptor->set_configurable(desc.configurable());
+  }
+
+  if (it->IsElement()) {
+    uint32_t index = it->index();
+    v8::IndexedPropertyDefinerCallback definer =
+        v8::ToCData<v8::IndexedPropertyDefinerCallback>(interceptor->definer());
+    result = !args.Call(definer, index, *descriptor).is_null();
+  } else {
+    Handle<Name> name = it->name();
+    DCHECK(!name->IsPrivate());
+
+    if (name->IsSymbol() && !interceptor->can_intercept_symbols()) {
+      return Just(false);
+    }
+
+    v8::GenericNamedPropertyDefinerCallback definer =
+        v8::ToCData<v8::GenericNamedPropertyDefinerCallback>(
+            interceptor->definer());
+    result = !args.Call(definer, name, *descriptor).is_null();
+  }
+
+  RETURN_VALUE_IF_SCHEDULED_EXCEPTION(it->isolate(), Nothing<bool>());
+  return Just(result);
+}
+
 }  // namespace
 
 MaybeHandle<Object> JSObject::GetPropertyWithFailedAccessCheck(
@@ -2415,10 +2482,6 @@
       accumulator->Add("<JS Generator>");
       break;
     }
-    case JS_MODULE_TYPE: {
-      accumulator->Add("<JS Module>");
-      break;
-    }
     // All other JSObjects are rather similar to each other (JSObject,
     // JSGlobalProxy, JSGlobalObject, JSUndetectable, JSValue).
     default: {
@@ -3449,9 +3512,16 @@
   // Ensure that in-object space of slow-mode object does not contain random
   // garbage.
   int inobject_properties = new_map->GetInObjectProperties();
-  for (int i = 0; i < inobject_properties; i++) {
-    FieldIndex index = FieldIndex::ForPropertyIndex(*new_map, i);
-    object->RawFastPropertyAtPut(index, Smi::FromInt(0));
+  if (inobject_properties) {
+    Heap* heap = isolate->heap();
+    heap->ClearRecordedSlotRange(
+        object->address() + map->GetInObjectPropertyOffset(0),
+        object->address() + new_instance_size);
+
+    for (int i = 0; i < inobject_properties; i++) {
+      FieldIndex index = FieldIndex::ForPropertyIndex(*new_map, i);
+      object->RawFastPropertyAtPut(index, Smi::FromInt(0));
+    }
   }
 
   isolate->counters()->props_to_dictionary()->Increment();
@@ -4576,13 +4646,6 @@
           if (result.IsNothing() || result.FromJust()) return result;
           // Interceptor modified the store target but failed to set the
           // property.
-          // TODO(jochen): Remove after we've identified the faulty interceptor.
-          if (!store_target_map.is_null() &&
-              *store_target_map != it->GetStoreTarget()->map()) {
-            it->isolate()->PushStackTraceAndDie(
-                0xabababaa, v8::ToCData<void*>(it->GetInterceptor()->setter()),
-                nullptr, 0xabababab);
-          }
           Utils::ApiCheck(store_target_map.is_null() ||
                               *store_target_map == it->GetStoreTarget()->map(),
                           it->IsElement() ? "v8::IndexedPropertySetterCallback"
@@ -4761,17 +4824,6 @@
   return AddDataProperty(&own_lookup, value, NONE, should_throw, store_mode);
 }
 
-MaybeHandle<Object> Object::ReadAbsentProperty(LookupIterator* it) {
-  return it->isolate()->factory()->undefined_value();
-}
-
-MaybeHandle<Object> Object::ReadAbsentProperty(Isolate* isolate,
-                                               Handle<Object> receiver,
-                                               Handle<Object> name) {
-  return isolate->factory()->undefined_value();
-}
-
-
 Maybe<bool> Object::CannotCreateProperty(Isolate* isolate,
                                          Handle<Object> receiver,
                                          Handle<Object> name,
@@ -6542,6 +6594,34 @@
     it.Next();
   }
 
+  // Handle interceptor
+  if (it.state() == LookupIterator::INTERCEPTOR) {
+    Handle<Map> store_target_map;
+    if (it.GetReceiver()->IsJSObject()) {
+      store_target_map = handle(it.GetStoreTarget()->map(), it.isolate());
+    }
+    if (it.HolderIsReceiverOrHiddenPrototype()) {
+      Maybe<bool> result = DefinePropertyWithInterceptorInternal(
+          &it, it.GetInterceptor(), should_throw, *desc);
+      if (result.IsNothing() || result.FromJust()) {
+        return result;
+      }
+      // Interceptor modified the store target but failed to set the
+      // property.
+      if (!store_target_map.is_null() &&
+          *store_target_map != it.GetStoreTarget()->map()) {
+        it.isolate()->PushStackTraceAndDie(
+            0xabababaa, v8::ToCData<void*>(it.GetInterceptor()->definer()),
+            nullptr, 0xabababab);
+      }
+      Utils::ApiCheck(store_target_map.is_null() ||
+                          *store_target_map == it.GetStoreTarget()->map(),
+                      it.IsElement() ? "v8::IndexedPropertyDefinerCallback"
+                                     : "v8::NamedPropertyDefinerCallback",
+                      "Interceptor silently changed store target.");
+    }
+  }
+
   return OrdinaryDefineOwnProperty(&it, desc, should_throw);
 }
 
@@ -7261,6 +7341,57 @@
   return GetOwnPropertyDescriptor(&it, desc);
 }
 
+namespace {
+
+Maybe<bool> GetPropertyDescriptorWithInterceptor(LookupIterator* it,
+                                                 PropertyDescriptor* desc) {
+  if (it->state() == LookupIterator::INTERCEPTOR) {
+    Isolate* isolate = it->isolate();
+    Handle<InterceptorInfo> interceptor = it->GetInterceptor();
+    if (!interceptor->descriptor()->IsUndefined(isolate)) {
+      Handle<Object> result;
+      Handle<JSObject> holder = it->GetHolder<JSObject>();
+
+      Handle<Object> receiver = it->GetReceiver();
+      if (!receiver->IsJSReceiver()) {
+        ASSIGN_RETURN_ON_EXCEPTION_VALUE(
+            isolate, receiver, Object::ConvertReceiver(isolate, receiver),
+            Nothing<bool>());
+      }
+
+      PropertyCallbackArguments args(isolate, interceptor->data(), *receiver,
+                                     *holder, Object::DONT_THROW);
+      if (it->IsElement()) {
+        uint32_t index = it->index();
+        v8::IndexedPropertyDescriptorCallback descriptorCallback =
+            v8::ToCData<v8::IndexedPropertyDescriptorCallback>(
+                interceptor->descriptor());
+
+        result = args.Call(descriptorCallback, index);
+      } else {
+        Handle<Name> name = it->name();
+        DCHECK(!name->IsPrivate());
+        v8::GenericNamedPropertyDescriptorCallback descriptorCallback =
+            v8::ToCData<v8::GenericNamedPropertyDescriptorCallback>(
+                interceptor->descriptor());
+        result = args.Call(descriptorCallback, name);
+      }
+      if (!result.is_null()) {
+        // Request successfully intercepted, try to set the property
+        // descriptor.
+        Utils::ApiCheck(
+            PropertyDescriptor::ToPropertyDescriptor(isolate, result, desc),
+            it->IsElement() ? "v8::IndexedPropertyDescriptorCallback"
+                            : "v8::NamedPropertyDescriptorCallback",
+            "Invalid property descriptor.");
+
+        return Just(true);
+      }
+    }
+  }
+  return Just(false);
+}
+}  // namespace
 
 // ES6 9.1.5.1
 // Returns true on success, false if the property didn't exist, nothing if
@@ -7275,6 +7406,13 @@
                                              it->GetName(), desc);
   }
 
+  Maybe<bool> intercepted = GetPropertyDescriptorWithInterceptor(it, desc);
+  MAYBE_RETURN(intercepted, Nothing<bool>());
+  if (intercepted.FromJust()) {
+    return Just(true);
+  }
+
+  // Request was not intercepted, continue as normal.
   // 1. (Assert)
   // 2. If O does not have an own property with key P, return undefined.
   Maybe<PropertyAttributes> maybe = JSObject::GetPropertyAttributes(it);
@@ -9367,12 +9505,6 @@
       *map, map->is_prototype_map()
                 ? &RuntimeCallStats::PrototypeMap_TransitionToDataProperty
                 : &RuntimeCallStats::Map_TransitionToDataProperty);
-  TRACE_EVENT_RUNTIME_CALL_STATS_TRACING_SCOPED(
-      map->GetIsolate(),
-      (map->is_prototype_map()
-           ? &tracing::TraceEventStatsTable::
-                 PrototypeMap_TransitionToDataProperty
-           : &tracing::TraceEventStatsTable::Map_TransitionToDataProperty))
 
   DCHECK(name->IsUniqueName());
   DCHECK(!map->is_dictionary_map());
@@ -9459,12 +9591,6 @@
       map->is_prototype_map()
           ? &RuntimeCallStats::PrototypeMap_TransitionToAccessorProperty
           : &RuntimeCallStats::Map_TransitionToAccessorProperty);
-  TRACE_EVENT_RUNTIME_CALL_STATS_TRACING_SCOPED(
-      isolate,
-      (map->is_prototype_map()
-           ? &tracing::TraceEventStatsTable::
-                 PrototypeMap_TransitionToAccessorProperty
-           : &tracing::TraceEventStatsTable::Map_TransitionToAccessorProperty));
 
   // At least one of the accessors needs to be a new value.
   DCHECK(!getter->IsNull(isolate) || !setter->IsNull(isolate));
@@ -10177,22 +10303,76 @@
   return kFirstIndex + Length() == capacity;
 }
 
+namespace {
 
-Handle<ArrayList> ArrayList::EnsureSpace(Handle<ArrayList> array, int length) {
+Handle<FixedArray> EnsureSpaceInFixedArray(Handle<FixedArray> array,
+                                           int length) {
   int capacity = array->length();
-  bool empty = (capacity == 0);
-  if (capacity < kFirstIndex + length) {
+  if (capacity < length) {
     Isolate* isolate = array->GetIsolate();
-    int new_capacity = kFirstIndex + length;
+    int new_capacity = length;
     new_capacity = new_capacity + Max(new_capacity / 2, 2);
     int grow_by = new_capacity - capacity;
     array = Handle<ArrayList>::cast(
         isolate->factory()->CopyFixedArrayAndGrow(array, grow_by));
-    if (empty) array->SetLength(0);
   }
   return array;
 }
 
+}  // namespace
+
+Handle<ArrayList> ArrayList::EnsureSpace(Handle<ArrayList> array, int length) {
+  const bool empty = (array->length() == 0);
+  auto ret = Handle<ArrayList>::cast(
+      EnsureSpaceInFixedArray(array, kFirstIndex + length));
+  if (empty) ret->SetLength(0);
+  return ret;
+}
+
+// static
+Handle<FrameArray> FrameArray::AppendJSFrame(Handle<FrameArray> in,
+                                             Handle<Object> receiver,
+                                             Handle<JSFunction> function,
+                                             Handle<AbstractCode> code,
+                                             int offset, int flags) {
+  const int frame_count = in->FrameCount();
+  const int new_length = LengthFor(frame_count + 1);
+  Handle<FrameArray> array = EnsureSpace(in, new_length);
+  array->SetReceiver(frame_count, *receiver);
+  array->SetFunction(frame_count, *function);
+  array->SetCode(frame_count, *code);
+  array->SetOffset(frame_count, Smi::FromInt(offset));
+  array->SetFlags(frame_count, Smi::FromInt(flags));
+  array->set(kFrameCountIndex, Smi::FromInt(frame_count + 1));
+  return array;
+}
+
+// static
+Handle<FrameArray> FrameArray::AppendWasmFrame(Handle<FrameArray> in,
+                                               Handle<Object> wasm_object,
+                                               int wasm_function_index,
+                                               Handle<AbstractCode> code,
+                                               int offset, int flags) {
+  const int frame_count = in->FrameCount();
+  const int new_length = LengthFor(frame_count + 1);
+  Handle<FrameArray> array = EnsureSpace(in, new_length);
+  array->SetWasmObject(frame_count, *wasm_object);
+  array->SetWasmFunctionIndex(frame_count, Smi::FromInt(wasm_function_index));
+  array->SetCode(frame_count, *code);
+  array->SetOffset(frame_count, Smi::FromInt(offset));
+  array->SetFlags(frame_count, Smi::FromInt(flags));
+  array->set(kFrameCountIndex, Smi::FromInt(frame_count + 1));
+  return array;
+}
+
+void FrameArray::ShrinkToFit() { Shrink(LengthFor(FrameCount())); }
+
+// static
+Handle<FrameArray> FrameArray::EnsureSpace(Handle<FrameArray> array,
+                                           int length) {
+  return Handle<FrameArray>::cast(EnsureSpaceInFixedArray(array, length));
+}
+
 Handle<DescriptorArray> DescriptorArray::Allocate(Isolate* isolate,
                                                   int number_of_descriptors,
                                                   int slack,
@@ -10919,7 +11099,7 @@
       if ((type & kStringRepresentationMask) != kConsStringTag) {
         AdjustMaximumDepth();
         int length = string->length();
-        DCHECK(length != 0);
+        if (length == 0) break;  // Skip empty left-hand sides of ConsStrings.
         consumed_ += length;
         return string;
       }
@@ -11461,6 +11641,118 @@
   return SearchString(isolate, seq_sub.ToUC16Vector(), pat_vector, start_index);
 }
 
+namespace {  // for String.Prototype.lastIndexOf
+
+template <typename schar, typename pchar>
+int StringMatchBackwards(Vector<const schar> subject,
+                         Vector<const pchar> pattern, int idx) {
+  int pattern_length = pattern.length();
+  DCHECK(pattern_length >= 1);
+  DCHECK(idx + pattern_length <= subject.length());
+
+  if (sizeof(schar) == 1 && sizeof(pchar) > 1) {
+    for (int i = 0; i < pattern_length; i++) {
+      uc16 c = pattern[i];
+      if (c > String::kMaxOneByteCharCode) {
+        return -1;
+      }
+    }
+  }
+
+  pchar pattern_first_char = pattern[0];
+  for (int i = idx; i >= 0; i--) {
+    if (subject[i] != pattern_first_char) continue;
+    int j = 1;
+    while (j < pattern_length) {
+      if (pattern[j] != subject[i + j]) {
+        break;
+      }
+      j++;
+    }
+    if (j == pattern_length) {
+      return i;
+    }
+  }
+  return -1;
+}
+
+}  // namespace
+
+Object* String::LastIndexOf(Isolate* isolate, Handle<Object> receiver,
+                            Handle<Object> search, Handle<Object> position) {
+  if (receiver->IsNull(isolate) || receiver->IsUndefined(isolate)) {
+    THROW_NEW_ERROR_RETURN_FAILURE(
+        isolate, NewTypeError(MessageTemplate::kCalledOnNullOrUndefined,
+                              isolate->factory()->NewStringFromAsciiChecked(
+                                  "String.prototype.lastIndexOf")));
+  }
+  Handle<String> receiver_string;
+  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, receiver_string,
+                                     Object::ToString(isolate, receiver));
+
+  Handle<String> search_string;
+  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, search_string,
+                                     Object::ToString(isolate, search));
+
+  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, position,
+                                     Object::ToNumber(position));
+
+  uint32_t start_index;
+
+  if (position->IsNaN()) {
+    start_index = receiver_string->length();
+  } else {
+    ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, position,
+                                       Object::ToInteger(isolate, position));
+
+    double position_number = std::max(position->Number(), 0.0);
+    position_number = std::min(position_number,
+                               static_cast<double>(receiver_string->length()));
+    start_index = static_cast<uint32_t>(position_number);
+  }
+
+  uint32_t pattern_length = search_string->length();
+  uint32_t receiver_length = receiver_string->length();
+
+  if (start_index + pattern_length > receiver_length) {
+    start_index = receiver_length - pattern_length;
+  }
+
+  if (pattern_length == 0) {
+    return Smi::FromInt(start_index);
+  }
+
+  receiver_string = String::Flatten(receiver_string);
+  search_string = String::Flatten(search_string);
+
+  int last_index = -1;
+  DisallowHeapAllocation no_gc;  // ensure vectors stay valid
+
+  String::FlatContent receiver_content = receiver_string->GetFlatContent();
+  String::FlatContent search_content = search_string->GetFlatContent();
+
+  if (search_content.IsOneByte()) {
+    Vector<const uint8_t> pat_vector = search_content.ToOneByteVector();
+    if (receiver_content.IsOneByte()) {
+      last_index = StringMatchBackwards(receiver_content.ToOneByteVector(),
+                                        pat_vector, start_index);
+    } else {
+      last_index = StringMatchBackwards(receiver_content.ToUC16Vector(),
+                                        pat_vector, start_index);
+    }
+  } else {
+    Vector<const uc16> pat_vector = search_content.ToUC16Vector();
+    if (receiver_content.IsOneByte()) {
+      last_index = StringMatchBackwards(receiver_content.ToOneByteVector(),
+                                        pat_vector, start_index);
+    } else {
+      last_index = StringMatchBackwards(receiver_content.ToUC16Vector(),
+                                        pat_vector, start_index);
+    }
+  }
+  return Smi::FromInt(last_index);
+}
+
 bool String::IsUtf8EqualTo(Vector<const char> str, bool allow_prefix_match) {
   int slen = length();
   // Can't check exact length equality, but we can check bounds.
@@ -12361,8 +12653,6 @@
 void Map::SetPrototype(Handle<Map> map, Handle<Object> prototype,
                        PrototypeOptimizationMode proto_mode) {
   RuntimeCallTimerScope stats_scope(*map, &RuntimeCallStats::Map_SetPrototype);
-  TRACE_EVENT_RUNTIME_CALL_STATS_TRACING_SCOPED(
-      map->GetIsolate(), &tracing::TraceEventStatsTable::Map_SetPrototype);
 
   bool is_hidden = false;
   if (prototype->IsJSObject()) {
@@ -12562,7 +12852,6 @@
     case JS_MAP_ITERATOR_TYPE:
     case JS_MAP_TYPE:
     case JS_MESSAGE_OBJECT_TYPE:
-    case JS_MODULE_TYPE:
     case JS_OBJECT_TYPE:
     case JS_ERROR_TYPE:
     case JS_ARGUMENTS_TYPE:
@@ -12620,7 +12909,8 @@
 
 
 void JSFunction::EnsureHasInitialMap(Handle<JSFunction> function) {
-  DCHECK(function->IsConstructor() || function->shared()->is_resumable());
+  DCHECK(function->IsConstructor() ||
+         IsResumableFunction(function->shared()->kind()));
   if (function->has_initial_map()) return;
   Isolate* isolate = function->GetIsolate();
 
@@ -12631,7 +12921,7 @@
   // First create a new map with the size and number of in-object properties
   // suggested by the function.
   InstanceType instance_type;
-  if (function->shared()->is_resumable()) {
+  if (IsResumableFunction(function->shared()->kind())) {
     instance_type = JS_GENERATOR_OBJECT_TYPE;
   } else {
     instance_type = JS_OBJECT_TYPE;
@@ -12862,17 +13152,18 @@
   }
 
   IncrementalStringBuilder builder(isolate);
-  if (!shared_info->is_arrow()) {
-    if (shared_info->is_concise_method()) {
-      if (shared_info->is_generator()) {
+  FunctionKind kind = shared_info->kind();
+  if (!IsArrowFunction(kind)) {
+    if (IsConciseMethod(kind)) {
+      if (IsGeneratorFunction(kind)) {
         builder.AppendCharacter('*');
-      } else if (shared_info->is_async()) {
+      } else if (IsAsyncFunction(kind)) {
         builder.AppendCString("async ");
       }
     } else {
-      if (shared_info->is_generator()) {
+      if (IsGeneratorFunction(kind)) {
         builder.AppendCString("function* ");
-      } else if (shared_info->is_async()) {
+      } else if (IsAsyncFunction(kind)) {
         builder.AppendCString("async function ");
       } else {
         builder.AppendCString("function ");
@@ -13455,9 +13746,9 @@
 
 void SharedFunctionInfo::InitFromFunctionLiteral(
     Handle<SharedFunctionInfo> shared_info, FunctionLiteral* lit) {
-  // When adding fields here, make sure Scope::AnalyzePartially is updated
-  // accordingly.
-  shared_info->set_length(lit->scope()->default_function_length());
+  // When adding fields here, make sure DeclarationScope::AnalyzePartially is
+  // updated accordingly.
+  shared_info->set_length(lit->scope()->arity());
   shared_info->set_internal_formal_parameter_count(lit->parameter_count());
   shared_info->set_function_token_position(lit->function_token_position());
   shared_info->set_start_position(lit->start_position());
@@ -13481,6 +13772,9 @@
   }
   shared_info->set_needs_home_object(lit->scope()->NeedsHomeObject());
   shared_info->set_asm_function(lit->scope()->asm_function());
+  shared_info->set_requires_class_field_init(lit->requires_class_field_init());
+  shared_info->set_is_class_field_initializer(
+      lit->is_class_field_initializer());
   SetExpectedNofPropertiesFromEstimate(shared_info, lit);
 }
 
@@ -15433,10 +15727,11 @@
   return false;
 }
 
-
-void AllocationSite::DigestTransitionFeedback(Handle<AllocationSite> site,
+template <AllocationSiteUpdateMode update_or_check>
+bool AllocationSite::DigestTransitionFeedback(Handle<AllocationSite> site,
                                               ElementsKind to_kind) {
   Isolate* isolate = site->GetIsolate();
+  bool result = false;
 
   if (site->SitePointsToLiteral() && site->transition_info()->IsJSArray()) {
     Handle<JSArray> transition_info =
@@ -15452,6 +15747,9 @@
       uint32_t length = 0;
       CHECK(transition_info->length()->ToArrayLength(&length));
       if (length <= kMaximumArrayBytesToPretransition) {
+        if (update_or_check == AllocationSiteUpdateMode::kCheckOnly) {
+          return true;
+        }
         if (FLAG_trace_track_allocation_sites) {
           bool is_nested = site->IsNestedSite();
           PrintF(
@@ -15464,6 +15762,7 @@
         JSObject::TransitionElementsKind(transition_info, to_kind);
         site->dependent_code()->DeoptimizeDependentCodeGroup(
             isolate, DependentCode::kAllocationSiteTransitionChangedGroup);
+        result = true;
       }
     }
   } else {
@@ -15473,6 +15772,7 @@
       to_kind = GetHoleyElementsKind(to_kind);
     }
     if (IsMoreGeneralElementsKindTransition(kind, to_kind)) {
+      if (update_or_check == AllocationSiteUpdateMode::kCheckOnly) return true;
       if (FLAG_trace_track_allocation_sites) {
         PrintF("AllocationSite: JSArray %p site updated %s->%s\n",
                reinterpret_cast<void*>(*site),
@@ -15482,8 +15782,10 @@
       site->SetElementsKind(to_kind);
       site->dependent_code()->DeoptimizeDependentCodeGroup(
           isolate, DependentCode::kAllocationSiteTransitionChangedGroup);
+      result = true;
     }
   }
+  return result;
 }
 
 
@@ -15499,13 +15801,13 @@
   return NULL;
 }
 
-
-void JSObject::UpdateAllocationSite(Handle<JSObject> object,
+template <AllocationSiteUpdateMode update_or_check>
+bool JSObject::UpdateAllocationSite(Handle<JSObject> object,
                                     ElementsKind to_kind) {
-  if (!object->IsJSArray()) return;
+  if (!object->IsJSArray()) return false;
 
   Heap* heap = object->GetHeap();
-  if (!heap->InNewSpace(*object)) return;
+  if (!heap->InNewSpace(*object)) return false;
 
   Handle<AllocationSite> site;
   {
@@ -15513,14 +15815,21 @@
 
     AllocationMemento* memento =
         heap->FindAllocationMemento<Heap::kForRuntime>(*object);
-    if (memento == NULL) return;
+    if (memento == NULL) return false;
 
     // Walk through to the Allocation Site
     site = handle(memento->GetAllocationSite());
   }
-  AllocationSite::DigestTransitionFeedback(site, to_kind);
+  return AllocationSite::DigestTransitionFeedback<update_or_check>(site,
+                                                                   to_kind);
 }
 
+template bool
+JSObject::UpdateAllocationSite<AllocationSiteUpdateMode::kCheckOnly>(
+    Handle<JSObject> object, ElementsKind to_kind);
+
+template bool JSObject::UpdateAllocationSite<AllocationSiteUpdateMode::kUpdate>(
+    Handle<JSObject> object, ElementsKind to_kind);
 
 void JSObject::TransitionElementsKind(Handle<JSObject> object,
                                       ElementsKind to_kind) {
@@ -15729,7 +16038,7 @@
 }
 
 int FixedArrayBase::GetMaxLengthForNewSpaceAllocation(ElementsKind kind) {
-  return ((Page::kMaxRegularHeapObjectSize - FixedArrayBase::kHeaderSize) >>
+  return ((kMaxRegularHeapObjectSize - FixedArrayBase::kHeaderSize) >>
           ElementsKindToShiftSize(kind));
 }
 
@@ -17984,7 +18293,8 @@
     if (capacity > ObjectHashTable::kMaxCapacity) {
       for (size_t i = 0; i < 2; ++i) {
         isolate->heap()->CollectAllGarbage(
-            Heap::kFinalizeIncrementalMarkingMask, "full object hash table");
+            Heap::kFinalizeIncrementalMarkingMask,
+            GarbageCollectionReason::kFullHashtable);
       }
       table->Rehash(isolate->factory()->undefined_value());
     }
@@ -19281,5 +19591,359 @@
   return false;
 }
 
+namespace {
+
+template <typename T>
+struct HandleValueHash {
+  V8_INLINE size_t operator()(Handle<T> handle) const { return handle->Hash(); }
+};
+
+struct ModuleHandleEqual {
+  V8_INLINE bool operator()(Handle<Module> lhs, Handle<Module> rhs) const {
+    return *lhs == *rhs;
+  }
+};
+
+struct StringHandleEqual {
+  V8_INLINE bool operator()(Handle<String> lhs, Handle<String> rhs) const {
+    return lhs->Equals(*rhs);
+  }
+};
+
+class UnorderedStringSet
+    : public std::unordered_set<Handle<String>, HandleValueHash<String>,
+                                StringHandleEqual,
+                                zone_allocator<Handle<String>>> {
+ public:
+  explicit UnorderedStringSet(Zone* zone)
+      : std::unordered_set<Handle<String>, HandleValueHash<String>,
+                           StringHandleEqual, zone_allocator<Handle<String>>>(
+            2 /* bucket count */, HandleValueHash<String>(),
+            StringHandleEqual(), zone_allocator<Handle<String>>(zone)) {}
+};
+
+}  // anonymous namespace
+
+class Module::ResolveSet
+    : public std::unordered_map<
+          Handle<Module>, UnorderedStringSet*, HandleValueHash<Module>,
+          ModuleHandleEqual, zone_allocator<std::pair<const Handle<Module>,
+                                                      UnorderedStringSet*>>> {
+ public:
+  explicit ResolveSet(Zone* zone)
+      : std::unordered_map<Handle<Module>, UnorderedStringSet*,
+                           HandleValueHash<Module>, ModuleHandleEqual,
+                           zone_allocator<std::pair<const Handle<Module>,
+                                                    UnorderedStringSet*>>>(
+            2 /* bucket count */, HandleValueHash<Module>(),
+            ModuleHandleEqual(),
+            zone_allocator<
+                std::pair<const Handle<Module>, UnorderedStringSet*>>(zone)),
+        zone_(zone) {}
+
+  Zone* zone() const { return zone_; }
+
+ private:
+  Zone* zone_;
+};
+
+void Module::CreateIndirectExport(Handle<Module> module, Handle<String> name,
+                                  Handle<ModuleInfoEntry> entry) {
+  Isolate* isolate = module->GetIsolate();
+  Handle<ObjectHashTable> exports(module->exports(), isolate);
+  DCHECK(exports->Lookup(name)->IsTheHole(isolate));
+  exports = ObjectHashTable::Put(exports, name, entry);
+  module->set_exports(*exports);
+}
+
+void Module::CreateExport(Handle<Module> module, Handle<FixedArray> names) {
+  DCHECK_LT(0, names->length());
+  Isolate* isolate = module->GetIsolate();
+  Handle<Cell> cell =
+      isolate->factory()->NewCell(isolate->factory()->undefined_value());
+  Handle<ObjectHashTable> exports(module->exports(), isolate);
+  for (int i = 0, n = names->length(); i < n; ++i) {
+    Handle<String> name(String::cast(names->get(i)), isolate);
+    DCHECK(exports->Lookup(name)->IsTheHole(isolate));
+    exports = ObjectHashTable::Put(exports, name, cell);
+  }
+  module->set_exports(*exports);
+}
+
+void Module::StoreExport(Handle<Module> module, Handle<String> name,
+                         Handle<Object> value) {
+  Handle<Cell> cell(Cell::cast(module->exports()->Lookup(name)));
+  cell->set_value(*value);
+}
+
+Handle<Object> Module::LoadExport(Handle<Module> module, Handle<String> name) {
+  Isolate* isolate = module->GetIsolate();
+  Handle<Object> object(module->exports()->Lookup(name), isolate);
+
+  // TODO(neis): Namespace imports are not yet implemented.  Trying to use this
+  // feature may crash here.
+  if (!object->IsCell()) UNIMPLEMENTED();
+
+  return handle(Handle<Cell>::cast(object)->value(), isolate);
+}
+
+Handle<Object> Module::LoadImport(Handle<Module> module, Handle<String> name,
+                                  int module_request) {
+  Isolate* isolate = module->GetIsolate();
+  Handle<Module> requested_module(
+      Module::cast(module->requested_modules()->get(module_request)), isolate);
+  return Module::LoadExport(requested_module, name);
+}
+
+MaybeHandle<Cell> Module::ResolveImport(Handle<Module> module,
+                                        Handle<String> name, int module_request,
+                                        bool must_resolve,
+                                        Module::ResolveSet* resolve_set) {
+  Isolate* isolate = module->GetIsolate();
+  Handle<Module> requested_module(
+      Module::cast(module->requested_modules()->get(module_request)), isolate);
+  return Module::ResolveExport(requested_module, name, must_resolve,
+                               resolve_set);
+}
+
+MaybeHandle<Cell> Module::ResolveExport(Handle<Module> module,
+                                        Handle<String> name, bool must_resolve,
+                                        Module::ResolveSet* resolve_set) {
+  Isolate* isolate = module->GetIsolate();
+  Handle<Object> object(module->exports()->Lookup(name), isolate);
+  if (object->IsCell()) {
+    // Already resolved (e.g. because it's a local export).
+    return Handle<Cell>::cast(object);
+  }
+
+  // Check for cycle before recursing.
+  {
+    // Attempt insertion with a null string set.
+    auto result = resolve_set->insert({module, nullptr});
+    UnorderedStringSet*& name_set = result.first->second;
+    if (result.second) {
+      // |module| wasn't in the map previously, so allocate a new name set.
+      Zone* zone = resolve_set->zone();
+      name_set =
+          new (zone->New(sizeof(UnorderedStringSet))) UnorderedStringSet(zone);
+    } else if (name_set->count(name)) {
+      // Cycle detected.
+      if (must_resolve) {
+        THROW_NEW_ERROR(
+            isolate,
+            NewSyntaxError(MessageTemplate::kCyclicModuleDependency, name),
+            Cell);
+      }
+      return MaybeHandle<Cell>();
+    }
+    name_set->insert(name);
+  }
+
+  if (object->IsModuleInfoEntry()) {
+    // Not yet resolved indirect export.
+    Handle<ModuleInfoEntry> entry = Handle<ModuleInfoEntry>::cast(object);
+    int module_request = Smi::cast(entry->module_request())->value();
+    Handle<String> import_name(String::cast(entry->import_name()), isolate);
+
+    Handle<Cell> cell;
+    if (!ResolveImport(module, import_name, module_request, true, resolve_set)
+             .ToHandle(&cell)) {
+      DCHECK(isolate->has_pending_exception());
+      return MaybeHandle<Cell>();
+    }
+
+    // The export table may have changed but the entry in question should be
+    // unchanged.
+    Handle<ObjectHashTable> exports(module->exports(), isolate);
+    DCHECK(exports->Lookup(name)->IsModuleInfoEntry());
+
+    exports = ObjectHashTable::Put(exports, name, cell);
+    module->set_exports(*exports);
+    return cell;
+  }
+
+  DCHECK(object->IsTheHole(isolate));
+  return Module::ResolveExportUsingStarExports(module, name, must_resolve,
+                                               resolve_set);
+}
+
+MaybeHandle<Cell> Module::ResolveExportUsingStarExports(
+    Handle<Module> module, Handle<String> name, bool must_resolve,
+    Module::ResolveSet* resolve_set) {
+  Isolate* isolate = module->GetIsolate();
+  if (!name->Equals(isolate->heap()->default_string())) {
+    // Go through all star exports looking for the given name.  If multiple star
+    // exports provide the name, make sure they all map it to the same cell.
+    Handle<Cell> unique_cell;
+    Handle<FixedArray> special_exports(module->info()->special_exports(),
+                                       isolate);
+    for (int i = 0, n = special_exports->length(); i < n; ++i) {
+      i::Handle<i::ModuleInfoEntry> entry(
+          i::ModuleInfoEntry::cast(special_exports->get(i)), isolate);
+      if (!entry->export_name()->IsUndefined(isolate)) {
+        continue;  // Indirect export.
+      }
+      int module_request = Smi::cast(entry->module_request())->value();
+
+      Handle<Cell> cell;
+      if (ResolveImport(module, name, module_request, false, resolve_set)
+              .ToHandle(&cell)) {
+        if (unique_cell.is_null()) unique_cell = cell;
+        if (*unique_cell != *cell) {
+          THROW_NEW_ERROR(
+              isolate, NewSyntaxError(MessageTemplate::kAmbiguousExport, name),
+              Cell);
+        }
+      } else if (isolate->has_pending_exception()) {
+        return MaybeHandle<Cell>();
+      }
+    }
+
+    if (!unique_cell.is_null()) {
+      // Found a unique star export for this name.
+      Handle<ObjectHashTable> exports(module->exports(), isolate);
+      DCHECK(exports->Lookup(name)->IsTheHole(isolate));
+      exports = ObjectHashTable::Put(exports, name, unique_cell);
+      module->set_exports(*exports);
+      return unique_cell;
+    }
+  }
+
+  // Unresolvable.
+  if (must_resolve) {
+    THROW_NEW_ERROR(isolate,
+                    NewSyntaxError(MessageTemplate::kUnresolvableExport, name),
+                    Cell);
+  }
+  return MaybeHandle<Cell>();
+}
+
+bool Module::Instantiate(Handle<Module> module, v8::Local<v8::Context> context,
+                         v8::Module::ResolveCallback callback,
+                         v8::Local<v8::Value> callback_data) {
+  // Already instantiated.
+  if (module->code()->IsJSFunction()) return true;
+
+  Isolate* isolate = module->GetIsolate();
+  Handle<SharedFunctionInfo> shared(SharedFunctionInfo::cast(module->code()),
+                                    isolate);
+  Handle<JSFunction> function =
+      isolate->factory()->NewFunctionFromSharedFunctionInfo(
+          shared,
+          handle(Utils::OpenHandle(*context)->native_context(), isolate));
+  module->set_code(*function);
+
+  Handle<ModuleInfo> module_info(shared->scope_info()->ModuleDescriptorInfo(),
+                                 isolate);
+
+  // Set up local exports.
+  Handle<FixedArray> regular_exports(module_info->regular_exports(), isolate);
+  for (int i = 0, n = regular_exports->length(); i < n; i += 2) {
+    Handle<FixedArray> export_names(
+        FixedArray::cast(regular_exports->get(i + 1)), isolate);
+    CreateExport(module, export_names);
+  }
+
+  // Partially set up indirect exports.
+  // For each indirect export, we create the appropriate slot in the export
+  // table and store its ModuleInfoEntry there.  When we later find the correct
+  // Cell in the module that actually provides the value, we replace the
+  // ModuleInfoEntry by that Cell (see ResolveExport).
+  Handle<FixedArray> special_exports(module_info->special_exports(), isolate);
+  for (int i = 0, n = special_exports->length(); i < n; ++i) {
+    Handle<ModuleInfoEntry> entry(
+        ModuleInfoEntry::cast(special_exports->get(i)), isolate);
+    Handle<Object> export_name(entry->export_name(), isolate);
+    if (export_name->IsUndefined(isolate)) continue;  // Star export.
+    CreateIndirectExport(module, Handle<String>::cast(export_name), entry);
+  }
+
+  Handle<FixedArray> module_requests(module_info->module_requests(), isolate);
+  for (int i = 0, length = module_requests->length(); i < length; ++i) {
+    Handle<String> specifier(String::cast(module_requests->get(i)), isolate);
+    v8::Local<v8::Module> api_requested_module;
+    // TODO(adamk): Revisit these failure cases once d8 knows how to
+    // persist a module_map across multiple top-level module loads, as
+    // the current module is left in a "half-instantiated" state.
+    if (!callback(context, v8::Utils::ToLocal(specifier),
+                  v8::Utils::ToLocal(module), callback_data)
+             .ToLocal(&api_requested_module)) {
+      // TODO(adamk): Give this a better error message. But this is a
+      // misuse of the API anyway.
+      isolate->ThrowIllegalOperation();
+      return false;
+    }
+    Handle<Module> requested_module = Utils::OpenHandle(*api_requested_module);
+    module->requested_modules()->set(i, *requested_module);
+    if (!Instantiate(requested_module, context, callback, callback_data)) {
+      return false;
+    }
+  }
+
+  Zone zone(isolate->allocator());
+
+  // Resolve imports.
+  Handle<FixedArray> regular_imports(module_info->regular_imports(), isolate);
+  for (int i = 0, n = regular_imports->length(); i < n; ++i) {
+    Handle<ModuleInfoEntry> entry(
+        ModuleInfoEntry::cast(regular_imports->get(i)), isolate);
+    Handle<String> name(String::cast(entry->import_name()), isolate);
+    int module_request = Smi::cast(entry->module_request())->value();
+    ResolveSet resolve_set(&zone);
+    if (ResolveImport(module, name, module_request, true, &resolve_set)
+            .is_null()) {
+      return false;
+    }
+  }
+
+  // Resolve indirect exports.
+  for (int i = 0, n = special_exports->length(); i < n; ++i) {
+    Handle<ModuleInfoEntry> entry(
+        ModuleInfoEntry::cast(special_exports->get(i)), isolate);
+    Handle<Object> name(entry->export_name(), isolate);
+    if (name->IsUndefined(isolate)) continue;  // Star export.
+    ResolveSet resolve_set(&zone);
+    if (ResolveExport(module, Handle<String>::cast(name), true, &resolve_set)
+            .is_null()) {
+      return false;
+    }
+  }
+
+  return true;
+}
+
+MaybeHandle<Object> Module::Evaluate(Handle<Module> module) {
+  DCHECK(module->code()->IsJSFunction());  // Instantiated.
+
+  Isolate* isolate = module->GetIsolate();
+
+  // Each module can only be evaluated once.
+  if (module->evaluated()) return isolate->factory()->undefined_value();
+  module->set_evaluated(true);
+
+  // Initialization.
+  Handle<JSFunction> function(JSFunction::cast(module->code()), isolate);
+  DCHECK_EQ(MODULE_SCOPE, function->shared()->scope_info()->scope_type());
+  Handle<Object> receiver = isolate->factory()->undefined_value();
+  Handle<Object> argv[] = {module};
+  Handle<Object> generator;
+  ASSIGN_RETURN_ON_EXCEPTION(
+      isolate, generator,
+      Execution::Call(isolate, function, receiver, arraysize(argv), argv),
+      Object);
+
+  // Recursion.
+  Handle<FixedArray> requested_modules(module->requested_modules(), isolate);
+  for (int i = 0, length = requested_modules->length(); i < length; ++i) {
+    Handle<Module> import(Module::cast(requested_modules->get(i)), isolate);
+    RETURN_ON_EXCEPTION(isolate, Evaluate(import), Object);
+  }
+
+  // Evaluation of module body.
+  Handle<JSFunction> resume(
+      isolate->native_context()->generator_next_internal(), isolate);
+  return Execution::Call(isolate, resume, generator, 0, nullptr);
+}
+
 }  // namespace internal
 }  // namespace v8
diff --git a/src/objects.h b/src/objects.h
index b7c6703..fcc1f94 100644
--- a/src/objects.h
+++ b/src/objects.h
@@ -22,7 +22,7 @@
 #include "src/property-details.h"
 #include "src/unicode-decoder.h"
 #include "src/unicode.h"
-#include "src/zone.h"
+#include "src/zone/zone.h"
 
 #if V8_TARGET_ARCH_ARM
 #include "src/arm/constants-arm.h"  // NOLINT
@@ -57,6 +57,7 @@
 //         - JSCollection
 //           - JSSet
 //           - JSMap
+//         - JSStringIterator
 //         - JSSetIterator
 //         - JSMapIterator
 //         - JSWeakCollection
@@ -76,6 +77,7 @@
 //       - BytecodeArray
 //       - FixedArray
 //         - DescriptorArray
+//         - FrameArray
 //         - LiteralsArray
 //         - HashTable
 //           - Dictionary
@@ -93,6 +95,8 @@
 //         - TemplateList
 //         - TransitionArray
 //         - ScopeInfo
+//         - ModuleInfoEntry
+//         - ModuleInfo
 //         - ScriptContextTable
 //         - WeakFixedArray
 //       - FixedDoubleArray
@@ -150,6 +154,7 @@
 //       - BreakPointInfo
 //       - CodeCache
 //       - PrototypeInfo
+//       - Module
 //     - WeakCell
 //
 // Formats of Object*:
@@ -392,8 +397,10 @@
   V(TYPE_FEEDBACK_INFO_TYPE)                                    \
   V(ALIASED_ARGUMENTS_ENTRY_TYPE)                               \
   V(BOX_TYPE)                                                   \
+  V(PROMISE_CONTAINER_TYPE)                                     \
   V(PROTOTYPE_INFO_TYPE)                                        \
-  V(SLOPPY_BLOCK_WITH_EVAL_CONTEXT_EXTENSION_TYPE)              \
+  V(CONTEXT_EXTENSION_TYPE)                                     \
+  V(MODULE_TYPE)                                                \
                                                                 \
   V(FIXED_ARRAY_TYPE)                                           \
   V(FIXED_DOUBLE_ARRAY_TYPE)                                    \
@@ -409,7 +416,6 @@
   V(JS_ARGUMENTS_TYPE)                                          \
   V(JS_CONTEXT_EXTENSION_OBJECT_TYPE)                           \
   V(JS_GENERATOR_OBJECT_TYPE)                                   \
-  V(JS_MODULE_TYPE)                                             \
   V(JS_GLOBAL_OBJECT_TYPE)                                      \
   V(JS_GLOBAL_PROXY_TYPE)                                       \
   V(JS_API_OBJECT_TYPE)                                         \
@@ -428,6 +434,7 @@
   V(JS_PROMISE_TYPE)                                            \
   V(JS_REGEXP_TYPE)                                             \
   V(JS_ERROR_TYPE)                                              \
+  V(JS_STRING_ITERATOR_TYPE)                                    \
                                                                 \
   V(JS_BOUND_FUNCTION_TYPE)                                     \
   V(JS_FUNCTION_TYPE)                                           \
@@ -496,6 +503,7 @@
 // manually.
 #define STRUCT_LIST(V)                                                       \
   V(BOX, Box, box)                                                           \
+  V(PROMISE_CONTAINER, PromiseContainer, promise_container)                  \
   V(ACCESSOR_INFO, AccessorInfo, accessor_info)                              \
   V(ACCESSOR_PAIR, AccessorPair, accessor_pair)                              \
   V(ACCESS_CHECK_INFO, AccessCheckInfo, access_check_info)                   \
@@ -511,9 +519,8 @@
   V(DEBUG_INFO, DebugInfo, debug_info)                                       \
   V(BREAK_POINT_INFO, BreakPointInfo, break_point_info)                      \
   V(PROTOTYPE_INFO, PrototypeInfo, prototype_info)                           \
-  V(SLOPPY_BLOCK_WITH_EVAL_CONTEXT_EXTENSION,                                \
-    SloppyBlockWithEvalContextExtension,                                     \
-    sloppy_block_with_eval_context_extension)
+  V(MODULE, Module, module)                                                  \
+  V(CONTEXT_EXTENSION, ContextExtension, context_extension)
 
 // We use the full 8 bits of the instance_type field to encode heap object
 // instance types.  The high-order bit (bit 7) is set if the object is not a
@@ -678,6 +685,7 @@
   TYPE_FEEDBACK_INFO_TYPE,
   ALIASED_ARGUMENTS_ENTRY_TYPE,
   BOX_TYPE,
+  PROMISE_CONTAINER_TYPE,
   DEBUG_INFO_TYPE,
   BREAK_POINT_INFO_TYPE,
   FIXED_ARRAY_TYPE,
@@ -687,7 +695,8 @@
   TRANSITION_ARRAY_TYPE,
   PROPERTY_CELL_TYPE,
   PROTOTYPE_INFO_TYPE,
-  SLOPPY_BLOCK_WITH_EVAL_CONTEXT_EXTENSION_TYPE,
+  CONTEXT_EXTENSION_TYPE,
+  MODULE_TYPE,
 
   // All the following types are subtypes of JSReceiver, which corresponds to
   // objects in the JS sense. The first and the last type in this range are
@@ -708,7 +717,6 @@
   JS_ARGUMENTS_TYPE,
   JS_CONTEXT_EXTENSION_OBJECT_TYPE,
   JS_GENERATOR_OBJECT_TYPE,
-  JS_MODULE_TYPE,
   JS_ARRAY_TYPE,
   JS_ARRAY_BUFFER_TYPE,
   JS_TYPED_ARRAY_TYPE,
@@ -722,6 +730,7 @@
   JS_PROMISE_TYPE,
   JS_REGEXP_TYPE,
   JS_ERROR_TYPE,
+  JS_STRING_ITERATOR_TYPE,
   JS_BOUND_FUNCTION_TYPE,
   JS_FUNCTION_TYPE,  // LAST_JS_OBJECT_TYPE, LAST_JS_RECEIVER_TYPE
 
@@ -789,7 +798,6 @@
   V(FAST_PROPERTIES_SUB_TYPE)                    \
   V(FAST_TEMPLATE_INSTANTIATIONS_CACHE_SUB_TYPE) \
   V(HANDLER_TABLE_SUB_TYPE)                      \
-  V(INTRINSIC_FUNCTION_NAMES_SUB_TYPE)           \
   V(JS_COLLECTION_SUB_TYPE)                      \
   V(JS_WEAK_COLLECTION_SUB_TYPE)                 \
   V(LITERALS_ARRAY_SUB_TYPE)                     \
@@ -862,7 +870,7 @@
   INLINE(static type* cast(Object* object));            \
   INLINE(static const type* cast(const Object* object));
 
-
+class AbstractCode;
 class AccessorPair;
 class AllocationSite;
 class AllocationSiteCreationContext;
@@ -878,6 +886,9 @@
 class LiteralsArray;
 class LookupIterator;
 class FieldType;
+class ModuleDescriptor;
+class ModuleInfoEntry;
+class ModuleInfo;
 class ObjectHashTable;
 class ObjectVisitor;
 class PropertyCell;
@@ -961,6 +972,7 @@
   V(JSGeneratorObject)           \
   V(Map)                         \
   V(DescriptorArray)             \
+  V(FrameArray)                  \
   V(TransitionArray)             \
   V(LiteralsArray)               \
   V(TypeFeedbackMetadata)        \
@@ -977,6 +989,8 @@
   V(ScriptContextTable)          \
   V(NativeContext)               \
   V(ScopeInfo)                   \
+  V(ModuleInfoEntry)             \
+  V(ModuleInfo)                  \
   V(JSBoundFunction)             \
   V(JSFunction)                  \
   V(Code)                        \
@@ -998,6 +1012,7 @@
   V(JSProxy)                     \
   V(JSError)                     \
   V(JSPromise)                   \
+  V(JSStringIterator)            \
   V(JSSet)                       \
   V(JSMap)                       \
   V(JSSetIterator)               \
@@ -1287,7 +1302,8 @@
   MUST_USE_RESULT static MaybeHandle<Object> InstanceOf(
       Isolate* isolate, Handle<Object> object, Handle<Object> callable);
 
-  MUST_USE_RESULT static MaybeHandle<Object> GetProperty(LookupIterator* it);
+  V8_EXPORT_PRIVATE MUST_USE_RESULT static MaybeHandle<Object> GetProperty(
+      LookupIterator* it);
 
   // ES6 [[Set]] (when passed DONT_THROW)
   // Invariants for this and related functions (unless stated otherwise):
@@ -1313,10 +1329,6 @@
       LookupIterator* it, Handle<Object> value, LanguageMode language_mode,
       StoreFromKeyed store_mode);
 
-  MUST_USE_RESULT static MaybeHandle<Object> ReadAbsentProperty(
-      LookupIterator* it);
-  MUST_USE_RESULT static MaybeHandle<Object> ReadAbsentProperty(
-      Isolate* isolate, Handle<Object> receiver, Handle<Object> name);
   MUST_USE_RESULT static Maybe<bool> CannotCreateProperty(
       Isolate* isolate, Handle<Object> receiver, Handle<Object> name,
       Handle<Object> value, ShouldThrow should_throw);
@@ -1848,6 +1860,8 @@
       static_cast<int>(v8::KeyCollectionMode::kIncludePrototypes)
 };
 
+enum class AllocationSiteUpdateMode { kUpdate, kCheckOnly };
+
 // JSReceiver includes types on which properties can be defined, i.e.,
 // JSObject and JSProxy.
 class JSReceiver: public HeapObject {
@@ -1952,7 +1966,7 @@
       PropertyDescriptor* desc, PropertyDescriptor* current,
       ShouldThrow should_throw, Handle<Name> property_name = Handle<Name>());
 
-  MUST_USE_RESULT static Maybe<bool> GetOwnPropertyDescriptor(
+  V8_EXPORT_PRIVATE MUST_USE_RESULT static Maybe<bool> GetOwnPropertyDescriptor(
       Isolate* isolate, Handle<JSReceiver> object, Handle<Object> key,
       PropertyDescriptor* desc);
   MUST_USE_RESULT static Maybe<bool> GetOwnPropertyDescriptor(
@@ -2060,7 +2074,7 @@
   // [elements]: The elements (properties with names that are integers).
   //
   // Elements can be in two general modes: fast and slow. Each mode
-  // corrensponds to a set of object representations of elements that
+  // corresponds to a set of object representations of elements that
   // have something in common.
   //
   // In the fast mode elements is a FixedArray and so each element can
@@ -2298,7 +2312,9 @@
   }
 
   // These methods do not perform access checks!
-  static void UpdateAllocationSite(Handle<JSObject> object,
+  template <AllocationSiteUpdateMode update_or_check =
+                AllocationSiteUpdateMode::kUpdate>
+  static bool UpdateAllocationSite(Handle<JSObject> object,
                                    ElementsKind to_kind);
 
   // Lookup interceptors are used for handling properties controlled by host
@@ -2604,6 +2620,10 @@
 // as specified by ES6 section 25.1.1.3 The IteratorResult Interface
 class JSIteratorResult: public JSObject {
  public:
+  DECL_ACCESSORS(value, Object)
+
+  DECL_ACCESSORS(done, Object)
+
   // Offsets of object fields.
   static const int kValueOffset = JSObject::kHeaderSize;
   static const int kDoneOffset = kValueOffset + kPointerSize;
@@ -2895,7 +2915,6 @@
   DISALLOW_IMPLICIT_CONSTRUCTORS(WeakFixedArray);
 };
 
-
 // Generic array grows dynamically with O(1) amortized insertion.
 class ArrayList : public FixedArray {
  public:
@@ -2925,6 +2944,82 @@
   DISALLOW_IMPLICIT_CONSTRUCTORS(ArrayList);
 };
 
+#define FRAME_ARRAY_FIELD_LIST(V) \
+  V(WasmObject, Object)           \
+  V(WasmFunctionIndex, Smi)       \
+  V(Receiver, Object)             \
+  V(Function, JSFunction)         \
+  V(Code, AbstractCode)           \
+  V(Offset, Smi)                  \
+  V(Flags, Smi)
+
+// Container object for data collected during simple stack trace captures.
+class FrameArray : public FixedArray {
+ public:
+#define DECLARE_FRAME_ARRAY_ACCESSORS(name, type) \
+  inline type* name(int frame_ix) const;          \
+  inline void Set##name(int frame_ix, type* value);
+  FRAME_ARRAY_FIELD_LIST(DECLARE_FRAME_ARRAY_ACCESSORS)
+#undef DECLARE_FRAME_ARRAY_ACCESSORS
+
+  inline bool IsWasmFrame(int frame_ix) const;
+  inline int FrameCount() const;
+
+  void ShrinkToFit();
+
+  // Flags.
+  static const int kIsWasmFrame = 1 << 0;
+  static const int kIsStrict = 1 << 1;
+  static const int kForceConstructor = 1 << 2;
+
+  static Handle<FrameArray> AppendJSFrame(Handle<FrameArray> in,
+                                          Handle<Object> receiver,
+                                          Handle<JSFunction> function,
+                                          Handle<AbstractCode> code, int offset,
+                                          int flags);
+  static Handle<FrameArray> AppendWasmFrame(Handle<FrameArray> in,
+                                            Handle<Object> wasm_object,
+                                            int wasm_function_index,
+                                            Handle<AbstractCode> code,
+                                            int offset, int flags);
+
+  DECLARE_CAST(FrameArray)
+
+ private:
+  // The underlying fixed array embodies a captured stack trace. Frame i
+  // occupies indices
+  //
+  // kFirstIndex + 1 + [i * kElementsPerFrame, (i + 1) * kElementsPerFrame[,
+  //
+  // with internal offsets as below:
+
+  static const int kWasmObjectOffset = 0;
+  static const int kWasmFunctionIndexOffset = 1;
+
+  static const int kReceiverOffset = 0;
+  static const int kFunctionOffset = 1;
+
+  static const int kCodeOffset = 2;
+  static const int kOffsetOffset = 3;
+
+  static const int kFlagsOffset = 4;
+
+  static const int kElementsPerFrame = 5;
+
+  // Array layout indices.
+
+  static const int kFrameCountIndex = 0;
+  static const int kFirstIndex = 1;
+
+  static int LengthFor(int frame_count) {
+    return kFirstIndex + frame_count * kElementsPerFrame;
+  }
+
+  static Handle<FrameArray> EnsureSpace(Handle<FrameArray> array, int length);
+
+  friend class Factory;
+  DISALLOW_IMPLICIT_CONSTRUCTORS(FrameArray);
+};
 
 // DescriptorArrays are fixed arrays used to hold instance descriptors.
 // The format of the these objects is:
@@ -3410,7 +3505,8 @@
  public:
   // Find string in the string table. If it is not there yet, it is
   // added. The return value is the string found.
-  static Handle<String> LookupString(Isolate* isolate, Handle<String> key);
+  V8_EXPORT_PRIVATE static Handle<String> LookupString(Isolate* isolate,
+                                                       Handle<String> key);
   static Handle<String> LookupKey(Isolate* isolate, HashTableKey* key);
   static String* LookupKeyIfExists(Isolate* isolate, HashTableKey* key);
 
@@ -4236,6 +4332,8 @@
   // Return the function_name if present.
   String* FunctionName();
 
+  ModuleInfo* ModuleDescriptorInfo();
+
   // Return the name of the given parameter.
   String* ParameterName(int var);
 
@@ -4279,15 +4377,11 @@
                               VariableMode* mode, InitializationFlag* init_flag,
                               MaybeAssignedFlag* maybe_assigned_flag);
 
-  // Similar to ContextSlotIndex() but this method searches only among
-  // global slots of the serialized scope info. Returns the context slot index
-  // for a given slot name if the slot is present; otherwise returns a
-  // value < 0. The name must be an internalized string. If the slot is present
-  // and mode != NULL, sets *mode to the corresponding mode for that variable.
-  static int ContextGlobalSlotIndex(Handle<ScopeInfo> scope_info,
-                                    Handle<String> name, VariableMode* mode,
-                                    InitializationFlag* init_flag,
-                                    MaybeAssignedFlag* maybe_assigned_flag);
+  // Lookup metadata of a MODULE-allocated variable.  Return a negative value if
+  // there is no module variable with the given name.
+  int ModuleIndex(Handle<String> name, VariableMode* mode,
+                  InitializationFlag* init_flag,
+                  MaybeAssignedFlag* maybe_assigned_flag);
 
   // Lookup the name of a certain context slot by its index.
   String* ContextSlotName(int slot_index);
@@ -4301,7 +4395,7 @@
   // slot index if the function name is present and context-allocated (named
   // function expressions, only), otherwise returns a value < 0. The name
   // must be an internalized string.
-  int FunctionContextSlotIndex(String* name, VariableMode* mode);
+  int FunctionContextSlotIndex(String* name);
 
   // Lookup support for serialized scope info.  Returns the receiver context
   // slot index if scope has a "this" binding, and the binding is
@@ -4310,7 +4404,27 @@
 
   FunctionKind function_kind();
 
-  static Handle<ScopeInfo> Create(Isolate* isolate, Zone* zone, Scope* scope);
+  // Returns true if this ScopeInfo is linked to a outer ScopeInfo.
+  bool HasOuterScopeInfo();
+
+  // Returns true if this ScopeInfo was created for a debug-evaluate scope.
+  bool IsDebugEvaluateScope();
+
+  // Can be used to mark a ScopeInfo that looks like a with-scope as actually
+  // being a debug-evaluate scope.
+  void SetIsDebugEvaluateScope();
+
+  // Return the outer ScopeInfo if present.
+  ScopeInfo* OuterScopeInfo();
+
+#ifdef DEBUG
+  bool Equals(ScopeInfo* other) const;
+#endif
+
+  static Handle<ScopeInfo> Create(Isolate* isolate, Zone* zone, Scope* scope,
+                                  MaybeHandle<ScopeInfo> outer_scope);
+  static Handle<ScopeInfo> CreateForWithScope(
+      Isolate* isolate, MaybeHandle<ScopeInfo> outer_scope);
   static Handle<ScopeInfo> CreateGlobalThisBinding(Isolate* isolate);
 
   // Serializes empty scope info.
@@ -4322,18 +4436,16 @@
 
   // The layout of the static part of a ScopeInfo is as follows. Each entry is
   // numeric and occupies one array slot.
-  // 1. A set of properties of the scope
-  // 2. The number of parameters. This only applies to function scopes. For
-  //    non-function scopes this is 0.
-  // 3. The number of non-parameter variables allocated on the stack.
-  // 4. The number of non-parameter and parameter variables allocated in the
-  //    context.
+// 1. A set of properties of the scope.
+// 2. The number of parameters. For non-function scopes this is 0.
+// 3. The number of non-parameter variables allocated on the stack.
+// 4. The number of non-parameter and parameter variables allocated in the
+//    context.
 #define FOR_EACH_SCOPE_INFO_NUMERIC_FIELD(V) \
   V(Flags)                                   \
   V(ParameterCount)                          \
   V(StackLocalCount)                         \
-  V(ContextLocalCount)                       \
-  V(ContextGlobalCount)
+  V(ContextLocalCount)
 
 #define FIELD_ACCESSORS(name)       \
   inline void Set##name(int value); \
@@ -4350,7 +4462,7 @@
 
  private:
   // The layout of the variable part of a ScopeInfo is as follows:
-  // 1. ParameterEntries:
+  // 1. ParameterNames:
   //    This part stores the names of the parameters for function scopes. One
   //    slot is used per parameter, so in total this part occupies
   //    ParameterCount() slots in the array. For other scopes than function
@@ -4358,40 +4470,48 @@
   // 2. StackLocalFirstSlot:
   //    Index of a first stack slot for stack local. Stack locals belonging to
   //    this scope are located on a stack at slots starting from this index.
-  // 3. StackLocalEntries:
+  // 3. StackLocalNames:
   //    Contains the names of local variables that are allocated on the stack,
-  //    in increasing order of the stack slot index. First local variable has
-  //    a stack slot index defined in StackLocalFirstSlot (point 2 above).
+  //    in increasing order of the stack slot index. First local variable has a
+  //    stack slot index defined in StackLocalFirstSlot (point 2 above).
   //    One slot is used per stack local, so in total this part occupies
   //    StackLocalCount() slots in the array.
-  // 4. ContextLocalNameEntries:
+  // 4. ContextLocalNames:
   //    Contains the names of local variables and parameters that are allocated
   //    in the context. They are stored in increasing order of the context slot
   //    index starting with Context::MIN_CONTEXT_SLOTS. One slot is used per
   //    context local, so in total this part occupies ContextLocalCount() slots
   //    in the array.
-  // 5. ContextLocalInfoEntries:
+  // 5. ContextLocalInfos:
   //    Contains the variable modes and initialization flags corresponding to
-  //    the context locals in ContextLocalNameEntries. One slot is used per
+  //    the context locals in ContextLocalNames. One slot is used per
   //    context local, so in total this part occupies ContextLocalCount()
   //    slots in the array.
-  // 6. RecieverEntryIndex:
+  // 6. ReceiverInfo:
   //    If the scope binds a "this" value, one slot is reserved to hold the
   //    context or stack slot index for the variable.
-  // 7. FunctionNameEntryIndex:
+  // 7. FunctionNameInfo:
   //    If the scope belongs to a named function expression this part contains
   //    information about the function variable. It always occupies two array
   //    slots:  a. The name of the function variable.
   //            b. The context or stack slot index for the variable.
-  int ParameterEntriesIndex();
+  // 8. OuterScopeInfoIndex:
+  //    The outer scope's ScopeInfo or the hole if there's none.
+  // 9. ModuleInfo, ModuleVariableCount, and ModuleVariables:
+  //    For a module scope, this part contains the ModuleInfo, the number of
+  //    MODULE-allocated variables, and the metadata of those variables.  For
+  //    non-module scopes it is empty.
+  int ParameterNamesIndex();
   int StackLocalFirstSlotIndex();
-  int StackLocalEntriesIndex();
-  int ContextLocalNameEntriesIndex();
-  int ContextGlobalNameEntriesIndex();
-  int ContextLocalInfoEntriesIndex();
-  int ContextGlobalInfoEntriesIndex();
-  int ReceiverEntryIndex();
-  int FunctionNameEntryIndex();
+  int StackLocalNamesIndex();
+  int ContextLocalNamesIndex();
+  int ContextLocalInfosIndex();
+  int ReceiverInfoIndex();
+  int FunctionNameInfoIndex();
+  int OuterScopeInfoIndex();
+  int ModuleInfoIndex();
+  int ModuleVariableCountIndex();
+  int ModuleVariablesIndex();
 
   int Lookup(Handle<String> name, int start, int end, VariableMode* mode,
              VariableLocation* location, InitializationFlag* init_flag,
@@ -4416,26 +4536,77 @@
       : public BitField<bool, ReceiverVariableField::kNext, 1> {};
   class FunctionVariableField
       : public BitField<VariableAllocationInfo, HasNewTargetField::kNext, 2> {};
-  class FunctionVariableMode
-      : public BitField<VariableMode, FunctionVariableField::kNext, 3> {};
-  class AsmModuleField : public BitField<bool, FunctionVariableMode::kNext, 1> {
-  };
+  class AsmModuleField
+      : public BitField<bool, FunctionVariableField::kNext, 1> {};
   class AsmFunctionField : public BitField<bool, AsmModuleField::kNext, 1> {};
   class HasSimpleParametersField
       : public BitField<bool, AsmFunctionField::kNext, 1> {};
   class FunctionKindField
-      : public BitField<FunctionKind, HasSimpleParametersField::kNext, 9> {};
+      : public BitField<FunctionKind, HasSimpleParametersField::kNext, 10> {};
+  class HasOuterScopeInfoField
+      : public BitField<bool, FunctionKindField::kNext, 1> {};
+  class IsDebugEvaluateScopeField
+      : public BitField<bool, HasOuterScopeInfoField::kNext, 1> {};
 
-  // BitFields representing the encoded information for context locals in the
-  // ContextLocalInfoEntries part.
-  class ContextLocalMode:      public BitField<VariableMode,         0, 3> {};
-  class ContextLocalInitFlag:  public BitField<InitializationFlag,   3, 1> {};
-  class ContextLocalMaybeAssignedFlag
-      : public BitField<MaybeAssignedFlag, 4, 1> {};
+  // Properties of variables.
+  class VariableModeField : public BitField<VariableMode, 0, 3> {};
+  class InitFlagField : public BitField<InitializationFlag, 3, 1> {};
+  class MaybeAssignedFlagField : public BitField<MaybeAssignedFlag, 4, 1> {};
 
   friend class ScopeIterator;
 };
 
+class ModuleInfoEntry : public FixedArray {
+ public:
+  DECLARE_CAST(ModuleInfoEntry)
+  static Handle<ModuleInfoEntry> New(Isolate* isolate,
+                                     Handle<Object> export_name,
+                                     Handle<Object> local_name,
+                                     Handle<Object> import_name,
+                                     Handle<Object> module_request);
+  inline Object* export_name() const;
+  inline Object* local_name() const;
+  inline Object* import_name() const;
+  inline Object* module_request() const;
+
+ private:
+  friend class Factory;
+  enum {
+    kExportNameIndex,
+    kLocalNameIndex,
+    kImportNameIndex,
+    kModuleRequestIndex,
+    kLength
+  };
+};
+
+// ModuleInfo is to ModuleDescriptor what ScopeInfo is to Scope.
+class ModuleInfo : public FixedArray {
+ public:
+  DECLARE_CAST(ModuleInfo)
+  static Handle<ModuleInfo> New(Isolate* isolate, Zone* zone,
+                                ModuleDescriptor* descr);
+  inline FixedArray* module_requests() const;
+  inline FixedArray* special_exports() const;
+  inline FixedArray* regular_exports() const;
+  inline FixedArray* namespace_imports() const;
+  inline FixedArray* regular_imports() const;
+
+#ifdef DEBUG
+  inline bool Equals(ModuleInfo* other) const;
+#endif
+
+ private:
+  friend class Factory;
+  enum {
+    kModuleRequestsIndex,
+    kSpecialExportsIndex,
+    kRegularExportsIndex,
+    kNamespaceImportsIndex,
+    kRegularImportsIndex,
+    kLength
+  };
+};
 
 // The cache for maps used by normalized (dictionary mode) objects.
 // Such maps do not have property descriptors, so a typical program
@@ -4487,6 +4658,9 @@
                  // catching are part of a desugaring and should therefore not
                  // be visible to the user (we won't notify the debugger of such
                  // exceptions).
+    ASYNC_AWAIT,  // The exception will be caught and cause a promise rejection
+                  // in the desugaring of an async function, so special
+                  // async/await handling in the debugger can take place.
   };
 
   // Getters for handler table based on ranges.
@@ -4539,8 +4713,8 @@
   static const int kReturnEntrySize = 2;
 
   // Encoding of the {handler} field.
-  class HandlerPredictionField : public BitField<CatchPrediction, 0, 2> {};
-  class HandlerOffsetField : public BitField<int, 2, 30> {};
+  class HandlerPredictionField : public BitField<CatchPrediction, 0, 3> {};
+  class HandlerOffsetField : public BitField<int, 3, 29> {};
 };
 
 // ByteArray represents fixed sized byte arrays.  Used for the relocation info
@@ -4684,6 +4858,13 @@
   // Maximal length of a single BytecodeArray.
   static const int kMaxLength = kMaxSize - kHeaderSize;
 
+  static const int kPointerFieldsBeginOffset = kConstantPoolOffset;
+  static const int kPointerFieldsEndOffset = kFrameSizeOffset;
+
+  typedef FixedBodyDescriptor<kPointerFieldsBeginOffset,
+                              kPointerFieldsEndOffset, kHeaderSize>
+      MarkingBodyDescriptor;
+
   class BodyDescriptor;
 
  private:
@@ -4721,6 +4902,7 @@
   // Size is smi tagged when it is stored.
   static const int kSizeOffset = HeapObject::kHeaderSize;
   static const int kNextOffset = POINTER_SIZE_ALIGN(kSizeOffset + kPointerSize);
+  static const int kSize = kNextOffset + kPointerSize;
 
  private:
   DISALLOW_IMPLICIT_CONSTRUCTORS(FreeSpace);
@@ -6226,6 +6408,7 @@
   inline bool IsJSFunctionMap();
   inline bool IsStringMap();
   inline bool IsJSProxyMap();
+  inline bool IsModuleMap();
   inline bool IsJSGlobalProxyMap();
   inline bool IsJSGlobalObjectMap();
   inline bool IsJSTypedArrayMap();
@@ -6482,6 +6665,34 @@
   DECLARE_CAST(Struct)
 };
 
+// A container struct to hold state required for
+// PromiseResolveThenableJob. {before, after}_debug_event could
+// potentially be undefined if the debugger is turned off.
+class PromiseContainer : public Struct {
+ public:
+  DECL_ACCESSORS(thenable, JSReceiver)
+  DECL_ACCESSORS(then, JSReceiver)
+  DECL_ACCESSORS(resolve, JSFunction)
+  DECL_ACCESSORS(reject, JSFunction)
+  DECL_ACCESSORS(before_debug_event, Object)
+  DECL_ACCESSORS(after_debug_event, Object)
+
+  static const int kThenableOffset = Struct::kHeaderSize;
+  static const int kThenOffset = kThenableOffset + kPointerSize;
+  static const int kResolveOffset = kThenOffset + kPointerSize;
+  static const int kRejectOffset = kResolveOffset + kPointerSize;
+  static const int kBeforeDebugEventOffset = kRejectOffset + kPointerSize;
+  static const int kAfterDebugEventOffset =
+      kBeforeDebugEventOffset + kPointerSize;
+  static const int kSize = kAfterDebugEventOffset + kPointerSize;
+
+  DECLARE_CAST(PromiseContainer)
+  DECLARE_PRINTER(PromiseContainer)
+  DECLARE_VERIFIER(PromiseContainer)
+
+ private:
+  DISALLOW_IMPLICIT_CONSTRUCTORS(PromiseContainer);
+};
 
 // A simple one-element struct, useful where smis need to be boxed.
 class Box : public Struct {
@@ -6559,28 +6770,29 @@
 
 
 // Pair used to store both a ScopeInfo and an extension object in the extension
-// slot of a block context. Needed in the rare case where a declaration block
-// scope (a "varblock" as used to desugar parameter destructuring) also contains
-// a sloppy direct eval. (In no other case both are needed at the same time.)
-class SloppyBlockWithEvalContextExtension : public Struct {
+// slot of a block, catch, or with context. Needed in the rare case where a
+// declaration block scope (a "varblock" as used to desugar parameter
+// destructuring) also contains a sloppy direct eval, or for with and catch
+// scopes. (In no other case both are needed at the same time.)
+class ContextExtension : public Struct {
  public:
   // [scope_info]: Scope info.
   DECL_ACCESSORS(scope_info, ScopeInfo)
   // [extension]: Extension object.
-  DECL_ACCESSORS(extension, JSObject)
+  DECL_ACCESSORS(extension, Object)
 
-  DECLARE_CAST(SloppyBlockWithEvalContextExtension)
+  DECLARE_CAST(ContextExtension)
 
   // Dispatched behavior.
-  DECLARE_PRINTER(SloppyBlockWithEvalContextExtension)
-  DECLARE_VERIFIER(SloppyBlockWithEvalContextExtension)
+  DECLARE_PRINTER(ContextExtension)
+  DECLARE_VERIFIER(ContextExtension)
 
   static const int kScopeInfoOffset = HeapObject::kHeaderSize;
   static const int kExtensionOffset = kScopeInfoOffset + kPointerSize;
   static const int kSize = kExtensionOffset + kPointerSize;
 
  private:
-  DISALLOW_IMPLICIT_CONSTRUCTORS(SloppyBlockWithEvalContextExtension);
+  DISALLOW_IMPLICIT_CONSTRUCTORS(ContextExtension);
 };
 
 
@@ -6803,6 +7015,15 @@
   V(Array.prototype, push, ArrayPush)                       \
   V(Array.prototype, pop, ArrayPop)                         \
   V(Array.prototype, shift, ArrayShift)                     \
+  V(Date.prototype, getDate, DateGetDate)                   \
+  V(Date.prototype, getDay, DateGetDay)                     \
+  V(Date.prototype, getFullYear, DateGetFullYear)           \
+  V(Date.prototype, getHours, DateGetHours)                 \
+  V(Date.prototype, getMilliseconds, DateGetMilliseconds)   \
+  V(Date.prototype, getMinutes, DateGetMinutes)             \
+  V(Date.prototype, getMonth, DateGetMonth)                 \
+  V(Date.prototype, getSeconds, DateGetSeconds)             \
+  V(Date.prototype, getTime, DateGetTime)                   \
   V(Function.prototype, apply, FunctionApply)               \
   V(Function.prototype, call, FunctionCall)                 \
   V(Object.prototype, hasOwnProperty, ObjectHasOwnProperty) \
@@ -6847,6 +7068,10 @@
   V(Math, clz32, MathClz32)                                 \
   V(Math, fround, MathFround)                               \
   V(Math, trunc, MathTrunc)                                 \
+  V(Number, isFinite, NumberIsFinite)                       \
+  V(Number, isInteger, NumberIsInteger)                     \
+  V(Number, isNaN, NumberIsNaN)                             \
+  V(Number, isSafeInteger, NumberIsSafeInteger)             \
   V(Number, parseInt, NumberParseInt)                       \
   V(Number.prototype, toString, NumberToString)
 
@@ -6869,16 +7094,20 @@
   kDataViewBuffer,
   kDataViewByteLength,
   kDataViewByteOffset,
+  kFunctionHasInstance,
   kGlobalDecodeURI,
   kGlobalDecodeURIComponent,
   kGlobalEncodeURI,
   kGlobalEncodeURIComponent,
   kGlobalEscape,
   kGlobalUnescape,
+  kGlobalIsFinite,
+  kGlobalIsNaN,
   kTypedArrayByteLength,
   kTypedArrayByteOffset,
   kTypedArrayLength,
   kSharedArrayBufferByteLength,
+  kStringIteratorNext,
 };
 
 
@@ -6984,6 +7213,10 @@
   // [scope_info]: Scope info.
   DECL_ACCESSORS(scope_info, ScopeInfo)
 
+  // The outer scope info for the purpose of parsing this function, or the hole
+  // value if it isn't yet known.
+  DECL_ACCESSORS(outer_scope_info, HeapObject)
+
   // [construct stub]: Code stub for constructing instances of this function.
   DECL_ACCESSORS(construct_stub, Code)
 
@@ -7191,30 +7424,11 @@
   // Indicates that code for this function cannot be flushed.
   DECL_BOOLEAN_ACCESSORS(dont_flush)
 
-  // Indicates that this function is a generator.
-  DECL_BOOLEAN_ACCESSORS(is_generator)
-
-  // Indicates that this function is an async function.
-  DECL_BOOLEAN_ACCESSORS(is_async)
-
-  // Indicates that this function can be suspended, either via YieldExpressions
-  // or AwaitExpressions.
-  inline bool is_resumable() const;
-
-  // Indicates that this function is an arrow function.
-  DECL_BOOLEAN_ACCESSORS(is_arrow)
-
-  // Indicates that this function is a concise method.
-  DECL_BOOLEAN_ACCESSORS(is_concise_method)
-
-  // Indicates that this function is a getter.
-  DECL_BOOLEAN_ACCESSORS(is_getter_function)
-
-  // Indicates that this function is a setter.
-  DECL_BOOLEAN_ACCESSORS(is_setter_function)
-
-  // Indicates that this function is a default constructor.
-  DECL_BOOLEAN_ACCESSORS(is_default_constructor)
+  // Indicates that this is a constructor for a base class with instance fields.
+  DECL_BOOLEAN_ACCESSORS(requires_class_field_init)
+  // Indicates that this is a synthesized function to set up class instance
+  // fields.
+  DECL_BOOLEAN_ACCESSORS(is_class_field_initializer)
 
   // Indicates that this function is an asm function.
   DECL_BOOLEAN_ACCESSORS(asm_function)
@@ -7231,7 +7445,7 @@
   // Indicates that asm->wasm conversion failed and should not be re-attempted.
   DECL_BOOLEAN_ACCESSORS(is_asm_wasm_broken)
 
-  inline FunctionKind kind();
+  inline FunctionKind kind() const;
   inline void set_kind(FunctionKind kind);
 
   // Indicates whether or not the code in the shared function support
@@ -7331,11 +7545,12 @@
 
   // Layout description.
   // Pointer fields.
-  static const int kNameOffset = HeapObject::kHeaderSize;
-  static const int kCodeOffset = kNameOffset + kPointerSize;
-  static const int kOptimizedCodeMapOffset = kCodeOffset + kPointerSize;
+  static const int kCodeOffset = HeapObject::kHeaderSize;
+  static const int kNameOffset = kCodeOffset + kPointerSize;
+  static const int kOptimizedCodeMapOffset = kNameOffset + kPointerSize;
   static const int kScopeInfoOffset = kOptimizedCodeMapOffset + kPointerSize;
-  static const int kConstructStubOffset = kScopeInfoOffset + kPointerSize;
+  static const int kOuterScopeInfoOffset = kScopeInfoOffset + kPointerSize;
+  static const int kConstructStubOffset = kOuterScopeInfoOffset + kPointerSize;
   static const int kInstanceClassNameOffset =
       kConstructStubOffset + kPointerSize;
   static const int kFunctionDataOffset =
@@ -7457,9 +7672,12 @@
 
   static const int kAlignedSize = POINTER_SIZE_ALIGN(kSize);
 
+  typedef FixedBodyDescriptor<kCodeOffset,
+                              kLastPointerFieldOffset + kPointerSize, kSize>
+      BodyDescriptor;
   typedef FixedBodyDescriptor<kNameOffset,
-                              kLastPointerFieldOffset + kPointerSize,
-                              kSize> BodyDescriptor;
+                              kLastPointerFieldOffset + kPointerSize, kSize>
+      BodyDescriptorWeakCode;
 
   // Bit positions in start_position_and_type.
   // The source code start position is in the 30 most significant bits of
@@ -7491,38 +7709,19 @@
     kDontFlush,
     // byte 2
     kFunctionKind,
-    kIsArrow = kFunctionKind,
-    kIsGenerator,
-    kIsConciseMethod,
-    kIsDefaultConstructor,
-    kIsSubclassConstructor,
-    kIsBaseConstructor,
-    kIsGetterFunction,
-    kIsSetterFunction,
+    // rest of byte 2 and first two bits of byte 3 are used by FunctionKind
     // byte 3
-    kIsAsyncFunction,
-    kDeserialized,
+    kDeserialized = kFunctionKind + 10,
     kIsDeclaration,
     kIsAsmWasmBroken,
+    kRequiresClassFieldInit,
+    kIsClassFieldInitializer,
     kCompilerHintsCount,  // Pseudo entry
   };
   // kFunctionKind has to be byte-aligned
   STATIC_ASSERT((kFunctionKind % kBitsPerByte) == 0);
-// Make sure that FunctionKind and byte 2 are in sync:
-#define ASSERT_FUNCTION_KIND_ORDER(functionKind, compilerFunctionKind) \
-  STATIC_ASSERT(FunctionKind::functionKind ==                          \
-                1 << (compilerFunctionKind - kFunctionKind))
-  ASSERT_FUNCTION_KIND_ORDER(kArrowFunction, kIsArrow);
-  ASSERT_FUNCTION_KIND_ORDER(kGeneratorFunction, kIsGenerator);
-  ASSERT_FUNCTION_KIND_ORDER(kConciseMethod, kIsConciseMethod);
-  ASSERT_FUNCTION_KIND_ORDER(kDefaultConstructor, kIsDefaultConstructor);
-  ASSERT_FUNCTION_KIND_ORDER(kSubclassConstructor, kIsSubclassConstructor);
-  ASSERT_FUNCTION_KIND_ORDER(kBaseConstructor, kIsBaseConstructor);
-  ASSERT_FUNCTION_KIND_ORDER(kGetterFunction, kIsGetterFunction);
-  ASSERT_FUNCTION_KIND_ORDER(kSetterFunction, kIsSetterFunction);
-#undef ASSERT_FUNCTION_KIND_ORDER
 
-  class FunctionKindBits : public BitField<FunctionKind, kIsArrow, 9> {};
+  class FunctionKindBits : public BitField<FunctionKind, kFunctionKind, 10> {};
 
   class DeoptCountBits : public BitField<int, 0, 4> {};
   class OptReenableTriesBits : public BitField<int, 4, 18> {};
@@ -7554,21 +7753,10 @@
   static const int kHasDuplicateParametersBit =
       kHasDuplicateParameters + kCompilerHintsSmiTagSize;
 
-  static const int kIsArrowBit = kIsArrow + kCompilerHintsSmiTagSize;
-  static const int kIsGeneratorBit = kIsGenerator + kCompilerHintsSmiTagSize;
-  static const int kIsConciseMethodBit =
-      kIsConciseMethod + kCompilerHintsSmiTagSize;
-  static const int kIsAsyncFunctionBit =
-      kIsAsyncFunction + kCompilerHintsSmiTagSize;
-
-  static const int kAccessorFunctionBits =
-      FunctionKind::kAccessorFunction
-      << (kFunctionKind + kCompilerHintsSmiTagSize);
-  static const int kClassConstructorBits =
-      FunctionKind::kClassConstructor
-      << (kFunctionKind + kCompilerHintsSmiTagSize);
-  static const int kFunctionKindMaskBits = FunctionKindBits::kMask
-                                           << kCompilerHintsSmiTagSize;
+  static const int kFunctionKindShift =
+      kFunctionKind + kCompilerHintsSmiTagSize;
+  static const int kAllFunctionKindBitsMask = FunctionKindBits::kMask
+                                              << kCompilerHintsSmiTagSize;
 
   // Constants for optimizing codegen for strict mode function and
   // native tests.
@@ -7687,6 +7875,100 @@
   DISALLOW_IMPLICIT_CONSTRUCTORS(JSGeneratorObject);
 };
 
+// A Module object is a mapping from export names to cells
+// This is still very much in flux.
+class Module : public Struct {
+ public:
+  DECLARE_CAST(Module)
+  DECLARE_VERIFIER(Module)
+  DECLARE_PRINTER(Module)
+
+  // The code representing this Module, either a
+  // SharedFunctionInfo or a JSFunction depending
+  // on whether it's been instantiated.
+  DECL_ACCESSORS(code, Object)
+
+  DECL_ACCESSORS(exports, ObjectHashTable)
+
+  // [[RequestedModules]]: Modules imported or re-exported by this module.
+  // Corresponds 1-to-1 to the module specifier strings in
+  // ModuleInfo::module_requests.
+  DECL_ACCESSORS(requested_modules, FixedArray)
+
+  // [[Evaluated]]: Whether this module has been evaluated. Modules
+  // are only evaluated a single time.
+  DECL_BOOLEAN_ACCESSORS(evaluated)
+
+  // Storage for [[Evaluated]]
+  DECL_INT_ACCESSORS(flags)
+
+  // Embedder-specified data
+  DECL_ACCESSORS(embedder_data, Object)
+
+  // Get the SharedFunctionInfo associated with the code.
+  inline SharedFunctionInfo* shared() const;
+
+  // Get the ModuleInfo associated with the code.
+  inline ModuleInfo* info() const;
+
+  // Compute a hash for this object.
+  inline uint32_t Hash() const;
+
+  // Implementation of spec operation ModuleDeclarationInstantiation.
+  // Returns false if an exception occurred during instantiation, true
+  // otherwise.
+  static MUST_USE_RESULT bool Instantiate(Handle<Module> module,
+                                          v8::Local<v8::Context> context,
+                                          v8::Module::ResolveCallback callback,
+                                          v8::Local<v8::Value> callback_data);
+
+  // Implementation of spec operation ModuleEvaluation.
+  static MUST_USE_RESULT MaybeHandle<Object> Evaluate(Handle<Module> module);
+
+  static Handle<Object> LoadExport(Handle<Module> module, Handle<String> name);
+  static void StoreExport(Handle<Module> module, Handle<String> name,
+                          Handle<Object> value);
+
+  static Handle<Object> LoadImport(Handle<Module> module, Handle<String> name,
+                                   int module_request);
+
+  static const int kCodeOffset = HeapObject::kHeaderSize;
+  static const int kExportsOffset = kCodeOffset + kPointerSize;
+  static const int kRequestedModulesOffset = kExportsOffset + kPointerSize;
+  static const int kFlagsOffset = kRequestedModulesOffset + kPointerSize;
+  static const int kEmbedderDataOffset = kFlagsOffset + kPointerSize;
+  static const int kSize = kEmbedderDataOffset + kPointerSize;
+
+ private:
+  enum { kEvaluatedBit };
+
+  static void CreateExport(Handle<Module> module, Handle<FixedArray> names);
+  static void CreateIndirectExport(Handle<Module> module, Handle<String> name,
+                                   Handle<ModuleInfoEntry> entry);
+
+  // The [must_resolve] argument indicates whether or not an exception should be
+  // thrown in case the module does not provide an export named [name]
+  // (including when a cycle is detected).  An exception is always thrown in the
+  // case of conflicting star exports.
+  //
+  // If [must_resolve] is true, a null result indicates an exception. If
+  // [must_resolve] is false, a null result may or may not indicate an
+  // exception (so check manually!).
+  class ResolveSet;
+  static MUST_USE_RESULT MaybeHandle<Cell> ResolveExport(
+      Handle<Module> module, Handle<String> name, bool must_resolve,
+      ResolveSet* resolve_set);
+  static MUST_USE_RESULT MaybeHandle<Cell> ResolveImport(
+      Handle<Module> module, Handle<String> name, int module_request,
+      bool must_resolve, ResolveSet* resolve_set);
+
+  // Helper for ResolveExport.
+  static MUST_USE_RESULT MaybeHandle<Cell> ResolveExportUsingStarExports(
+      Handle<Module> module, Handle<String> name, bool must_resolve,
+      ResolveSet* resolve_set);
+
+  DISALLOW_IMPLICIT_CONSTRUCTORS(Module);
+};
 
 // JSBoundFunction describes a bound function exotic object.
 class JSBoundFunction : public JSObject {
@@ -8219,7 +8501,8 @@
   DECL_ACCESSORS(flags, Object)
   DECL_ACCESSORS(source, Object)
 
-  static MaybeHandle<JSRegExp> New(Handle<String> source, Flags flags);
+  V8_EXPORT_PRIVATE static MaybeHandle<JSRegExp> New(Handle<String> source,
+                                                     Flags flags);
   static Handle<JSRegExp> Copy(Handle<JSRegExp> regexp);
 
   static MaybeHandle<JSRegExp> Initialize(Handle<JSRegExp> regexp,
@@ -8585,7 +8868,9 @@
 
   inline bool SitePointsToLiteral();
 
-  static void DigestTransitionFeedback(Handle<AllocationSite> site,
+  template <AllocationSiteUpdateMode update_or_check =
+                AllocationSiteUpdateMode::kUpdate>
+  static bool DigestTransitionFeedback(Handle<AllocationSite> site,
                                        ElementsKind to_kind);
 
   DECLARE_PRINTER(AllocationSite)
@@ -8612,6 +8897,10 @@
   static const int kPointerFieldsBeginOffset = kTransitionInfoOffset;
   static const int kPointerFieldsEndOffset = kWeakNextOffset;
 
+  typedef FixedBodyDescriptor<kPointerFieldsBeginOffset,
+                              kPointerFieldsEndOffset, kSize>
+      MarkingBodyDescriptor;
+
   // For other visitors, use the fixed body descriptor below.
   typedef FixedBodyDescriptor<HeapObject::kHeaderSize, kSize, kSize>
       BodyDescriptor;
@@ -8674,8 +8963,7 @@
 enum AllowNullsFlag {ALLOW_NULLS, DISALLOW_NULLS};
 enum RobustnessFlag {ROBUST_STRING_TRAVERSAL, FAST_STRING_TRAVERSAL};
 
-
-class StringHasher {
+class V8_EXPORT_PRIVATE StringHasher {
  public:
   explicit inline StringHasher(int length, uint32_t seed);
 
@@ -9123,6 +9411,9 @@
   static int IndexOf(Isolate* isolate, Handle<String> sub, Handle<String> pat,
                      int start_index);
 
+  static Object* LastIndexOf(Isolate* isolate, Handle<Object> receiver,
+                             Handle<Object> search, Handle<Object> position);
+
   // String equality operations.
   inline bool Equals(String* other);
   inline static bool Equals(Handle<String> one, Handle<String> two);
@@ -9295,7 +9586,7 @@
   static bool SlowEquals(Handle<String> one, Handle<String> two);
 
   // Slow case of AsArrayIndex.
-  bool SlowAsArrayIndex(uint32_t* index);
+  V8_EXPORT_PRIVATE bool SlowAsArrayIndex(uint32_t* index);
 
   // Compute and set the hash code.
   uint32_t ComputeAndSetHash();
@@ -9860,9 +10151,6 @@
   static const int kDependentCodeOffset = kValueOffset + kPointerSize;
   static const int kSize = kDependentCodeOffset + kPointerSize;
 
-  static const int kPointerFieldsBeginOffset = kValueOffset;
-  static const int kPointerFieldsEndOffset = kSize;
-
   typedef FixedBodyDescriptor<kValueOffset,
                               kSize,
                               kSize> BodyDescriptor;
@@ -10055,6 +10343,28 @@
   DISALLOW_IMPLICIT_CONSTRUCTORS(JSMap);
 };
 
+class JSStringIterator : public JSObject {
+ public:
+  // Dispatched behavior.
+  DECLARE_PRINTER(JSStringIterator)
+  DECLARE_VERIFIER(JSStringIterator)
+
+  DECLARE_CAST(JSStringIterator)
+
+  // [string]: the [[IteratedString]] internal field.
+  DECL_ACCESSORS(string, String)
+
+  // [index]: The [[StringIteratorNextIndex]] internal field.
+  inline int index() const;
+  inline void set_index(int value);
+
+  static const int kStringOffset = JSObject::kHeaderSize;
+  static const int kNextIndexOffset = kStringOffset + kPointerSize;
+  static const int kSize = kNextIndexOffset + kPointerSize;
+
+ private:
+  DISALLOW_IMPLICIT_CONSTRUCTORS(JSStringIterator);
+};
 
 // OrderedHashTableIterator is an iterator that iterates over the keys and
 // values of an OrderedHashTable.
@@ -10480,12 +10790,9 @@
   static const int kLengthOffset = JSObject::kHeaderSize;
   static const int kSize = kLengthOffset + kPointerSize;
 
-  // 600 * KB is the Page::kMaxRegularHeapObjectSize defined in spaces.h which
-  // we do not want to include in objects.h
-  // Note that Page::kMaxRegularHeapObjectSize has to be in sync with
-  // kInitialMaxFastElementArray which is checked in a DCHECK in heap.cc.
   static const int kInitialMaxFastElementArray =
-      (600 * KB - FixedArray::kHeaderSize - kSize - AllocationMemento::kSize) /
+      (kMaxRegularHeapObjectSize - FixedArray::kHeaderSize - kSize -
+       AllocationMemento::kSize) /
       kPointerSize;
 
  private:
@@ -10684,8 +10991,10 @@
   DECL_ACCESSORS(getter, Object)
   DECL_ACCESSORS(setter, Object)
   DECL_ACCESSORS(query, Object)
+  DECL_ACCESSORS(descriptor, Object)
   DECL_ACCESSORS(deleter, Object)
   DECL_ACCESSORS(enumerator, Object)
+  DECL_ACCESSORS(definer, Object)
   DECL_ACCESSORS(data, Object)
   DECL_BOOLEAN_ACCESSORS(can_intercept_symbols)
   DECL_BOOLEAN_ACCESSORS(all_can_read)
@@ -10703,9 +11012,11 @@
   static const int kGetterOffset = HeapObject::kHeaderSize;
   static const int kSetterOffset = kGetterOffset + kPointerSize;
   static const int kQueryOffset = kSetterOffset + kPointerSize;
-  static const int kDeleterOffset = kQueryOffset + kPointerSize;
+  static const int kDescriptorOffset = kQueryOffset + kPointerSize;
+  static const int kDeleterOffset = kDescriptorOffset + kPointerSize;
   static const int kEnumeratorOffset = kDeleterOffset + kPointerSize;
-  static const int kDataOffset = kEnumeratorOffset + kPointerSize;
+  static const int kDefinerOffset = kEnumeratorOffset + kPointerSize;
+  static const int kDataOffset = kDefinerOffset + kPointerSize;
   static const int kFlagsOffset = kDataOffset + kPointerSize;
   static const int kSize = kFlagsOffset + kPointerSize;
 
diff --git a/src/ostreams.h b/src/ostreams.h
index 977b5c6..dea7514 100644
--- a/src/ostreams.h
+++ b/src/ostreams.h
@@ -13,6 +13,7 @@
 
 #include "include/v8config.h"
 #include "src/base/macros.h"
+#include "src/globals.h"
 
 namespace v8 {
 namespace internal {
@@ -33,7 +34,7 @@
 
 
 // An output stream writing to a file.
-class OFStream : public std::ostream {
+class V8_EXPORT_PRIVATE OFStream : public std::ostream {
  public:
   explicit OFStream(FILE* f);
   virtual ~OFStream();
diff --git a/src/parsing/duplicate-finder.cc b/src/parsing/duplicate-finder.cc
new file mode 100644
index 0000000..6b57153
--- /dev/null
+++ b/src/parsing/duplicate-finder.cc
@@ -0,0 +1,145 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/parsing/duplicate-finder.h"
+
+#include "src/conversions.h"
+#include "src/unicode-cache.h"
+
+namespace v8 {
+namespace internal {
+
+int DuplicateFinder::AddOneByteSymbol(Vector<const uint8_t> key, int value) {
+  return AddSymbol(key, true, value);
+}
+
+int DuplicateFinder::AddTwoByteSymbol(Vector<const uint16_t> key, int value) {
+  return AddSymbol(Vector<const uint8_t>::cast(key), false, value);
+}
+
+int DuplicateFinder::AddSymbol(Vector<const uint8_t> key, bool is_one_byte,
+                               int value) {
+  uint32_t hash = Hash(key, is_one_byte);
+  byte* encoding = BackupKey(key, is_one_byte);
+  base::HashMap::Entry* entry = map_.LookupOrInsert(encoding, hash);
+  int old_value = static_cast<int>(reinterpret_cast<intptr_t>(entry->value));
+  entry->value =
+      reinterpret_cast<void*>(static_cast<intptr_t>(value | old_value));
+  return old_value;
+}
+
+int DuplicateFinder::AddNumber(Vector<const uint8_t> key, int value) {
+  DCHECK(key.length() > 0);
+  // Quick check for already being in canonical form.
+  if (IsNumberCanonical(key)) {
+    return AddOneByteSymbol(key, value);
+  }
+
+  int flags = ALLOW_HEX | ALLOW_OCTAL | ALLOW_IMPLICIT_OCTAL | ALLOW_BINARY;
+  double double_value = StringToDouble(unicode_constants_, key, flags, 0.0);
+  int length;
+  const char* string;
+  if (!std::isfinite(double_value)) {
+    string = "Infinity";
+    length = 8;  // strlen("Infinity");
+  } else {
+    string = DoubleToCString(double_value,
+                             Vector<char>(number_buffer_, kBufferSize));
+    length = StrLength(string);
+  }
+  return AddSymbol(
+      Vector<const byte>(reinterpret_cast<const byte*>(string), length), true,
+      value);
+}
+
+bool DuplicateFinder::IsNumberCanonical(Vector<const uint8_t> number) {
+  // Test for a safe approximation of number literals that are already
+  // in canonical form: max 15 digits, no leading zeroes, except an
+  // integer part that is a single zero, and no trailing zeros below
+  // the decimal point.
+  int pos = 0;
+  int length = number.length();
+  if (number.length() > 15) return false;
+  if (number[pos] == '0') {
+    pos++;
+  } else {
+    while (pos < length &&
+           static_cast<unsigned>(number[pos] - '0') <= ('9' - '0'))
+      pos++;
+  }
+  if (length == pos) return true;
+  if (number[pos] != '.') return false;
+  pos++;
+  bool invalid_last_digit = true;
+  while (pos < length) {
+    uint8_t digit = number[pos] - '0';
+    if (digit > '9' - '0') return false;
+    invalid_last_digit = (digit == 0);
+    pos++;
+  }
+  return !invalid_last_digit;
+}
+
+uint32_t DuplicateFinder::Hash(Vector<const uint8_t> key, bool is_one_byte) {
+  // Primitive hash function, almost identical to the one used
+  // for strings (except that it's seeded by the length and representation).
+  int length = key.length();
+  uint32_t hash = (length << 1) | (is_one_byte ? 1 : 0);
+  for (int i = 0; i < length; i++) {
+    uint32_t c = key[i];
+    hash = (hash + c) * 1025;
+    hash ^= (hash >> 6);
+  }
+  return hash;
+}
+
+bool DuplicateFinder::Match(void* first, void* second) {
+  // Decode lengths.
+  // Length + representation is encoded as base 128, most significant heptet
+  // first, with an 8th bit being non-zero while there are more heptets.
+  // The value encodes the number of bytes following, and whether the original
+  // was Latin1.
+  byte* s1 = reinterpret_cast<byte*>(first);
+  byte* s2 = reinterpret_cast<byte*>(second);
+  uint32_t length_one_byte_field = 0;
+  byte c1;
+  do {
+    c1 = *s1;
+    if (c1 != *s2) return false;
+    length_one_byte_field = (length_one_byte_field << 7) | (c1 & 0x7f);
+    s1++;
+    s2++;
+  } while ((c1 & 0x80) != 0);
+  int length = static_cast<int>(length_one_byte_field >> 1);
+  return memcmp(s1, s2, length) == 0;
+}
+
+byte* DuplicateFinder::BackupKey(Vector<const uint8_t> bytes,
+                                 bool is_one_byte) {
+  uint32_t one_byte_length = (bytes.length() << 1) | (is_one_byte ? 1 : 0);
+  backing_store_.StartSequence();
+  // Emit one_byte_length as base-128 encoded number, with the 7th bit set
+  // on the byte of every heptet except the last, least significant, one.
+  if (one_byte_length >= (1 << 7)) {
+    if (one_byte_length >= (1 << 14)) {
+      if (one_byte_length >= (1 << 21)) {
+        if (one_byte_length >= (1 << 28)) {
+          backing_store_.Add(
+              static_cast<uint8_t>((one_byte_length >> 28) | 0x80));
+        }
+        backing_store_.Add(
+            static_cast<uint8_t>((one_byte_length >> 21) | 0x80u));
+      }
+      backing_store_.Add(static_cast<uint8_t>((one_byte_length >> 14) | 0x80u));
+    }
+    backing_store_.Add(static_cast<uint8_t>((one_byte_length >> 7) | 0x80u));
+  }
+  backing_store_.Add(static_cast<uint8_t>(one_byte_length & 0x7f));
+
+  backing_store_.AddBlock(bytes);
+  return backing_store_.EndSequence().start();
+}
+
+}  // namespace internal
+}  // namespace v8
diff --git a/src/parsing/duplicate-finder.h b/src/parsing/duplicate-finder.h
new file mode 100644
index 0000000..a3858e7
--- /dev/null
+++ b/src/parsing/duplicate-finder.h
@@ -0,0 +1,64 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_PARSING_DUPLICATE_FINDER_H_
+#define V8_PARSING_DUPLICATE_FINDER_H_
+
+#include "src/base/hashmap.h"
+#include "src/collector.h"
+
+namespace v8 {
+namespace internal {
+
+class UnicodeCache;
+
+// DuplicateFinder discovers duplicate symbols.
+class DuplicateFinder {
+ public:
+  explicit DuplicateFinder(UnicodeCache* constants)
+      : unicode_constants_(constants), backing_store_(16), map_(&Match) {}
+
+  int AddOneByteSymbol(Vector<const uint8_t> key, int value);
+  int AddTwoByteSymbol(Vector<const uint16_t> key, int value);
+  // Add a number literal by converting it (if necessary)
+  // to the string that ToString(ToNumber(literal)) would generate,
+  // and then adding that string with AddOneByteSymbol.
+  // This string is the actual value used as key in an object literal,
+  // and the one that must be different from the other keys.
+  int AddNumber(Vector<const uint8_t> key, int value);
+
+ private:
+  int AddSymbol(Vector<const uint8_t> key, bool is_one_byte, int value);
+  // Backs up the key and its length in the backing store.
+  // The backup is stored with a base-128 encoding of the
+  // length (plus a bit saying whether the string is one byte),
+  // followed by the bytes of the key.
+  uint8_t* BackupKey(Vector<const uint8_t> key, bool is_one_byte);
+
+  // Compare two encoded keys (both pointing into the backing store)
+  // for having the same base-128 encoded lengths and representation,
+  // and then having the same 'length' bytes following.
+  static bool Match(void* first, void* second);
+  // Creates a hash from a sequence of bytes.
+  static uint32_t Hash(Vector<const uint8_t> key, bool is_one_byte);
+  // Checks whether a string containing a JS number is its canonical
+  // form.
+  static bool IsNumberCanonical(Vector<const uint8_t> key);
+
+  // Size of buffer. Sufficient for using it to call DoubleToCString
+  // from conversions.h.
+  static const int kBufferSize = 100;
+
+  UnicodeCache* unicode_constants_;
+  // Backing store used to store strings used as hashmap keys.
+  SequenceCollector<unsigned char> backing_store_;
+  base::CustomMatcherHashMap map_;
+  // Buffer used for string->number->canonical string conversions.
+  char number_buffer_[kBufferSize];
+};
+
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_PARSING_DUPLICATE_FINDER_H_
diff --git a/src/parsing/expression-classifier.h b/src/parsing/expression-classifier.h
index 9190e18..6a1fbac 100644
--- a/src/parsing/expression-classifier.h
+++ b/src/parsing/expression-classifier.h
@@ -7,11 +7,12 @@
 
 #include "src/messages.h"
 #include "src/parsing/scanner.h"
-#include "src/parsing/token.h"
 
 namespace v8 {
 namespace internal {
 
+class DuplicateFinder;
+
 #define ERROR_CODES(T)                       \
   T(ExpressionProduction, 0)                 \
   T(FormalParameterInitializerProduction, 1) \
@@ -21,11 +22,32 @@
   T(StrictModeFormalParametersProduction, 5) \
   T(ArrowFormalParametersProduction, 6)      \
   T(LetPatternProduction, 7)                 \
-  T(ObjectLiteralProduction, 8)              \
-  T(TailCallExpressionProduction, 9)         \
-  T(AsyncArrowFormalParametersProduction, 10)
+  T(TailCallExpressionProduction, 8)         \
+  T(AsyncArrowFormalParametersProduction, 9)
 
-template <typename Traits>
+// Expression classifiers serve two purposes:
+//
+// 1) They keep track of error messages that are pending (and other
+//    related information), waiting for the parser to decide whether
+//    the parsed expression is a pattern or not.
+// 2) They keep track of expressions that may need to be rewritten, if
+//    the parser decides that they are not patterns.  (A different
+//    mechanism implements the rewriting of patterns.)
+//
+// Expression classifiers are used by the parser in a stack fashion.
+// Each new classifier is pushed on top of the stack.  This happens
+// automatically by the class's constructor.  While on top of the
+// stack, the classifier records pending error messages and tracks the
+// pending non-patterns of the expression that is being parsed.
+//
+// At the end of its life, a classifier is either "accumulated" to the
+// one that is below it on the stack, or is "discarded".  The former
+// is achieved by calling the method Accumulate.  The latter is
+// achieved automatically by the destructor, but it can happen earlier
+// by calling the method Discard.  Both actions result in removing the
+// classifier from the parser's stack.
+
+template <typename Types>
 class ExpressionClassifier {
  public:
   enum ErrorKind : unsigned {
@@ -55,51 +77,41 @@
     const char* arg;
   };
 
+  // clang-format off
   enum TargetProduction : unsigned {
 #define DEFINE_PRODUCTION(NAME, CODE) NAME = 1 << CODE,
     ERROR_CODES(DEFINE_PRODUCTION)
 #undef DEFINE_PRODUCTION
 
-        ExpressionProductions =
-            (ExpressionProduction | FormalParameterInitializerProduction |
-             TailCallExpressionProduction),
-    PatternProductions = (BindingPatternProduction |
-                          AssignmentPatternProduction | LetPatternProduction),
-    FormalParametersProductions = (DistinctFormalParametersProduction |
-                                   StrictModeFormalParametersProduction),
-    AllProductions =
-        (ExpressionProductions | PatternProductions |
-         FormalParametersProductions | ArrowFormalParametersProduction |
-         ObjectLiteralProduction | AsyncArrowFormalParametersProduction)
+#define DEFINE_ALL_PRODUCTIONS(NAME, CODE) NAME |
+    AllProductions = ERROR_CODES(DEFINE_ALL_PRODUCTIONS) /* | */ 0
+#undef DEFINE_ALL_PRODUCTIONS
   };
+  // clang-format on
 
   enum FunctionProperties : unsigned {
     NonSimpleParameter = 1 << 0
   };
 
-  explicit ExpressionClassifier(const Traits* t)
-      : zone_(t->zone()),
-        non_patterns_to_rewrite_(t->GetNonPatternList()),
-        reported_errors_(t->GetReportedErrorList()),
-        duplicate_finder_(nullptr),
-        invalid_productions_(0),
-        function_properties_(0) {
-    reported_errors_begin_ = reported_errors_end_ = reported_errors_->length();
-    non_pattern_begin_ = non_patterns_to_rewrite_->length();
-  }
-
-  ExpressionClassifier(const Traits* t, DuplicateFinder* duplicate_finder)
-      : zone_(t->zone()),
-        non_patterns_to_rewrite_(t->GetNonPatternList()),
-        reported_errors_(t->GetReportedErrorList()),
+  explicit ExpressionClassifier(typename Types::Base* base,
+                                DuplicateFinder* duplicate_finder = nullptr)
+      : base_(base),
+        previous_(base->classifier_),
+        zone_(base->impl()->zone()),
+        non_patterns_to_rewrite_(base->impl()->GetNonPatternList()),
+        reported_errors_(base->impl()->GetReportedErrorList()),
         duplicate_finder_(duplicate_finder),
         invalid_productions_(0),
         function_properties_(0) {
+    base->classifier_ = this;
     reported_errors_begin_ = reported_errors_end_ = reported_errors_->length();
     non_pattern_begin_ = non_patterns_to_rewrite_->length();
   }
 
-  ~ExpressionClassifier() { Discard(); }
+  V8_INLINE ~ExpressionClassifier() {
+    Discard();
+    if (base_->classifier_ == this) base_->classifier_ = previous_;
+  }
 
   V8_INLINE bool is_valid(unsigned productions) const {
     return (invalid_productions_ & productions) == 0;
@@ -179,14 +191,6 @@
     return reported_error(kLetPatternProduction);
   }
 
-  V8_INLINE bool has_object_literal_error() const {
-    return !is_valid(ObjectLiteralProduction);
-  }
-
-  V8_INLINE const Error& object_literal_error() const {
-    return reported_error(kObjectLiteralProduction);
-  }
-
   V8_INLINE bool has_tail_call_expression() const {
     return !is_valid(TailCallExpressionProduction);
   }
@@ -295,14 +299,6 @@
     Add(Error(loc, message, kLetPatternProduction, arg));
   }
 
-  void RecordObjectLiteralError(const Scanner::Location& loc,
-                                MessageTemplate::Template message,
-                                const char* arg = nullptr) {
-    if (has_object_literal_error()) return;
-    invalid_productions_ |= ObjectLiteralProduction;
-    Add(Error(loc, message, kObjectLiteralProduction, arg));
-  }
-
   void RecordTailCallExpressionError(const Scanner::Location& loc,
                                      MessageTemplate::Template message,
                                      const char* arg = nullptr) {
@@ -316,7 +312,14 @@
     DCHECK_EQ(inner->reported_errors_, reported_errors_);
     DCHECK_EQ(inner->reported_errors_begin_, reported_errors_end_);
     DCHECK_EQ(inner->reported_errors_end_, reported_errors_->length());
-    if (merge_non_patterns) MergeNonPatterns(inner);
+    DCHECK_EQ(inner->non_patterns_to_rewrite_, non_patterns_to_rewrite_);
+    DCHECK_LE(non_pattern_begin_, inner->non_pattern_begin_);
+    DCHECK_LE(inner->non_pattern_begin_, non_patterns_to_rewrite_->length());
+    // Merge non-patterns from the inner classifier, or discard them.
+    if (merge_non_patterns)
+      inner->non_pattern_begin_ = non_patterns_to_rewrite_->length();
+    else
+      non_patterns_to_rewrite_->Rewind(inner->non_pattern_begin_);
     // Propagate errors from inner, but don't overwrite already recorded
     // errors.
     unsigned non_arrow_inner_invalid_productions =
@@ -393,10 +396,7 @@
     non_patterns_to_rewrite_->Rewind(non_pattern_begin_);
   }
 
-  V8_INLINE void MergeNonPatterns(ExpressionClassifier* inner) {
-    DCHECK_LE(non_pattern_begin_, inner->non_pattern_begin_);
-    inner->non_pattern_begin_ = inner->non_patterns_to_rewrite_->length();
-  }
+  ExpressionClassifier* previous() const { return previous_; }
 
  private:
   V8_INLINE const Error& reported_error(ErrorKind kind) const {
@@ -410,6 +410,9 @@
     // We should only be looking for an error when we know that one has
     // been reported.  But we're not...  So this is to make sure we have
     // the same behaviour.
+    UNREACHABLE();
+
+    // Make MSVC happy by returning an error from this inaccessible path.
     static Error none;
     return none;
   }
@@ -434,8 +437,10 @@
     reported_errors_end_++;
   }
 
+  typename Types::Base* base_;
+  ExpressionClassifier* previous_;
   Zone* zone_;
-  ZoneList<typename Traits::Type::Expression>* non_patterns_to_rewrite_;
+  ZoneList<typename Types::Expression>* non_patterns_to_rewrite_;
   ZoneList<Error>* reported_errors_;
   DuplicateFinder* duplicate_finder_;
   // The uint16_t for non_pattern_begin_ will not be enough in the case,
@@ -456,6 +461,8 @@
   // stack overflow while parsing.
   uint16_t reported_errors_begin_;
   uint16_t reported_errors_end_;
+
+  DISALLOW_COPY_AND_ASSIGN(ExpressionClassifier);
 };
 
 
diff --git a/src/parsing/func-name-inferrer.cc b/src/parsing/func-name-inferrer.cc
index 0821be0..a86e1c2 100644
--- a/src/parsing/func-name-inferrer.cc
+++ b/src/parsing/func-name-inferrer.cc
@@ -45,9 +45,11 @@
 }
 
 void FuncNameInferrer::RemoveAsyncKeywordFromEnd() {
-  DCHECK(names_stack_.length() > 0);
-  DCHECK(names_stack_.last().name->IsOneByteEqualTo("async"));
-  names_stack_.RemoveLast();
+  if (IsOpen()) {
+    DCHECK(names_stack_.length() > 0);
+    DCHECK(names_stack_.last().name->IsOneByteEqualTo("async"));
+    names_stack_.RemoveLast();
+  }
 }
 
 const AstString* FuncNameInferrer::MakeNameFromStack() {
diff --git a/src/parsing/func-name-inferrer.h b/src/parsing/func-name-inferrer.h
index cffd8a8..cc9204b 100644
--- a/src/parsing/func-name-inferrer.h
+++ b/src/parsing/func-name-inferrer.h
@@ -6,7 +6,7 @@
 #define V8_PARSING_FUNC_NAME_INFERRER_H_
 
 #include "src/handles.h"
-#include "src/zone.h"
+#include "src/zone/zone.h"
 
 namespace v8 {
 namespace internal {
diff --git a/src/parsing/parameter-initializer-rewriter.cc b/src/parsing/parameter-initializer-rewriter.cc
index b12a80f..73224a2 100644
--- a/src/parsing/parameter-initializer-rewriter.cc
+++ b/src/parsing/parameter-initializer-rewriter.cc
@@ -47,9 +47,9 @@
   }
   // No need to visit the constructor since it will have the class
   // scope on its scope chain.
-  ZoneList<ObjectLiteralProperty*>* props = class_literal->properties();
+  ZoneList<ClassLiteralProperty*>* props = class_literal->properties();
   for (int i = 0; i < props->length(); ++i) {
-    ObjectLiteralProperty* prop = props->at(i);
+    ClassLiteralProperty* prop = props->at(i);
     if (!prop->key()->IsLiteral()) {
       Visit(prop->key());
     }
diff --git a/src/parsing/parameter-initializer-rewriter.h b/src/parsing/parameter-initializer-rewriter.h
index a0ff7d2..5e409b4 100644
--- a/src/parsing/parameter-initializer-rewriter.h
+++ b/src/parsing/parameter-initializer-rewriter.h
@@ -5,7 +5,7 @@
 #ifndef V8_PARSING_PARAMETER_EXPRESSION_REWRITER_H_
 #define V8_PARSING_PARAMETER_EXPRESSION_REWRITER_H_
 
-#include "src/types.h"
+#include "src/ast/ast-types.h"
 
 namespace v8 {
 namespace internal {
diff --git a/src/parsing/parse-info.cc b/src/parsing/parse-info.cc
index dfec061..5b9b5e4 100644
--- a/src/parsing/parse-info.cc
+++ b/src/parsing/parse-info.cc
@@ -33,7 +33,9 @@
 
 ParseInfo::ParseInfo(Zone* zone, Handle<JSFunction> function)
     : ParseInfo(zone, Handle<SharedFunctionInfo>(function->shared())) {
-  set_context(Handle<Context>(function->context()));
+  if (!function->context()->IsNativeContext()) {
+    set_outer_scope_info(handle(function->context()->scope_info()));
+  }
 }
 
 ParseInfo::ParseInfo(Zone* zone, Handle<SharedFunctionInfo> shared)
@@ -86,17 +88,13 @@
   return (compiler_hints_ & (1 << SharedFunctionInfo::kIsDeclaration)) != 0;
 }
 
-bool ParseInfo::is_arrow() const {
-  return (compiler_hints_ & (1 << SharedFunctionInfo::kIsArrow)) != 0;
+bool ParseInfo::requires_class_field_init() const {
+  return (compiler_hints_ &
+          (1 << SharedFunctionInfo::kRequiresClassFieldInit)) != 0;
 }
-
-bool ParseInfo::is_async() const {
-  return (compiler_hints_ & (1 << SharedFunctionInfo::kIsAsyncFunction)) != 0;
-}
-
-bool ParseInfo::is_default_constructor() const {
-  return (compiler_hints_ & (1 << SharedFunctionInfo::kIsDefaultConstructor)) !=
-         0;
+bool ParseInfo::is_class_field_initializer() const {
+  return (compiler_hints_ &
+          (1 << SharedFunctionInfo::kIsClassFieldInitializer)) != 0;
 }
 
 FunctionKind ParseInfo::function_kind() const {
diff --git a/src/parsing/parse-info.h b/src/parsing/parse-info.h
index 6176135..4aedae4 100644
--- a/src/parsing/parse-info.h
+++ b/src/parsing/parse-info.h
@@ -148,9 +148,8 @@
 
   // Getters for individual compiler hints.
   bool is_declaration() const;
-  bool is_arrow() const;
-  bool is_async() const;
-  bool is_default_constructor() const;
+  bool requires_class_field_init() const;
+  bool is_class_field_initializer() const;
   FunctionKind function_kind() const;
 
   //--------------------------------------------------------------------------
@@ -159,11 +158,15 @@
   Isolate* isolate() const { return isolate_; }
   Handle<SharedFunctionInfo> shared_info() const { return shared_; }
   Handle<Script> script() const { return script_; }
-  Handle<Context> context() const { return context_; }
+  MaybeHandle<ScopeInfo> maybe_outer_scope_info() const {
+    return maybe_outer_scope_info_;
+  }
   void clear_script() { script_ = Handle<Script>::null(); }
   void set_isolate(Isolate* isolate) { isolate_ = isolate; }
   void set_shared_info(Handle<SharedFunctionInfo> shared) { shared_ = shared; }
-  void set_context(Handle<Context> context) { context_ = context; }
+  void set_outer_scope_info(Handle<ScopeInfo> outer_scope_info) {
+    maybe_outer_scope_info_ = outer_scope_info;
+  }
   void set_script(Handle<Script> script) { script_ = script; }
   //--------------------------------------------------------------------------
 
@@ -178,7 +181,10 @@
   void ReopenHandlesInNewHandleScope() {
     shared_ = Handle<SharedFunctionInfo>(*shared_);
     script_ = Handle<Script>(*script_);
-    context_ = Handle<Context>(*context_);
+    Handle<ScopeInfo> outer_scope_info;
+    if (maybe_outer_scope_info_.ToHandle(&outer_scope_info)) {
+      maybe_outer_scope_info_ = Handle<ScopeInfo>(*outer_scope_info);
+    }
   }
 
 #ifdef DEBUG
@@ -224,7 +230,7 @@
   Isolate* isolate_;
   Handle<SharedFunctionInfo> shared_;
   Handle<Script> script_;
-  Handle<Context> context_;
+  MaybeHandle<ScopeInfo> maybe_outer_scope_info_;
 
   //----------- Inputs+Outputs of parsing and scope analysis -----------------
   ScriptData** cached_data_;  // used if available, populated if requested.
diff --git a/src/parsing/parser-base.h b/src/parsing/parser-base.h
index b8703d0..1ebbee4 100644
--- a/src/parsing/parser-base.h
+++ b/src/parsing/parser-base.h
@@ -5,6 +5,7 @@
 #ifndef V8_PARSING_PARSER_BASE_H
 #define V8_PARSING_PARSER_BASE_H
 
+#include "src/ast/ast.h"
 #include "src/ast/scopes.h"
 #include "src/bailout-reason.h"
 #include "src/base/hashmap.h"
@@ -56,59 +57,6 @@
   return static_cast<T>(bitfield) & static_cast<T>(mask);
 }
 
-enum class MethodKind {
-  kNormal = 0,
-  kStatic = 1 << 0,
-  kGenerator = 1 << 1,
-  kStaticGenerator = kStatic | kGenerator,
-  kAsync = 1 << 2,
-  kStaticAsync = kStatic | kAsync,
-
-  /* Any non-ordinary method kinds */
-  kSpecialMask = kGenerator | kAsync
-};
-
-inline bool IsValidMethodKind(MethodKind kind) {
-  return kind == MethodKind::kNormal || kind == MethodKind::kStatic ||
-         kind == MethodKind::kGenerator ||
-         kind == MethodKind::kStaticGenerator || kind == MethodKind::kAsync ||
-         kind == MethodKind::kStaticAsync;
-}
-
-static inline MethodKind operator|(MethodKind lhs, MethodKind rhs) {
-  typedef unsigned char T;
-  return static_cast<MethodKind>(static_cast<T>(lhs) | static_cast<T>(rhs));
-}
-
-static inline MethodKind& operator|=(MethodKind& lhs, const MethodKind& rhs) {
-  lhs = lhs | rhs;
-  DCHECK(IsValidMethodKind(lhs));
-  return lhs;
-}
-
-static inline bool operator&(MethodKind bitfield, MethodKind mask) {
-  typedef unsigned char T;
-  return static_cast<T>(bitfield) & static_cast<T>(mask);
-}
-
-inline bool IsNormalMethod(MethodKind kind) {
-  return kind == MethodKind::kNormal;
-}
-
-inline bool IsSpecialMethod(MethodKind kind) {
-  return kind & MethodKind::kSpecialMask;
-}
-
-inline bool IsStaticMethod(MethodKind kind) {
-  return kind & MethodKind::kStatic;
-}
-
-inline bool IsGeneratorMethod(MethodKind kind) {
-  return kind & MethodKind::kGenerator;
-}
-
-inline bool IsAsyncMethod(MethodKind kind) { return kind & MethodKind::kAsync; }
-
 struct FormalParametersBase {
   explicit FormalParametersBase(DeclarationScope* scope) : scope(scope) {}
   DeclarationScope* scope;
@@ -126,8 +74,8 @@
 // thus it must never be used where only a single statement
 // is correct (e.g. an if statement branch w/o braces)!
 
-#define CHECK_OK_CUSTOM(x) ok); \
-  if (!*ok) return this->x();   \
+#define CHECK_OK_CUSTOM(x, ...) ok);       \
+  if (!*ok) return impl()->x(__VA_ARGS__); \
   ((void)0
 #define DUMMY )  // to make indentation work
 #undef DUMMY
@@ -140,93 +88,86 @@
 // following the Curiously Recurring Template Pattern (CRTP).
 // The structure of the parser objects is roughly the following:
 //
-//   // Common denominator, needed to avoid cyclic dependency.
-//   // Instances of this template will end up with very minimal
-//   // definitions, ideally containing just typedefs.
+//   // A structure template containing type definitions, needed to
+//   // avoid a cyclic dependency.
 //   template <typename Impl>
-//   class ParserBaseTraits;
-
+//   struct ParserTypes;
+//
 //   // The parser base object, which should just implement pure
 //   // parser behavior.  The Impl parameter is the actual derived
 //   // class (according to CRTP), which implements impure parser
 //   // behavior.
 //   template <typename Impl>
-//   class ParserBase : public ParserBaseTraits<Impl> { ... };
+//   class ParserBase { ... };
 //
 //   // And then, for each parser variant (e.g., parser, preparser, etc):
 //   class Parser;
 //
 //   template <>
-//   class ParserBaseTraits<Parser> { ... };
+//   class ParserTypes<Parser> { ... };
 //
 //   class Parser : public ParserBase<Parser> { ... };
 //
-// TODO(nikolaos): Currently the traits objects contain many things
-// that will be moved to the implementation objects or to the parser
-// base.  The following comments will have to change, when this happens.
+// The parser base object implements pure parsing, according to the
+// language grammar.  Different parser implementations may exhibit
+// different parser-driven behavior that is not considered as pure
+// parsing, e.g., early error detection and reporting, AST generation, etc.
 
-// The traits class template encapsulates the differences between
-// parser/pre-parser implementations.  In particular:
-
-// - Return types: For example, Parser functions return Expression* and
-// PreParser functions return PreParserExpression.
-
-// - Creating parse tree nodes: Parser generates an AST during the recursive
-// descent. PreParser doesn't create a tree. Instead, it passes around minimal
-// data objects (PreParserExpression, PreParserIdentifier etc.) which contain
-// just enough data for the upper layer functions. PreParserFactory is
-// responsible for creating these dummy objects. It provides a similar kind of
-// interface as AstNodeFactory, so ParserBase doesn't need to care which one is
-// used.
-
-// - Miscellaneous other tasks interleaved with the recursive descent. For
-// example, Parser keeps track of which function literals should be marked as
-// pretenured, and PreParser doesn't care.
-
-// The traits are expected to contain the following typedefs:
+// The ParserTypes structure encapsulates the differences in the
+// types used in parsing methods.  E.g., Parser methods use Expression*
+// and PreParser methods use PreParserExpression.  For any given parser
+// implementation class Impl, it is expected to contain the following typedefs:
+//
 // template <>
-// class ParserBaseTraits<Impl> {
-//   // In particular...
-//   struct Type {
-//     typedef GeneratorVariable;
-//     typedef AstProperties;
-//     typedef ExpressionClassifier;
-//     // Return types for traversing functions.
-//     typedef Identifier;
-//     typedef Expression;
-//     typedef YieldExpression;
-//     typedef FunctionLiteral;
-//     typedef ClassLiteral;
-//     typedef Literal;
-//     typedef ObjectLiteralProperty;
-//     typedef ExpressionList;
-//     typedef PropertyList;
-//     typedef FormalParameter;
-//     typedef FormalParameters;
-//     typedef StatementList;
-//     // For constructing objects returned by the traversing functions.
-//     typedef Factory;
-//   };
-//   // ...
+// struct ParserTypes<Impl> {
+//   // Synonyms for ParserBase<Impl> and Impl, respectively.
+//   typedef Base;
+//   typedef Impl;
+//   // TODO(nikolaos): this one will probably go away, as it is
+//   // not related to pure parsing.
+//   typedef Variable;
+//   // Return types for traversing functions.
+//   typedef Identifier;
+//   typedef Expression;
+//   typedef FunctionLiteral;
+//   typedef ObjectLiteralProperty;
+//   typedef ClassLiteralProperty;
+//   typedef ExpressionList;
+//   typedef ObjectPropertyList;
+//   typedef ClassPropertyList;
+//   typedef FormalParameters;
+//   typedef Statement;
+//   typedef StatementList;
+//   typedef Block;
+//   typedef BreakableStatement;
+//   typedef IterationStatement;
+//   // For constructing objects returned by the traversing functions.
+//   typedef Factory;
+//   // For other implementation-specific tasks.
+//   typedef Target;
+//   typedef TargetScope;
 // };
 
 template <typename Impl>
-class ParserBaseTraits;
+struct ParserTypes;
 
 template <typename Impl>
-class ParserBase : public ParserBaseTraits<Impl> {
+class ParserBase {
  public:
-  // Shorten type names defined by Traits.
-  typedef ParserBaseTraits<Impl> Traits;
-  typedef typename Traits::Type::Expression ExpressionT;
-  typedef typename Traits::Type::Identifier IdentifierT;
-  typedef typename Traits::Type::FormalParameter FormalParameterT;
-  typedef typename Traits::Type::FormalParameters FormalParametersT;
-  typedef typename Traits::Type::FunctionLiteral FunctionLiteralT;
-  typedef typename Traits::Type::Literal LiteralT;
-  typedef typename Traits::Type::ObjectLiteralProperty ObjectLiteralPropertyT;
-  typedef typename Traits::Type::StatementList StatementListT;
-  typedef typename Traits::Type::ExpressionClassifier ExpressionClassifier;
+  // Shorten type names defined by ParserTypes<Impl>.
+  typedef ParserTypes<Impl> Types;
+  typedef typename Types::Identifier IdentifierT;
+  typedef typename Types::Expression ExpressionT;
+  typedef typename Types::FunctionLiteral FunctionLiteralT;
+  typedef typename Types::ObjectLiteralProperty ObjectLiteralPropertyT;
+  typedef typename Types::ClassLiteralProperty ClassLiteralPropertyT;
+  typedef typename Types::ExpressionList ExpressionListT;
+  typedef typename Types::FormalParameters FormalParametersT;
+  typedef typename Types::Statement StatementT;
+  typedef typename Types::StatementList StatementListT;
+  typedef typename Types::Block BlockT;
+  typedef typename v8::internal::ExpressionClassifier<Types>
+      ExpressionClassifier;
 
   // All implementation-specific methods must be called through this.
   Impl* impl() { return static_cast<Impl*>(this); }
@@ -246,6 +187,7 @@
         parsing_module_(false),
         stack_limit_(stack_limit),
         zone_(zone),
+        classifier_(nullptr),
         scanner_(scanner),
         stack_overflow_(false),
         allow_lazy_(false),
@@ -257,7 +199,8 @@
         allow_harmony_function_sent_(false),
         allow_harmony_async_await_(false),
         allow_harmony_restrictive_generators_(false),
-        allow_harmony_trailing_commas_(false) {}
+        allow_harmony_trailing_commas_(false),
+        allow_harmony_class_fields_(false) {}
 
 #define ALLOW_ACCESSORS(name)                           \
   bool allow_##name() const { return allow_##name##_; } \
@@ -273,6 +216,7 @@
   ALLOW_ACCESSORS(harmony_async_await);
   ALLOW_ACCESSORS(harmony_restrictive_generators);
   ALLOW_ACCESSORS(harmony_trailing_commas);
+  ALLOW_ACCESSORS(harmony_class_fields);
 
 #undef ALLOW_ACCESSORS
 
@@ -280,7 +224,12 @@
 
   void set_stack_limit(uintptr_t stack_limit) { stack_limit_ = stack_limit; }
 
+  Zone* zone() const { return zone_; }
+
  protected:
+  friend class v8::internal::ExpressionClassifier<ParserTypes<Impl>>;
+
+  // clang-format off
   enum AllowRestrictedIdentifiers {
     kAllowRestrictedIdentifiers,
     kDontAllowRestrictedIdentifiers
@@ -291,14 +240,26 @@
     PARSE_EAGERLY
   };
 
+  enum LazyParsingResult {
+    kLazyParsingComplete,
+    kLazyParsingAborted
+  };
+
   enum VariableDeclarationContext {
     kStatementListItem,
     kStatement,
     kForStatement
   };
 
+  enum class FunctionBodyType {
+    kNormal,
+    kSingleExpression
+  };
+  // clang-format on
+
   class Checkpoint;
-  class ObjectLiteralCheckerBase;
+  class ClassLiteralChecker;
+  class ObjectLiteralChecker;
 
   // ---------------------------------------------------------------------------
   // ScopeState and its subclasses implement the parser's scope stack.
@@ -333,8 +294,8 @@
     // allocation.
     // TODO(verwaest): Move to LazyBlockState class that only allocates the
     // scope when needed.
-    explicit BlockState(ScopeState** scope_stack)
-        : ScopeState(scope_stack, NewScope(*scope_stack)) {}
+    explicit BlockState(Zone* zone, ScopeState** scope_stack)
+        : ScopeState(scope_stack, NewScope(zone, *scope_stack)) {}
 
     void SetNonlinear() { this->scope()->SetNonlinear(); }
     void set_start_position(int pos) { this->scope()->set_start_position(pos); }
@@ -348,9 +309,8 @@
     }
 
    private:
-    Scope* NewScope(ScopeState* outer_state) {
+    Scope* NewScope(Zone* zone, ScopeState* outer_state) {
       Scope* parent = outer_state->scope();
-      Zone* zone = outer_state->zone();
       return new (zone) Scope(zone, parent, BLOCK_SCOPE);
     }
   };
@@ -384,14 +344,6 @@
       expressions_.Add(expr, zone_);
     }
 
-    void AddExplicitTailCall(ExpressionT expr, const Scanner::Location& loc) {
-      if (!has_explicit_tail_calls()) {
-        loc_ = loc;
-        has_explicit_tail_calls_ = true;
-      }
-      expressions_.Add(expr, zone_);
-    }
-
     void Append(const TailCallExpressionList& other) {
       if (!has_explicit_tail_calls()) {
         loc_ = other.loc_;
@@ -425,9 +377,13 @@
   class FunctionState final : public ScopeState {
    public:
     FunctionState(FunctionState** function_state_stack,
-                  ScopeState** scope_stack, Scope* scope, FunctionKind kind);
+                  ScopeState** scope_stack, DeclarationScope* scope);
     ~FunctionState();
 
+    DeclarationScope* scope() const {
+      return ScopeState::scope()->AsDeclarationScope();
+    }
+
     int NextMaterializedLiteralIndex() {
       return next_materialized_literal_index_++;
     }
@@ -442,24 +398,27 @@
     void AddProperty() { expected_property_count_++; }
     int expected_property_count() { return expected_property_count_; }
 
-    bool is_generator() const { return IsGeneratorFunction(kind_); }
-    bool is_async_function() const { return IsAsyncFunction(kind_); }
-    bool is_resumable() const { return is_generator() || is_async_function(); }
-
-    FunctionKind kind() const { return kind_; }
+    FunctionKind kind() const { return scope()->function_kind(); }
     FunctionState* outer() const { return outer_function_state_; }
 
-    void set_generator_object_variable(
-        typename Traits::Type::GeneratorVariable* variable) {
+    void set_generator_object_variable(typename Types::Variable* variable) {
       DCHECK(variable != NULL);
-      DCHECK(is_resumable());
+      DCHECK(IsResumableFunction(kind()));
       generator_object_variable_ = variable;
     }
-    typename Traits::Type::GeneratorVariable* generator_object_variable()
-        const {
+    typename Types::Variable* generator_object_variable() const {
       return generator_object_variable_;
     }
 
+    void set_promise_variable(typename Types::Variable* variable) {
+      DCHECK(variable != NULL);
+      DCHECK(IsAsyncFunction(kind()));
+      promise_variable_ = variable;
+    }
+    typename Types::Variable* promise_variable() const {
+      return promise_variable_;
+    }
+
     const ZoneList<DestructuringAssignment>&
         destructuring_assignments_to_rewrite() const {
       return destructuring_assignments_to_rewrite_;
@@ -474,14 +433,6 @@
         tail_call_expressions_.AddImplicitTailCall(expression);
       }
     }
-    void AddExplicitTailCallExpression(ExpressionT expression,
-                                       const Scanner::Location& loc) {
-      DCHECK(expression->IsCall());
-      if (return_expr_context() ==
-          ReturnExprContext::kInsideValidReturnStatement) {
-        tail_call_expressions_.AddExplicitTailCall(expression, loc);
-      }
-    }
 
     ZoneList<typename ExpressionClassifier::Error>* GetReportedErrorList() {
       return &reported_errors_;
@@ -530,11 +481,13 @@
     // Properties count estimation.
     int expected_property_count_;
 
-    FunctionKind kind_;
     // For generators, this variable may hold the generator object. It variable
     // is used by yield expressions and return statements. It is not necessary
     // for generator functions to have this variable set.
     Variable* generator_object_variable_;
+    // For async functions, this variable holds a temporary for the Promise
+    // being created as output of the async function.
+    Variable* promise_variable_;
 
     FunctionState** function_state_stack_;
     FunctionState* outer_function_state_;
@@ -644,8 +597,97 @@
     Mode old_mode_;
   };
 
+  struct DeclarationDescriptor {
+    enum Kind { NORMAL, PARAMETER };
+    Scope* scope;
+    Scope* hoist_scope;
+    VariableMode mode;
+    int declaration_pos;
+    int initialization_pos;
+    Kind declaration_kind;
+  };
+
+  struct DeclarationParsingResult {
+    struct Declaration {
+      Declaration(ExpressionT pattern, int initializer_position,
+                  ExpressionT initializer)
+          : pattern(pattern),
+            initializer_position(initializer_position),
+            initializer(initializer) {}
+
+      ExpressionT pattern;
+      int initializer_position;
+      ExpressionT initializer;
+    };
+
+    DeclarationParsingResult()
+        : declarations(4),
+          first_initializer_loc(Scanner::Location::invalid()),
+          bindings_loc(Scanner::Location::invalid()) {}
+
+    DeclarationDescriptor descriptor;
+    List<Declaration> declarations;
+    Scanner::Location first_initializer_loc;
+    Scanner::Location bindings_loc;
+  };
+
+  struct CatchInfo {
+   public:
+    explicit CatchInfo(ParserBase* parser)
+        : name(parser->impl()->EmptyIdentifier()),
+          variable(nullptr),
+          pattern(parser->impl()->EmptyExpression()),
+          scope(nullptr),
+          init_block(parser->impl()->NullBlock()),
+          inner_block(parser->impl()->NullBlock()),
+          for_promise_reject(false),
+          bound_names(1, parser->zone()),
+          tail_call_expressions(parser->zone()) {}
+    IdentifierT name;
+    Variable* variable;
+    ExpressionT pattern;
+    Scope* scope;
+    BlockT init_block;
+    BlockT inner_block;
+    bool for_promise_reject;
+    ZoneList<const AstRawString*> bound_names;
+    TailCallExpressionList tail_call_expressions;
+  };
+
+  struct ForInfo {
+   public:
+    explicit ForInfo(ParserBase* parser)
+        : bound_names(1, parser->zone()),
+          mode(ForEachStatement::ENUMERATE),
+          each_loc(),
+          parsing_result() {}
+    ZoneList<const AstRawString*> bound_names;
+    ForEachStatement::VisitMode mode;
+    Scanner::Location each_loc;
+    DeclarationParsingResult parsing_result;
+  };
+
+  struct ClassInfo {
+   public:
+    explicit ClassInfo(ParserBase* parser)
+        : proxy(nullptr),
+          extends(parser->impl()->EmptyExpression()),
+          properties(parser->impl()->NewClassPropertyList(4)),
+          instance_field_initializers(parser->impl()->NewExpressionList(0)),
+          constructor(parser->impl()->EmptyFunctionLiteral()),
+          has_seen_constructor(false),
+          static_initializer_var(nullptr) {}
+    VariableProxy* proxy;
+    ExpressionT extends;
+    typename Types::ClassPropertyList properties;
+    ExpressionListT instance_field_initializers;
+    FunctionLiteralT constructor;
+    bool has_seen_constructor;
+    Variable* static_initializer_var;
+  };
+
   DeclarationScope* NewScriptScope() const {
-    return new (zone()) DeclarationScope(zone());
+    return new (zone()) DeclarationScope(zone(), ast_value_factory());
   }
 
   DeclarationScope* NewVarblockScope() const {
@@ -653,7 +695,7 @@
   }
 
   ModuleScope* NewModuleScope(DeclarationScope* parent) const {
-    return new (zone()) ModuleScope(zone(), parent, ast_value_factory());
+    return new (zone()) ModuleScope(parent, ast_value_factory());
   }
 
   DeclarationScope* NewEvalScope(Scope* parent) const {
@@ -683,12 +725,18 @@
         new (zone()) DeclarationScope(zone(), scope(), FUNCTION_SCOPE, kind);
     // TODO(verwaest): Move into the DeclarationScope constructor.
     if (!IsArrowFunction(kind)) {
-      result->DeclareThis(ast_value_factory());
       result->DeclareDefaultFunctionVariables(ast_value_factory());
     }
     return result;
   }
 
+  V8_INLINE DeclarationScope* GetDeclarationScope() const {
+    return scope()->GetDeclarationScope();
+  }
+  V8_INLINE DeclarationScope* GetClosureScope() const {
+    return scope()->GetClosureScope();
+  }
+
   Scanner* scanner() const { return scanner_; }
   AstValueFactory* ast_value_factory() const { return ast_value_factory_; }
   int position() const { return scanner_->location().beg_pos; }
@@ -696,7 +744,6 @@
   bool stack_overflow() const { return stack_overflow_; }
   void set_stack_overflow() { stack_overflow_ = true; }
   Mode mode() const { return mode_; }
-  Zone* zone() const { return zone_; }
 
   INLINE(Token::Value peek()) {
     if (stack_overflow_) return Token::ILLEGAL;
@@ -761,8 +808,12 @@
     Expect(Token::SEMICOLON, ok);
   }
 
-  // A dummy function, just useful as an argument to CHECK_OK_CUSTOM.
+  // Dummy functions, just useful as arguments to CHECK_OK_CUSTOM.
   static void Void() {}
+  template <typename T>
+  static T Return(T result) {
+    return result;
+  }
 
   bool is_any_identifier(Token::Value token) {
     return token == Token::IDENTIFIER || token == Token::ENUM ||
@@ -796,7 +847,7 @@
     }
   }
 
-  bool CheckInOrOf(ForEachStatement::VisitMode* visit_mode, bool* ok) {
+  bool CheckInOrOf(ForEachStatement::VisitMode* visit_mode) {
     if (Check(Token::IN)) {
       *visit_mode = ForEachStatement::ENUMERATE;
       return true;
@@ -818,21 +869,19 @@
     Scanner::Location octal = scanner()->octal_position();
     if (octal.IsValid() && beg_pos <= octal.beg_pos &&
         octal.end_pos <= end_pos) {
-      ReportMessageAt(octal, message);
+      impl()->ReportMessageAt(octal, message);
       scanner()->clear_octal_position();
       *ok = false;
     }
   }
   // for now, this check just collects statistics.
-  void CheckDecimalLiteralWithLeadingZero(int* use_counts, int beg_pos,
-                                          int end_pos) {
+  void CheckDecimalLiteralWithLeadingZero(int beg_pos, int end_pos) {
     Scanner::Location token_location =
         scanner()->decimal_with_leading_zero_position();
     if (token_location.IsValid() && beg_pos <= token_location.beg_pos &&
         token_location.end_pos <= end_pos) {
       scanner()->clear_decimal_with_leading_zero_position();
-      if (use_counts != nullptr)
-        ++use_counts[v8::Isolate::kDecimalWithLeadingZeroInStrictMode];
+      impl()->CountUsage(v8::Isolate::kDecimalWithLeadingZeroInStrictMode);
     }
   }
 
@@ -846,9 +895,7 @@
                       ok);
   }
 
-  void CheckDestructuringElement(ExpressionT element,
-                                 ExpressionClassifier* classifier, int beg_pos,
-                                 int end_pos);
+  void CheckDestructuringElement(ExpressionT element, int beg_pos, int end_pos);
 
   // Checking the name of a function literal. This has to be done after parsing
   // the function, since the function can declare itself strict.
@@ -859,14 +906,14 @@
     // The function name needs to be checked in strict mode.
     if (is_sloppy(language_mode)) return;
 
-    if (this->IsEvalOrArguments(function_name)) {
-      Traits::ReportMessageAt(function_name_loc,
+    if (impl()->IsEvalOrArguments(function_name)) {
+      impl()->ReportMessageAt(function_name_loc,
                               MessageTemplate::kStrictEvalArguments);
       *ok = false;
       return;
     }
     if (function_name_validity == kFunctionNameIsStrictReserved) {
-      Traits::ReportMessageAt(function_name_loc,
+      impl()->ReportMessageAt(function_name_loc,
                               MessageTemplate::kUnexpectedStrictReserved);
       *ok = false;
       return;
@@ -880,50 +927,45 @@
     return Token::Precedence(token);
   }
 
-  typename Traits::Type::Factory* factory() { return &ast_node_factory_; }
+  typename Types::Factory* factory() { return &ast_node_factory_; }
 
   DeclarationScope* GetReceiverScope() const {
     return scope()->GetReceiverScope();
   }
   LanguageMode language_mode() { return scope()->language_mode(); }
-  bool is_generator() const { return function_state_->is_generator(); }
-  bool is_async_function() const {
-    return function_state_->is_async_function();
+  void RaiseLanguageMode(LanguageMode mode) {
+    LanguageMode old = scope()->language_mode();
+    impl()->SetLanguageMode(scope(), old > mode ? old : mode);
   }
-  bool is_resumable() const { return function_state_->is_resumable(); }
+  bool is_generator() const {
+    return IsGeneratorFunction(function_state_->kind());
+  }
+  bool is_async_function() const {
+    return IsAsyncFunction(function_state_->kind());
+  }
+  bool is_resumable() const {
+    return IsResumableFunction(function_state_->kind());
+  }
 
   // Report syntax errors.
-  void ReportMessage(MessageTemplate::Template message, const char* arg = NULL,
+  void ReportMessage(MessageTemplate::Template message) {
+    Scanner::Location source_location = scanner()->location();
+    impl()->ReportMessageAt(source_location, message,
+                            static_cast<const char*>(nullptr), kSyntaxError);
+  }
+
+  template <typename T>
+  void ReportMessage(MessageTemplate::Template message, T arg,
                      ParseErrorType error_type = kSyntaxError) {
     Scanner::Location source_location = scanner()->location();
-    Traits::ReportMessageAt(source_location, message, arg, error_type);
-  }
-
-  void ReportMessage(MessageTemplate::Template message, const AstRawString* arg,
-                     ParseErrorType error_type = kSyntaxError) {
-    Scanner::Location source_location = scanner()->location();
-    Traits::ReportMessageAt(source_location, message, arg, error_type);
-  }
-
-  void ReportMessageAt(Scanner::Location location,
-                       MessageTemplate::Template message,
-                       const char* arg = NULL,
-                       ParseErrorType error_type = kSyntaxError) {
-    Traits::ReportMessageAt(location, message, arg, error_type);
-  }
-
-  void ReportMessageAt(Scanner::Location location,
-                       MessageTemplate::Template message,
-                       const AstRawString* arg,
-                       ParseErrorType error_type = kSyntaxError) {
-    Traits::ReportMessageAt(location, message, arg, error_type);
+    impl()->ReportMessageAt(source_location, message, arg, error_type);
   }
 
   void ReportMessageAt(Scanner::Location location,
                        MessageTemplate::Template message,
                        ParseErrorType error_type) {
-    ReportMessageAt(location, message, static_cast<const char*>(nullptr),
-                    error_type);
+    impl()->ReportMessageAt(location, message,
+                            static_cast<const char*>(nullptr), error_type);
   }
 
   void GetUnexpectedTokenMessage(
@@ -938,59 +980,47 @@
 
   void ReportClassifierError(
       const typename ExpressionClassifier::Error& error) {
-    Traits::ReportMessageAt(error.location, error.message, error.arg,
+    impl()->ReportMessageAt(error.location, error.message, error.arg,
                             error.type);
   }
 
-  void ValidateExpression(const ExpressionClassifier* classifier, bool* ok) {
-    if (!classifier->is_valid_expression() ||
-        classifier->has_object_literal_error()) {
-      const Scanner::Location& a = classifier->expression_error().location;
-      const Scanner::Location& b =
-          classifier->object_literal_error().location;
-      if (a.beg_pos < 0 || (b.beg_pos >= 0 && a.beg_pos > b.beg_pos)) {
-        ReportClassifierError(classifier->object_literal_error());
-      } else {
-        ReportClassifierError(classifier->expression_error());
-      }
+  void ValidateExpression(bool* ok) {
+    if (!classifier()->is_valid_expression()) {
+      ReportClassifierError(classifier()->expression_error());
       *ok = false;
     }
   }
 
-  void ValidateFormalParameterInitializer(
-      const ExpressionClassifier* classifier, bool* ok) {
-    if (!classifier->is_valid_formal_parameter_initializer()) {
-      ReportClassifierError(classifier->formal_parameter_initializer_error());
+  void ValidateFormalParameterInitializer(bool* ok) {
+    if (!classifier()->is_valid_formal_parameter_initializer()) {
+      ReportClassifierError(classifier()->formal_parameter_initializer_error());
       *ok = false;
     }
   }
 
-  void ValidateBindingPattern(const ExpressionClassifier* classifier,
-                              bool* ok) {
-    if (!classifier->is_valid_binding_pattern()) {
-      ReportClassifierError(classifier->binding_pattern_error());
+  void ValidateBindingPattern(bool* ok) {
+    if (!classifier()->is_valid_binding_pattern()) {
+      ReportClassifierError(classifier()->binding_pattern_error());
       *ok = false;
     }
   }
 
-  void ValidateAssignmentPattern(const ExpressionClassifier* classifier,
-                                 bool* ok) {
-    if (!classifier->is_valid_assignment_pattern()) {
-      ReportClassifierError(classifier->assignment_pattern_error());
+  void ValidateAssignmentPattern(bool* ok) {
+    if (!classifier()->is_valid_assignment_pattern()) {
+      ReportClassifierError(classifier()->assignment_pattern_error());
       *ok = false;
     }
   }
 
-  void ValidateFormalParameters(const ExpressionClassifier* classifier,
-                                LanguageMode language_mode,
+  void ValidateFormalParameters(LanguageMode language_mode,
                                 bool allow_duplicates, bool* ok) {
     if (!allow_duplicates &&
-        !classifier->is_valid_formal_parameter_list_without_duplicates()) {
-      ReportClassifierError(classifier->duplicate_formal_parameter_error());
+        !classifier()->is_valid_formal_parameter_list_without_duplicates()) {
+      ReportClassifierError(classifier()->duplicate_formal_parameter_error());
       *ok = false;
     } else if (is_strict(language_mode) &&
-               !classifier->is_valid_strict_mode_formal_parameters()) {
-      ReportClassifierError(classifier->strict_mode_formal_parameter_error());
+               !classifier()->is_valid_strict_mode_formal_parameters()) {
+      ReportClassifierError(classifier()->strict_mode_formal_parameter_error());
       *ok = false;
     }
   }
@@ -999,78 +1029,73 @@
     return is_any_identifier(token) || token == Token::LPAREN;
   }
 
-  void ValidateArrowFormalParameters(const ExpressionClassifier* classifier,
-                                     ExpressionT expr,
+  void ValidateArrowFormalParameters(ExpressionT expr,
                                      bool parenthesized_formals, bool is_async,
                                      bool* ok) {
-    if (classifier->is_valid_binding_pattern()) {
+    if (classifier()->is_valid_binding_pattern()) {
       // A simple arrow formal parameter: IDENTIFIER => BODY.
-      if (!this->IsIdentifier(expr)) {
-        Traits::ReportMessageAt(scanner()->location(),
+      if (!impl()->IsIdentifier(expr)) {
+        impl()->ReportMessageAt(scanner()->location(),
                                 MessageTemplate::kUnexpectedToken,
                                 Token::String(scanner()->current_token()));
         *ok = false;
       }
-    } else if (!classifier->is_valid_arrow_formal_parameters()) {
+    } else if (!classifier()->is_valid_arrow_formal_parameters()) {
       // If after parsing the expr, we see an error but the expression is
       // neither a valid binding pattern nor a valid parenthesized formal
       // parameter list, show the "arrow formal parameters" error if the formals
       // started with a parenthesis, and the binding pattern error otherwise.
       const typename ExpressionClassifier::Error& error =
-          parenthesized_formals ? classifier->arrow_formal_parameters_error()
-                                : classifier->binding_pattern_error();
+          parenthesized_formals ? classifier()->arrow_formal_parameters_error()
+                                : classifier()->binding_pattern_error();
       ReportClassifierError(error);
       *ok = false;
     }
-    if (is_async && !classifier->is_valid_async_arrow_formal_parameters()) {
+    if (is_async && !classifier()->is_valid_async_arrow_formal_parameters()) {
       const typename ExpressionClassifier::Error& error =
-          classifier->async_arrow_formal_parameters_error();
+          classifier()->async_arrow_formal_parameters_error();
       ReportClassifierError(error);
       *ok = false;
     }
   }
 
-  void ValidateLetPattern(const ExpressionClassifier* classifier, bool* ok) {
-    if (!classifier->is_valid_let_pattern()) {
-      ReportClassifierError(classifier->let_pattern_error());
+  void ValidateLetPattern(bool* ok) {
+    if (!classifier()->is_valid_let_pattern()) {
+      ReportClassifierError(classifier()->let_pattern_error());
       *ok = false;
     }
   }
 
-  void CheckNoTailCallExpressions(const ExpressionClassifier* classifier,
-                                  bool* ok) {
-    if (FLAG_harmony_explicit_tailcalls &&
-        classifier->has_tail_call_expression()) {
-      ReportClassifierError(classifier->tail_call_expression_error());
-      *ok = false;
-    }
-  }
-
-  void ExpressionUnexpectedToken(ExpressionClassifier* classifier) {
+  void ExpressionUnexpectedToken() {
     MessageTemplate::Template message = MessageTemplate::kUnexpectedToken;
     const char* arg;
     Scanner::Location location = scanner()->peek_location();
     GetUnexpectedTokenMessage(peek(), &message, &location, &arg);
-    classifier->RecordExpressionError(location, message, arg);
+    classifier()->RecordExpressionError(location, message, arg);
   }
 
-  void BindingPatternUnexpectedToken(ExpressionClassifier* classifier) {
+  void BindingPatternUnexpectedToken() {
     MessageTemplate::Template message = MessageTemplate::kUnexpectedToken;
     const char* arg;
     Scanner::Location location = scanner()->peek_location();
     GetUnexpectedTokenMessage(peek(), &message, &location, &arg);
-    classifier->RecordBindingPatternError(location, message, arg);
+    classifier()->RecordBindingPatternError(location, message, arg);
   }
 
-  void ArrowFormalParametersUnexpectedToken(ExpressionClassifier* classifier) {
+  void ArrowFormalParametersUnexpectedToken() {
     MessageTemplate::Template message = MessageTemplate::kUnexpectedToken;
     const char* arg;
     Scanner::Location location = scanner()->peek_location();
     GetUnexpectedTokenMessage(peek(), &message, &location, &arg);
-    classifier->RecordArrowFormalParametersError(location, message, arg);
+    classifier()->RecordArrowFormalParametersError(location, message, arg);
   }
 
-  // Recursive descent functions:
+  // Recursive descent functions.
+  // All ParseXXX functions take as the last argument an *ok parameter
+  // which is set to false if parsing failed; it is unchanged otherwise.
+  // By making the 'exception handling' explicit, we are forced to check
+  // for failure at the call sites. The family of CHECK_OK* macros can
+  // be useful for this.
 
   // Parses an identifier that is valid for the current scope, in particular it
   // fails on strict mode future reserved keywords in a strict scope. If
@@ -1078,8 +1103,7 @@
   // "arguments" as identifier even in strict mode (this is needed in cases like
   // "var foo = eval;").
   IdentifierT ParseIdentifier(AllowRestrictedIdentifiers, bool* ok);
-  IdentifierT ParseAndClassifyIdentifier(ExpressionClassifier* classifier,
-                                         bool* ok);
+  IdentifierT ParseAndClassifyIdentifier(bool* ok);
   // Parses an identifier or a strict mode future reserved word, and indicate
   // whether it is strict mode future reserved. Allows passing in function_kind
   // for the case of parsing the identifier in a function expression, where the
@@ -1098,76 +1122,173 @@
 
   ExpressionT ParseRegExpLiteral(bool* ok);
 
-  ExpressionT ParsePrimaryExpression(ExpressionClassifier* classifier,
-                                     bool* is_async, bool* ok);
-  ExpressionT ParsePrimaryExpression(ExpressionClassifier* classifier,
-                                     bool* ok) {
+  ExpressionT ParsePrimaryExpression(bool* is_async, bool* ok);
+  ExpressionT ParsePrimaryExpression(bool* ok) {
     bool is_async;
-    return ParsePrimaryExpression(classifier, &is_async, ok);
-  }
-  ExpressionT ParseExpression(bool accept_IN, bool* ok);
-  ExpressionT ParseExpression(bool accept_IN, ExpressionClassifier* classifier,
-                              bool* ok);
-  ExpressionT ParseArrayLiteral(ExpressionClassifier* classifier, bool* ok);
-  ExpressionT ParsePropertyName(IdentifierT* name, bool* is_get, bool* is_set,
-                                bool* is_computed_name,
-                                ExpressionClassifier* classifier, bool* ok);
-  ExpressionT ParseObjectLiteral(ExpressionClassifier* classifier, bool* ok);
-  ObjectLiteralPropertyT ParsePropertyDefinition(
-      ObjectLiteralCheckerBase* checker, bool in_class, bool has_extends,
-      MethodKind kind, bool* is_computed_name, bool* has_seen_constructor,
-      ExpressionClassifier* classifier, IdentifierT* name, bool* ok);
-  typename Traits::Type::ExpressionList ParseArguments(
-      Scanner::Location* first_spread_pos, bool maybe_arrow,
-      ExpressionClassifier* classifier, bool* ok);
-  typename Traits::Type::ExpressionList ParseArguments(
-      Scanner::Location* first_spread_pos, ExpressionClassifier* classifier,
-      bool* ok) {
-    return ParseArguments(first_spread_pos, false, classifier, ok);
+    return ParsePrimaryExpression(&is_async, ok);
   }
 
-  ExpressionT ParseAssignmentExpression(bool accept_IN,
-                                        ExpressionClassifier* classifier,
-                                        bool* ok);
-  ExpressionT ParseYieldExpression(bool accept_IN,
-                                   ExpressionClassifier* classifier, bool* ok);
-  ExpressionT ParseTailCallExpression(ExpressionClassifier* classifier,
-                                      bool* ok);
-  ExpressionT ParseConditionalExpression(bool accept_IN,
-                                         ExpressionClassifier* classifier,
-                                         bool* ok);
-  ExpressionT ParseBinaryExpression(int prec, bool accept_IN,
-                                    ExpressionClassifier* classifier, bool* ok);
-  ExpressionT ParseUnaryExpression(ExpressionClassifier* classifier, bool* ok);
-  ExpressionT ParsePostfixExpression(ExpressionClassifier* classifier,
-                                     bool* ok);
-  ExpressionT ParseLeftHandSideExpression(ExpressionClassifier* classifier,
-                                          bool* ok);
-  ExpressionT ParseMemberWithNewPrefixesExpression(
-      ExpressionClassifier* classifier, bool* is_async, bool* ok);
-  ExpressionT ParseMemberExpression(ExpressionClassifier* classifier,
-                                    bool* is_async, bool* ok);
-  ExpressionT ParseMemberExpressionContinuation(
-      ExpressionT expression, bool* is_async, ExpressionClassifier* classifier,
-      bool* ok);
+  // This method wraps the parsing of the expression inside a new expression
+  // classifier and calls RewriteNonPattern if parsing is successful.
+  // It should be used whenever we're parsing an expression that will be
+  // used as a non-pattern (i.e., in most cases).
+  V8_INLINE ExpressionT ParseExpression(bool accept_IN, bool* ok);
+
+  // This method does not wrap the parsing of the expression inside a
+  // new expression classifier; it uses the top-level classifier instead.
+  // It should be used whenever we're parsing something with the "cover"
+  // grammar that recognizes both patterns and non-patterns (which roughly
+  // corresponds to what's inside the parentheses generated by the symbol
+  // "CoverParenthesizedExpressionAndArrowParameterList" in the ES 2017
+  // specification).
+  ExpressionT ParseExpressionCoverGrammar(bool accept_IN, bool* ok);
+
+  ExpressionT ParseArrayLiteral(bool* ok);
+
+  enum class PropertyKind {
+    kAccessorProperty,
+    kValueProperty,
+    kShorthandProperty,
+    kMethodProperty,
+    kClassField,
+    kNotSet
+  };
+
+  bool SetPropertyKindFromToken(Token::Value token, PropertyKind* kind);
+  ExpressionT ParsePropertyName(IdentifierT* name, PropertyKind* kind,
+                                bool* is_generator, bool* is_get, bool* is_set,
+                                bool* is_async, bool* is_computed_name,
+                                bool* ok);
+  ExpressionT ParseObjectLiteral(bool* ok);
+  ClassLiteralPropertyT ParseClassPropertyDefinition(
+      ClassLiteralChecker* checker, bool has_extends, bool* is_computed_name,
+      bool* has_seen_constructor, bool* ok);
+  FunctionLiteralT ParseClassFieldForInitializer(bool has_initializer,
+                                                 bool* ok);
+  ObjectLiteralPropertyT ParseObjectPropertyDefinition(
+      ObjectLiteralChecker* checker, bool* is_computed_name, bool* ok);
+  ExpressionListT ParseArguments(Scanner::Location* first_spread_pos,
+                                 bool maybe_arrow, bool* ok);
+  ExpressionListT ParseArguments(Scanner::Location* first_spread_pos,
+                                 bool* ok) {
+    return ParseArguments(first_spread_pos, false, ok);
+  }
+
+  ExpressionT ParseAssignmentExpression(bool accept_IN, bool* ok);
+  ExpressionT ParseYieldExpression(bool accept_IN, bool* ok);
+  ExpressionT ParseConditionalExpression(bool accept_IN, bool* ok);
+  ExpressionT ParseBinaryExpression(int prec, bool accept_IN, bool* ok);
+  ExpressionT ParseUnaryExpression(bool* ok);
+  ExpressionT ParsePostfixExpression(bool* ok);
+  ExpressionT ParseLeftHandSideExpression(bool* ok);
+  ExpressionT ParseMemberWithNewPrefixesExpression(bool* is_async, bool* ok);
+  ExpressionT ParseMemberExpression(bool* is_async, bool* ok);
+  ExpressionT ParseMemberExpressionContinuation(ExpressionT expression,
+                                                bool* is_async, bool* ok);
   ExpressionT ParseArrowFunctionLiteral(bool accept_IN,
                                         const FormalParametersT& parameters,
-                                        bool is_async,
-                                        const ExpressionClassifier& classifier,
                                         bool* ok);
-  ExpressionT ParseTemplateLiteral(ExpressionT tag, int start,
-                                   ExpressionClassifier* classifier, bool* ok);
+  void ParseAsyncFunctionBody(Scope* scope, StatementListT body,
+                              FunctionKind kind, FunctionBodyType type,
+                              bool accept_IN, int pos, bool* ok);
+  ExpressionT ParseAsyncFunctionLiteral(bool* ok);
+  ExpressionT ParseClassLiteral(IdentifierT name,
+                                Scanner::Location class_name_location,
+                                bool name_is_strict_reserved,
+                                int class_token_pos, bool* ok);
+  ExpressionT ParseTemplateLiteral(ExpressionT tag, int start, bool* ok);
   ExpressionT ParseSuperExpression(bool is_new, bool* ok);
   ExpressionT ParseNewTargetExpression(bool* ok);
 
-  void ParseFormalParameter(FormalParametersT* parameters,
-                            ExpressionClassifier* classifier, bool* ok);
-  void ParseFormalParameterList(FormalParametersT* parameters,
-                                ExpressionClassifier* classifier, bool* ok);
+  void ParseFormalParameter(FormalParametersT* parameters, bool* ok);
+  void ParseFormalParameterList(FormalParametersT* parameters, bool* ok);
   void CheckArityRestrictions(int param_count, FunctionKind function_type,
                               bool has_rest, int formals_start_pos,
                               int formals_end_pos, bool* ok);
 
+  BlockT ParseVariableDeclarations(VariableDeclarationContext var_context,
+                                   DeclarationParsingResult* parsing_result,
+                                   ZoneList<const AstRawString*>* names,
+                                   bool* ok);
+  StatementT ParseAsyncFunctionDeclaration(ZoneList<const AstRawString*>* names,
+                                           bool default_export, bool* ok);
+  StatementT ParseFunctionDeclaration(bool* ok);
+  StatementT ParseHoistableDeclaration(ZoneList<const AstRawString*>* names,
+                                       bool default_export, bool* ok);
+  StatementT ParseHoistableDeclaration(int pos, ParseFunctionFlags flags,
+                                       ZoneList<const AstRawString*>* names,
+                                       bool default_export, bool* ok);
+  StatementT ParseClassDeclaration(ZoneList<const AstRawString*>* names,
+                                   bool default_export, bool* ok);
+  StatementT ParseNativeDeclaration(bool* ok);
+
+  // Under some circumstances, we allow preparsing to abort if the preparsed
+  // function is "long and trivial", and fully parse instead. Our current
+  // definition of "long and trivial" is:
+  // - over kLazyParseTrialLimit statements
+  // - all starting with an identifier (i.e., no if, for, while, etc.)
+  static const int kLazyParseTrialLimit = 200;
+
+  // TODO(nikolaos, marja): The first argument should not really be passed
+  // by value. The method is expected to add the parsed statements to the
+  // list. This works because in the case of the parser, StatementListT is
+  // a pointer whereas the preparser does not really modify the body.
+  V8_INLINE void ParseStatementList(StatementListT body, int end_token,
+                                    bool* ok) {
+    LazyParsingResult result = ParseStatementList(body, end_token, false, ok);
+    USE(result);
+    DCHECK_EQ(result, kLazyParsingComplete);
+  }
+  LazyParsingResult ParseStatementList(StatementListT body, int end_token,
+                                       bool may_abort, bool* ok);
+  StatementT ParseStatementListItem(bool* ok);
+  StatementT ParseStatement(ZoneList<const AstRawString*>* labels,
+                            AllowLabelledFunctionStatement allow_function,
+                            bool* ok);
+  StatementT ParseStatementAsUnlabelled(ZoneList<const AstRawString*>* labels,
+                                        bool* ok);
+  BlockT ParseBlock(ZoneList<const AstRawString*>* labels, bool* ok);
+
+  // Parse a SubStatement in strict mode, or with an extra block scope in
+  // sloppy mode to handle
+  // ES#sec-functiondeclarations-in-ifstatement-statement-clauses
+  // The legacy parameter indicates whether function declarations are
+  // banned by the ES2015 specification in this location, and they are being
+  // permitted here to match previous V8 behavior.
+  StatementT ParseScopedStatement(ZoneList<const AstRawString*>* labels,
+                                  bool legacy, bool* ok);
+
+  StatementT ParseVariableStatement(VariableDeclarationContext var_context,
+                                    ZoneList<const AstRawString*>* names,
+                                    bool* ok);
+
+  // Magical syntax support.
+  ExpressionT ParseV8Intrinsic(bool* ok);
+
+  ExpressionT ParseDoExpression(bool* ok);
+
+  StatementT ParseDebuggerStatement(bool* ok);
+
+  StatementT ParseExpressionOrLabelledStatement(
+      ZoneList<const AstRawString*>* labels,
+      AllowLabelledFunctionStatement allow_function, bool* ok);
+  StatementT ParseIfStatement(ZoneList<const AstRawString*>* labels, bool* ok);
+  StatementT ParseContinueStatement(bool* ok);
+  StatementT ParseBreakStatement(ZoneList<const AstRawString*>* labels,
+                                 bool* ok);
+  StatementT ParseReturnStatement(bool* ok);
+  StatementT ParseWithStatement(ZoneList<const AstRawString*>* labels,
+                                bool* ok);
+  StatementT ParseDoWhileStatement(ZoneList<const AstRawString*>* labels,
+                                   bool* ok);
+  StatementT ParseWhileStatement(ZoneList<const AstRawString*>* labels,
+                                 bool* ok);
+  StatementT ParseThrowStatement(bool* ok);
+  StatementT ParseSwitchStatement(ZoneList<const AstRawString*>* labels,
+                                  bool* ok);
+  StatementT ParseTryStatement(bool* ok);
+  StatementT ParseForStatement(ZoneList<const AstRawString*>* labels, bool* ok);
+
   bool IsNextLetKeyword();
   bool IsTrivialExpression();
 
@@ -1184,9 +1305,9 @@
   bool IsValidReferenceExpression(ExpressionT expression);
 
   bool IsAssignableIdentifier(ExpressionT expression) {
-    if (!Traits::IsIdentifier(expression)) return false;
+    if (!impl()->IsIdentifier(expression)) return false;
     if (is_strict(language_mode()) &&
-        Traits::IsEvalOrArguments(Traits::AsIdentifier(expression))) {
+        impl()->IsEvalOrArguments(impl()->AsIdentifier(expression))) {
       return false;
     }
     return true;
@@ -1201,8 +1322,8 @@
   // forwards the information to scope.
   Call::PossiblyEval CheckPossibleEvalCall(ExpressionT expression,
                                            Scope* scope) {
-    if (Traits::IsIdentifier(expression) &&
-        Traits::IsEval(Traits::AsIdentifier(expression))) {
+    if (impl()->IsIdentifier(expression) &&
+        impl()->IsEval(impl()->AsIdentifier(expression))) {
       scope->RecordEvalCall();
       if (is_sloppy(scope->language_mode())) {
         // For sloppy scopes we also have to record the call at function level,
@@ -1214,56 +1335,33 @@
     return Call::NOT_EVAL;
   }
 
-  // Used to validate property names in object literals and class literals
-  enum PropertyKind {
-    kAccessorProperty,
-    kValueProperty,
-    kMethodProperty
-  };
-
-  class ObjectLiteralCheckerBase {
-   public:
-    explicit ObjectLiteralCheckerBase(ParserBase* parser) : parser_(parser) {}
-
-    virtual void CheckProperty(Token::Value property, PropertyKind type,
-                               MethodKind method_type,
-                               ExpressionClassifier* classifier, bool* ok) = 0;
-
-    virtual ~ObjectLiteralCheckerBase() {}
-
-   protected:
-    ParserBase* parser() const { return parser_; }
-    Scanner* scanner() const { return parser_->scanner(); }
-
-   private:
-    ParserBase* parser_;
-  };
-
   // Validation per ES6 object literals.
-  class ObjectLiteralChecker : public ObjectLiteralCheckerBase {
+  class ObjectLiteralChecker {
    public:
     explicit ObjectLiteralChecker(ParserBase* parser)
-        : ObjectLiteralCheckerBase(parser), has_seen_proto_(false) {}
+        : parser_(parser), has_seen_proto_(false) {}
 
-    void CheckProperty(Token::Value property, PropertyKind type,
-                       MethodKind method_type, ExpressionClassifier* classifier,
-                       bool* ok) override;
+    void CheckDuplicateProto(Token::Value property);
 
    private:
     bool IsProto() { return this->scanner()->LiteralMatches("__proto__", 9); }
 
+    ParserBase* parser() const { return parser_; }
+    Scanner* scanner() const { return parser_->scanner(); }
+
+    ParserBase* parser_;
     bool has_seen_proto_;
   };
 
   // Validation per ES6 class literals.
-  class ClassLiteralChecker : public ObjectLiteralCheckerBase {
+  class ClassLiteralChecker {
    public:
     explicit ClassLiteralChecker(ParserBase* parser)
-        : ObjectLiteralCheckerBase(parser), has_seen_constructor_(false) {}
+        : parser_(parser), has_seen_constructor_(false) {}
 
-    void CheckProperty(Token::Value property, PropertyKind type,
-                       MethodKind method_type, ExpressionClassifier* classifier,
-                       bool* ok) override;
+    void CheckClassMethodName(Token::Value property, PropertyKind type,
+                              bool is_generator, bool is_async, bool is_static,
+                              bool* ok);
 
    private:
     bool IsConstructor() {
@@ -1273,6 +1371,10 @@
       return this->scanner()->LiteralMatches("prototype", 9);
     }
 
+    ParserBase* parser() const { return parser_; }
+    Scanner* scanner() const { return parser_->scanner(); }
+
+    ParserBase* parser_;
     bool has_seen_constructor_;
   };
 
@@ -1281,19 +1383,63 @@
   }
   Scope* scope() const { return scope_state_->scope(); }
 
+  // Stack of expression classifiers.
+  // The top of the stack is always pointed to by classifier().
+  V8_INLINE ExpressionClassifier* classifier() const {
+    DCHECK_NOT_NULL(classifier_);
+    return classifier_;
+  }
+
+  // Accumulates the classifier that is on top of the stack (inner) to
+  // the one that is right below (outer) and pops the inner.
+  V8_INLINE void Accumulate(unsigned productions,
+                            bool merge_non_patterns = true) {
+    DCHECK_NOT_NULL(classifier_);
+    ExpressionClassifier* previous = classifier_->previous();
+    DCHECK_NOT_NULL(previous);
+    previous->Accumulate(classifier_, productions, merge_non_patterns);
+    classifier_ = previous;
+  }
+
+  // Pops and discards the classifier that is on top of the stack
+  // without accumulating.
+  V8_INLINE void Discard() {
+    DCHECK_NOT_NULL(classifier_);
+    classifier_->Discard();
+    classifier_ = classifier_->previous();
+  }
+
+  // Accumulate errors that can be arbitrarily deep in an expression.
+  // These correspond to the ECMAScript spec's 'Contains' operation
+  // on productions. This includes:
+  //
+  // - YieldExpression is disallowed in arrow parameters in a generator.
+  // - AwaitExpression is disallowed in arrow parameters in an async function.
+  // - AwaitExpression is disallowed in async arrow parameters.
+  //
+  V8_INLINE void AccumulateFormalParameterContainmentErrors() {
+    Accumulate(ExpressionClassifier::FormalParameterInitializerProduction |
+               ExpressionClassifier::AsyncArrowFormalParametersProduction);
+  }
+
+  // Parser base's protected field members.
+
   ScopeState* scope_state_;        // Scope stack.
   FunctionState* function_state_;  // Function state stack.
   v8::Extension* extension_;
   FuncNameInferrer* fni_;
   AstValueFactory* ast_value_factory_;  // Not owned.
-  typename Traits::Type::Factory ast_node_factory_;
+  typename Types::Factory ast_node_factory_;
   ParserRecorder* log_;
   Mode mode_;
   bool parsing_module_;
   uintptr_t stack_limit_;
 
+  // Parser base's private field members.
+
  private:
   Zone* zone_;
+  ExpressionClassifier* classifier_;
 
   Scanner* scanner_;
   bool stack_overflow_;
@@ -1308,6 +1454,7 @@
   bool allow_harmony_async_await_;
   bool allow_harmony_restrictive_generators_;
   bool allow_harmony_trailing_commas_;
+  bool allow_harmony_class_fields_;
 
   friend class DiscardableZoneScope;
 };
@@ -1315,12 +1462,12 @@
 template <typename Impl>
 ParserBase<Impl>::FunctionState::FunctionState(
     FunctionState** function_state_stack, ScopeState** scope_stack,
-    Scope* scope, FunctionKind kind)
+    DeclarationScope* scope)
     : ScopeState(scope_stack, scope),
       next_materialized_literal_index_(0),
       expected_property_count_(0),
-      kind_(kind),
-      generator_object_variable_(NULL),
+      generator_object_variable_(nullptr),
+      promise_variable_(nullptr),
       function_state_stack_(function_state_stack),
       outer_function_state_(*function_state_stack),
       destructuring_assignments_to_rewrite_(16, scope->zone()),
@@ -1413,19 +1560,18 @@
     MessageTemplate::Template message) {
   const char* arg;
   GetUnexpectedTokenMessage(token, &message, &source_location, &arg);
-  Traits::ReportMessageAt(source_location, message, arg);
+  impl()->ReportMessageAt(source_location, message, arg);
 }
 
 template <typename Impl>
 typename ParserBase<Impl>::IdentifierT ParserBase<Impl>::ParseIdentifier(
     AllowRestrictedIdentifiers allow_restricted_identifiers, bool* ok) {
   ExpressionClassifier classifier(this);
-  auto result =
-      ParseAndClassifyIdentifier(&classifier, CHECK_OK_CUSTOM(EmptyIdentifier));
+  auto result = ParseAndClassifyIdentifier(CHECK_OK_CUSTOM(EmptyIdentifier));
 
   if (allow_restricted_identifiers == kDontAllowRestrictedIdentifiers) {
-    ValidateAssignmentPattern(&classifier, CHECK_OK_CUSTOM(EmptyIdentifier));
-    ValidateBindingPattern(&classifier, CHECK_OK_CUSTOM(EmptyIdentifier));
+    ValidateAssignmentPattern(CHECK_OK_CUSTOM(EmptyIdentifier));
+    ValidateBindingPattern(CHECK_OK_CUSTOM(EmptyIdentifier));
   }
 
   return result;
@@ -1433,33 +1579,32 @@
 
 template <typename Impl>
 typename ParserBase<Impl>::IdentifierT
-ParserBase<Impl>::ParseAndClassifyIdentifier(ExpressionClassifier* classifier,
-                                             bool* ok) {
+ParserBase<Impl>::ParseAndClassifyIdentifier(bool* ok) {
   Token::Value next = Next();
   if (next == Token::IDENTIFIER || next == Token::ASYNC ||
       (next == Token::AWAIT && !parsing_module_ && !is_async_function())) {
-    IdentifierT name = this->GetSymbol(scanner());
+    IdentifierT name = impl()->GetSymbol();
     // When this function is used to read a formal parameter, we don't always
     // know whether the function is going to be strict or sloppy.  Indeed for
     // arrow functions we don't always know that the identifier we are reading
     // is actually a formal parameter.  Therefore besides the errors that we
     // must detect because we know we're in strict mode, we also record any
     // error that we might make in the future once we know the language mode.
-    if (this->IsEvalOrArguments(name)) {
-      classifier->RecordStrictModeFormalParameterError(
+    if (impl()->IsEvalOrArguments(name)) {
+      classifier()->RecordStrictModeFormalParameterError(
           scanner()->location(), MessageTemplate::kStrictEvalArguments);
       if (is_strict(language_mode())) {
-        classifier->RecordBindingPatternError(
+        classifier()->RecordBindingPatternError(
             scanner()->location(), MessageTemplate::kStrictEvalArguments);
       }
     } else if (next == Token::AWAIT) {
-      classifier->RecordAsyncArrowFormalParametersError(
+      classifier()->RecordAsyncArrowFormalParametersError(
           scanner()->location(), MessageTemplate::kAwaitBindingIdentifier);
     }
 
-    if (classifier->duplicate_finder() != nullptr &&
-        scanner()->FindSymbol(classifier->duplicate_finder(), 1) != 0) {
-      classifier->RecordDuplicateFormalParameterError(scanner()->location());
+    if (classifier()->duplicate_finder() != nullptr &&
+        scanner()->FindSymbol(classifier()->duplicate_finder(), 1) != 0) {
+      classifier()->RecordDuplicateFormalParameterError(scanner()->location());
     }
     return name;
   } else if (is_sloppy(language_mode()) &&
@@ -1467,25 +1612,25 @@
               next == Token::ESCAPED_STRICT_RESERVED_WORD ||
               next == Token::LET || next == Token::STATIC ||
               (next == Token::YIELD && !is_generator()))) {
-    classifier->RecordStrictModeFormalParameterError(
+    classifier()->RecordStrictModeFormalParameterError(
         scanner()->location(), MessageTemplate::kUnexpectedStrictReserved);
     if (next == Token::ESCAPED_STRICT_RESERVED_WORD &&
         is_strict(language_mode())) {
       ReportUnexpectedToken(next);
       *ok = false;
-      return Traits::EmptyIdentifier();
+      return impl()->EmptyIdentifier();
     }
     if (next == Token::LET ||
         (next == Token::ESCAPED_STRICT_RESERVED_WORD &&
          scanner()->is_literal_contextual_keyword(CStrVector("let")))) {
-      classifier->RecordLetPatternError(scanner()->location(),
-                                        MessageTemplate::kLetInLexicalBinding);
+      classifier()->RecordLetPatternError(
+          scanner()->location(), MessageTemplate::kLetInLexicalBinding);
     }
-    return this->GetSymbol(scanner());
+    return impl()->GetSymbol();
   } else {
-    this->ReportUnexpectedToken(next);
+    ReportUnexpectedToken(next);
     *ok = false;
-    return Traits::EmptyIdentifier();
+    return impl()->EmptyIdentifier();
   }
 }
 
@@ -1505,10 +1650,10 @@
   } else {
     ReportUnexpectedToken(next);
     *ok = false;
-    return Traits::EmptyIdentifier();
+    return impl()->EmptyIdentifier();
   }
 
-  return this->GetSymbol(scanner());
+  return impl()->GetSymbol();
 }
 
 template <typename Impl>
@@ -1521,12 +1666,12 @@
       next != Token::FUTURE_STRICT_RESERVED_WORD &&
       next != Token::ESCAPED_KEYWORD &&
       next != Token::ESCAPED_STRICT_RESERVED_WORD && !Token::IsKeyword(next)) {
-    this->ReportUnexpectedToken(next);
+    ReportUnexpectedToken(next);
     *ok = false;
-    return Traits::EmptyIdentifier();
+    return impl()->EmptyIdentifier();
   }
 
-  return this->GetSymbol(scanner());
+  return impl()->GetSymbol();
 }
 
 template <typename Impl>
@@ -1537,18 +1682,18 @@
     Next();
     ReportMessage(MessageTemplate::kUnterminatedRegExp);
     *ok = false;
-    return Traits::EmptyExpression();
+    return impl()->EmptyExpression();
   }
 
   int literal_index = function_state_->NextMaterializedLiteralIndex();
 
-  IdentifierT js_pattern = this->GetNextSymbol(scanner());
+  IdentifierT js_pattern = impl()->GetNextSymbol();
   Maybe<RegExp::Flags> flags = scanner()->ScanRegExpFlags();
   if (flags.IsNothing()) {
     Next();
     ReportMessage(MessageTemplate::kMalformedRegExpFlags);
     *ok = false;
-    return Traits::EmptyExpression();
+    return impl()->EmptyExpression();
   }
   int js_flags = flags.FromJust();
   Next();
@@ -1557,7 +1702,7 @@
 
 template <typename Impl>
 typename ParserBase<Impl>::ExpressionT ParserBase<Impl>::ParsePrimaryExpression(
-    ExpressionClassifier* classifier, bool* is_async, bool* ok) {
+    bool* is_async, bool* ok) {
   // PrimaryExpression ::
   //   'this'
   //   'null'
@@ -1573,14 +1718,14 @@
   //   '(' Expression ')'
   //   TemplateLiteral
   //   do Block
-  //   AsyncFunctionExpression
+  //   AsyncFunctionLiteral
 
   int beg_pos = peek_position();
   switch (peek()) {
     case Token::THIS: {
-      BindingPatternUnexpectedToken(classifier);
+      BindingPatternUnexpectedToken();
       Consume(Token::THIS);
-      return this->ThisExpression(beg_pos);
+      return impl()->ThisExpression(beg_pos);
     }
 
     case Token::NULL_LITERAL:
@@ -1588,15 +1733,15 @@
     case Token::FALSE_LITERAL:
     case Token::SMI:
     case Token::NUMBER:
-      BindingPatternUnexpectedToken(classifier);
-      return this->ExpressionFromLiteral(Next(), beg_pos, scanner(), factory());
+      BindingPatternUnexpectedToken();
+      return impl()->ExpressionFromLiteral(Next(), beg_pos);
 
     case Token::ASYNC:
       if (allow_harmony_async_await() &&
           !scanner()->HasAnyLineTerminatorAfterNext() &&
           PeekAhead() == Token::FUNCTION) {
         Consume(Token::ASYNC);
-        return impl()->ParseAsyncFunctionExpression(CHECK_OK);
+        return ParseAsyncFunctionLiteral(CHECK_OK);
       }
       // CoverCallExpressionAndAsyncArrowHead
       *is_async = true;
@@ -1609,28 +1754,28 @@
     case Token::ESCAPED_STRICT_RESERVED_WORD:
     case Token::FUTURE_STRICT_RESERVED_WORD: {
       // Using eval or arguments in this context is OK even in strict mode.
-      IdentifierT name = ParseAndClassifyIdentifier(classifier, CHECK_OK);
-      return this->ExpressionFromIdentifier(name, beg_pos,
-                                            scanner()->location().end_pos);
+      IdentifierT name = ParseAndClassifyIdentifier(CHECK_OK);
+      return impl()->ExpressionFromIdentifier(name, beg_pos,
+                                              scanner()->location().end_pos);
     }
 
     case Token::STRING: {
-      BindingPatternUnexpectedToken(classifier);
+      BindingPatternUnexpectedToken();
       Consume(Token::STRING);
-      return this->ExpressionFromString(beg_pos, scanner(), factory());
+      return impl()->ExpressionFromString(beg_pos);
     }
 
     case Token::ASSIGN_DIV:
     case Token::DIV:
-      classifier->RecordBindingPatternError(
+      classifier()->RecordBindingPatternError(
           scanner()->peek_location(), MessageTemplate::kUnexpectedTokenRegExp);
-      return this->ParseRegExpLiteral(ok);
+      return ParseRegExpLiteral(ok);
 
     case Token::LBRACK:
-      return this->ParseArrayLiteral(classifier, ok);
+      return ParseArrayLiteral(ok);
 
     case Token::LBRACE:
-      return this->ParseObjectLiteral(classifier, ok);
+      return ParseObjectLiteral(ok);
 
     case Token::LPAREN: {
       // Arrow function formal parameters are either a single identifier or a
@@ -1638,61 +1783,34 @@
       // Parentheses are not valid on the LHS of a BindingPattern, so we use the
       // is_valid_binding_pattern() check to detect multiple levels of
       // parenthesization.
-      bool pattern_error = !classifier->is_valid_binding_pattern();
-      classifier->RecordPatternError(scanner()->peek_location(),
-                                     MessageTemplate::kUnexpectedToken,
-                                     Token::String(Token::LPAREN));
-      if (pattern_error) ArrowFormalParametersUnexpectedToken(classifier);
+      bool pattern_error = !classifier()->is_valid_binding_pattern();
+      classifier()->RecordPatternError(scanner()->peek_location(),
+                                       MessageTemplate::kUnexpectedToken,
+                                       Token::String(Token::LPAREN));
+      if (pattern_error) ArrowFormalParametersUnexpectedToken();
       Consume(Token::LPAREN);
       if (Check(Token::RPAREN)) {
         // ()=>x.  The continuation that looks for the => is in
         // ParseAssignmentExpression.
-        classifier->RecordExpressionError(scanner()->location(),
-                                          MessageTemplate::kUnexpectedToken,
-                                          Token::String(Token::RPAREN));
+        classifier()->RecordExpressionError(scanner()->location(),
+                                            MessageTemplate::kUnexpectedToken,
+                                            Token::String(Token::RPAREN));
         return factory()->NewEmptyParentheses(beg_pos);
-      } else if (Check(Token::ELLIPSIS)) {
-        // (...x)=>x.  The continuation that looks for the => is in
-        // ParseAssignmentExpression.
-        int ellipsis_pos = position();
-        int expr_pos = peek_position();
-        classifier->RecordExpressionError(scanner()->location(),
-                                          MessageTemplate::kUnexpectedToken,
-                                          Token::String(Token::ELLIPSIS));
-        classifier->RecordNonSimpleParameter();
-        ExpressionClassifier binding_classifier(this);
-        ExpressionT expr = this->ParseAssignmentExpression(
-            true, &binding_classifier, CHECK_OK);
-        classifier->Accumulate(&binding_classifier,
-                               ExpressionClassifier::AllProductions);
-        if (!this->IsIdentifier(expr) && !IsValidPattern(expr)) {
-          classifier->RecordArrowFormalParametersError(
-              Scanner::Location(ellipsis_pos, scanner()->location().end_pos),
-              MessageTemplate::kInvalidRestParameter);
-        }
-        if (peek() == Token::COMMA) {
-          ReportMessageAt(scanner()->peek_location(),
-                          MessageTemplate::kParamAfterRest);
-          *ok = false;
-          return this->EmptyExpression();
-        }
-        Expect(Token::RPAREN, CHECK_OK);
-        return factory()->NewSpread(expr, ellipsis_pos, expr_pos);
       }
       // Heuristically try to detect immediately called functions before
       // seeing the call parentheses.
       function_state_->set_next_function_is_parenthesized(peek() ==
                                                           Token::FUNCTION);
-      ExpressionT expr = this->ParseExpression(true, classifier, CHECK_OK);
+      ExpressionT expr = ParseExpressionCoverGrammar(true, CHECK_OK);
       Expect(Token::RPAREN, CHECK_OK);
       return expr;
     }
 
     case Token::CLASS: {
-      BindingPatternUnexpectedToken(classifier);
+      BindingPatternUnexpectedToken();
       Consume(Token::CLASS);
-      int class_token_position = position();
-      IdentifierT name = this->EmptyIdentifier();
+      int class_token_pos = position();
+      IdentifierT name = impl()->EmptyIdentifier();
       bool is_strict_reserved_name = false;
       Scanner::Location class_name_location = Scanner::Location::invalid();
       if (peek_any_identifier()) {
@@ -1700,28 +1818,26 @@
                                                    CHECK_OK);
         class_name_location = scanner()->location();
       }
-      return impl()->ParseClassLiteral(classifier, name, class_name_location,
-                                       is_strict_reserved_name,
-                                       class_token_position, ok);
+      return ParseClassLiteral(name, class_name_location,
+                               is_strict_reserved_name, class_token_pos, ok);
     }
 
     case Token::TEMPLATE_SPAN:
     case Token::TEMPLATE_TAIL:
-      BindingPatternUnexpectedToken(classifier);
-      return this->ParseTemplateLiteral(Traits::NoTemplateTag(), beg_pos,
-                                        classifier, ok);
+      BindingPatternUnexpectedToken();
+      return ParseTemplateLiteral(impl()->NoTemplateTag(), beg_pos, ok);
 
     case Token::MOD:
       if (allow_natives() || extension_ != NULL) {
-        BindingPatternUnexpectedToken(classifier);
-        return impl()->ParseV8Intrinsic(ok);
+        BindingPatternUnexpectedToken();
+        return ParseV8Intrinsic(ok);
       }
       break;
 
     case Token::DO:
       if (allow_harmony_do_expressions()) {
-        BindingPatternUnexpectedToken(classifier);
-        return impl()->ParseDoExpression(ok);
+        BindingPatternUnexpectedToken();
+        return ParseDoExpression(ok);
       }
       break;
 
@@ -1731,78 +1847,71 @@
 
   ReportUnexpectedToken(Next());
   *ok = false;
-  return this->EmptyExpression();
+  return impl()->EmptyExpression();
 }
 
 template <typename Impl>
 typename ParserBase<Impl>::ExpressionT ParserBase<Impl>::ParseExpression(
     bool accept_IN, bool* ok) {
   ExpressionClassifier classifier(this);
-  ExpressionT result = ParseExpression(accept_IN, &classifier, CHECK_OK);
-  impl()->RewriteNonPattern(&classifier, CHECK_OK);
+  ExpressionT result = ParseExpressionCoverGrammar(accept_IN, CHECK_OK);
+  impl()->RewriteNonPattern(CHECK_OK);
   return result;
 }
 
 template <typename Impl>
-typename ParserBase<Impl>::ExpressionT ParserBase<Impl>::ParseExpression(
-    bool accept_IN, ExpressionClassifier* classifier, bool* ok) {
+typename ParserBase<Impl>::ExpressionT
+ParserBase<Impl>::ParseExpressionCoverGrammar(bool accept_IN, bool* ok) {
   // Expression ::
   //   AssignmentExpression
   //   Expression ',' AssignmentExpression
 
-  ExpressionT result;
-  {
+  ExpressionT result = impl()->EmptyExpression();
+  while (true) {
+    int comma_pos = position();
     ExpressionClassifier binding_classifier(this);
-    result = this->ParseAssignmentExpression(accept_IN, &binding_classifier,
-                                             CHECK_OK);
-    classifier->Accumulate(&binding_classifier,
-                           ExpressionClassifier::AllProductions);
-  }
-  bool is_simple_parameter_list = this->IsIdentifier(result);
-  bool seen_rest = false;
-  while (peek() == Token::COMMA) {
-    CheckNoTailCallExpressions(classifier, CHECK_OK);
-    if (seen_rest) {
-      // At this point the production can't possibly be valid, but we don't know
-      // which error to signal.
-      classifier->RecordArrowFormalParametersError(
-          scanner()->peek_location(), MessageTemplate::kParamAfterRest);
+    ExpressionT right;
+    if (Check(Token::ELLIPSIS)) {
+      // 'x, y, ...z' in CoverParenthesizedExpressionAndArrowParameterList is
+      // valid only as the formal parameters of '(x, y, ...z) => foo', and is
+      // not itself a valid expression.
+      classifier()->RecordExpressionError(scanner()->location(),
+                                          MessageTemplate::kUnexpectedToken,
+                                          Token::String(Token::ELLIPSIS));
+      int ellipsis_pos = position();
+      int pattern_pos = peek_position();
+      ExpressionT pattern = ParsePrimaryExpression(CHECK_OK);
+      ValidateBindingPattern(CHECK_OK);
+      right = factory()->NewSpread(pattern, ellipsis_pos, pattern_pos);
+    } else {
+      right = ParseAssignmentExpression(accept_IN, CHECK_OK);
     }
-    Consume(Token::COMMA);
-    bool is_rest = false;
+    // No need to accumulate binding pattern-related errors, since
+    // an Expression can't be a binding pattern anyway.
+    impl()->Accumulate(ExpressionClassifier::AllProductions &
+                       ~(ExpressionClassifier::BindingPatternProduction |
+                         ExpressionClassifier::LetPatternProduction));
+    if (!impl()->IsIdentifier(right)) classifier()->RecordNonSimpleParameter();
+    if (impl()->IsEmptyExpression(result)) {
+      // First time through the loop.
+      result = right;
+    } else {
+      result =
+          factory()->NewBinaryOperation(Token::COMMA, result, right, comma_pos);
+    }
+
+    if (!Check(Token::COMMA)) break;
+
+    if (right->IsSpread()) {
+      classifier()->RecordArrowFormalParametersError(
+          scanner()->location(), MessageTemplate::kParamAfterRest);
+    }
+
     if (allow_harmony_trailing_commas() && peek() == Token::RPAREN &&
         PeekAhead() == Token::ARROW) {
       // a trailing comma is allowed at the end of an arrow parameter list
       break;
-    } else if (peek() == Token::ELLIPSIS) {
-      // 'x, y, ...z' in CoverParenthesizedExpressionAndArrowParameterList only
-      // as the formal parameters of'(x, y, ...z) => foo', and is not itself a
-      // valid expression or binding pattern.
-      ExpressionUnexpectedToken(classifier);
-      BindingPatternUnexpectedToken(classifier);
-      Consume(Token::ELLIPSIS);
-      seen_rest = is_rest = true;
     }
-    int pos = position(), expr_pos = peek_position();
-    ExpressionClassifier binding_classifier(this);
-    ExpressionT right = this->ParseAssignmentExpression(
-        accept_IN, &binding_classifier, CHECK_OK);
-    classifier->Accumulate(&binding_classifier,
-                           ExpressionClassifier::AllProductions);
-    if (is_rest) {
-      if (!this->IsIdentifier(right) && !IsValidPattern(right)) {
-        classifier->RecordArrowFormalParametersError(
-            Scanner::Location(pos, scanner()->location().end_pos),
-            MessageTemplate::kInvalidRestParameter);
-      }
-      right = factory()->NewSpread(right, pos, expr_pos);
-    }
-    is_simple_parameter_list =
-        is_simple_parameter_list && this->IsIdentifier(right);
-    result = factory()->NewBinaryOperation(Token::COMMA, result, right, pos);
-  }
-  if (!is_simple_parameter_list || seen_rest) {
-    classifier->RecordNonSimpleParameter();
   }
 
   return result;
@@ -1810,26 +1919,23 @@
 
 template <typename Impl>
 typename ParserBase<Impl>::ExpressionT ParserBase<Impl>::ParseArrayLiteral(
-    ExpressionClassifier* classifier, bool* ok) {
+    bool* ok) {
   // ArrayLiteral ::
   //   '[' Expression? (',' Expression?)* ']'
 
   int pos = peek_position();
-  typename Traits::Type::ExpressionList values =
-      this->NewExpressionList(4, zone_);
+  ExpressionListT values = impl()->NewExpressionList(4);
   int first_spread_index = -1;
   Expect(Token::LBRACK, CHECK_OK);
   while (peek() != Token::RBRACK) {
     ExpressionT elem;
     if (peek() == Token::COMMA) {
-      elem = this->GetLiteralTheHole(peek_position(), factory());
+      elem = impl()->GetLiteralTheHole(peek_position());
     } else if (peek() == Token::ELLIPSIS) {
       int start_pos = peek_position();
       Consume(Token::ELLIPSIS);
       int expr_pos = peek_position();
-      ExpressionT argument =
-          this->ParseAssignmentExpression(true, classifier, CHECK_OK);
-      CheckNoTailCallExpressions(classifier, CHECK_OK);
+      ExpressionT argument = ParseAssignmentExpression(true, CHECK_OK);
       elem = factory()->NewSpread(argument, start_pos, expr_pos);
 
       if (first_spread_index < 0) {
@@ -1837,25 +1943,23 @@
       }
 
       if (argument->IsAssignment()) {
-        classifier->RecordPatternError(
+        classifier()->RecordPatternError(
             Scanner::Location(start_pos, scanner()->location().end_pos),
             MessageTemplate::kInvalidDestructuringTarget);
       } else {
-        CheckDestructuringElement(argument, classifier, start_pos,
+        CheckDestructuringElement(argument, start_pos,
                                   scanner()->location().end_pos);
       }
 
       if (peek() == Token::COMMA) {
-        classifier->RecordPatternError(
+        classifier()->RecordPatternError(
             Scanner::Location(start_pos, scanner()->location().end_pos),
             MessageTemplate::kElementAfterRest);
       }
     } else {
       int beg_pos = peek_position();
-      elem = this->ParseAssignmentExpression(true, classifier, CHECK_OK);
-      CheckNoTailCallExpressions(classifier, CHECK_OK);
-      CheckDestructuringElement(elem, classifier, beg_pos,
-                                scanner()->location().end_pos);
+      elem = ParseAssignmentExpression(true, CHECK_OK);
+      CheckDestructuringElement(elem, beg_pos, scanner()->location().end_pos);
     }
     values->Add(elem, zone_);
     if (peek() != Token::RBRACK) {
@@ -1878,19 +1982,87 @@
       // to change.  Also, this error message will never appear while pre-
       // parsing (this is OK, as it is an implementation limitation).
       ReportMessage(MessageTemplate::kTooManySpreads);
-      return this->EmptyExpression();
+      return impl()->EmptyExpression();
     }
   }
   return result;
 }
 
 template <class Impl>
+bool ParserBase<Impl>::SetPropertyKindFromToken(Token::Value token,
+                                                PropertyKind* kind) {
+  // This returns true, setting the property kind, iff the given token is one
+  // which must occur after a property name, indicating that the previous token
+  // was in fact a name and not a modifier (like the "get" in "get x").
+  switch (token) {
+    case Token::COLON:
+      *kind = PropertyKind::kValueProperty;
+      return true;
+    case Token::COMMA:
+    case Token::RBRACE:
+    case Token::ASSIGN:
+      *kind = PropertyKind::kShorthandProperty;
+      return true;
+    case Token::LPAREN:
+      *kind = PropertyKind::kMethodProperty;
+      return true;
+    case Token::MUL:
+    case Token::SEMICOLON:
+      *kind = PropertyKind::kClassField;
+      return true;
+    default:
+      break;
+  }
+  return false;
+}
+
+template <class Impl>
 typename ParserBase<Impl>::ExpressionT ParserBase<Impl>::ParsePropertyName(
-    IdentifierT* name, bool* is_get, bool* is_set, bool* is_computed_name,
-    ExpressionClassifier* classifier, bool* ok) {
+    IdentifierT* name, PropertyKind* kind, bool* is_generator, bool* is_get,
+    bool* is_set, bool* is_async, bool* is_computed_name, bool* ok) {
+  DCHECK(*kind == PropertyKind::kNotSet);
+  DCHECK(!*is_generator);
+  DCHECK(!*is_get);
+  DCHECK(!*is_set);
+  DCHECK(!*is_async);
+  DCHECK(!*is_computed_name);
+
+  *is_generator = Check(Token::MUL);
+  if (*is_generator) {
+    *kind = PropertyKind::kMethodProperty;
+  }
+
   Token::Value token = peek();
   int pos = peek_position();
 
+  if (allow_harmony_async_await() && !*is_generator && token == Token::ASYNC &&
+      !scanner()->HasAnyLineTerminatorAfterNext()) {
+    Consume(Token::ASYNC);
+    token = peek();
+    if (SetPropertyKindFromToken(token, kind)) {
+      *name = impl()->GetSymbol();  // TODO(bakkot) specialize on 'async'
+      impl()->PushLiteralName(*name);
+      return factory()->NewStringLiteral(*name, pos);
+    }
+    *kind = PropertyKind::kMethodProperty;
+    *is_async = true;
+    pos = peek_position();
+  }
+
+  if (token == Token::IDENTIFIER && !*is_generator && !*is_async) {
+    // This is checking for 'get' and 'set' in particular.
+    Consume(Token::IDENTIFIER);
+    token = peek();
+    if (SetPropertyKindFromToken(token, kind) ||
+        !scanner()->IsGetOrSet(is_get, is_set)) {
+      *name = impl()->GetSymbol();
+      impl()->PushLiteralName(*name);
+      return factory()->NewStringLiteral(*name, pos);
+    }
+    *kind = PropertyKind::kAccessorProperty;
+    pos = peek_position();
+  }
+
   // For non computed property names we normalize the name a bit:
   //
   //   "12" -> 12
@@ -1900,274 +2072,417 @@
   //
   // This is important because we use the property name as a key in a hash
   // table when we compute constant properties.
+  ExpressionT expression = impl()->EmptyExpression();
   switch (token) {
     case Token::STRING:
       Consume(Token::STRING);
-      *name = this->GetSymbol(scanner());
+      *name = impl()->GetSymbol();
       break;
 
     case Token::SMI:
       Consume(Token::SMI);
-      *name = this->GetNumberAsSymbol(scanner());
+      *name = impl()->GetNumberAsSymbol();
       break;
 
     case Token::NUMBER:
       Consume(Token::NUMBER);
-      *name = this->GetNumberAsSymbol(scanner());
+      *name = impl()->GetNumberAsSymbol();
       break;
 
     case Token::LBRACK: {
+      *name = impl()->EmptyIdentifier();
       *is_computed_name = true;
       Consume(Token::LBRACK);
       ExpressionClassifier computed_name_classifier(this);
-      ExpressionT expression =
-          ParseAssignmentExpression(true, &computed_name_classifier, CHECK_OK);
-      impl()->RewriteNonPattern(&computed_name_classifier, CHECK_OK);
-      classifier->Accumulate(&computed_name_classifier,
-                             ExpressionClassifier::ExpressionProductions);
+      expression = ParseAssignmentExpression(true, CHECK_OK);
+      impl()->RewriteNonPattern(CHECK_OK);
+      impl()->AccumulateFormalParameterContainmentErrors();
       Expect(Token::RBRACK, CHECK_OK);
-      return expression;
+      break;
     }
 
     default:
       *name = ParseIdentifierName(CHECK_OK);
-      scanner()->IsGetOrSet(is_get, is_set);
       break;
   }
 
+  if (*kind == PropertyKind::kNotSet) {
+    SetPropertyKindFromToken(peek(), kind);
+  }
+
+  if (*is_computed_name) {
+    return expression;
+  }
+
+  impl()->PushLiteralName(*name);
+
   uint32_t index;
-  return this->IsArrayIndex(*name, &index)
+  return impl()->IsArrayIndex(*name, &index)
              ? factory()->NewNumberLiteral(index, pos)
              : factory()->NewStringLiteral(*name, pos);
 }
 
 template <typename Impl>
-typename ParserBase<Impl>::ObjectLiteralPropertyT
-ParserBase<Impl>::ParsePropertyDefinition(
-    ObjectLiteralCheckerBase* checker, bool in_class, bool has_extends,
-    MethodKind method_kind, bool* is_computed_name, bool* has_seen_constructor,
-    ExpressionClassifier* classifier, IdentifierT* name, bool* ok) {
-  DCHECK(!in_class || IsStaticMethod(method_kind) ||
-         has_seen_constructor != nullptr);
+typename ParserBase<Impl>::ClassLiteralPropertyT
+ParserBase<Impl>::ParseClassPropertyDefinition(ClassLiteralChecker* checker,
+                                               bool has_extends,
+                                               bool* is_computed_name,
+                                               bool* has_seen_constructor,
+                                               bool* ok) {
+  DCHECK(has_seen_constructor != nullptr);
   bool is_get = false;
   bool is_set = false;
-  bool is_generator = Check(Token::MUL);
+  bool is_generator = false;
   bool is_async = false;
-  const bool is_static = IsStaticMethod(method_kind);
+  bool is_static = false;
+  PropertyKind kind = PropertyKind::kNotSet;
 
   Token::Value name_token = peek();
 
-  if (is_generator) {
-    method_kind |= MethodKind::kGenerator;
-  } else if (allow_harmony_async_await() && name_token == Token::ASYNC &&
-             !scanner()->HasAnyLineTerminatorAfterNext() &&
-             PeekAhead() != Token::LPAREN && PeekAhead()) {
-    is_async = true;
+  IdentifierT name = impl()->EmptyIdentifier();
+  ExpressionT name_expression;
+  if (name_token == Token::STATIC) {
+    Consume(Token::STATIC);
+    if (peek() == Token::LPAREN) {
+      kind = PropertyKind::kMethodProperty;
+      name = impl()->GetSymbol();  // TODO(bakkot) specialize on 'static'
+      name_expression = factory()->NewStringLiteral(name, position());
+    } else if (peek() == Token::ASSIGN || peek() == Token::SEMICOLON ||
+               peek() == Token::RBRACE) {
+      name = impl()->GetSymbol();  // TODO(bakkot) specialize on 'static'
+      name_expression = factory()->NewStringLiteral(name, position());
+    } else {
+      is_static = true;
+      name_expression = ParsePropertyName(
+          &name, &kind, &is_generator, &is_get, &is_set, &is_async,
+          is_computed_name, CHECK_OK_CUSTOM(EmptyClassLiteralProperty));
+    }
+  } else {
+    name_expression = ParsePropertyName(
+        &name, &kind, &is_generator, &is_get, &is_set, &is_async,
+        is_computed_name, CHECK_OK_CUSTOM(EmptyClassLiteralProperty));
   }
 
+  switch (kind) {
+    case PropertyKind::kClassField:
+    case PropertyKind::kNotSet:  // This case is a name followed by a name or
+                                 // other property. Here we have to assume
+                                 // that's an uninitialized field followed by a
+                                 // linebreak followed by a property, with ASI
+                                 // adding the semicolon. If not, there will be
+                                 // a syntax error after parsing the first name
+                                 // as an uninitialized field.
+    case PropertyKind::kShorthandProperty:
+    case PropertyKind::kValueProperty:
+      if (allow_harmony_class_fields()) {
+        bool has_initializer = Check(Token::ASSIGN);
+        ExpressionT function_literal = ParseClassFieldForInitializer(
+            has_initializer, CHECK_OK_CUSTOM(EmptyClassLiteralProperty));
+        ExpectSemicolon(CHECK_OK_CUSTOM(EmptyClassLiteralProperty));
+        return factory()->NewClassLiteralProperty(
+            name_expression, function_literal, ClassLiteralProperty::FIELD,
+            is_static, *is_computed_name);
+      } else {
+        ReportUnexpectedToken(Next());
+        *ok = false;
+        return impl()->EmptyClassLiteralProperty();
+      }
+
+    case PropertyKind::kMethodProperty: {
+      DCHECK(!is_get && !is_set);
+
+      // MethodDefinition
+      //    PropertyName '(' StrictFormalParameters ')' '{' FunctionBody '}'
+      //    '*' PropertyName '(' StrictFormalParameters ')' '{' FunctionBody '}'
+
+      if (!*is_computed_name) {
+        checker->CheckClassMethodName(
+            name_token, PropertyKind::kMethodProperty, is_generator, is_async,
+            is_static, CHECK_OK_CUSTOM(EmptyClassLiteralProperty));
+      }
+
+      FunctionKind kind = is_generator
+                              ? FunctionKind::kConciseGeneratorMethod
+                              : is_async ? FunctionKind::kAsyncConciseMethod
+                                         : FunctionKind::kConciseMethod;
+
+      if (!is_static && impl()->IsConstructor(name)) {
+        *has_seen_constructor = true;
+        kind = has_extends ? FunctionKind::kSubclassConstructor
+                           : FunctionKind::kBaseConstructor;
+      }
+
+      ExpressionT value = impl()->ParseFunctionLiteral(
+          name, scanner()->location(), kSkipFunctionNameCheck, kind,
+          kNoSourcePosition, FunctionLiteral::kAccessorOrMethod,
+          language_mode(), CHECK_OK_CUSTOM(EmptyClassLiteralProperty));
+
+      return factory()->NewClassLiteralProperty(name_expression, value,
+                                                ClassLiteralProperty::METHOD,
+                                                is_static, *is_computed_name);
+    }
+
+    case PropertyKind::kAccessorProperty: {
+      DCHECK((is_get || is_set) && !is_generator && !is_async);
+
+      if (!*is_computed_name) {
+        checker->CheckClassMethodName(
+            name_token, PropertyKind::kAccessorProperty, false, false,
+            is_static, CHECK_OK_CUSTOM(EmptyClassLiteralProperty));
+        // Make sure the name expression is a string since we need a Name for
+        // Runtime_DefineAccessorPropertyUnchecked and since we can determine
+        // this statically we can skip the extra runtime check.
+        name_expression =
+            factory()->NewStringLiteral(name, name_expression->position());
+      }
+
+      FunctionKind kind = is_get ? FunctionKind::kGetterFunction
+                                 : FunctionKind::kSetterFunction;
+
+      FunctionLiteralT value = impl()->ParseFunctionLiteral(
+          name, scanner()->location(), kSkipFunctionNameCheck, kind,
+          kNoSourcePosition, FunctionLiteral::kAccessorOrMethod,
+          language_mode(), CHECK_OK_CUSTOM(EmptyClassLiteralProperty));
+
+      if (!*is_computed_name) {
+        impl()->AddAccessorPrefixToFunctionName(is_get, value, name);
+      }
+
+      return factory()->NewClassLiteralProperty(
+          name_expression, value,
+          is_get ? ClassLiteralProperty::GETTER : ClassLiteralProperty::SETTER,
+          is_static, *is_computed_name);
+    }
+  }
+  UNREACHABLE();
+  return impl()->EmptyClassLiteralProperty();
+}
+
+template <typename Impl>
+typename ParserBase<Impl>::FunctionLiteralT
+ParserBase<Impl>::ParseClassFieldForInitializer(bool has_initializer,
+                                                bool* ok) {
+  // Makes a concise method which evaluates and returns the initialized value
+  // (or undefined if absent).
+  FunctionKind kind = FunctionKind::kConciseMethod;
+  DeclarationScope* initializer_scope = NewFunctionScope(kind);
+  initializer_scope->set_start_position(scanner()->location().end_pos);
+  FunctionState initializer_state(&function_state_, &scope_state_,
+                                  initializer_scope);
+  DCHECK(scope() == initializer_scope);
+  scope()->SetLanguageMode(STRICT);
+  ExpressionClassifier expression_classifier(this);
+  ExpressionT value;
+  if (has_initializer) {
+    value = this->ParseAssignmentExpression(
+        true, CHECK_OK_CUSTOM(EmptyFunctionLiteral));
+    impl()->RewriteNonPattern(CHECK_OK_CUSTOM(EmptyFunctionLiteral));
+  } else {
+    value = factory()->NewUndefinedLiteral(kNoSourcePosition);
+  }
+  initializer_scope->set_end_position(scanner()->location().end_pos);
+  typename Types::StatementList body = impl()->NewStatementList(1);
+  body->Add(factory()->NewReturnStatement(value, kNoSourcePosition), zone());
+  FunctionLiteralT function_literal = factory()->NewFunctionLiteral(
+      impl()->EmptyIdentifierString(), initializer_scope, body,
+      initializer_state.materialized_literal_count(),
+      initializer_state.expected_property_count(), 0,
+      FunctionLiteral::kNoDuplicateParameters,
+      FunctionLiteral::kAnonymousExpression,
+      FunctionLiteral::kShouldLazyCompile, initializer_scope->start_position());
+  function_literal->set_is_class_field_initializer(true);
+  return function_literal;
+}
+
+template <typename Impl>
+typename ParserBase<Impl>::ObjectLiteralPropertyT
+ParserBase<Impl>::ParseObjectPropertyDefinition(ObjectLiteralChecker* checker,
+                                                bool* is_computed_name,
+                                                bool* ok) {
+  bool is_get = false;
+  bool is_set = false;
+  bool is_generator = false;
+  bool is_async = false;
+  PropertyKind kind = PropertyKind::kNotSet;
+
+  IdentifierT name = impl()->EmptyIdentifier();
+  Token::Value name_token = peek();
   int next_beg_pos = scanner()->peek_location().beg_pos;
   int next_end_pos = scanner()->peek_location().end_pos;
-  ExpressionT name_expression =
-      ParsePropertyName(name, &is_get, &is_set, is_computed_name, classifier,
-                        CHECK_OK_CUSTOM(EmptyObjectLiteralProperty));
 
-  if (fni_ != nullptr && !*is_computed_name) {
-    this->PushLiteralName(fni_, *name);
-  }
+  ExpressionT name_expression = ParsePropertyName(
+      &name, &kind, &is_generator, &is_get, &is_set, &is_async,
+      is_computed_name, CHECK_OK_CUSTOM(EmptyObjectLiteralProperty));
 
-  if (!in_class && !is_generator) {
-    DCHECK(!IsStaticMethod(method_kind));
-    if (peek() == Token::COLON) {
-      // PropertyDefinition
-      //    PropertyName ':' AssignmentExpression
+  switch (kind) {
+    case PropertyKind::kValueProperty: {
+      DCHECK(!is_get && !is_set && !is_generator && !is_async);
+
       if (!*is_computed_name) {
-        checker->CheckProperty(name_token, kValueProperty, MethodKind::kNormal,
-                               classifier,
-                               CHECK_OK_CUSTOM(EmptyObjectLiteralProperty));
+        checker->CheckDuplicateProto(name_token);
       }
       Consume(Token::COLON);
       int beg_pos = peek_position();
-      ExpressionT value = this->ParseAssignmentExpression(
-          true, classifier, CHECK_OK_CUSTOM(EmptyObjectLiteralProperty));
-      CheckDestructuringElement(value, classifier, beg_pos,
-                                scanner()->location().end_pos);
+      ExpressionT value = ParseAssignmentExpression(
+          true, CHECK_OK_CUSTOM(EmptyObjectLiteralProperty));
+      CheckDestructuringElement(value, beg_pos, scanner()->location().end_pos);
 
-      return factory()->NewObjectLiteralProperty(name_expression, value,
-                                                 is_static, *is_computed_name);
+      ObjectLiteralPropertyT result = factory()->NewObjectLiteralProperty(
+          name_expression, value, *is_computed_name);
+
+      if (!*is_computed_name) {
+        impl()->SetFunctionNameFromPropertyName(result, name);
+      }
+
+      return result;
     }
 
-    if (Token::IsIdentifier(name_token, language_mode(), this->is_generator(),
-                            parsing_module_ || is_async_function()) &&
-        (peek() == Token::COMMA || peek() == Token::RBRACE ||
-         peek() == Token::ASSIGN)) {
+    case PropertyKind::kShorthandProperty: {
       // PropertyDefinition
       //    IdentifierReference
       //    CoverInitializedName
       //
       // CoverInitializedName
       //    IdentifierReference Initializer?
-      if (classifier->duplicate_finder() != nullptr &&
-          scanner()->FindSymbol(classifier->duplicate_finder(), 1) != 0) {
-        classifier->RecordDuplicateFormalParameterError(scanner()->location());
+      DCHECK(!is_get && !is_set && !is_generator && !is_async);
+
+      if (!Token::IsIdentifier(name_token, language_mode(),
+                               this->is_generator(),
+                               parsing_module_ || is_async_function())) {
+        ReportUnexpectedToken(Next());
+        *ok = false;
+        return impl()->EmptyObjectLiteralProperty();
       }
 
-      if (this->IsEvalOrArguments(*name) && is_strict(language_mode())) {
-        classifier->RecordBindingPatternError(
+      DCHECK(!*is_computed_name);
+
+      if (classifier()->duplicate_finder() != nullptr &&
+          scanner()->FindSymbol(classifier()->duplicate_finder(), 1) != 0) {
+        classifier()->RecordDuplicateFormalParameterError(
+            scanner()->location());
+      }
+
+      if (impl()->IsEvalOrArguments(name) && is_strict(language_mode())) {
+        classifier()->RecordBindingPatternError(
             scanner()->location(), MessageTemplate::kStrictEvalArguments);
       }
 
       if (name_token == Token::LET) {
-        classifier->RecordLetPatternError(
+        classifier()->RecordLetPatternError(
             scanner()->location(), MessageTemplate::kLetInLexicalBinding);
       }
       if (name_token == Token::AWAIT) {
         DCHECK(!is_async_function());
-        classifier->RecordAsyncArrowFormalParametersError(
+        classifier()->RecordAsyncArrowFormalParametersError(
             Scanner::Location(next_beg_pos, next_end_pos),
             MessageTemplate::kAwaitBindingIdentifier);
       }
       ExpressionT lhs =
-          this->ExpressionFromIdentifier(*name, next_beg_pos, next_end_pos);
-      CheckDestructuringElement(lhs, classifier, next_beg_pos, next_end_pos);
+          impl()->ExpressionFromIdentifier(name, next_beg_pos, next_end_pos);
+      CheckDestructuringElement(lhs, next_beg_pos, next_end_pos);
 
       ExpressionT value;
       if (peek() == Token::ASSIGN) {
         Consume(Token::ASSIGN);
         ExpressionClassifier rhs_classifier(this);
-        ExpressionT rhs = this->ParseAssignmentExpression(
-            true, &rhs_classifier, CHECK_OK_CUSTOM(EmptyObjectLiteralProperty));
-        impl()->RewriteNonPattern(&rhs_classifier,
-                                  CHECK_OK_CUSTOM(EmptyObjectLiteralProperty));
-        classifier->Accumulate(&rhs_classifier,
-                               ExpressionClassifier::ExpressionProductions);
+        ExpressionT rhs = ParseAssignmentExpression(
+            true, CHECK_OK_CUSTOM(EmptyObjectLiteralProperty));
+        impl()->RewriteNonPattern(CHECK_OK_CUSTOM(EmptyObjectLiteralProperty));
+        impl()->AccumulateFormalParameterContainmentErrors();
         value = factory()->NewAssignment(Token::ASSIGN, lhs, rhs,
                                          kNoSourcePosition);
-        classifier->RecordObjectLiteralError(
+        classifier()->RecordExpressionError(
             Scanner::Location(next_beg_pos, scanner()->location().end_pos),
             MessageTemplate::kInvalidCoverInitializedName);
 
-        Traits::SetFunctionNameFromIdentifierRef(rhs, lhs);
+        impl()->SetFunctionNameFromIdentifierRef(rhs, lhs);
       } else {
         value = lhs;
       }
 
       return factory()->NewObjectLiteralProperty(
-          name_expression, value, ObjectLiteralProperty::COMPUTED, is_static,
-          false);
-    }
-  }
-
-  // Method definitions are never valid in patterns.
-  classifier->RecordPatternError(
-      Scanner::Location(next_beg_pos, scanner()->location().end_pos),
-      MessageTemplate::kInvalidDestructuringTarget);
-
-  if (is_async && !IsSpecialMethod(method_kind)) {
-    DCHECK(!is_get);
-    DCHECK(!is_set);
-    bool dont_care;
-    name_expression = ParsePropertyName(
-        name, &dont_care, &dont_care, is_computed_name, classifier,
-        CHECK_OK_CUSTOM(EmptyObjectLiteralProperty));
-    method_kind |= MethodKind::kAsync;
-  }
-
-  if (is_generator || peek() == Token::LPAREN) {
-    // MethodDefinition
-    //    PropertyName '(' StrictFormalParameters ')' '{' FunctionBody '}'
-    //    '*' PropertyName '(' StrictFormalParameters ')' '{' FunctionBody '}'
-    if (!*is_computed_name) {
-      checker->CheckProperty(name_token, kMethodProperty, method_kind,
-                             classifier,
-                             CHECK_OK_CUSTOM(EmptyObjectLiteralProperty));
+          name_expression, value, ObjectLiteralProperty::COMPUTED, false);
     }
 
-    FunctionKind kind = is_generator
-                            ? FunctionKind::kConciseGeneratorMethod
-                            : is_async ? FunctionKind::kAsyncConciseMethod
-                                       : FunctionKind::kConciseMethod;
+    case PropertyKind::kMethodProperty: {
+      DCHECK(!is_get && !is_set);
 
-    if (in_class && !IsStaticMethod(method_kind) &&
-        this->IsConstructor(*name)) {
-      *has_seen_constructor = true;
-      kind = has_extends ? FunctionKind::kSubclassConstructor
-                         : FunctionKind::kBaseConstructor;
+      // MethodDefinition
+      //    PropertyName '(' StrictFormalParameters ')' '{' FunctionBody '}'
+      //    '*' PropertyName '(' StrictFormalParameters ')' '{' FunctionBody '}'
+
+      classifier()->RecordPatternError(
+          Scanner::Location(next_beg_pos, scanner()->location().end_pos),
+          MessageTemplate::kInvalidDestructuringTarget);
+
+      FunctionKind kind = is_generator
+                              ? FunctionKind::kConciseGeneratorMethod
+                              : is_async ? FunctionKind::kAsyncConciseMethod
+                                         : FunctionKind::kConciseMethod;
+
+      ExpressionT value = impl()->ParseFunctionLiteral(
+          name, scanner()->location(), kSkipFunctionNameCheck, kind,
+          kNoSourcePosition, FunctionLiteral::kAccessorOrMethod,
+          language_mode(), CHECK_OK_CUSTOM(EmptyObjectLiteralProperty));
+
+      return factory()->NewObjectLiteralProperty(
+          name_expression, value, ObjectLiteralProperty::COMPUTED,
+          *is_computed_name);
     }
 
-    ExpressionT value = impl()->ParseFunctionLiteral(
-        *name, scanner()->location(), kSkipFunctionNameCheck, kind,
-        kNoSourcePosition, FunctionLiteral::kAccessorOrMethod, language_mode(),
-        CHECK_OK_CUSTOM(EmptyObjectLiteralProperty));
+    case PropertyKind::kAccessorProperty: {
+      DCHECK((is_get || is_set) && !(is_set && is_get) && !is_generator &&
+             !is_async);
 
-    return factory()->NewObjectLiteralProperty(name_expression, value,
-                                               ObjectLiteralProperty::COMPUTED,
-                                               is_static, *is_computed_name);
-  }
+      classifier()->RecordPatternError(
+          Scanner::Location(next_beg_pos, scanner()->location().end_pos),
+          MessageTemplate::kInvalidDestructuringTarget);
 
-  if (in_class && name_token == Token::STATIC && IsNormalMethod(method_kind)) {
-    // ClassElement (static)
-    //    'static' MethodDefinition
-    *name = this->EmptyIdentifier();
-    ObjectLiteralPropertyT property = ParsePropertyDefinition(
-        checker, true, has_extends, MethodKind::kStatic, is_computed_name,
-        nullptr, classifier, name, ok);
-    impl()->RewriteNonPattern(classifier, ok);
-    return property;
-  }
+      if (!*is_computed_name) {
+        // Make sure the name expression is a string since we need a Name for
+        // Runtime_DefineAccessorPropertyUnchecked and since we can determine
+        // this statically we can skip the extra runtime check.
+        name_expression =
+            factory()->NewStringLiteral(name, name_expression->position());
+      }
 
-  if (is_get || is_set) {
-    // MethodDefinition (Accessors)
-    //    get PropertyName '(' ')' '{' FunctionBody '}'
-    //    set PropertyName '(' PropertySetParameterList ')' '{' FunctionBody '}'
-    *name = this->EmptyIdentifier();
-    bool dont_care = false;
-    name_token = peek();
+      FunctionKind kind = is_get ? FunctionKind::kGetterFunction
+                                 : FunctionKind::kSetterFunction;
 
-    name_expression = ParsePropertyName(
-        name, &dont_care, &dont_care, is_computed_name, classifier,
-        CHECK_OK_CUSTOM(EmptyObjectLiteralProperty));
+      FunctionLiteralT value = impl()->ParseFunctionLiteral(
+          name, scanner()->location(), kSkipFunctionNameCheck, kind,
+          kNoSourcePosition, FunctionLiteral::kAccessorOrMethod,
+          language_mode(), CHECK_OK_CUSTOM(EmptyObjectLiteralProperty));
 
-    if (!*is_computed_name) {
-      checker->CheckProperty(name_token, kAccessorProperty, method_kind,
-                             classifier,
-                             CHECK_OK_CUSTOM(EmptyObjectLiteralProperty));
+      if (!*is_computed_name) {
+        impl()->AddAccessorPrefixToFunctionName(is_get, value, name);
+      }
+
+      return factory()->NewObjectLiteralProperty(
+          name_expression, value, is_get ? ObjectLiteralProperty::GETTER
+                                         : ObjectLiteralProperty::SETTER,
+          *is_computed_name);
     }
 
-    typename Traits::Type::FunctionLiteral value = impl()->ParseFunctionLiteral(
-        *name, scanner()->location(), kSkipFunctionNameCheck,
-        is_get ? FunctionKind::kGetterFunction : FunctionKind::kSetterFunction,
-        kNoSourcePosition, FunctionLiteral::kAccessorOrMethod, language_mode(),
-        CHECK_OK_CUSTOM(EmptyObjectLiteralProperty));
-
-    // Make sure the name expression is a string since we need a Name for
-    // Runtime_DefineAccessorPropertyUnchecked and since we can determine this
-    // statically we can skip the extra runtime check.
-    if (!*is_computed_name) {
-      name_expression =
-          factory()->NewStringLiteral(*name, name_expression->position());
-    }
-
-    return factory()->NewObjectLiteralProperty(
-        name_expression, value,
-        is_get ? ObjectLiteralProperty::GETTER : ObjectLiteralProperty::SETTER,
-        is_static, *is_computed_name);
+    case PropertyKind::kClassField:
+    case PropertyKind::kNotSet:
+      ReportUnexpectedToken(Next());
+      *ok = false;
+      return impl()->EmptyObjectLiteralProperty();
   }
-
-  Token::Value next = Next();
-  ReportUnexpectedToken(next);
-  *ok = false;
-  return this->EmptyObjectLiteralProperty();
+  UNREACHABLE();
+  return impl()->EmptyObjectLiteralProperty();
 }
 
 template <typename Impl>
 typename ParserBase<Impl>::ExpressionT ParserBase<Impl>::ParseObjectLiteral(
-    ExpressionClassifier* classifier, bool* ok) {
+    bool* ok) {
   // ObjectLiteral ::
   // '{' (PropertyDefinition (',' PropertyDefinition)* ','? )? '}'
 
   int pos = peek_position();
-  typename Traits::Type::PropertyList properties =
-      this->NewPropertyList(4, zone_);
+  typename Types::ObjectPropertyList properties =
+      impl()->NewObjectPropertyList(4);
   int number_of_boilerplate_properties = 0;
   bool has_computed_names = false;
   ObjectLiteralChecker checker(this);
@@ -2177,20 +2492,16 @@
   while (peek() != Token::RBRACE) {
     FuncNameInferrer::State fni_state(fni_);
 
-    const bool in_class = false;
-    const bool has_extends = false;
     bool is_computed_name = false;
-    IdentifierT name = this->EmptyIdentifier();
-    ObjectLiteralPropertyT property = this->ParsePropertyDefinition(
-        &checker, in_class, has_extends, MethodKind::kNormal, &is_computed_name,
-        NULL, classifier, &name, CHECK_OK);
+    ObjectLiteralPropertyT property =
+        ParseObjectPropertyDefinition(&checker, &is_computed_name, CHECK_OK);
 
     if (is_computed_name) {
       has_computed_names = true;
     }
 
     // Count CONSTANT or COMPUTED properties to maintain the enumeration order.
-    if (!has_computed_names && this->IsBoilerplateProperty(property)) {
+    if (!has_computed_names && impl()->IsBoilerplateProperty(property)) {
       number_of_boilerplate_properties++;
     }
     properties->Add(property, zone());
@@ -2201,8 +2512,6 @@
     }
 
     if (fni_ != nullptr) fni_->Infer();
-
-    Traits::SetFunctionNameFromPropertyName(property, name);
   }
   Expect(Token::RBRACE, CHECK_OK);
 
@@ -2216,16 +2525,13 @@
 }
 
 template <typename Impl>
-typename ParserBase<Impl>::Traits::Type::ExpressionList
-ParserBase<Impl>::ParseArguments(Scanner::Location* first_spread_arg_loc,
-                                 bool maybe_arrow,
-                                 ExpressionClassifier* classifier, bool* ok) {
+typename ParserBase<Impl>::ExpressionListT ParserBase<Impl>::ParseArguments(
+    Scanner::Location* first_spread_arg_loc, bool maybe_arrow, bool* ok) {
   // Arguments ::
   //   '(' (AssignmentExpression)*[','] ')'
 
   Scanner::Location spread_arg = Scanner::Location::invalid();
-  typename Traits::Type::ExpressionList result =
-      this->NewExpressionList(4, zone_);
+  ExpressionListT result = impl()->NewExpressionList(4);
   Expect(Token::LPAREN, CHECK_OK_CUSTOM(NullExpressionList));
   bool done = (peek() == Token::RPAREN);
   bool was_unspread = false;
@@ -2235,12 +2541,10 @@
     bool is_spread = Check(Token::ELLIPSIS);
     int expr_pos = peek_position();
 
-    ExpressionT argument = this->ParseAssignmentExpression(
-        true, classifier, CHECK_OK_CUSTOM(NullExpressionList));
-    CheckNoTailCallExpressions(classifier, CHECK_OK_CUSTOM(NullExpressionList));
+    ExpressionT argument =
+        ParseAssignmentExpression(true, CHECK_OK_CUSTOM(NullExpressionList));
     if (!maybe_arrow) {
-      impl()->RewriteNonPattern(classifier,
-                                CHECK_OK_CUSTOM(NullExpressionList));
+      impl()->RewriteNonPattern(CHECK_OK_CUSTOM(NullExpressionList));
     }
     if (is_spread) {
       if (!spread_arg.IsValid()) {
@@ -2263,7 +2567,7 @@
     if (result->length() > Code::kMaxArguments) {
       ReportMessage(MessageTemplate::kTooManyArguments);
       *ok = false;
-      return this->NullExpressionList();
+      return impl()->NullExpressionList();
     }
     done = (peek() != Token::COMMA);
     if (!done) {
@@ -2276,22 +2580,21 @@
   }
   Scanner::Location location = scanner_->location();
   if (Token::RPAREN != Next()) {
-    ReportMessageAt(location, MessageTemplate::kUnterminatedArgList);
+    impl()->ReportMessageAt(location, MessageTemplate::kUnterminatedArgList);
     *ok = false;
-    return this->NullExpressionList();
+    return impl()->NullExpressionList();
   }
   *first_spread_arg_loc = spread_arg;
 
   if (!maybe_arrow || peek() != Token::ARROW) {
     if (maybe_arrow) {
-      impl()->RewriteNonPattern(classifier,
-                                CHECK_OK_CUSTOM(NullExpressionList));
+      impl()->RewriteNonPattern(CHECK_OK_CUSTOM(NullExpressionList));
     }
     if (spread_arg.IsValid()) {
       // Unspread parameter sequences are translated into array literals in the
       // parser. Ensure that the number of materialized literals matches between
       // the parser and preparser
-      Traits::MaterializeUnspreadArgumentsLiterals(unspread_sequences_count);
+      impl()->MaterializeUnspreadArgumentsLiterals(unspread_sequences_count);
     }
   }
 
@@ -2301,9 +2604,7 @@
 // Precedence = 2
 template <typename Impl>
 typename ParserBase<Impl>::ExpressionT
-ParserBase<Impl>::ParseAssignmentExpression(bool accept_IN,
-                                            ExpressionClassifier* classifier,
-                                            bool* ok) {
+ParserBase<Impl>::ParseAssignmentExpression(bool accept_IN, bool* ok) {
   // AssignmentExpression ::
   //   ConditionalExpression
   //   ArrowFunction
@@ -2312,13 +2613,13 @@
   int lhs_beg_pos = peek_position();
 
   if (peek() == Token::YIELD && is_generator()) {
-    return this->ParseYieldExpression(accept_IN, classifier, ok);
+    return ParseYieldExpression(accept_IN, ok);
   }
 
   FuncNameInferrer::State fni_state(fni_);
   Checkpoint checkpoint(this);
-  ExpressionClassifier arrow_formals_classifier(this,
-                                                classifier->duplicate_finder());
+  ExpressionClassifier arrow_formals_classifier(
+      this, classifier()->duplicate_finder());
 
   Scope::Snapshot scope_snapshot(scope());
 
@@ -2328,26 +2629,23 @@
 
   bool parenthesized_formals = peek() == Token::LPAREN;
   if (!is_async && !parenthesized_formals) {
-    ArrowFormalParametersUnexpectedToken(&arrow_formals_classifier);
+    ArrowFormalParametersUnexpectedToken();
   }
 
   // Parse a simple, faster sub-grammar (primary expression) if it's evident
   // that we have only a trivial expression to parse.
   ExpressionT expression;
   if (IsTrivialExpression()) {
-    expression = this->ParsePrimaryExpression(&arrow_formals_classifier,
-                                              &is_async, CHECK_OK);
+    expression = ParsePrimaryExpression(&is_async, CHECK_OK);
   } else {
-    expression = this->ParseConditionalExpression(
-        accept_IN, &arrow_formals_classifier, CHECK_OK);
+    expression = ParseConditionalExpression(accept_IN, CHECK_OK);
   }
 
-  if (is_async && this->IsIdentifier(expression) && peek_any_identifier() &&
+  if (is_async && impl()->IsIdentifier(expression) && peek_any_identifier() &&
       PeekAhead() == Token::ARROW) {
     // async Identifier => AsyncConciseBody
-    IdentifierT name =
-        ParseAndClassifyIdentifier(&arrow_formals_classifier, CHECK_OK);
-    expression = this->ExpressionFromIdentifier(
+    IdentifierT name = ParseAndClassifyIdentifier(CHECK_OK);
+    expression = impl()->ExpressionFromIdentifier(
         name, position(), scanner()->location().end_pos, InferName::kNo);
     if (fni_) {
       // Remove `async` keyword from inferred name stack.
@@ -2357,26 +2655,29 @@
 
   if (peek() == Token::ARROW) {
     Scanner::Location arrow_loc = scanner()->peek_location();
-    ValidateArrowFormalParameters(&arrow_formals_classifier, expression,
-                                  parenthesized_formals, is_async, CHECK_OK);
+    ValidateArrowFormalParameters(expression, parenthesized_formals, is_async,
+                                  CHECK_OK);
     // This reads strangely, but is correct: it checks whether any
     // sub-expression of the parameter list failed to be a valid formal
     // parameter initializer. Since YieldExpressions are banned anywhere
     // in an arrow parameter list, this is correct.
     // TODO(adamk): Rename "FormalParameterInitializerError" to refer to
     // "YieldExpression", which is its only use.
-    ValidateFormalParameterInitializer(&arrow_formals_classifier, ok);
+    ValidateFormalParameterInitializer(ok);
 
     Scanner::Location loc(lhs_beg_pos, scanner()->location().end_pos);
     DeclarationScope* scope =
-        this->NewFunctionScope(is_async ? FunctionKind::kAsyncArrowFunction
-                                        : FunctionKind::kArrowFunction);
+        NewFunctionScope(is_async ? FunctionKind::kAsyncArrowFunction
+                                  : FunctionKind::kArrowFunction);
     // Because the arrow's parameters were parsed in the outer scope, any
     // usage flags that might have been triggered there need to be copied
     // to the arrow scope.
     this->scope()->PropagateUsageFlagsToScope(scope);
+
+    scope_snapshot.Reparent(scope);
+
     FormalParametersT parameters(scope);
-    if (!arrow_formals_classifier.is_simple_parameter_list()) {
+    if (!classifier()->is_simple_parameter_list()) {
       scope->SetHasNonSimpleParameters();
       parameters.is_simple = false;
     }
@@ -2385,18 +2686,16 @@
 
     scope->set_start_position(lhs_beg_pos);
     Scanner::Location duplicate_loc = Scanner::Location::invalid();
-    this->ParseArrowFunctionFormalParameterList(
-        &parameters, expression, loc, &duplicate_loc, scope_snapshot, CHECK_OK);
+    impl()->DeclareArrowFunctionFormalParameters(&parameters, expression, loc,
+                                                 &duplicate_loc, CHECK_OK);
     if (duplicate_loc.IsValid()) {
-      arrow_formals_classifier.RecordDuplicateFormalParameterError(
-          duplicate_loc);
+      classifier()->RecordDuplicateFormalParameterError(duplicate_loc);
     }
-    expression = this->ParseArrowFunctionLiteral(
-        accept_IN, parameters, is_async, arrow_formals_classifier, CHECK_OK);
-    arrow_formals_classifier.Discard();
-    classifier->RecordPatternError(arrow_loc,
-                                   MessageTemplate::kUnexpectedToken,
-                                   Token::String(Token::ARROW));
+    expression = ParseArrowFunctionLiteral(accept_IN, parameters, CHECK_OK);
+    impl()->Discard();
+    classifier()->RecordPatternError(arrow_loc,
+                                     MessageTemplate::kUnexpectedToken,
+                                     Token::String(Token::ARROW));
 
     if (fni_ != nullptr) fni_->Infer();
 
@@ -2407,87 +2706,70 @@
   // form part of one.  Propagate speculative formal parameter error locations
   // (including those for binding patterns, since formal parameters can
   // themselves contain binding patterns).
-  // Do not merge pending non-pattern expressions yet!
-  unsigned productions =
-      ExpressionClassifier::FormalParametersProductions |
-      ExpressionClassifier::AsyncArrowFormalParametersProduction |
-      ExpressionClassifier::FormalParameterInitializerProduction;
+  unsigned productions = ExpressionClassifier::AllProductions &
+                         ~ExpressionClassifier::ArrowFormalParametersProduction;
 
   // Parenthesized identifiers and property references are allowed as part
-  // of a larger binding pattern, even though parenthesized patterns
+  // of a larger assignment pattern, even though parenthesized patterns
   // themselves are not allowed, e.g., "[(x)] = []". Only accumulate
   // assignment pattern errors if the parsed expression is more complex.
-  if (this->IsValidReferenceExpression(expression)) {
-    productions |= ExpressionClassifier::PatternProductions &
-                   ~ExpressionClassifier::AssignmentPatternProduction;
-  } else {
-    productions |= ExpressionClassifier::PatternProductions;
+  if (IsValidReferenceExpression(expression)) {
+    productions &= ~ExpressionClassifier::AssignmentPatternProduction;
   }
 
   const bool is_destructuring_assignment =
       IsValidPattern(expression) && peek() == Token::ASSIGN;
-  if (!is_destructuring_assignment) {
-    // This may be an expression or a pattern, so we must continue to
-    // accumulate expression-related errors.
-    productions |= ExpressionClassifier::ExpressionProduction |
-                   ExpressionClassifier::TailCallExpressionProduction |
-                   ExpressionClassifier::ObjectLiteralProduction;
+  if (is_destructuring_assignment) {
+    // This is definitely not an expression so don't accumulate
+    // expression-related errors.
+    productions &= ~(ExpressionClassifier::ExpressionProduction |
+                     ExpressionClassifier::TailCallExpressionProduction);
   }
 
-  classifier->Accumulate(&arrow_formals_classifier, productions, false);
-
   if (!Token::IsAssignmentOp(peek())) {
     // Parsed conditional expression only (no assignment).
-    // Now pending non-pattern expressions must be merged.
-    classifier->MergeNonPatterns(&arrow_formals_classifier);
+    // Pending non-pattern expressions must be merged.
+    impl()->Accumulate(productions);
     return expression;
+  } else {
+    // Pending non-pattern expressions must be discarded.
+    impl()->Accumulate(productions, false);
   }
 
-  // Now pending non-pattern expressions must be discarded.
-  arrow_formals_classifier.Discard();
-
-  CheckNoTailCallExpressions(classifier, CHECK_OK);
-
   if (is_destructuring_assignment) {
-    ValidateAssignmentPattern(classifier, CHECK_OK);
+    ValidateAssignmentPattern(CHECK_OK);
   } else {
-    expression = this->CheckAndRewriteReferenceExpression(
+    expression = CheckAndRewriteReferenceExpression(
         expression, lhs_beg_pos, scanner()->location().end_pos,
         MessageTemplate::kInvalidLhsInAssignment, CHECK_OK);
   }
 
-  expression = this->MarkExpressionAsAssigned(expression);
+  expression = impl()->MarkExpressionAsAssigned(expression);
 
   Token::Value op = Next();  // Get assignment operator.
   if (op != Token::ASSIGN) {
-    classifier->RecordPatternError(scanner()->location(),
-                                   MessageTemplate::kUnexpectedToken,
-                                   Token::String(op));
+    classifier()->RecordPatternError(scanner()->location(),
+                                     MessageTemplate::kUnexpectedToken,
+                                     Token::String(op));
   }
   int pos = position();
 
   ExpressionClassifier rhs_classifier(this);
 
-  ExpressionT right =
-      this->ParseAssignmentExpression(accept_IN, &rhs_classifier, CHECK_OK);
-  CheckNoTailCallExpressions(&rhs_classifier, CHECK_OK);
-  impl()->RewriteNonPattern(&rhs_classifier, CHECK_OK);
-  classifier->Accumulate(
-      &rhs_classifier,
-      ExpressionClassifier::ExpressionProductions |
-          ExpressionClassifier::ObjectLiteralProduction |
-          ExpressionClassifier::AsyncArrowFormalParametersProduction);
+  ExpressionT right = ParseAssignmentExpression(accept_IN, CHECK_OK);
+  impl()->RewriteNonPattern(CHECK_OK);
+  impl()->AccumulateFormalParameterContainmentErrors();
 
   // TODO(1231235): We try to estimate the set of properties set by
   // constructors. We define a new property whenever there is an
   // assignment to a property of 'this'. We should probably only add
   // properties if we haven't seen them before. Otherwise we'll
   // probably overestimate the number of properties.
-  if (op == Token::ASSIGN && this->IsThisProperty(expression)) {
+  if (op == Token::ASSIGN && impl()->IsThisProperty(expression)) {
     function_state_->AddProperty();
   }
 
-  this->CheckAssigningFunctionLiteralToProperty(expression, right);
+  impl()->CheckAssigningFunctionLiteralToProperty(expression, right);
 
   if (fni_ != NULL) {
     // Check if the right hand side is a call to avoid inferring a
@@ -2502,7 +2784,7 @@
   }
 
   if (op == Token::ASSIGN) {
-    Traits::SetFunctionNameFromIdentifierRef(right, expression);
+    impl()->SetFunctionNameFromIdentifierRef(right, expression);
   }
 
   if (op == Token::ASSIGN_EXP) {
@@ -2522,19 +2804,19 @@
 
 template <typename Impl>
 typename ParserBase<Impl>::ExpressionT ParserBase<Impl>::ParseYieldExpression(
-    bool accept_IN, ExpressionClassifier* classifier, bool* ok) {
+    bool accept_IN, bool* ok) {
   // YieldExpression ::
   //   'yield' ([no line terminator] '*'? AssignmentExpression)?
   int pos = peek_position();
-  classifier->RecordPatternError(scanner()->peek_location(),
-                                 MessageTemplate::kInvalidDestructuringTarget);
-  classifier->RecordFormalParameterInitializerError(
+  classifier()->RecordPatternError(
+      scanner()->peek_location(), MessageTemplate::kInvalidDestructuringTarget);
+  classifier()->RecordFormalParameterInitializerError(
       scanner()->peek_location(), MessageTemplate::kYieldInParameter);
   Expect(Token::YIELD, CHECK_OK);
   ExpressionT generator_object =
       factory()->NewVariableProxy(function_state_->generator_object_variable());
   // The following initialization is necessary.
-  ExpressionT expression = Traits::EmptyExpression();
+  ExpressionT expression = impl()->EmptyExpression();
   bool delegating = false;  // yield*
   if (!scanner()->HasAnyLineTerminatorBeforeNext()) {
     if (Check(Token::MUL)) delegating = true;
@@ -2553,8 +2835,8 @@
         if (!delegating) break;
         // Delegating yields require an RHS; fall through.
       default:
-        expression = ParseAssignmentExpression(accept_IN, classifier, CHECK_OK);
-        impl()->RewriteNonPattern(classifier, CHECK_OK);
+        expression = ParseAssignmentExpression(accept_IN, CHECK_OK);
+        impl()->RewriteNonPattern(CHECK_OK);
         break;
     }
   }
@@ -2563,87 +2845,18 @@
     return impl()->RewriteYieldStar(generator_object, expression, pos);
   }
 
-  expression = Traits::BuildIteratorResult(expression, false);
+  expression = impl()->BuildIteratorResult(expression, false);
   // Hackily disambiguate o from o.next and o [Symbol.iterator]().
   // TODO(verwaest): Come up with a better solution.
-  typename Traits::Type::YieldExpression yield = factory()->NewYield(
-      generator_object, expression, pos, Yield::kOnExceptionThrow);
+  ExpressionT yield = factory()->NewYield(generator_object, expression, pos,
+                                          Yield::kOnExceptionThrow);
   return yield;
 }
 
-template <typename Impl>
-typename ParserBase<Impl>::ExpressionT
-ParserBase<Impl>::ParseTailCallExpression(ExpressionClassifier* classifier,
-                                          bool* ok) {
-  // TailCallExpression::
-  //   'continue' MemberExpression  Arguments
-  //   'continue' CallExpression  Arguments
-  //   'continue' MemberExpression  TemplateLiteral
-  //   'continue' CallExpression  TemplateLiteral
-  Expect(Token::CONTINUE, CHECK_OK);
-  int pos = position();
-  int sub_expression_pos = peek_position();
-  ExpressionT expression =
-      this->ParseLeftHandSideExpression(classifier, CHECK_OK);
-  CheckNoTailCallExpressions(classifier, CHECK_OK);
-
-  Scanner::Location loc(pos, scanner()->location().end_pos);
-  if (!expression->IsCall()) {
-    Scanner::Location sub_loc(sub_expression_pos, loc.end_pos);
-    ReportMessageAt(sub_loc, MessageTemplate::kUnexpectedInsideTailCall);
-    *ok = false;
-    return Traits::EmptyExpression();
-  }
-  if (Traits::IsDirectEvalCall(expression)) {
-    Scanner::Location sub_loc(sub_expression_pos, loc.end_pos);
-    ReportMessageAt(sub_loc, MessageTemplate::kUnexpectedTailCallOfEval);
-    *ok = false;
-    return Traits::EmptyExpression();
-  }
-  if (!is_strict(language_mode())) {
-    ReportMessageAt(loc, MessageTemplate::kUnexpectedSloppyTailCall);
-    *ok = false;
-    return Traits::EmptyExpression();
-  }
-  if (is_resumable()) {
-    Scanner::Location sub_loc(sub_expression_pos, loc.end_pos);
-    ReportMessageAt(sub_loc, MessageTemplate::kUnexpectedTailCall);
-    *ok = false;
-    return Traits::EmptyExpression();
-  }
-  ReturnExprContext return_expr_context =
-      function_state_->return_expr_context();
-  if (return_expr_context != ReturnExprContext::kInsideValidReturnStatement) {
-    MessageTemplate::Template msg = MessageTemplate::kNone;
-    switch (return_expr_context) {
-      case ReturnExprContext::kInsideValidReturnStatement:
-        UNREACHABLE();
-        return Traits::EmptyExpression();
-      case ReturnExprContext::kInsideValidBlock:
-        msg = MessageTemplate::kUnexpectedTailCall;
-        break;
-      case ReturnExprContext::kInsideTryBlock:
-        msg = MessageTemplate::kUnexpectedTailCallInTryBlock;
-        break;
-      case ReturnExprContext::kInsideForInOfBody:
-        msg = MessageTemplate::kUnexpectedTailCallInForInOf;
-        break;
-    }
-    ReportMessageAt(loc, msg);
-    *ok = false;
-    return Traits::EmptyExpression();
-  }
-  classifier->RecordTailCallExpressionError(
-      loc, MessageTemplate::kUnexpectedTailCall);
-  function_state_->AddExplicitTailCallExpression(expression, loc);
-  return expression;
-}
-
 // Precedence = 3
 template <typename Impl>
 typename ParserBase<Impl>::ExpressionT
 ParserBase<Impl>::ParseConditionalExpression(bool accept_IN,
-                                             ExpressionClassifier* classifier,
                                              bool* ok) {
   // ConditionalExpression ::
   //   LogicalOrExpression
@@ -2651,23 +2864,20 @@
 
   int pos = peek_position();
   // We start using the binary expression parser for prec >= 4 only!
-  ExpressionT expression =
-      this->ParseBinaryExpression(4, accept_IN, classifier, CHECK_OK);
+  ExpressionT expression = ParseBinaryExpression(4, accept_IN, CHECK_OK);
   if (peek() != Token::CONDITIONAL) return expression;
-  CheckNoTailCallExpressions(classifier, CHECK_OK);
-  impl()->RewriteNonPattern(classifier, CHECK_OK);
-  BindingPatternUnexpectedToken(classifier);
-  ArrowFormalParametersUnexpectedToken(classifier);
+  impl()->RewriteNonPattern(CHECK_OK);
+  BindingPatternUnexpectedToken();
+  ArrowFormalParametersUnexpectedToken();
   Consume(Token::CONDITIONAL);
   // In parsing the first assignment expression in conditional
   // expressions we always accept the 'in' keyword; see ECMA-262,
   // section 11.12, page 58.
-  ExpressionT left = ParseAssignmentExpression(true, classifier, CHECK_OK);
-  impl()->RewriteNonPattern(classifier, CHECK_OK);
+  ExpressionT left = ParseAssignmentExpression(true, CHECK_OK);
+  impl()->RewriteNonPattern(CHECK_OK);
   Expect(Token::COLON, CHECK_OK);
-  ExpressionT right =
-      ParseAssignmentExpression(accept_IN, classifier, CHECK_OK);
-  impl()->RewriteNonPattern(classifier, CHECK_OK);
+  ExpressionT right = ParseAssignmentExpression(accept_IN, CHECK_OK);
+  impl()->RewriteNonPattern(CHECK_OK);
   return factory()->NewConditional(expression, left, right, pos);
 }
 
@@ -2675,30 +2885,24 @@
 // Precedence >= 4
 template <typename Impl>
 typename ParserBase<Impl>::ExpressionT ParserBase<Impl>::ParseBinaryExpression(
-    int prec, bool accept_IN, ExpressionClassifier* classifier, bool* ok) {
+    int prec, bool accept_IN, bool* ok) {
   DCHECK(prec >= 4);
-  ExpressionT x = this->ParseUnaryExpression(classifier, CHECK_OK);
+  ExpressionT x = ParseUnaryExpression(CHECK_OK);
   for (int prec1 = Precedence(peek(), accept_IN); prec1 >= prec; prec1--) {
     // prec1 >= 4
     while (Precedence(peek(), accept_IN) == prec1) {
-      CheckNoTailCallExpressions(classifier, CHECK_OK);
-      impl()->RewriteNonPattern(classifier, CHECK_OK);
-      BindingPatternUnexpectedToken(classifier);
-      ArrowFormalParametersUnexpectedToken(classifier);
+      impl()->RewriteNonPattern(CHECK_OK);
+      BindingPatternUnexpectedToken();
+      ArrowFormalParametersUnexpectedToken();
       Token::Value op = Next();
       int pos = position();
 
       const bool is_right_associative = op == Token::EXP;
       const int next_prec = is_right_associative ? prec1 : prec1 + 1;
-      ExpressionT y =
-          ParseBinaryExpression(next_prec, accept_IN, classifier, CHECK_OK);
-      if (op != Token::OR && op != Token::AND) {
-        CheckNoTailCallExpressions(classifier, CHECK_OK);
-      }
-      impl()->RewriteNonPattern(classifier, CHECK_OK);
+      ExpressionT y = ParseBinaryExpression(next_prec, accept_IN, CHECK_OK);
+      impl()->RewriteNonPattern(CHECK_OK);
 
-      if (this->ShortcutNumericLiteralBinaryExpression(&x, y, op, pos,
-                                                       factory())) {
+      if (impl()->ShortcutNumericLiteralBinaryExpression(&x, y, op, pos)) {
         continue;
       }
 
@@ -2731,7 +2935,7 @@
 
 template <typename Impl>
 typename ParserBase<Impl>::ExpressionT ParserBase<Impl>::ParseUnaryExpression(
-    ExpressionClassifier* classifier, bool* ok) {
+    bool* ok) {
   // UnaryExpression ::
   //   PostfixExpression
   //   'delete' UnaryExpression
@@ -2747,44 +2951,42 @@
 
   Token::Value op = peek();
   if (Token::IsUnaryOp(op)) {
-    BindingPatternUnexpectedToken(classifier);
-    ArrowFormalParametersUnexpectedToken(classifier);
+    BindingPatternUnexpectedToken();
+    ArrowFormalParametersUnexpectedToken();
 
     op = Next();
     int pos = position();
-    ExpressionT expression = ParseUnaryExpression(classifier, CHECK_OK);
-    CheckNoTailCallExpressions(classifier, CHECK_OK);
-    impl()->RewriteNonPattern(classifier, CHECK_OK);
+    ExpressionT expression = ParseUnaryExpression(CHECK_OK);
+    impl()->RewriteNonPattern(CHECK_OK);
 
     if (op == Token::DELETE && is_strict(language_mode())) {
-      if (this->IsIdentifier(expression)) {
+      if (impl()->IsIdentifier(expression)) {
         // "delete identifier" is a syntax error in strict mode.
         ReportMessage(MessageTemplate::kStrictDelete);
         *ok = false;
-        return this->EmptyExpression();
+        return impl()->EmptyExpression();
       }
     }
 
     if (peek() == Token::EXP) {
       ReportUnexpectedToken(Next());
       *ok = false;
-      return this->EmptyExpression();
+      return impl()->EmptyExpression();
     }
 
-    // Allow Traits do rewrite the expression.
-    return this->BuildUnaryExpression(expression, op, pos, factory());
+    // Allow the parser's implementation to rewrite the expression.
+    return impl()->BuildUnaryExpression(expression, op, pos);
   } else if (Token::IsCountOp(op)) {
-    BindingPatternUnexpectedToken(classifier);
-    ArrowFormalParametersUnexpectedToken(classifier);
+    BindingPatternUnexpectedToken();
+    ArrowFormalParametersUnexpectedToken();
     op = Next();
     int beg_pos = peek_position();
-    ExpressionT expression = this->ParseUnaryExpression(classifier, CHECK_OK);
-    CheckNoTailCallExpressions(classifier, CHECK_OK);
-    expression = this->CheckAndRewriteReferenceExpression(
+    ExpressionT expression = ParseUnaryExpression(CHECK_OK);
+    expression = CheckAndRewriteReferenceExpression(
         expression, beg_pos, scanner()->location().end_pos,
         MessageTemplate::kInvalidLhsInPrefixOp, CHECK_OK);
-    this->MarkExpressionAsAssigned(expression);
-    impl()->RewriteNonPattern(classifier, CHECK_OK);
+    expression = impl()->MarkExpressionAsAssigned(expression);
+    impl()->RewriteNonPattern(CHECK_OK);
 
     return factory()->NewCountOperation(op,
                                         true /* prefix */,
@@ -2792,41 +2994,39 @@
                                         position());
 
   } else if (is_async_function() && peek() == Token::AWAIT) {
-    classifier->RecordFormalParameterInitializerError(
+    classifier()->RecordFormalParameterInitializerError(
         scanner()->peek_location(),
         MessageTemplate::kAwaitExpressionFormalParameter);
 
     int await_pos = peek_position();
     Consume(Token::AWAIT);
 
-    ExpressionT value = ParseUnaryExpression(classifier, CHECK_OK);
+    ExpressionT value = ParseUnaryExpression(CHECK_OK);
 
     return impl()->RewriteAwaitExpression(value, await_pos);
   } else {
-    return this->ParsePostfixExpression(classifier, ok);
+    return ParsePostfixExpression(ok);
   }
 }
 
 template <typename Impl>
 typename ParserBase<Impl>::ExpressionT ParserBase<Impl>::ParsePostfixExpression(
-    ExpressionClassifier* classifier, bool* ok) {
+    bool* ok) {
   // PostfixExpression ::
   //   LeftHandSideExpression ('++' | '--')?
 
   int lhs_beg_pos = peek_position();
-  ExpressionT expression =
-      this->ParseLeftHandSideExpression(classifier, CHECK_OK);
+  ExpressionT expression = ParseLeftHandSideExpression(CHECK_OK);
   if (!scanner()->HasAnyLineTerminatorBeforeNext() &&
       Token::IsCountOp(peek())) {
-    CheckNoTailCallExpressions(classifier, CHECK_OK);
-    BindingPatternUnexpectedToken(classifier);
-    ArrowFormalParametersUnexpectedToken(classifier);
+    BindingPatternUnexpectedToken();
+    ArrowFormalParametersUnexpectedToken();
 
-    expression = this->CheckAndRewriteReferenceExpression(
+    expression = CheckAndRewriteReferenceExpression(
         expression, lhs_beg_pos, scanner()->location().end_pos,
         MessageTemplate::kInvalidLhsInPostfixOp, CHECK_OK);
-    expression = this->MarkExpressionAsAssigned(expression);
-    impl()->RewriteNonPattern(classifier, CHECK_OK);
+    expression = impl()->MarkExpressionAsAssigned(expression);
+    impl()->RewriteNonPattern(CHECK_OK);
 
     Token::Value next = Next();
     expression =
@@ -2840,40 +3040,33 @@
 
 template <typename Impl>
 typename ParserBase<Impl>::ExpressionT
-ParserBase<Impl>::ParseLeftHandSideExpression(ExpressionClassifier* classifier,
-                                              bool* ok) {
+ParserBase<Impl>::ParseLeftHandSideExpression(bool* ok) {
   // LeftHandSideExpression ::
   //   (NewExpression | MemberExpression) ...
 
-  if (FLAG_harmony_explicit_tailcalls && peek() == Token::CONTINUE) {
-    return this->ParseTailCallExpression(classifier, ok);
-  }
-
   bool is_async = false;
-  ExpressionT result = this->ParseMemberWithNewPrefixesExpression(
-      classifier, &is_async, CHECK_OK);
+  ExpressionT result =
+      ParseMemberWithNewPrefixesExpression(&is_async, CHECK_OK);
 
   while (true) {
     switch (peek()) {
       case Token::LBRACK: {
-        CheckNoTailCallExpressions(classifier, CHECK_OK);
-        impl()->RewriteNonPattern(classifier, CHECK_OK);
-        BindingPatternUnexpectedToken(classifier);
-        ArrowFormalParametersUnexpectedToken(classifier);
+        impl()->RewriteNonPattern(CHECK_OK);
+        BindingPatternUnexpectedToken();
+        ArrowFormalParametersUnexpectedToken();
         Consume(Token::LBRACK);
         int pos = position();
-        ExpressionT index = ParseExpression(true, classifier, CHECK_OK);
-        impl()->RewriteNonPattern(classifier, CHECK_OK);
+        ExpressionT index = ParseExpressionCoverGrammar(true, CHECK_OK);
+        impl()->RewriteNonPattern(CHECK_OK);
         result = factory()->NewProperty(result, index, pos);
         Expect(Token::RBRACK, CHECK_OK);
         break;
       }
 
       case Token::LPAREN: {
-        CheckNoTailCallExpressions(classifier, CHECK_OK);
         int pos;
-        impl()->RewriteNonPattern(classifier, CHECK_OK);
-        BindingPatternUnexpectedToken(classifier);
+        impl()->RewriteNonPattern(CHECK_OK);
+        BindingPatternUnexpectedToken();
         if (scanner()->current_token() == Token::IDENTIFIER ||
             scanner()->current_token() == Token::SUPER ||
             scanner()->current_token() == Token::ASYNC) {
@@ -2895,36 +3088,36 @@
           }
         }
         Scanner::Location spread_pos;
-        typename Traits::Type::ExpressionList args;
-        if (V8_UNLIKELY(is_async && this->IsIdentifier(result))) {
+        ExpressionListT args;
+        if (V8_UNLIKELY(is_async && impl()->IsIdentifier(result))) {
           ExpressionClassifier async_classifier(this);
-          args = ParseArguments(&spread_pos, true, &async_classifier, CHECK_OK);
+          args = ParseArguments(&spread_pos, true, CHECK_OK);
           if (peek() == Token::ARROW) {
             if (fni_) {
               fni_->RemoveAsyncKeywordFromEnd();
             }
-            ValidateBindingPattern(&async_classifier, CHECK_OK);
-            if (!async_classifier.is_valid_async_arrow_formal_parameters()) {
+            ValidateBindingPattern(CHECK_OK);
+            ValidateFormalParameterInitializer(CHECK_OK);
+            if (!classifier()->is_valid_async_arrow_formal_parameters()) {
               ReportClassifierError(
-                  async_classifier.async_arrow_formal_parameters_error());
+                  classifier()->async_arrow_formal_parameters_error());
               *ok = false;
-              return this->EmptyExpression();
+              return impl()->EmptyExpression();
             }
             if (args->length()) {
               // async ( Arguments ) => ...
-              return Traits::ExpressionListToExpression(args);
+              return impl()->ExpressionListToExpression(args);
             }
             // async () => ...
             return factory()->NewEmptyParentheses(pos);
           } else {
-            classifier->Accumulate(&async_classifier,
-                                   ExpressionClassifier::AllProductions);
+            impl()->AccumulateFormalParameterContainmentErrors();
           }
         } else {
-          args = ParseArguments(&spread_pos, false, classifier, CHECK_OK);
+          args = ParseArguments(&spread_pos, false, CHECK_OK);
         }
 
-        ArrowFormalParametersUnexpectedToken(classifier);
+        ArrowFormalParametersUnexpectedToken();
 
         // Keep track of eval() calls since they disable all local variable
         // optimizations.
@@ -2947,7 +3140,8 @@
         // Explicit calls to the super constructor using super() perform an
         // implicit binding assignment to the 'this' variable.
         if (is_super_call) {
-          ExpressionT this_expr = this->ThisExpression(pos);
+          result = impl()->RewriteSuperCall(result);
+          ExpressionT this_expr = impl()->ThisExpression(pos);
           result =
               factory()->NewAssignment(Token::INIT, this_expr, result, pos);
         }
@@ -2957,26 +3151,24 @@
       }
 
       case Token::PERIOD: {
-        CheckNoTailCallExpressions(classifier, CHECK_OK);
-        impl()->RewriteNonPattern(classifier, CHECK_OK);
-        BindingPatternUnexpectedToken(classifier);
-        ArrowFormalParametersUnexpectedToken(classifier);
+        impl()->RewriteNonPattern(CHECK_OK);
+        BindingPatternUnexpectedToken();
+        ArrowFormalParametersUnexpectedToken();
         Consume(Token::PERIOD);
         int pos = position();
         IdentifierT name = ParseIdentifierName(CHECK_OK);
         result = factory()->NewProperty(
             result, factory()->NewStringLiteral(name, pos), pos);
-        if (fni_ != NULL) this->PushLiteralName(fni_, name);
+        impl()->PushLiteralName(name);
         break;
       }
 
       case Token::TEMPLATE_SPAN:
       case Token::TEMPLATE_TAIL: {
-        CheckNoTailCallExpressions(classifier, CHECK_OK);
-        impl()->RewriteNonPattern(classifier, CHECK_OK);
-        BindingPatternUnexpectedToken(classifier);
-        ArrowFormalParametersUnexpectedToken(classifier);
-        result = ParseTemplateLiteral(result, position(), classifier, CHECK_OK);
+        impl()->RewriteNonPattern(CHECK_OK);
+        BindingPatternUnexpectedToken();
+        ArrowFormalParametersUnexpectedToken();
+        result = ParseTemplateLiteral(result, position(), CHECK_OK);
         break;
       }
 
@@ -2988,8 +3180,8 @@
 
 template <typename Impl>
 typename ParserBase<Impl>::ExpressionT
-ParserBase<Impl>::ParseMemberWithNewPrefixesExpression(
-    ExpressionClassifier* classifier, bool* is_async, bool* ok) {
+ParserBase<Impl>::ParseMemberWithNewPrefixesExpression(bool* is_async,
+                                                       bool* ok) {
   // NewExpression ::
   //   ('new')+ MemberExpression
   //
@@ -3011,8 +3203,8 @@
   // new new foo().bar().baz means (new (new foo()).bar()).baz
 
   if (peek() == Token::NEW) {
-    BindingPatternUnexpectedToken(classifier);
-    ArrowFormalParametersUnexpectedToken(classifier);
+    BindingPatternUnexpectedToken();
+    ArrowFormalParametersUnexpectedToken();
     Consume(Token::NEW);
     int new_pos = position();
     ExpressionT result;
@@ -3022,15 +3214,13 @@
     } else if (peek() == Token::PERIOD) {
       return ParseNewTargetExpression(CHECK_OK);
     } else {
-      result = this->ParseMemberWithNewPrefixesExpression(classifier, is_async,
-                                                          CHECK_OK);
+      result = ParseMemberWithNewPrefixesExpression(is_async, CHECK_OK);
     }
-    impl()->RewriteNonPattern(classifier, CHECK_OK);
+    impl()->RewriteNonPattern(CHECK_OK);
     if (peek() == Token::LPAREN) {
       // NewExpression with arguments.
       Scanner::Location spread_pos;
-      typename Traits::Type::ExpressionList args =
-          this->ParseArguments(&spread_pos, classifier, CHECK_OK);
+      ExpressionListT args = ParseArguments(&spread_pos, CHECK_OK);
 
       if (spread_pos.IsValid()) {
         args = impl()->PrepareSpreadArguments(args);
@@ -3039,21 +3229,19 @@
         result = factory()->NewCallNew(result, args, new_pos);
       }
       // The expression can still continue with . or [ after the arguments.
-      result = this->ParseMemberExpressionContinuation(result, is_async,
-                                                       classifier, CHECK_OK);
+      result = ParseMemberExpressionContinuation(result, is_async, CHECK_OK);
       return result;
     }
     // NewExpression without arguments.
-    return factory()->NewCallNew(result, this->NewExpressionList(0, zone_),
-                                 new_pos);
+    return factory()->NewCallNew(result, impl()->NewExpressionList(0), new_pos);
   }
   // No 'new' or 'super' keyword.
-  return this->ParseMemberExpression(classifier, is_async, ok);
+  return ParseMemberExpression(is_async, ok);
 }
 
 template <typename Impl>
 typename ParserBase<Impl>::ExpressionT ParserBase<Impl>::ParseMemberExpression(
-    ExpressionClassifier* classifier, bool* is_async, bool* ok) {
+    bool* is_async, bool* ok) {
   // MemberExpression ::
   //   (PrimaryExpression | FunctionLiteral | ClassLiteral)
   //     ('[' Expression ']' | '.' Identifier | Arguments | TemplateLiteral)*
@@ -3065,8 +3253,8 @@
   // Parse the initial primary or function expression.
   ExpressionT result;
   if (peek() == Token::FUNCTION) {
-    BindingPatternUnexpectedToken(classifier);
-    ArrowFormalParametersUnexpectedToken(classifier);
+    BindingPatternUnexpectedToken();
+    ArrowFormalParametersUnexpectedToken();
 
     Consume(Token::FUNCTION);
     int function_token_position = position();
@@ -3078,19 +3266,19 @@
 
       if (!is_generator()) {
         // TODO(neis): allow escaping into closures?
-        ReportMessageAt(scanner()->location(),
-                        MessageTemplate::kUnexpectedFunctionSent);
+        impl()->ReportMessageAt(scanner()->location(),
+                                MessageTemplate::kUnexpectedFunctionSent);
         *ok = false;
-        return this->EmptyExpression();
+        return impl()->EmptyExpression();
       }
 
-      return this->FunctionSentExpression(factory(), pos);
+      return impl()->FunctionSentExpression(pos);
     }
 
     FunctionKind function_kind = Check(Token::MUL)
                                      ? FunctionKind::kGeneratorFunction
                                      : FunctionKind::kNormalFunction;
-    IdentifierT name = this->EmptyIdentifier();
+    IdentifierT name = impl()->EmptyIdentifier();
     bool is_strict_reserved_name = false;
     Scanner::Location function_name_location = Scanner::Location::invalid();
     FunctionLiteral::FunctionType function_type =
@@ -3111,11 +3299,10 @@
     const bool is_new = false;
     result = ParseSuperExpression(is_new, CHECK_OK);
   } else {
-    result = ParsePrimaryExpression(classifier, is_async, CHECK_OK);
+    result = ParsePrimaryExpression(is_async, CHECK_OK);
   }
 
-  result =
-      ParseMemberExpressionContinuation(result, is_async, classifier, CHECK_OK);
+  result = ParseMemberExpressionContinuation(result, is_async, CHECK_OK);
   return result;
 }
 
@@ -3131,20 +3318,21 @@
       IsClassConstructor(kind)) {
     if (peek() == Token::PERIOD || peek() == Token::LBRACK) {
       scope->RecordSuperPropertyUsage();
-      return this->NewSuperPropertyReference(factory(), pos);
+      return impl()->NewSuperPropertyReference(pos);
     }
     // new super() is never allowed.
     // super() is only allowed in derived constructor
     if (!is_new && peek() == Token::LPAREN && IsSubclassConstructor(kind)) {
       // TODO(rossberg): This might not be the correct FunctionState for the
       // method here.
-      return this->NewSuperCallReference(factory(), pos);
+      return impl()->NewSuperCallReference(pos);
     }
   }
 
-  ReportMessageAt(scanner()->location(), MessageTemplate::kUnexpectedSuper);
+  impl()->ReportMessageAt(scanner()->location(),
+                          MessageTemplate::kUnexpectedSuper);
   *ok = false;
-  return this->EmptyExpression();
+  return impl()->EmptyExpression();
 }
 
 template <typename Impl>
@@ -3154,7 +3342,7 @@
   Consume(Token::PERIOD);
   ExpectContextualKeyword(property_name, CHECK_OK_CUSTOM(Void));
   if (scanner()->literal_contains_escapes()) {
-    Traits::ReportMessageAt(
+    impl()->ReportMessageAt(
         Scanner::Location(pos, scanner()->location().end_pos),
         MessageTemplate::kInvalidEscapedMetaProperty, full_name);
     *ok = false;
@@ -3168,63 +3356,58 @@
   ExpectMetaProperty(CStrVector("target"), "new.target", pos, CHECK_OK);
 
   if (!GetReceiverScope()->is_function_scope()) {
-    ReportMessageAt(scanner()->location(),
-                    MessageTemplate::kUnexpectedNewTarget);
+    impl()->ReportMessageAt(scanner()->location(),
+                            MessageTemplate::kUnexpectedNewTarget);
     *ok = false;
-    return this->EmptyExpression();
+    return impl()->EmptyExpression();
   }
 
-  return this->NewTargetExpression(pos);
+  return impl()->NewTargetExpression(pos);
 }
 
 template <typename Impl>
 typename ParserBase<Impl>::ExpressionT
-ParserBase<Impl>::ParseMemberExpressionContinuation(
-    ExpressionT expression, bool* is_async, ExpressionClassifier* classifier,
-    bool* ok) {
+ParserBase<Impl>::ParseMemberExpressionContinuation(ExpressionT expression,
+                                                    bool* is_async, bool* ok) {
   // Parses this part of MemberExpression:
   // ('[' Expression ']' | '.' Identifier | TemplateLiteral)*
   while (true) {
     switch (peek()) {
       case Token::LBRACK: {
         *is_async = false;
-        impl()->RewriteNonPattern(classifier, CHECK_OK);
-        BindingPatternUnexpectedToken(classifier);
-        ArrowFormalParametersUnexpectedToken(classifier);
+        impl()->RewriteNonPattern(CHECK_OK);
+        BindingPatternUnexpectedToken();
+        ArrowFormalParametersUnexpectedToken();
 
         Consume(Token::LBRACK);
         int pos = position();
-        ExpressionT index = this->ParseExpression(true, classifier, CHECK_OK);
-        impl()->RewriteNonPattern(classifier, CHECK_OK);
+        ExpressionT index = ParseExpressionCoverGrammar(true, CHECK_OK);
+        impl()->RewriteNonPattern(CHECK_OK);
         expression = factory()->NewProperty(expression, index, pos);
-        if (fni_ != NULL) {
-          this->PushPropertyName(fni_, index);
-        }
+        impl()->PushPropertyName(index);
         Expect(Token::RBRACK, CHECK_OK);
         break;
       }
       case Token::PERIOD: {
         *is_async = false;
-        impl()->RewriteNonPattern(classifier, CHECK_OK);
-        BindingPatternUnexpectedToken(classifier);
-        ArrowFormalParametersUnexpectedToken(classifier);
+        impl()->RewriteNonPattern(CHECK_OK);
+        BindingPatternUnexpectedToken();
+        ArrowFormalParametersUnexpectedToken();
 
         Consume(Token::PERIOD);
         int pos = position();
         IdentifierT name = ParseIdentifierName(CHECK_OK);
         expression = factory()->NewProperty(
             expression, factory()->NewStringLiteral(name, pos), pos);
-        if (fni_ != NULL) {
-          this->PushLiteralName(fni_, name);
-        }
+        impl()->PushLiteralName(name);
         break;
       }
       case Token::TEMPLATE_SPAN:
       case Token::TEMPLATE_TAIL: {
         *is_async = false;
-        impl()->RewriteNonPattern(classifier, CHECK_OK);
-        BindingPatternUnexpectedToken(classifier);
-        ArrowFormalParametersUnexpectedToken(classifier);
+        impl()->RewriteNonPattern(CHECK_OK);
+        BindingPatternUnexpectedToken();
+        ArrowFormalParametersUnexpectedToken();
         int pos;
         if (scanner()->current_token() == Token::IDENTIFIER) {
           pos = position();
@@ -3236,62 +3419,58 @@
             expression->AsFunctionLiteral()->set_should_eager_compile();
           }
         }
-        expression =
-            ParseTemplateLiteral(expression, pos, classifier, CHECK_OK);
+        expression = ParseTemplateLiteral(expression, pos, CHECK_OK);
         break;
       }
       case Token::ILLEGAL: {
         ReportUnexpectedTokenAt(scanner()->peek_location(), Token::ILLEGAL);
         *ok = false;
-        return this->EmptyExpression();
+        return impl()->EmptyExpression();
       }
       default:
         return expression;
     }
   }
   DCHECK(false);
-  return this->EmptyExpression();
+  return impl()->EmptyExpression();
 }
 
 template <typename Impl>
 void ParserBase<Impl>::ParseFormalParameter(FormalParametersT* parameters,
-                                            ExpressionClassifier* classifier,
                                             bool* ok) {
   // FormalParameter[Yield,GeneratorParameter] :
   //   BindingElement[?Yield, ?GeneratorParameter]
   bool is_rest = parameters->has_rest;
 
-  ExpressionT pattern =
-      ParsePrimaryExpression(classifier, CHECK_OK_CUSTOM(Void));
-  ValidateBindingPattern(classifier, CHECK_OK_CUSTOM(Void));
+  ExpressionT pattern = ParsePrimaryExpression(CHECK_OK_CUSTOM(Void));
+  ValidateBindingPattern(CHECK_OK_CUSTOM(Void));
 
-  if (!Traits::IsIdentifier(pattern)) {
+  if (!impl()->IsIdentifier(pattern)) {
     parameters->is_simple = false;
-    ValidateFormalParameterInitializer(classifier, CHECK_OK_CUSTOM(Void));
-    classifier->RecordNonSimpleParameter();
+    ValidateFormalParameterInitializer(CHECK_OK_CUSTOM(Void));
+    classifier()->RecordNonSimpleParameter();
   }
 
-  ExpressionT initializer = Traits::EmptyExpression();
+  ExpressionT initializer = impl()->EmptyExpression();
   if (!is_rest && Check(Token::ASSIGN)) {
     ExpressionClassifier init_classifier(this);
-    initializer = ParseAssignmentExpression(true, &init_classifier,
-                                            CHECK_OK_CUSTOM(Void));
-    impl()->RewriteNonPattern(&init_classifier, CHECK_OK_CUSTOM(Void));
-    ValidateFormalParameterInitializer(&init_classifier, CHECK_OK_CUSTOM(Void));
+    initializer = ParseAssignmentExpression(true, CHECK_OK_CUSTOM(Void));
+    impl()->RewriteNonPattern(CHECK_OK_CUSTOM(Void));
+    ValidateFormalParameterInitializer(CHECK_OK_CUSTOM(Void));
     parameters->is_simple = false;
-    init_classifier.Discard();
-    classifier->RecordNonSimpleParameter();
+    impl()->Discard();
+    classifier()->RecordNonSimpleParameter();
 
-    Traits::SetFunctionNameFromIdentifierRef(initializer, pattern);
+    impl()->SetFunctionNameFromIdentifierRef(initializer, pattern);
   }
 
-  Traits::AddFormalParameter(parameters, pattern, initializer,
+  impl()->AddFormalParameter(parameters, pattern, initializer,
                              scanner()->location().end_pos, is_rest);
 }
 
 template <typename Impl>
-void ParserBase<Impl>::ParseFormalParameterList(
-    FormalParametersT* parameters, ExpressionClassifier* classifier, bool* ok) {
+void ParserBase<Impl>::ParseFormalParameterList(FormalParametersT* parameters,
+                                                bool* ok) {
   // FormalParameters[Yield] :
   //   [empty]
   //   FunctionRestParameter[?Yield]
@@ -3313,14 +3492,14 @@
         return;
       }
       parameters->has_rest = Check(Token::ELLIPSIS);
-      ParseFormalParameter(parameters, classifier, CHECK_OK_CUSTOM(Void));
+      ParseFormalParameter(parameters, CHECK_OK_CUSTOM(Void));
 
       if (parameters->has_rest) {
         parameters->is_simple = false;
-        classifier->RecordNonSimpleParameter();
+        classifier()->RecordNonSimpleParameter();
         if (peek() == Token::COMMA) {
-          ReportMessageAt(scanner()->peek_location(),
-                          MessageTemplate::kParamAfterRest);
+          impl()->ReportMessageAt(scanner()->peek_location(),
+                                  MessageTemplate::kParamAfterRest);
           *ok = false;
           return;
         }
@@ -3336,11 +3515,321 @@
 
   for (int i = 0; i < parameters->Arity(); ++i) {
     auto parameter = parameters->at(i);
-    Traits::DeclareFormalParameter(parameters->scope, parameter, classifier);
+    impl()->DeclareFormalParameter(parameters->scope, parameter);
   }
 }
 
 template <typename Impl>
+typename ParserBase<Impl>::BlockT ParserBase<Impl>::ParseVariableDeclarations(
+    VariableDeclarationContext var_context,
+    DeclarationParsingResult* parsing_result,
+    ZoneList<const AstRawString*>* names, bool* ok) {
+  // VariableDeclarations ::
+  //   ('var' | 'const' | 'let') (Identifier ('=' AssignmentExpression)?)+[',']
+  //
+  // ES6:
+  // FIXME(marja, nikolaos): Add an up-to-date comment about ES6 variable
+  // declaration syntax.
+
+  DCHECK_NOT_NULL(parsing_result);
+  parsing_result->descriptor.declaration_kind = DeclarationDescriptor::NORMAL;
+  parsing_result->descriptor.declaration_pos = peek_position();
+  parsing_result->descriptor.initialization_pos = peek_position();
+
+  BlockT init_block = impl()->NullBlock();
+  if (var_context != kForStatement) {
+    init_block = factory()->NewBlock(
+        nullptr, 1, true, parsing_result->descriptor.declaration_pos);
+  }
+
+  switch (peek()) {
+    case Token::VAR:
+      parsing_result->descriptor.mode = VAR;
+      Consume(Token::VAR);
+      break;
+    case Token::CONST:
+      Consume(Token::CONST);
+      DCHECK(var_context != kStatement);
+      parsing_result->descriptor.mode = CONST;
+      break;
+    case Token::LET:
+      Consume(Token::LET);
+      DCHECK(var_context != kStatement);
+      parsing_result->descriptor.mode = LET;
+      break;
+    default:
+      UNREACHABLE();  // by current callers
+      break;
+  }
+
+  parsing_result->descriptor.scope = scope();
+  parsing_result->descriptor.hoist_scope = nullptr;
+
+  // The scope of a var/const declared variable anywhere inside a function
+  // is the entire function (ECMA-262, 3rd, 10.1.3, and 12.2). The scope
+  // of a let declared variable is the scope of the immediately enclosing
+  // block.
+  int bindings_start = peek_position();
+  do {
+    // Parse binding pattern.
+    FuncNameInferrer::State fni_state(fni_);
+
+    ExpressionT pattern = impl()->EmptyExpression();
+    int decl_pos = peek_position();
+    {
+      ExpressionClassifier pattern_classifier(this);
+      pattern = ParsePrimaryExpression(CHECK_OK_CUSTOM(NullBlock));
+
+      ValidateBindingPattern(CHECK_OK_CUSTOM(NullBlock));
+      if (IsLexicalVariableMode(parsing_result->descriptor.mode)) {
+        ValidateLetPattern(CHECK_OK_CUSTOM(NullBlock));
+      }
+    }
+
+    Scanner::Location variable_loc = scanner()->location();
+    bool single_name = impl()->IsIdentifier(pattern);
+
+    if (single_name) {
+      impl()->PushVariableName(impl()->AsIdentifier(pattern));
+    }
+
+    ExpressionT value = impl()->EmptyExpression();
+    int initializer_position = kNoSourcePosition;
+    if (Check(Token::ASSIGN)) {
+      ExpressionClassifier classifier(this);
+      value = ParseAssignmentExpression(var_context != kForStatement,
+                                        CHECK_OK_CUSTOM(NullBlock));
+      impl()->RewriteNonPattern(CHECK_OK_CUSTOM(NullBlock));
+      variable_loc.end_pos = scanner()->location().end_pos;
+
+      if (!parsing_result->first_initializer_loc.IsValid()) {
+        parsing_result->first_initializer_loc = variable_loc;
+      }
+
+      // Don't infer if it is "a = function(){...}();"-like expression.
+      if (single_name && fni_ != nullptr) {
+        if (!value->IsCall() && !value->IsCallNew()) {
+          fni_->Infer();
+        } else {
+          fni_->RemoveLastFunction();
+        }
+      }
+
+      impl()->SetFunctionNameFromIdentifierRef(value, pattern);
+
+      // End position of the initializer is after the assignment expression.
+      initializer_position = scanner()->location().end_pos;
+    } else {
+      if (var_context != kForStatement || !PeekInOrOf()) {
+        // ES6 'const' and binding patterns require initializers.
+        if (parsing_result->descriptor.mode == CONST ||
+            !impl()->IsIdentifier(pattern)) {
+          impl()->ReportMessageAt(
+              Scanner::Location(decl_pos, scanner()->location().end_pos),
+              MessageTemplate::kDeclarationMissingInitializer,
+              !impl()->IsIdentifier(pattern) ? "destructuring" : "const");
+          *ok = false;
+          return impl()->NullBlock();
+        }
+        // 'let x' initializes 'x' to undefined.
+        if (parsing_result->descriptor.mode == LET) {
+          value = impl()->GetLiteralUndefined(position());
+        }
+      }
+
+      // End position of the initializer is after the variable.
+      initializer_position = position();
+    }
+
+    typename DeclarationParsingResult::Declaration decl(
+        pattern, initializer_position, value);
+    if (var_context == kForStatement) {
+      // Save the declaration for further handling in ParseForStatement.
+      parsing_result->declarations.Add(decl);
+    } else {
+      // Immediately declare the variable otherwise. This avoids O(N^2)
+      // behavior (where N is the number of variables in a single
+      // declaration) in the PatternRewriter having to do with removing
+      // and adding VariableProxies to the Scope (see bug 4699).
+      impl()->DeclareAndInitializeVariables(init_block,
+                                            &parsing_result->descriptor, &decl,
+                                            names, CHECK_OK_CUSTOM(NullBlock));
+    }
+  } while (Check(Token::COMMA));
+
+  parsing_result->bindings_loc =
+      Scanner::Location(bindings_start, scanner()->location().end_pos);
+
+  DCHECK(*ok);
+  return init_block;
+}
+
+template <typename Impl>
+typename ParserBase<Impl>::StatementT
+ParserBase<Impl>::ParseFunctionDeclaration(bool* ok) {
+  Consume(Token::FUNCTION);
+  int pos = position();
+  ParseFunctionFlags flags = ParseFunctionFlags::kIsNormal;
+  if (Check(Token::MUL)) {
+    flags |= ParseFunctionFlags::kIsGenerator;
+    if (allow_harmony_restrictive_declarations()) {
+      impl()->ReportMessageAt(scanner()->location(),
+                              MessageTemplate::kGeneratorInLegacyContext);
+      *ok = false;
+      return impl()->NullStatement();
+    }
+  }
+  return ParseHoistableDeclaration(pos, flags, nullptr, false, ok);
+}
+
+template <typename Impl>
+typename ParserBase<Impl>::StatementT
+ParserBase<Impl>::ParseHoistableDeclaration(
+    ZoneList<const AstRawString*>* names, bool default_export, bool* ok) {
+  Expect(Token::FUNCTION, CHECK_OK_CUSTOM(NullStatement));
+  int pos = position();
+  ParseFunctionFlags flags = ParseFunctionFlags::kIsNormal;
+  if (Check(Token::MUL)) {
+    flags |= ParseFunctionFlags::kIsGenerator;
+  }
+  return ParseHoistableDeclaration(pos, flags, names, default_export, ok);
+}
+
+template <typename Impl>
+typename ParserBase<Impl>::StatementT
+ParserBase<Impl>::ParseHoistableDeclaration(
+    int pos, ParseFunctionFlags flags, ZoneList<const AstRawString*>* names,
+    bool default_export, bool* ok) {
+  // FunctionDeclaration ::
+  //   'function' Identifier '(' FormalParameters ')' '{' FunctionBody '}'
+  //   'function' '(' FormalParameters ')' '{' FunctionBody '}'
+  // GeneratorDeclaration ::
+  //   'function' '*' Identifier '(' FormalParameters ')' '{' FunctionBody '}'
+  //   'function' '*' '(' FormalParameters ')' '{' FunctionBody '}'
+  //
+  // The anonymous forms are allowed iff [default_export] is true.
+  //
+  // 'function' and '*' (if present) have been consumed by the caller.
+
+  const bool is_generator = flags & ParseFunctionFlags::kIsGenerator;
+  const bool is_async = flags & ParseFunctionFlags::kIsAsync;
+  DCHECK(!is_generator || !is_async);
+
+  IdentifierT name;
+  FunctionNameValidity name_validity;
+  IdentifierT variable_name;
+  if (default_export && peek() == Token::LPAREN) {
+    impl()->GetDefaultStrings(&name, &variable_name);
+    name_validity = kSkipFunctionNameCheck;
+  } else {
+    bool is_strict_reserved;
+    name = ParseIdentifierOrStrictReservedWord(&is_strict_reserved,
+                                               CHECK_OK_CUSTOM(NullStatement));
+    name_validity = is_strict_reserved ? kFunctionNameIsStrictReserved
+                                       : kFunctionNameValidityUnknown;
+    variable_name = name;
+  }
+
+  FuncNameInferrer::State fni_state(fni_);
+  impl()->PushEnclosingName(name);
+  FunctionLiteralT function = impl()->ParseFunctionLiteral(
+      name, scanner()->location(), name_validity,
+      is_generator ? FunctionKind::kGeneratorFunction
+                   : is_async ? FunctionKind::kAsyncFunction
+                              : FunctionKind::kNormalFunction,
+      pos, FunctionLiteral::kDeclaration, language_mode(),
+      CHECK_OK_CUSTOM(NullStatement));
+
+  return impl()->DeclareFunction(variable_name, function, pos, is_generator,
+                                 is_async, names, ok);
+}
+
+template <typename Impl>
+typename ParserBase<Impl>::StatementT ParserBase<Impl>::ParseClassDeclaration(
+    ZoneList<const AstRawString*>* names, bool default_export, bool* ok) {
+  // ClassDeclaration ::
+  //   'class' Identifier ('extends' LeftHandExpression)? '{' ClassBody '}'
+  //   'class' ('extends' LeftHandExpression)? '{' ClassBody '}'
+  //
+  // The anonymous form is allowed iff [default_export] is true.
+  //
+  // 'class' is expected to be consumed by the caller.
+  //
+  // A ClassDeclaration
+  //
+  //   class C { ... }
+  //
+  // has the same semantics as:
+  //
+  //   let C = class C { ... };
+  //
+  // so rewrite it as such.
+
+  int class_token_pos = position();
+  IdentifierT name = impl()->EmptyIdentifier();
+  bool is_strict_reserved = false;
+  IdentifierT variable_name = impl()->EmptyIdentifier();
+  if (default_export && (peek() == Token::EXTENDS || peek() == Token::LBRACE)) {
+    impl()->GetDefaultStrings(&name, &variable_name);
+  } else {
+    name = ParseIdentifierOrStrictReservedWord(&is_strict_reserved,
+                                               CHECK_OK_CUSTOM(NullStatement));
+    variable_name = name;
+  }
+
+  ExpressionClassifier no_classifier(this);
+  ExpressionT value =
+      ParseClassLiteral(name, scanner()->location(), is_strict_reserved,
+                        class_token_pos, CHECK_OK_CUSTOM(NullStatement));
+  int end_pos = position();
+  return impl()->DeclareClass(variable_name, value, names, class_token_pos,
+                              end_pos, ok);
+}
+
+// Language extension which is only enabled for source files loaded
+// through the API's extension mechanism.  A native function
+// declaration is resolved by looking up the function through a
+// callback provided by the extension.
+template <typename Impl>
+typename ParserBase<Impl>::StatementT ParserBase<Impl>::ParseNativeDeclaration(
+    bool* ok) {
+  int pos = peek_position();
+  Expect(Token::FUNCTION, CHECK_OK_CUSTOM(NullStatement));
+  // Allow "eval" or "arguments" for backward compatibility.
+  IdentifierT name = ParseIdentifier(kAllowRestrictedIdentifiers,
+                                     CHECK_OK_CUSTOM(NullStatement));
+  Expect(Token::LPAREN, CHECK_OK_CUSTOM(NullStatement));
+  if (peek() != Token::RPAREN) {
+    do {
+      ParseIdentifier(kAllowRestrictedIdentifiers,
+                      CHECK_OK_CUSTOM(NullStatement));
+    } while (Check(Token::COMMA));
+  }
+  Expect(Token::RPAREN, CHECK_OK_CUSTOM(NullStatement));
+  Expect(Token::SEMICOLON, CHECK_OK_CUSTOM(NullStatement));
+  return impl()->DeclareNative(name, pos, ok);
+}
+
+template <typename Impl>
+typename ParserBase<Impl>::StatementT
+ParserBase<Impl>::ParseAsyncFunctionDeclaration(
+    ZoneList<const AstRawString*>* names, bool default_export, bool* ok) {
+  // AsyncFunctionDeclaration ::
+  //   async [no LineTerminator here] function BindingIdentifier[Await]
+  //       ( FormalParameters[Await] ) { AsyncFunctionBody }
+  DCHECK_EQ(scanner()->current_token(), Token::ASYNC);
+  int pos = position();
+  if (scanner()->HasAnyLineTerminatorBeforeNext()) {
+    *ok = false;
+    impl()->ReportUnexpectedToken(scanner()->current_token());
+    return impl()->NullStatement();
+  }
+  Expect(Token::FUNCTION, CHECK_OK_CUSTOM(NullStatement));
+  ParseFunctionFlags flags = ParseFunctionFlags::kIsAsync;
+  return ParseHoistableDeclaration(pos, flags, names, default_export, ok);
+}
+
+template <typename Impl>
 void ParserBase<Impl>::CheckArityRestrictions(int param_count,
                                               FunctionKind function_kind,
                                               bool has_rest,
@@ -3348,19 +3837,22 @@
                                               int formals_end_pos, bool* ok) {
   if (IsGetterFunction(function_kind)) {
     if (param_count != 0) {
-      ReportMessageAt(Scanner::Location(formals_start_pos, formals_end_pos),
-                      MessageTemplate::kBadGetterArity);
+      impl()->ReportMessageAt(
+          Scanner::Location(formals_start_pos, formals_end_pos),
+          MessageTemplate::kBadGetterArity);
       *ok = false;
     }
   } else if (IsSetterFunction(function_kind)) {
     if (param_count != 1) {
-      ReportMessageAt(Scanner::Location(formals_start_pos, formals_end_pos),
-                      MessageTemplate::kBadSetterArity);
+      impl()->ReportMessageAt(
+          Scanner::Location(formals_start_pos, formals_end_pos),
+          MessageTemplate::kBadSetterArity);
       *ok = false;
     }
     if (has_rest) {
-      ReportMessageAt(Scanner::Location(formals_start_pos, formals_end_pos),
-                      MessageTemplate::kBadSetterRestParameter);
+      impl()->ReportMessageAt(
+          Scanner::Location(formals_start_pos, formals_end_pos),
+          MessageTemplate::kBadSetterRestParameter);
       *ok = false;
     }
   }
@@ -3412,31 +3904,33 @@
 template <typename Impl>
 typename ParserBase<Impl>::ExpressionT
 ParserBase<Impl>::ParseArrowFunctionLiteral(
-    bool accept_IN, const FormalParametersT& formal_parameters, bool is_async,
-    const ExpressionClassifier& formals_classifier, bool* ok) {
+    bool accept_IN, const FormalParametersT& formal_parameters, bool* ok) {
   if (peek() == Token::ARROW && scanner_->HasAnyLineTerminatorBeforeNext()) {
     // ASI inserts `;` after arrow parameters if a line terminator is found.
     // `=> ...` is never a valid expression, so report as syntax error.
     // If next token is not `=>`, it's a syntax error anyways.
     ReportUnexpectedTokenAt(scanner_->peek_location(), Token::ARROW);
     *ok = false;
-    return this->EmptyExpression();
+    return impl()->EmptyExpression();
   }
 
-  typename Traits::Type::StatementList body;
+  StatementListT body = impl()->NullStatementList();
   int num_parameters = formal_parameters.scope->num_parameters();
   int materialized_literal_count = -1;
   int expected_property_count = -1;
 
-  FunctionKind arrow_kind = is_async ? kAsyncArrowFunction : kArrowFunction;
+  FunctionKind kind = formal_parameters.scope->function_kind();
+  FunctionLiteral::EagerCompileHint eager_compile_hint =
+      FunctionLiteral::kShouldLazyCompile;
+  bool should_be_used_once_hint = false;
   {
     FunctionState function_state(&function_state_, &scope_state_,
-                                 formal_parameters.scope, arrow_kind);
+                                 formal_parameters.scope);
 
     function_state.SkipMaterializedLiterals(
         formal_parameters.materialized_literals_count);
 
-    this->ReindexLiterals(formal_parameters);
+    impl()->ReindexLiterals(formal_parameters);
 
     Expect(Token::ARROW, CHECK_OK);
 
@@ -3444,20 +3938,42 @@
       // Multiple statement body
       Consume(Token::LBRACE);
       DCHECK_EQ(scope(), formal_parameters.scope);
-      bool is_lazily_parsed = (mode() == PARSE_LAZILY &&
-                               formal_parameters.scope->AllowsLazyParsing());
+      bool is_lazily_parsed =
+          (mode() == PARSE_LAZILY &&
+           formal_parameters.scope
+               ->AllowsLazyParsingWithoutUnresolvedVariables());
+      // TODO(marja): consider lazy-parsing inner arrow functions too. is_this
+      // handling in Scope::ResolveVariable needs to change.
       if (is_lazily_parsed) {
-        body = this->NewStatementList(0, zone());
-        impl()->SkipLazyFunctionBody(&materialized_literal_count,
-                                     &expected_property_count, CHECK_OK);
+        Scanner::BookmarkScope bookmark(scanner());
+        bookmark.Set();
+        LazyParsingResult result = impl()->SkipLazyFunctionBody(
+            &materialized_literal_count, &expected_property_count, false, true,
+            CHECK_OK);
+        formal_parameters.scope->ResetAfterPreparsing(
+            ast_value_factory_, result == kLazyParsingAborted);
+
         if (formal_parameters.materialized_literals_count > 0) {
           materialized_literal_count +=
               formal_parameters.materialized_literals_count;
         }
-      } else {
+
+        if (result == kLazyParsingAborted) {
+          bookmark.Apply();
+          // Trigger eager (re-)parsing, just below this block.
+          is_lazily_parsed = false;
+
+          // This is probably an initialization function. Inform the compiler it
+          // should also eager-compile this function, and that we expect it to
+          // be used once.
+          eager_compile_hint = FunctionLiteral::kShouldEagerCompile;
+          should_be_used_once_hint = true;
+        }
+      }
+      if (!is_lazily_parsed) {
         body = impl()->ParseEagerFunctionBody(
-            this->EmptyIdentifier(), kNoSourcePosition, formal_parameters,
-            arrow_kind, FunctionLiteral::kAnonymousExpression, CHECK_OK);
+            impl()->EmptyIdentifier(), kNoSourcePosition, formal_parameters,
+            kind, FunctionLiteral::kAnonymousExpression, CHECK_OK);
         materialized_literal_count =
             function_state.materialized_literal_count();
         expected_property_count = function_state.expected_property_count();
@@ -3469,18 +3985,18 @@
              function_state_->return_expr_context());
       ReturnExprScope allow_tail_calls(
           function_state_, ReturnExprContext::kInsideValidReturnStatement);
-      body = this->NewStatementList(1, zone());
-      this->AddParameterInitializationBlock(formal_parameters, body, is_async,
-                                            CHECK_OK);
+      body = impl()->NewStatementList(1);
+      impl()->AddParameterInitializationBlock(
+          formal_parameters, body, kind == kAsyncArrowFunction, CHECK_OK);
       ExpressionClassifier classifier(this);
-      if (is_async) {
-        impl()->ParseAsyncArrowSingleExpressionBody(body, accept_IN,
-                                                    &classifier, pos, CHECK_OK);
-        impl()->RewriteNonPattern(&classifier, CHECK_OK);
+      if (kind == kAsyncArrowFunction) {
+        ParseAsyncFunctionBody(scope(), body, kAsyncArrowFunction,
+                               FunctionBodyType::kSingleExpression, accept_IN,
+                               pos, CHECK_OK);
+        impl()->RewriteNonPattern(CHECK_OK);
       } else {
-        ExpressionT expression =
-            ParseAssignmentExpression(accept_IN, &classifier, CHECK_OK);
-        impl()->RewriteNonPattern(&classifier, CHECK_OK);
+        ExpressionT expression = ParseAssignmentExpression(accept_IN, CHECK_OK);
+        impl()->RewriteNonPattern(CHECK_OK);
         body->Add(factory()->NewReturnStatement(expression, pos), zone());
         if (allow_tailcalls() && !is_sloppy(language_mode())) {
           // ES6 14.6.1 Static Semantics: IsInTailPosition
@@ -3499,8 +4015,8 @@
     // that duplicates are not allowed.  Of course, the arrow function may
     // itself be strict as well.
     const bool allow_duplicate_parameters = false;
-    this->ValidateFormalParameters(&formals_classifier, language_mode(),
-                                   allow_duplicate_parameters, CHECK_OK);
+    ValidateFormalParameters(language_mode(), allow_duplicate_parameters,
+                             CHECK_OK);
 
     // Validate strict mode.
     if (is_strict(language_mode())) {
@@ -3513,24 +4029,141 @@
   }
 
   FunctionLiteralT function_literal = factory()->NewFunctionLiteral(
-      this->EmptyIdentifierString(), formal_parameters.scope, body,
+      impl()->EmptyIdentifierString(), formal_parameters.scope, body,
       materialized_literal_count, expected_property_count, num_parameters,
       FunctionLiteral::kNoDuplicateParameters,
-      FunctionLiteral::kAnonymousExpression,
-      FunctionLiteral::kShouldLazyCompile, arrow_kind,
+      FunctionLiteral::kAnonymousExpression, eager_compile_hint,
       formal_parameters.scope->start_position());
 
   function_literal->set_function_token_position(
       formal_parameters.scope->start_position());
+  if (should_be_used_once_hint) {
+    function_literal->set_should_be_used_once_hint();
+  }
 
-  if (fni_ != NULL) this->InferFunctionName(fni_, function_literal);
+  impl()->AddFunctionForNameInference(function_literal);
 
   return function_literal;
 }
 
 template <typename Impl>
+typename ParserBase<Impl>::ExpressionT ParserBase<Impl>::ParseClassLiteral(
+    IdentifierT name, Scanner::Location class_name_location,
+    bool name_is_strict_reserved, int class_token_pos, bool* ok) {
+  // All parts of a ClassDeclaration and ClassExpression are strict code.
+  if (name_is_strict_reserved) {
+    impl()->ReportMessageAt(class_name_location,
+                            MessageTemplate::kUnexpectedStrictReserved);
+    *ok = false;
+    return impl()->EmptyExpression();
+  }
+  if (impl()->IsEvalOrArguments(name)) {
+    impl()->ReportMessageAt(class_name_location,
+                            MessageTemplate::kStrictEvalArguments);
+    *ok = false;
+    return impl()->EmptyExpression();
+  }
+
+  BlockState block_state(zone(), &scope_state_);
+  RaiseLanguageMode(STRICT);
+
+  ClassInfo class_info(this);
+  impl()->DeclareClassVariable(name, block_state.scope(), &class_info,
+                               class_token_pos, CHECK_OK);
+
+  if (Check(Token::EXTENDS)) {
+    block_state.set_start_position(scanner()->location().end_pos);
+    ExpressionClassifier extends_classifier(this);
+    class_info.extends = ParseLeftHandSideExpression(CHECK_OK);
+    impl()->RewriteNonPattern(CHECK_OK);
+    impl()->AccumulateFormalParameterContainmentErrors();
+  } else {
+    block_state.set_start_position(scanner()->location().end_pos);
+  }
+
+  ClassLiteralChecker checker(this);
+
+  Expect(Token::LBRACE, CHECK_OK);
+
+  const bool has_extends = !impl()->IsEmptyExpression(class_info.extends);
+  while (peek() != Token::RBRACE) {
+    if (Check(Token::SEMICOLON)) continue;
+    FuncNameInferrer::State fni_state(fni_);
+    bool is_computed_name = false;  // Classes do not care about computed
+                                    // property names here.
+    ExpressionClassifier property_classifier(this);
+    ClassLiteralPropertyT property = ParseClassPropertyDefinition(
+        &checker, has_extends, &is_computed_name,
+        &class_info.has_seen_constructor, CHECK_OK);
+    impl()->RewriteNonPattern(CHECK_OK);
+    impl()->AccumulateFormalParameterContainmentErrors();
+
+    impl()->DeclareClassProperty(name, property, &class_info, CHECK_OK);
+    impl()->InferFunctionName();
+  }
+
+  Expect(Token::RBRACE, CHECK_OK);
+  return impl()->RewriteClassLiteral(name, &class_info, class_token_pos, ok);
+}
+
+template <typename Impl>
+void ParserBase<Impl>::ParseAsyncFunctionBody(Scope* scope, StatementListT body,
+                                              FunctionKind kind,
+                                              FunctionBodyType body_type,
+                                              bool accept_IN, int pos,
+                                              bool* ok) {
+  scope->ForceContextAllocation();
+
+  impl()->PrepareAsyncFunctionBody(body, kind, pos);
+
+  BlockT block = factory()->NewBlock(nullptr, 8, true, kNoSourcePosition);
+
+  ExpressionT return_value = impl()->EmptyExpression();
+  if (body_type == FunctionBodyType::kNormal) {
+    ParseStatementList(block->statements(), Token::RBRACE,
+                       CHECK_OK_CUSTOM(Void));
+    return_value = factory()->NewUndefinedLiteral(kNoSourcePosition);
+  } else {
+    return_value = ParseAssignmentExpression(accept_IN, CHECK_OK_CUSTOM(Void));
+    impl()->RewriteNonPattern(CHECK_OK_CUSTOM(Void));
+  }
+
+  impl()->RewriteAsyncFunctionBody(body, block, return_value,
+                                   CHECK_OK_CUSTOM(Void));
+  scope->set_end_position(scanner()->location().end_pos);
+}
+
+template <typename Impl>
+typename ParserBase<Impl>::ExpressionT
+ParserBase<Impl>::ParseAsyncFunctionLiteral(bool* ok) {
+  // AsyncFunctionLiteral ::
+  //   async [no LineTerminator here] function ( FormalParameters[Await] )
+  //       { AsyncFunctionBody }
+  //
+  //   async [no LineTerminator here] function BindingIdentifier[Await]
+  //       ( FormalParameters[Await] ) { AsyncFunctionBody }
+  DCHECK_EQ(scanner()->current_token(), Token::ASYNC);
+  int pos = position();
+  Expect(Token::FUNCTION, CHECK_OK);
+  bool is_strict_reserved = false;
+  IdentifierT name = impl()->EmptyIdentifier();
+  FunctionLiteral::FunctionType type = FunctionLiteral::kAnonymousExpression;
+
+  if (peek_any_identifier()) {
+    type = FunctionLiteral::kNamedExpression;
+    name = ParseIdentifierOrStrictReservedWord(FunctionKind::kAsyncFunction,
+                                               &is_strict_reserved, CHECK_OK);
+  }
+  return impl()->ParseFunctionLiteral(
+      name, scanner()->location(),
+      is_strict_reserved ? kFunctionNameIsStrictReserved
+                         : kFunctionNameValidityUnknown,
+      FunctionKind::kAsyncFunction, pos, type, language_mode(), CHECK_OK);
+}
+
+template <typename Impl>
 typename ParserBase<Impl>::ExpressionT ParserBase<Impl>::ParseTemplateLiteral(
-    ExpressionT tag, int start, ExpressionClassifier* classifier, bool* ok) {
+    ExpressionT tag, int start, bool* ok) {
   // A TemplateLiteral is made up of 0 or more TEMPLATE_SPAN tokens (literal
   // text followed by a substitution expression), finalized by a single
   // TEMPLATE_TAIL.
@@ -3569,29 +4202,28 @@
     CheckTemplateOctalLiteral(pos, peek_position(), CHECK_OK);
     next = peek();
     if (next == Token::EOS) {
-      ReportMessageAt(Scanner::Location(start, peek_position()),
-                      MessageTemplate::kUnterminatedTemplate);
+      impl()->ReportMessageAt(Scanner::Location(start, peek_position()),
+                              MessageTemplate::kUnterminatedTemplate);
       *ok = false;
-      return Traits::EmptyExpression();
+      return impl()->EmptyExpression();
     } else if (next == Token::ILLEGAL) {
-      Traits::ReportMessageAt(
+      impl()->ReportMessageAt(
           Scanner::Location(position() + 1, peek_position()),
           MessageTemplate::kUnexpectedToken, "ILLEGAL", kSyntaxError);
       *ok = false;
-      return Traits::EmptyExpression();
+      return impl()->EmptyExpression();
     }
 
     int expr_pos = peek_position();
-    ExpressionT expression = this->ParseExpression(true, classifier, CHECK_OK);
-    CheckNoTailCallExpressions(classifier, CHECK_OK);
-    impl()->RewriteNonPattern(classifier, CHECK_OK);
+    ExpressionT expression = ParseExpressionCoverGrammar(true, CHECK_OK);
+    impl()->RewriteNonPattern(CHECK_OK);
     impl()->AddTemplateExpression(&ts, expression);
 
     if (peek() != Token::RBRACE) {
-      ReportMessageAt(Scanner::Location(expr_pos, peek_position()),
-                      MessageTemplate::kUnterminatedTemplateExpr);
+      impl()->ReportMessageAt(Scanner::Location(expr_pos, peek_position()),
+                              MessageTemplate::kUnterminatedTemplateExpr);
       *ok = false;
-      return Traits::EmptyExpression();
+      return impl()->EmptyExpression();
     }
 
     // If we didn't die parsing that expression, our next token should be a
@@ -3601,16 +4233,16 @@
     pos = position();
 
     if (next == Token::EOS) {
-      ReportMessageAt(Scanner::Location(start, pos),
-                      MessageTemplate::kUnterminatedTemplate);
+      impl()->ReportMessageAt(Scanner::Location(start, pos),
+                              MessageTemplate::kUnterminatedTemplate);
       *ok = false;
-      return Traits::EmptyExpression();
+      return impl()->EmptyExpression();
     } else if (next == Token::ILLEGAL) {
-      Traits::ReportMessageAt(
+      impl()->ReportMessageAt(
           Scanner::Location(position() + 1, peek_position()),
           MessageTemplate::kUnexpectedToken, "ILLEGAL", kSyntaxError);
       *ok = false;
-      return Traits::EmptyExpression();
+      return impl()->EmptyExpression();
     }
 
     impl()->AddTemplateSpan(&ts, next == Token::TEMPLATE_TAIL);
@@ -3627,8 +4259,8 @@
 ParserBase<Impl>::CheckAndRewriteReferenceExpression(
     ExpressionT expression, int beg_pos, int end_pos,
     MessageTemplate::Template message, bool* ok) {
-  return this->CheckAndRewriteReferenceExpression(expression, beg_pos, end_pos,
-                                                  message, kReferenceError, ok);
+  return CheckAndRewriteReferenceExpression(expression, beg_pos, end_pos,
+                                            message, kReferenceError, ok);
 }
 
 template <typename Impl>
@@ -3636,12 +4268,12 @@
 ParserBase<Impl>::CheckAndRewriteReferenceExpression(
     ExpressionT expression, int beg_pos, int end_pos,
     MessageTemplate::Template message, ParseErrorType type, bool* ok) {
-  if (this->IsIdentifier(expression) && is_strict(language_mode()) &&
-      this->IsEvalOrArguments(this->AsIdentifier(expression))) {
+  if (impl()->IsIdentifier(expression) && is_strict(language_mode()) &&
+      impl()->IsEvalOrArguments(impl()->AsIdentifier(expression))) {
     ReportMessageAt(Scanner::Location(beg_pos, end_pos),
                     MessageTemplate::kStrictEvalArguments, kSyntaxError);
     *ok = false;
-    return this->EmptyExpression();
+    return impl()->EmptyExpression();
   }
   if (expression->IsValidReferenceExpression()) {
     return expression;
@@ -3649,47 +4281,1140 @@
   if (expression->IsCall()) {
     // If it is a call, make it a runtime error for legacy web compatibility.
     // Rewrite `expr' to `expr[throw ReferenceError]'.
-    ExpressionT error = this->NewThrowReferenceError(message, beg_pos);
+    ExpressionT error = impl()->NewThrowReferenceError(message, beg_pos);
     return factory()->NewProperty(expression, error, beg_pos);
   }
   ReportMessageAt(Scanner::Location(beg_pos, end_pos), message, type);
   *ok = false;
-  return this->EmptyExpression();
+  return impl()->EmptyExpression();
 }
 
 template <typename Impl>
 bool ParserBase<Impl>::IsValidReferenceExpression(ExpressionT expression) {
-  return this->IsAssignableIdentifier(expression) || expression->IsProperty();
+  return IsAssignableIdentifier(expression) || expression->IsProperty();
 }
 
 template <typename Impl>
-void ParserBase<Impl>::CheckDestructuringElement(
-    ExpressionT expression, ExpressionClassifier* classifier, int begin,
-    int end) {
+void ParserBase<Impl>::CheckDestructuringElement(ExpressionT expression,
+                                                 int begin, int end) {
   if (!IsValidPattern(expression) && !expression->IsAssignment() &&
       !IsValidReferenceExpression(expression)) {
-    classifier->RecordAssignmentPatternError(
+    classifier()->RecordAssignmentPatternError(
         Scanner::Location(begin, end),
         MessageTemplate::kInvalidDestructuringTarget);
   }
 }
 
+template <typename Impl>
+typename ParserBase<Impl>::ExpressionT ParserBase<Impl>::ParseV8Intrinsic(
+    bool* ok) {
+  // CallRuntime ::
+  //   '%' Identifier Arguments
+
+  int pos = peek_position();
+  Expect(Token::MOD, CHECK_OK);
+  // Allow "eval" or "arguments" for backward compatibility.
+  IdentifierT name = ParseIdentifier(kAllowRestrictedIdentifiers, CHECK_OK);
+  Scanner::Location spread_pos;
+  ExpressionClassifier classifier(this);
+  ExpressionListT args = ParseArguments(&spread_pos, CHECK_OK);
+
+  DCHECK(!spread_pos.IsValid());
+
+  return impl()->NewV8Intrinsic(name, args, pos, ok);
+}
+
+template <typename Impl>
+typename ParserBase<Impl>::ExpressionT ParserBase<Impl>::ParseDoExpression(
+    bool* ok) {
+  // AssignmentExpression ::
+  //     do '{' StatementList '}'
+
+  int pos = peek_position();
+  Expect(Token::DO, CHECK_OK);
+  BlockT block = ParseBlock(nullptr, CHECK_OK);
+  return impl()->RewriteDoExpression(block, pos, ok);
+}
+
+// Redefinition of CHECK_OK for parsing statements.
+#undef CHECK_OK
+#define CHECK_OK CHECK_OK_CUSTOM(NullStatement)
+
+template <typename Impl>
+typename ParserBase<Impl>::LazyParsingResult
+ParserBase<Impl>::ParseStatementList(StatementListT body, int end_token,
+                                     bool may_abort, bool* ok) {
+  // StatementList ::
+  //   (StatementListItem)* <end_token>
+
+  // Allocate a target stack to use for this set of source
+  // elements. This way, all scripts and functions get their own
+  // target stack thus avoiding illegal breaks and continues across
+  // functions.
+  typename Types::TargetScope target_scope(this);
+  int count_statements = 0;
+
+  DCHECK(!impl()->IsNullStatementList(body));
+  bool directive_prologue = true;  // Parsing directive prologue.
+
+  while (peek() != end_token) {
+    if (directive_prologue && peek() != Token::STRING) {
+      directive_prologue = false;
+    }
+
+    bool starts_with_identifier = peek() == Token::IDENTIFIER;
+    Scanner::Location token_loc = scanner()->peek_location();
+    StatementT stat =
+        ParseStatementListItem(CHECK_OK_CUSTOM(Return, kLazyParsingComplete));
+
+    if (impl()->IsNullStatement(stat) || impl()->IsEmptyStatement(stat)) {
+      directive_prologue = false;  // End of directive prologue.
+      continue;
+    }
+
+    if (directive_prologue) {
+      // The length of the token is used to distinguish between string literals
+      // that evaluate equal to directives but contain either escape sequences
+      // (e.g., "use \x73trict") or line continuations (e.g., "use \(newline)
+      // strict").
+      if (impl()->IsUseStrictDirective(stat) &&
+          token_loc.end_pos - token_loc.beg_pos == sizeof("use strict") + 1) {
+        // Directive "use strict" (ES5 14.1).
+        RaiseLanguageMode(STRICT);
+        if (!scope()->HasSimpleParameters()) {
+          // TC39 deemed "use strict" directives to be an error when occurring
+          // in the body of a function with non-simple parameter list, on
+          // 29/7/2015. https://goo.gl/ueA7Ln
+          impl()->ReportMessageAt(
+              token_loc, MessageTemplate::kIllegalLanguageModeDirective,
+              "use strict");
+          *ok = false;
+          return kLazyParsingComplete;
+        }
+        // Because declarations in strict eval code don't leak into the scope
+        // of the eval call, it is likely that functions declared in strict
+        // eval code will be used within the eval code, so lazy parsing is
+        // probably not a win.
+        if (scope()->is_eval_scope()) mode_ = PARSE_EAGERLY;
+      } else if (impl()->IsUseAsmDirective(stat) &&
+                 token_loc.end_pos - token_loc.beg_pos ==
+                     sizeof("use asm") + 1) {
+        // Directive "use asm".
+        impl()->SetAsmModule();
+      } else if (impl()->IsStringLiteral(stat)) {
+        // Possibly an unknown directive.
+        // Should not change mode, but will increment usage counters
+        // as appropriate. Ditto usages below.
+        RaiseLanguageMode(SLOPPY);
+      } else {
+        // End of the directive prologue.
+        directive_prologue = false;
+        RaiseLanguageMode(SLOPPY);
+      }
+    } else {
+      RaiseLanguageMode(SLOPPY);
+    }
+
+    // If we're allowed to abort, we will do so when we see a "long and
+    // trivial" function. Our current definition of "long and trivial" is:
+    // - over kLazyParseTrialLimit statements
+    // - all starting with an identifier (i.e., no if, for, while, etc.)
+    if (may_abort) {
+      if (!starts_with_identifier) {
+        may_abort = false;
+      } else if (++count_statements > kLazyParseTrialLimit) {
+        return kLazyParsingAborted;
+      }
+    }
+
+    body->Add(stat, zone());
+  }
+  return kLazyParsingComplete;
+}
+
+template <typename Impl>
+typename ParserBase<Impl>::StatementT ParserBase<Impl>::ParseStatementListItem(
+    bool* ok) {
+  // ECMA 262 6th Edition
+  // StatementListItem[Yield, Return] :
+  //   Statement[?Yield, ?Return]
+  //   Declaration[?Yield]
+  //
+  // Declaration[Yield] :
+  //   HoistableDeclaration[?Yield]
+  //   ClassDeclaration[?Yield]
+  //   LexicalDeclaration[In, ?Yield]
+  //
+  // HoistableDeclaration[Yield, Default] :
+  //   FunctionDeclaration[?Yield, ?Default]
+  //   GeneratorDeclaration[?Yield, ?Default]
+  //
+  // LexicalDeclaration[In, Yield] :
+  //   LetOrConst BindingList[?In, ?Yield] ;
+
+  switch (peek()) {
+    case Token::FUNCTION:
+      return ParseHoistableDeclaration(nullptr, false, ok);
+    case Token::CLASS:
+      Consume(Token::CLASS);
+      return ParseClassDeclaration(nullptr, false, ok);
+    case Token::VAR:
+    case Token::CONST:
+      return ParseVariableStatement(kStatementListItem, nullptr, ok);
+    case Token::LET:
+      if (IsNextLetKeyword()) {
+        return ParseVariableStatement(kStatementListItem, nullptr, ok);
+      }
+      break;
+    case Token::ASYNC:
+      if (allow_harmony_async_await() && PeekAhead() == Token::FUNCTION &&
+          !scanner()->HasAnyLineTerminatorAfterNext()) {
+        Consume(Token::ASYNC);
+        return ParseAsyncFunctionDeclaration(nullptr, false, ok);
+      }
+    /* falls through */
+    default:
+      break;
+  }
+  return ParseStatement(nullptr, kAllowLabelledFunctionStatement, ok);
+}
+
+template <typename Impl>
+typename ParserBase<Impl>::StatementT ParserBase<Impl>::ParseStatement(
+    ZoneList<const AstRawString*>* labels,
+    AllowLabelledFunctionStatement allow_function, bool* ok) {
+  // Statement ::
+  //   Block
+  //   VariableStatement
+  //   EmptyStatement
+  //   ExpressionStatement
+  //   IfStatement
+  //   IterationStatement
+  //   ContinueStatement
+  //   BreakStatement
+  //   ReturnStatement
+  //   WithStatement
+  //   LabelledStatement
+  //   SwitchStatement
+  //   ThrowStatement
+  //   TryStatement
+  //   DebuggerStatement
+
+  // Note: Since labels can only be used by 'break' and 'continue'
+  // statements, which themselves are only valid within blocks,
+  // iterations or 'switch' statements (i.e., BreakableStatements),
+  // labels can be simply ignored in all other cases; except for
+  // trivial labeled break statements 'label: break label' which is
+  // parsed into an empty statement.
+  switch (peek()) {
+    case Token::LBRACE:
+      return ParseBlock(labels, ok);
+    case Token::SEMICOLON:
+      Next();
+      return factory()->NewEmptyStatement(kNoSourcePosition);
+    case Token::IF:
+      return ParseIfStatement(labels, ok);
+    case Token::DO:
+      return ParseDoWhileStatement(labels, ok);
+    case Token::WHILE:
+      return ParseWhileStatement(labels, ok);
+    case Token::FOR:
+      return ParseForStatement(labels, ok);
+    case Token::CONTINUE:
+    case Token::BREAK:
+    case Token::RETURN:
+    case Token::THROW:
+    case Token::TRY: {
+      // These statements must have their labels preserved in an enclosing
+      // block, as the corresponding AST nodes do not currently store their
+      // labels.
+      // TODO(nikolaos, marja): Consider adding the labels to the AST nodes.
+      if (labels == nullptr) {
+        return ParseStatementAsUnlabelled(labels, ok);
+      } else {
+        BlockT result =
+            factory()->NewBlock(labels, 1, false, kNoSourcePosition);
+        typename Types::Target target(this, result);
+        StatementT statement = ParseStatementAsUnlabelled(labels, CHECK_OK);
+        result->statements()->Add(statement, zone());
+        return result;
+      }
+    }
+    case Token::WITH:
+      return ParseWithStatement(labels, ok);
+    case Token::SWITCH:
+      return ParseSwitchStatement(labels, ok);
+    case Token::FUNCTION:
+      // FunctionDeclaration only allowed as a StatementListItem, not in
+      // an arbitrary Statement position. Exceptions such as
+      // ES#sec-functiondeclarations-in-ifstatement-statement-clauses
+      // are handled by calling ParseScopedStatement rather than
+      // ParseStatement directly.
+      impl()->ReportMessageAt(scanner()->peek_location(),
+                              is_strict(language_mode())
+                                  ? MessageTemplate::kStrictFunction
+                                  : MessageTemplate::kSloppyFunction);
+      *ok = false;
+      return impl()->NullStatement();
+    case Token::DEBUGGER:
+      return ParseDebuggerStatement(ok);
+    case Token::VAR:
+      return ParseVariableStatement(kStatement, nullptr, ok);
+    default:
+      return ParseExpressionOrLabelledStatement(labels, allow_function, ok);
+  }
+}
+
+// This method parses a subset of statements (break, continue, return, throw,
+// try) which are to be grouped because they all require their labels to be
+// preserved in an enclosing block.
+template <typename Impl>
+typename ParserBase<Impl>::StatementT
+ParserBase<Impl>::ParseStatementAsUnlabelled(
+    ZoneList<const AstRawString*>* labels, bool* ok) {
+  switch (peek()) {
+    case Token::CONTINUE:
+      return ParseContinueStatement(ok);
+    case Token::BREAK:
+      return ParseBreakStatement(labels, ok);
+    case Token::RETURN:
+      return ParseReturnStatement(ok);
+    case Token::THROW:
+      return ParseThrowStatement(ok);
+    case Token::TRY:
+      return ParseTryStatement(ok);
+    default:
+      UNREACHABLE();
+      return impl()->NullStatement();
+  }
+}
+
+template <typename Impl>
+typename ParserBase<Impl>::BlockT ParserBase<Impl>::ParseBlock(
+    ZoneList<const AstRawString*>* labels, bool* ok) {
+  // Block ::
+  //   '{' StatementList '}'
+
+  // Construct block expecting 16 statements.
+  BlockT body = factory()->NewBlock(labels, 16, false, kNoSourcePosition);
+
+  // Parse the statements and collect escaping labels.
+  Expect(Token::LBRACE, CHECK_OK_CUSTOM(NullBlock));
+  {
+    BlockState block_state(zone(), &scope_state_);
+    block_state.set_start_position(scanner()->location().beg_pos);
+    typename Types::Target target(this, body);
+
+    while (peek() != Token::RBRACE) {
+      StatementT stat = ParseStatementListItem(CHECK_OK_CUSTOM(NullBlock));
+      if (!impl()->IsNullStatement(stat) && !impl()->IsEmptyStatement(stat)) {
+        body->statements()->Add(stat, zone());
+      }
+    }
+
+    Expect(Token::RBRACE, CHECK_OK_CUSTOM(NullBlock));
+    block_state.set_end_position(scanner()->location().end_pos);
+    body->set_scope(block_state.FinalizedBlockScope());
+  }
+  return body;
+}
+
+template <typename Impl>
+typename ParserBase<Impl>::StatementT ParserBase<Impl>::ParseScopedStatement(
+    ZoneList<const AstRawString*>* labels, bool legacy, bool* ok) {
+  if (is_strict(language_mode()) || peek() != Token::FUNCTION ||
+      (legacy && allow_harmony_restrictive_declarations())) {
+    return ParseStatement(labels, kDisallowLabelledFunctionStatement, ok);
+  } else {
+    if (legacy) {
+      impl()->CountUsage(v8::Isolate::kLegacyFunctionDeclaration);
+    }
+    // Make a block around the statement in case a lexical binding
+    // is introduced by a FunctionDeclaration.
+    BlockState block_state(zone(), &scope_state_);
+    block_state.set_start_position(scanner()->location().beg_pos);
+    BlockT block = factory()->NewBlock(NULL, 1, false, kNoSourcePosition);
+    StatementT body = ParseFunctionDeclaration(CHECK_OK);
+    block->statements()->Add(body, zone());
+    block_state.set_end_position(scanner()->location().end_pos);
+    block->set_scope(block_state.FinalizedBlockScope());
+    return block;
+  }
+}
+
+// Parses a VariableStatement (var/let/const declarations followed by ';').
+// |names|, when non-null, presumably collects the declared binding names for
+// the caller — TODO confirm against ParseVariableDeclarations.
+template <typename Impl>
+typename ParserBase<Impl>::StatementT ParserBase<Impl>::ParseVariableStatement(
+    VariableDeclarationContext var_context,
+    ZoneList<const AstRawString*>* names, bool* ok) {
+  // VariableStatement ::
+  //   VariableDeclarations ';'
+
+  // The scope of a var declared variable anywhere inside a function
+  // is the entire function (ECMA-262, 3rd, 10.1.3, and 12.2). Thus we can
+  // transform a source-level var declaration into a (Function) Scope
+  // declaration, and rewrite the source-level initialization into an assignment
+  // statement. We use a block to collect multiple assignments.
+  //
+  // We mark the block as initializer block because we don't want the
+  // rewriter to add a '.result' assignment to such a block (to get compliant
+  // behavior for code such as print(eval('var x = 7')), and for cosmetic
+  // reasons when pretty-printing. Also, unless an assignment (initialization)
+  // is inside an initializer block, it is ignored.
+
+  DeclarationParsingResult parsing_result;
+  StatementT result =
+      ParseVariableDeclarations(var_context, &parsing_result, names, CHECK_OK);
+  ExpectSemicolon(CHECK_OK);
+  return result;
+}
+
+template <typename Impl>
+typename ParserBase<Impl>::StatementT ParserBase<Impl>::ParseDebuggerStatement(
+    bool* ok) {
+  // In ECMA-262 'debugger' is defined as a reserved keyword. In some browser
+  // contexts this is used as a statement which invokes the debugger as if a
+  // break point is present.
+  // DebuggerStatement ::
+  //   'debugger' ';'
+
+  int pos = peek_position();
+  Expect(Token::DEBUGGER, CHECK_OK);
+  ExpectSemicolon(CHECK_OK);
+  return factory()->NewDebuggerStatement(pos);
+}
+
+// Disambiguates between an ExpressionStatement and a LabelledStatement: if a
+// lone identifier is followed by ':', it is a label; otherwise the parsed
+// expression becomes an expression statement. Also accepts extension-provided
+// "native function" declarations when an extension is installed.
+template <typename Impl>
+typename ParserBase<Impl>::StatementT
+ParserBase<Impl>::ParseExpressionOrLabelledStatement(
+    ZoneList<const AstRawString*>* labels,
+    AllowLabelledFunctionStatement allow_function, bool* ok) {
+  // ExpressionStatement | LabelledStatement ::
+  //   Expression ';'
+  //   Identifier ':' Statement
+  //
+  // ExpressionStatement[Yield] :
+  //   [lookahead ∉ {{, function, class, let [}] Expression[In, ?Yield] ;
+
+  int pos = peek_position();
+
+  switch (peek()) {
+    case Token::FUNCTION:
+    case Token::LBRACE:
+      UNREACHABLE();  // Always handled by the callers.
+    case Token::CLASS:
+      ReportUnexpectedToken(Next());
+      *ok = false;
+      return impl()->NullStatement();
+    default:
+      break;
+  }
+
+  bool starts_with_identifier = peek_any_identifier();
+  ExpressionT expr = ParseExpression(true, CHECK_OK);
+  if (peek() == Token::COLON && starts_with_identifier &&
+      impl()->IsIdentifier(expr)) {
+    // The whole expression was a single identifier, and not, e.g.,
+    // something starting with an identifier or a parenthesized identifier.
+    labels = impl()->DeclareLabel(labels, impl()->AsIdentifierExpression(expr),
+                                  CHECK_OK);
+    Consume(Token::COLON);
+    // ES#sec-labelled-function-declarations Labelled Function Declarations
+    if (peek() == Token::FUNCTION && is_sloppy(language_mode())) {
+      if (allow_function == kAllowLabelledFunctionStatement) {
+        return ParseFunctionDeclaration(ok);
+      } else {
+        return ParseScopedStatement(labels, true, ok);
+      }
+    }
+    return ParseStatement(labels, kDisallowLabelledFunctionStatement, ok);
+  }
+
+  // If we have an extension, we allow a native function declaration.
+  // A native function declaration starts with "native function" with
+  // no line-terminator between the two words.
+  if (extension_ != nullptr && peek() == Token::FUNCTION &&
+      !scanner()->HasAnyLineTerminatorBeforeNext() && impl()->IsNative(expr) &&
+      !scanner()->literal_contains_escapes()) {
+    return ParseNativeDeclaration(ok);
+  }
+
+  // Parsed expression statement, followed by semicolon.
+  ExpectSemicolon(CHECK_OK);
+  return factory()->NewExpressionStatement(expr, pos);
+}
+
+// Parses an IfStatement. A missing 'else' branch is represented by an
+// explicit EmptyStatement rather than a null statement.
+template <typename Impl>
+typename ParserBase<Impl>::StatementT ParserBase<Impl>::ParseIfStatement(
+    ZoneList<const AstRawString*>* labels, bool* ok) {
+  // IfStatement ::
+  //   'if' '(' Expression ')' Statement ('else' Statement)?
+
+  int pos = peek_position();
+  Expect(Token::IF, CHECK_OK);
+  Expect(Token::LPAREN, CHECK_OK);
+  ExpressionT condition = ParseExpression(true, CHECK_OK);
+  Expect(Token::RPAREN, CHECK_OK);
+  StatementT then_statement = ParseScopedStatement(labels, false, CHECK_OK);
+  StatementT else_statement = impl()->NullStatement();
+  if (Check(Token::ELSE)) {
+    else_statement = ParseScopedStatement(labels, false, CHECK_OK);
+  } else {
+    else_statement = factory()->NewEmptyStatement(kNoSourcePosition);
+  }
+  return factory()->NewIfStatement(condition, then_statement, else_statement,
+                                   pos);
+}
+
+// Parses a ContinueStatement. An optional label is only consumed when it
+// appears on the same line (no line terminator before it) and the next token
+// could not already terminate the statement — this implements ASI for
+// 'continue'. Reports kIllegalContinue/kUnknownLabel when no matching
+// iteration statement is found.
+template <typename Impl>
+typename ParserBase<Impl>::StatementT ParserBase<Impl>::ParseContinueStatement(
+    bool* ok) {
+  // ContinueStatement ::
+  //   'continue' Identifier? ';'
+
+  int pos = peek_position();
+  Expect(Token::CONTINUE, CHECK_OK);
+  IdentifierT label = impl()->EmptyIdentifier();
+  Token::Value tok = peek();
+  if (!scanner()->HasAnyLineTerminatorBeforeNext() && tok != Token::SEMICOLON &&
+      tok != Token::RBRACE && tok != Token::EOS) {
+    // ECMA allows "eval" or "arguments" as labels even in strict mode.
+    label = ParseIdentifier(kAllowRestrictedIdentifiers, CHECK_OK);
+  }
+  typename Types::IterationStatement target =
+      impl()->LookupContinueTarget(label, CHECK_OK);
+  if (impl()->IsNullStatement(target)) {
+    // Illegal continue statement.
+    MessageTemplate::Template message = MessageTemplate::kIllegalContinue;
+    if (!impl()->IsEmptyIdentifier(label)) {
+      message = MessageTemplate::kUnknownLabel;
+    }
+    ReportMessage(message, label);
+    *ok = false;
+    return impl()->NullStatement();
+  }
+  ExpectSemicolon(CHECK_OK);
+  return factory()->NewContinueStatement(target, pos);
+}
+
+// Parses a BreakStatement. Mirrors ParseContinueStatement's ASI handling for
+// the optional label; a break that targets one of its own enclosing labels
+// degenerates to an empty statement.
+template <typename Impl>
+typename ParserBase<Impl>::StatementT ParserBase<Impl>::ParseBreakStatement(
+    ZoneList<const AstRawString*>* labels, bool* ok) {
+  // BreakStatement ::
+  //   'break' Identifier? ';'
+
+  int pos = peek_position();
+  Expect(Token::BREAK, CHECK_OK);
+  IdentifierT label = impl()->EmptyIdentifier();
+  Token::Value tok = peek();
+  if (!scanner()->HasAnyLineTerminatorBeforeNext() && tok != Token::SEMICOLON &&
+      tok != Token::RBRACE && tok != Token::EOS) {
+    // ECMA allows "eval" or "arguments" as labels even in strict mode.
+    label = ParseIdentifier(kAllowRestrictedIdentifiers, CHECK_OK);
+  }
+  // Parse labeled break statements that target themselves into
+  // empty statements, e.g. 'l1: l2: l3: break l2;'
+  if (!impl()->IsEmptyIdentifier(label) &&
+      impl()->ContainsLabel(labels, label)) {
+    ExpectSemicolon(CHECK_OK);
+    return factory()->NewEmptyStatement(pos);
+  }
+  typename Types::BreakableStatement target =
+      impl()->LookupBreakTarget(label, CHECK_OK);
+  if (impl()->IsNullStatement(target)) {
+    // Illegal break statement.
+    MessageTemplate::Template message = MessageTemplate::kIllegalBreak;
+    if (!impl()->IsEmptyIdentifier(label)) {
+      message = MessageTemplate::kUnknownLabel;
+    }
+    ReportMessage(message, label);
+    *ok = false;
+    return impl()->NullStatement();
+  }
+  ExpectSemicolon(CHECK_OK);
+  return factory()->NewBreakStatement(target, pos);
+}
+
+// Parses a ReturnStatement. 'return' is illegal at script/eval/module scope.
+// A return with no value yields 'this' in a subclass constructor (supporting
+// the derived-constructor return semantics) and 'undefined' elsewhere.
+template <typename Impl>
+typename ParserBase<Impl>::StatementT ParserBase<Impl>::ParseReturnStatement(
+    bool* ok) {
+  // ReturnStatement ::
+  //   'return' [no line terminator] Expression? ';'
+
+  // Consume the return token. It is necessary to do that before
+  // reporting any errors on it, because of the way errors are
+  // reported (underlining).
+  Expect(Token::RETURN, CHECK_OK);
+  Scanner::Location loc = scanner()->location();
+
+  switch (GetDeclarationScope()->scope_type()) {
+    case SCRIPT_SCOPE:
+    case EVAL_SCOPE:
+    case MODULE_SCOPE:
+      impl()->ReportMessageAt(loc, MessageTemplate::kIllegalReturn);
+      *ok = false;
+      return impl()->NullStatement();
+    default:
+      break;
+  }
+
+  Token::Value tok = peek();
+  ExpressionT return_value = impl()->EmptyExpression();
+  // A line terminator (or a terminating token) after 'return' means the
+  // return has no operand (ASI).
+  if (scanner()->HasAnyLineTerminatorBeforeNext() || tok == Token::SEMICOLON ||
+      tok == Token::RBRACE || tok == Token::EOS) {
+    if (IsSubclassConstructor(function_state_->kind())) {
+      return_value = impl()->ThisExpression(loc.beg_pos);
+    } else {
+      return_value = impl()->GetLiteralUndefined(position());
+    }
+  } else {
+    if (IsSubclassConstructor(function_state_->kind())) {
+      // Because of the return code rewriting that happens in case of a subclass
+      // constructor we don't want to accept tail calls, therefore we don't set
+      // ReturnExprScope to kInsideValidReturnStatement here.
+      return_value = ParseExpression(true, CHECK_OK);
+    } else {
+      ReturnExprScope maybe_allow_tail_calls(
+          function_state_, ReturnExprContext::kInsideValidReturnStatement);
+      return_value = ParseExpression(true, CHECK_OK);
+
+      if (allow_tailcalls() && !is_sloppy(language_mode()) && !is_resumable()) {
+        // ES6 14.6.1 Static Semantics: IsInTailPosition
+        function_state_->AddImplicitTailCallExpression(return_value);
+      }
+    }
+  }
+  ExpectSemicolon(CHECK_OK);
+  return_value = impl()->RewriteReturn(return_value, loc.beg_pos);
+  return factory()->NewReturnStatement(return_value, loc.beg_pos);
+}
+
+// Parses a WithStatement. 'with' is a syntax error in strict mode; the body
+// is parsed inside a fresh WITH_SCOPE.
+template <typename Impl>
+typename ParserBase<Impl>::StatementT ParserBase<Impl>::ParseWithStatement(
+    ZoneList<const AstRawString*>* labels, bool* ok) {
+  // WithStatement ::
+  //   'with' '(' Expression ')' Statement
+
+  Expect(Token::WITH, CHECK_OK);
+  int pos = position();
+
+  if (is_strict(language_mode())) {
+    ReportMessage(MessageTemplate::kStrictWith);
+    *ok = false;
+    return impl()->NullStatement();
+  }
+
+  Expect(Token::LPAREN, CHECK_OK);
+  ExpressionT expr = ParseExpression(true, CHECK_OK);
+  Expect(Token::RPAREN, CHECK_OK);
+
+  Scope* with_scope = NewScope(WITH_SCOPE);
+  StatementT body = impl()->NullStatement();
+  {
+    BlockState block_state(&scope_state_, with_scope);
+    with_scope->set_start_position(scanner()->peek_location().beg_pos);
+    body = ParseScopedStatement(labels, true, CHECK_OK);
+    with_scope->set_end_position(scanner()->location().end_pos);
+  }
+  return factory()->NewWithStatement(with_scope, expr, body, pos);
+}
+
+// Parses a do-while loop. The trailing semicolon is optional (Check, not
+// Expect) — see the comment below for why.
+template <typename Impl>
+typename ParserBase<Impl>::StatementT ParserBase<Impl>::ParseDoWhileStatement(
+    ZoneList<const AstRawString*>* labels, bool* ok) {
+  // DoStatement ::
+  //   'do' Statement 'while' '(' Expression ')' ';'
+
+  auto loop = factory()->NewDoWhileStatement(labels, peek_position());
+  typename Types::Target target(this, loop);
+
+  Expect(Token::DO, CHECK_OK);
+  StatementT body = ParseScopedStatement(nullptr, true, CHECK_OK);
+  Expect(Token::WHILE, CHECK_OK);
+  Expect(Token::LPAREN, CHECK_OK);
+
+  ExpressionT cond = ParseExpression(true, CHECK_OK);
+  Expect(Token::RPAREN, CHECK_OK);
+
+  // Allow do-statements to be terminated with and without
+  // semi-colons. This allows code such as 'do;while(0)return' to
+  // parse, which would not be the case if we had used the
+  // ExpectSemicolon() functionality here.
+  Check(Token::SEMICOLON);
+
+  loop->Initialize(cond, body);
+  return loop;
+}
+
+// Parses a while loop. The loop node is created first and registered as a
+// Target so break/continue inside the body can resolve to it.
+template <typename Impl>
+typename ParserBase<Impl>::StatementT ParserBase<Impl>::ParseWhileStatement(
+    ZoneList<const AstRawString*>* labels, bool* ok) {
+  // WhileStatement ::
+  //   'while' '(' Expression ')' Statement
+
+  auto loop = factory()->NewWhileStatement(labels, peek_position());
+  typename Types::Target target(this, loop);
+
+  Expect(Token::WHILE, CHECK_OK);
+  Expect(Token::LPAREN, CHECK_OK);
+  ExpressionT cond = ParseExpression(true, CHECK_OK);
+  Expect(Token::RPAREN, CHECK_OK);
+  StatementT body = ParseScopedStatement(nullptr, true, CHECK_OK);
+
+  loop->Initialize(cond, body);
+  return loop;
+}
+
+// Parses a ThrowStatement. A line terminator between 'throw' and its operand
+// is a syntax error (ES restricted production — no ASI completion here).
+template <typename Impl>
+typename ParserBase<Impl>::StatementT ParserBase<Impl>::ParseThrowStatement(
+    bool* ok) {
+  // ThrowStatement ::
+  //   'throw' Expression ';'
+
+  Expect(Token::THROW, CHECK_OK);
+  int pos = position();
+  if (scanner()->HasAnyLineTerminatorBeforeNext()) {
+    ReportMessage(MessageTemplate::kNewlineAfterThrow);
+    *ok = false;
+    return impl()->NullStatement();
+  }
+  ExpressionT exception = ParseExpression(true, CHECK_OK);
+  ExpectSemicolon(CHECK_OK);
+
+  return impl()->NewThrowStatement(exception, pos);
+}
+
+// Parses a SwitchStatement. The case clauses share a single block scope
+// (marked nonlinear) so lexical declarations inside cases behave per ES6;
+// at most one 'default' clause is permitted.
+template <typename Impl>
+typename ParserBase<Impl>::StatementT ParserBase<Impl>::ParseSwitchStatement(
+    ZoneList<const AstRawString*>* labels, bool* ok) {
+  // SwitchStatement ::
+  //   'switch' '(' Expression ')' '{' CaseClause* '}'
+  // CaseClause ::
+  //   'case' Expression ':' StatementList
+  //   'default' ':' StatementList
+
+  int switch_pos = peek_position();
+
+  Expect(Token::SWITCH, CHECK_OK);
+  Expect(Token::LPAREN, CHECK_OK);
+  ExpressionT tag = ParseExpression(true, CHECK_OK);
+  Expect(Token::RPAREN, CHECK_OK);
+
+  auto switch_statement = factory()->NewSwitchStatement(labels, switch_pos);
+
+  {
+    BlockState cases_block_state(zone(), &scope_state_);
+    cases_block_state.set_start_position(scanner()->location().beg_pos);
+    cases_block_state.SetNonlinear();
+    typename Types::Target target(this, switch_statement);
+
+    bool default_seen = false;
+    auto cases = impl()->NewCaseClauseList(4);
+    Expect(Token::LBRACE, CHECK_OK);
+    while (peek() != Token::RBRACE) {
+      // An empty label indicates the default case.
+      ExpressionT label = impl()->EmptyExpression();
+      if (Check(Token::CASE)) {
+        label = ParseExpression(true, CHECK_OK);
+      } else {
+        Expect(Token::DEFAULT, CHECK_OK);
+        if (default_seen) {
+          ReportMessage(MessageTemplate::kMultipleDefaultsInSwitch);
+          *ok = false;
+          return impl()->NullStatement();
+        }
+        default_seen = true;
+      }
+      Expect(Token::COLON, CHECK_OK);
+      int clause_pos = position();
+      StatementListT statements = impl()->NewStatementList(5);
+      while (peek() != Token::CASE && peek() != Token::DEFAULT &&
+             peek() != Token::RBRACE) {
+        StatementT stat = ParseStatementListItem(CHECK_OK);
+        statements->Add(stat, zone());
+      }
+      auto clause = factory()->NewCaseClause(label, statements, clause_pos);
+      cases->Add(clause, zone());
+    }
+    Expect(Token::RBRACE, CHECK_OK);
+
+    cases_block_state.set_end_position(scanner()->location().end_pos);
+    return impl()->RewriteSwitchStatement(
+        tag, switch_statement, cases, cases_block_state.FinalizedBlockScope());
+  }
+}
+
+// Parses a TryStatement. At least one of catch/finally must be present. The
+// catch parameter may be an identifier or a destructuring pattern; pattern
+// bindings live in an inner block scope nested inside the CATCH_SCOPE.
+template <typename Impl>
+typename ParserBase<Impl>::StatementT ParserBase<Impl>::ParseTryStatement(
+    bool* ok) {
+  // TryStatement ::
+  //   'try' Block Catch
+  //   'try' Block Finally
+  //   'try' Block Catch Finally
+  //
+  // Catch ::
+  //   'catch' '(' Identifier ')' Block
+  //
+  // Finally ::
+  //   'finally' Block
+
+  Expect(Token::TRY, CHECK_OK);
+  int pos = position();
+
+  BlockT try_block = impl()->NullBlock();
+  {
+    ReturnExprScope no_tail_calls(function_state_,
+                                  ReturnExprContext::kInsideTryBlock);
+    try_block = ParseBlock(nullptr, CHECK_OK);
+  }
+
+  CatchInfo catch_info(this);
+  // 'try {...} %catch (...)': the '%' marker is only recognized with natives
+  // syntax enabled — NOTE(review): presumably for internal promise-rejection
+  // handling; confirm against the natives that use it.
+  catch_info.for_promise_reject = allow_natives() && Check(Token::MOD);
+
+  if (peek() != Token::CATCH && peek() != Token::FINALLY) {
+    ReportMessage(MessageTemplate::kNoCatchOrFinally);
+    *ok = false;
+    return impl()->NullStatement();
+  }
+
+  BlockT catch_block = impl()->NullBlock();
+  if (Check(Token::CATCH)) {
+    Expect(Token::LPAREN, CHECK_OK);
+    catch_info.scope = NewScope(CATCH_SCOPE);
+    catch_info.scope->set_start_position(scanner()->location().beg_pos);
+
+    {
+      CollectExpressionsInTailPositionToListScope
+          collect_tail_call_expressions_scope(
+              function_state_, &catch_info.tail_call_expressions);
+      BlockState catch_block_state(&scope_state_, catch_info.scope);
+
+      catch_block = factory()->NewBlock(nullptr, 16, false, kNoSourcePosition);
+
+      // Create a block scope to hold any lexical declarations created
+      // as part of destructuring the catch parameter.
+      {
+        BlockState catch_variable_block_state(zone(), &scope_state_);
+        catch_variable_block_state.set_start_position(
+            scanner()->location().beg_pos);
+        typename Types::Target target(this, catch_block);
+
+        // This does not simply call ParsePrimaryExpression to avoid
+        // ExpressionFromIdentifier from being called in the first
+        // branch, which would introduce an unresolved symbol and mess
+        // with arrow function names.
+        if (peek_any_identifier()) {
+          catch_info.name =
+              ParseIdentifier(kDontAllowRestrictedIdentifiers, CHECK_OK);
+        } else {
+          ExpressionClassifier pattern_classifier(this);
+          catch_info.pattern = ParsePrimaryExpression(CHECK_OK);
+          ValidateBindingPattern(CHECK_OK);
+        }
+
+        Expect(Token::RPAREN, CHECK_OK);
+        impl()->RewriteCatchPattern(&catch_info, CHECK_OK);
+        if (!impl()->IsNullStatement(catch_info.init_block)) {
+          catch_block->statements()->Add(catch_info.init_block, zone());
+        }
+
+        catch_info.inner_block = ParseBlock(nullptr, CHECK_OK);
+        catch_block->statements()->Add(catch_info.inner_block, zone());
+        impl()->ValidateCatchBlock(catch_info, CHECK_OK);
+        catch_variable_block_state.set_end_position(
+            scanner()->location().end_pos);
+        catch_block->set_scope(
+            catch_variable_block_state.FinalizedBlockScope());
+      }
+    }
+
+    catch_info.scope->set_end_position(scanner()->location().end_pos);
+  }
+
+  BlockT finally_block = impl()->NullBlock();
+  DCHECK(peek() == Token::FINALLY || !impl()->IsNullStatement(catch_block));
+  if (Check(Token::FINALLY)) {
+    finally_block = ParseBlock(nullptr, CHECK_OK);
+  }
+
+  return impl()->RewriteTryStatement(try_block, catch_block, finally_block,
+                                     catch_info, pos);
+}
+
+// Parses all 'for' statement forms: for(;;), for-in, and for-of, with either
+// a declaration (var/let/const) or a plain expression/pattern as initializer.
+// Lexically-bound (let/const) iteration variables get an extra hidden block
+// scope around the loop, and for-in/of declarations are desugared via the
+// impl()-provided rewriting hooks.
+template <typename Impl>
+typename ParserBase<Impl>::StatementT ParserBase<Impl>::ParseForStatement(
+    ZoneList<const AstRawString*>* labels, bool* ok) {
+  int stmt_pos = peek_position();
+  ForInfo for_info(this);
+  bool bound_names_are_lexical = false;
+
+  // Create an in-between scope for let-bound iteration variables.
+  BlockState for_state(zone(), &scope_state_);
+  Expect(Token::FOR, CHECK_OK);
+  Expect(Token::LPAREN, CHECK_OK);
+  for_state.set_start_position(scanner()->location().beg_pos);
+  for_state.set_is_hidden();
+
+  StatementT init = impl()->NullStatement();
+  if (peek() != Token::SEMICOLON) {
+    // An initializer is present.
+    if (peek() == Token::VAR || peek() == Token::CONST ||
+        (peek() == Token::LET && IsNextLetKeyword())) {
+      // The initializer contains declarations.
+      ParseVariableDeclarations(kForStatement, &for_info.parsing_result,
+                                nullptr, CHECK_OK);
+      bound_names_are_lexical =
+          IsLexicalVariableMode(for_info.parsing_result.descriptor.mode);
+      for_info.each_loc = scanner()->location();
+
+      if (CheckInOrOf(&for_info.mode)) {
+        // Just one declaration followed by in/of.
+        if (for_info.parsing_result.declarations.length() != 1) {
+          impl()->ReportMessageAt(
+              for_info.parsing_result.bindings_loc,
+              MessageTemplate::kForInOfLoopMultiBindings,
+              ForEachStatement::VisitModeString(for_info.mode));
+          *ok = false;
+          return impl()->NullStatement();
+        }
+        // Initializers are disallowed in for-in/of heads, except for the
+        // legacy sloppy-mode 'for (var x = i in o)' form, which is only an
+        // error once the harmony_for_in flag removes it.
+        if (for_info.parsing_result.first_initializer_loc.IsValid() &&
+            (is_strict(language_mode()) ||
+             for_info.mode == ForEachStatement::ITERATE ||
+             bound_names_are_lexical ||
+             !impl()->IsIdentifier(
+                 for_info.parsing_result.declarations[0].pattern) ||
+             allow_harmony_for_in())) {
+          // Only increment the use count if we would have let this through
+          // without the flag.
+          if (allow_harmony_for_in()) {
+            impl()->CountUsage(v8::Isolate::kForInInitializer);
+          }
+          impl()->ReportMessageAt(
+              for_info.parsing_result.first_initializer_loc,
+              MessageTemplate::kForInOfLoopInitializer,
+              ForEachStatement::VisitModeString(for_info.mode));
+          *ok = false;
+          return impl()->NullStatement();
+        }
+
+        BlockT init_block = impl()->RewriteForVarInLegacy(for_info);
+
+        auto loop =
+            factory()->NewForEachStatement(for_info.mode, labels, stmt_pos);
+        typename Types::Target target(this, loop);
+
+        int each_keyword_pos = scanner()->location().beg_pos;
+
+        ExpressionT enumerable = impl()->EmptyExpression();
+        if (for_info.mode == ForEachStatement::ITERATE) {
+          ExpressionClassifier classifier(this);
+          enumerable = ParseAssignmentExpression(true, CHECK_OK);
+          impl()->RewriteNonPattern(CHECK_OK);
+        } else {
+          enumerable = ParseExpression(true, CHECK_OK);
+        }
+
+        Expect(Token::RPAREN, CHECK_OK);
+
+        StatementT final_loop = impl()->NullStatement();
+        {
+          ReturnExprScope no_tail_calls(function_state_,
+                                        ReturnExprContext::kInsideForInOfBody);
+          BlockState block_state(zone(), &scope_state_);
+          block_state.set_start_position(scanner()->location().beg_pos);
+
+          StatementT body = ParseScopedStatement(nullptr, true, CHECK_OK);
+
+          BlockT body_block = impl()->NullBlock();
+          ExpressionT each_variable = impl()->EmptyExpression();
+          impl()->DesugarBindingInForEachStatement(&for_info, &body_block,
+                                                   &each_variable, CHECK_OK);
+          body_block->statements()->Add(body, zone());
+          final_loop = impl()->InitializeForEachStatement(
+              loop, each_variable, enumerable, body_block, each_keyword_pos);
+
+          block_state.set_end_position(scanner()->location().end_pos);
+          body_block->set_scope(block_state.FinalizedBlockScope());
+        }
+
+        // NOTE(review): |ok| is passed through without CHECK_OK here —
+        // confirm a failure inside CreateForEachStatementTDZ is benign for
+        // the statements below.
+        init_block =
+            impl()->CreateForEachStatementTDZ(init_block, for_info, ok);
+
+        for_state.set_end_position(scanner()->location().end_pos);
+        Scope* for_scope = for_state.FinalizedBlockScope();
+        // Parsed for-in loop w/ variable declarations.
+        if (!impl()->IsNullStatement(init_block)) {
+          init_block->statements()->Add(final_loop, zone());
+          init_block->set_scope(for_scope);
+          return init_block;
+        } else {
+          DCHECK_NULL(for_scope);
+          return final_loop;
+        }
+      } else {
+        // One or more declaration not followed by in/of.
+        init = impl()->BuildInitializationBlock(
+            &for_info.parsing_result,
+            bound_names_are_lexical ? &for_info.bound_names : nullptr,
+            CHECK_OK);
+      }
+    } else {
+      // The initializer does not contain declarations.
+      int lhs_beg_pos = peek_position();
+      ExpressionClassifier classifier(this);
+      ExpressionT expression = ParseExpressionCoverGrammar(false, CHECK_OK);
+      int lhs_end_pos = scanner()->location().end_pos;
+
+      bool is_for_each = CheckInOrOf(&for_info.mode);
+      bool is_destructuring = is_for_each && (expression->IsArrayLiteral() ||
+                                              expression->IsObjectLiteral());
+
+      if (is_destructuring) {
+        ValidateAssignmentPattern(CHECK_OK);
+      } else {
+        impl()->RewriteNonPattern(CHECK_OK);
+      }
+
+      if (is_for_each) {
+        // Initializer is reference followed by in/of.
+        if (!is_destructuring) {
+          expression = impl()->CheckAndRewriteReferenceExpression(
+              expression, lhs_beg_pos, lhs_end_pos,
+              MessageTemplate::kInvalidLhsInFor, kSyntaxError, CHECK_OK);
+        }
+
+        auto loop =
+            factory()->NewForEachStatement(for_info.mode, labels, stmt_pos);
+        typename Types::Target target(this, loop);
+
+        int each_keyword_pos = scanner()->location().beg_pos;
+
+        ExpressionT enumerable = impl()->EmptyExpression();
+        if (for_info.mode == ForEachStatement::ITERATE) {
+          ExpressionClassifier classifier(this);
+          enumerable = ParseAssignmentExpression(true, CHECK_OK);
+          impl()->RewriteNonPattern(CHECK_OK);
+        } else {
+          enumerable = ParseExpression(true, CHECK_OK);
+        }
+
+        Expect(Token::RPAREN, CHECK_OK);
+
+        {
+          ReturnExprScope no_tail_calls(function_state_,
+                                        ReturnExprContext::kInsideForInOfBody);
+          BlockState block_state(zone(), &scope_state_);
+          block_state.set_start_position(scanner()->location().beg_pos);
+
+          // For legacy compat reasons, give for loops similar treatment to
+          // if statements in allowing a function declaration for a body
+          StatementT body = ParseScopedStatement(nullptr, true, CHECK_OK);
+          block_state.set_end_position(scanner()->location().end_pos);
+          StatementT final_loop = impl()->InitializeForEachStatement(
+              loop, expression, enumerable, body, each_keyword_pos);
+
+          // No declarations were made, so both hidden scopes must be empty.
+          Scope* for_scope = for_state.FinalizedBlockScope();
+          DCHECK_NULL(for_scope);
+          USE(for_scope);
+          Scope* block_scope = block_state.FinalizedBlockScope();
+          DCHECK_NULL(block_scope);
+          USE(block_scope);
+          return final_loop;
+        }
+      } else {
+        // Initializer is just an expression.
+        init = factory()->NewExpressionStatement(expression, lhs_beg_pos);
+      }
+    }
+  }
+
+  // Standard 'for' loop, we have parsed the initializer at this point.
+  auto loop = factory()->NewForStatement(labels, stmt_pos);
+  typename Types::Target target(this, loop);
+
+  Expect(Token::SEMICOLON, CHECK_OK);
+
+  ExpressionT cond = impl()->EmptyExpression();
+  StatementT next = impl()->NullStatement();
+  StatementT body = impl()->NullStatement();
+
+  // If there are let bindings, then condition and the next statement of the
+  // for loop must be parsed in a new scope.
+  Scope* inner_scope = scope();
+  // TODO(verwaest): Allocate this through a ScopeState as well.
+  if (bound_names_are_lexical && for_info.bound_names.length() > 0) {
+    inner_scope = NewScopeWithParent(inner_scope, BLOCK_SCOPE);
+    inner_scope->set_start_position(scanner()->location().beg_pos);
+  }
+  {
+    BlockState block_state(&scope_state_, inner_scope);
+
+    if (peek() != Token::SEMICOLON) {
+      cond = ParseExpression(true, CHECK_OK);
+    }
+    Expect(Token::SEMICOLON, CHECK_OK);
+
+    if (peek() != Token::RPAREN) {
+      ExpressionT exp = ParseExpression(true, CHECK_OK);
+      next = factory()->NewExpressionStatement(exp, exp->position());
+    }
+    Expect(Token::RPAREN, CHECK_OK);
+
+    body = ParseScopedStatement(nullptr, true, CHECK_OK);
+  }
+
+  if (bound_names_are_lexical && for_info.bound_names.length() > 0) {
+    auto result = impl()->DesugarLexicalBindingsInForStatement(
+        loop, init, cond, next, body, inner_scope, for_info, CHECK_OK);
+    for_state.set_end_position(scanner()->location().end_pos);
+    return result;
+  } else {
+    for_state.set_end_position(scanner()->location().end_pos);
+    Scope* for_scope = for_state.FinalizedBlockScope();
+    if (for_scope != nullptr) {
+      // Rewrite a for statement of the form
+      //   for (const x = i; c; n) b
+      //
+      // into
+      //
+      //   {
+      //     const x = i;
+      //     for (; c; n) b
+      //   }
+      //
+      // or, desugar
+      //   for (; c; n) b
+      // into
+      //   {
+      //     for (; c; n) b
+      //   }
+      // just in case b introduces a lexical binding some other way, e.g., if b
+      // is a FunctionDeclaration.
+      BlockT block = factory()->NewBlock(nullptr, 2, false, kNoSourcePosition);
+      if (!impl()->IsNullStatement(init)) {
+        block->statements()->Add(init, zone());
+      }
+      block->statements()->Add(loop, zone());
+      block->set_scope(for_scope);
+      loop->Initialize(init, cond, next, body);
+      return block;
+    } else {
+      loop->Initialize(init, cond, next, body);
+      return loop;
+    }
+  }
+}
 
 #undef CHECK_OK
 #undef CHECK_OK_CUSTOM
 
 template <typename Impl>
-void ParserBase<Impl>::ObjectLiteralChecker::CheckProperty(
-    Token::Value property, PropertyKind type, MethodKind method_type,
-    ExpressionClassifier* classifier, bool* ok) {
-  DCHECK(!IsStaticMethod(method_type));
-  DCHECK(!IsSpecialMethod(method_type) || type == kMethodProperty);
-
+void ParserBase<Impl>::ObjectLiteralChecker::CheckDuplicateProto(
+    Token::Value property) {
   if (property == Token::SMI || property == Token::NUMBER) return;
 
-  if (type == kValueProperty && IsProto()) {
+  if (IsProto()) {
     if (has_seen_proto_) {
-      classifier->RecordObjectLiteralError(
+      this->parser()->classifier()->RecordExpressionError(
           this->scanner()->location(), MessageTemplate::kDuplicateProto);
       return;
     }
@@ -3698,23 +5423,22 @@
 }
 
 template <typename Impl>
-void ParserBase<Impl>::ClassLiteralChecker::CheckProperty(
-    Token::Value property, PropertyKind type, MethodKind method_type,
-    ExpressionClassifier* classifier, bool* ok) {
-  DCHECK(type == kMethodProperty || type == kAccessorProperty);
+void ParserBase<Impl>::ClassLiteralChecker::CheckClassMethodName(
+    Token::Value property, PropertyKind type, bool is_generator, bool is_async,
+    bool is_static, bool* ok) {
+  DCHECK(type == PropertyKind::kMethodProperty ||
+         type == PropertyKind::kAccessorProperty);
 
   if (property == Token::SMI || property == Token::NUMBER) return;
 
-  if (IsStaticMethod(method_type)) {
+  if (is_static) {
     if (IsPrototype()) {
       this->parser()->ReportMessage(MessageTemplate::kStaticPrototype);
       *ok = false;
       return;
     }
   } else if (IsConstructor()) {
-    const bool is_generator = IsGeneratorMethod(method_type);
-    const bool is_async = IsAsyncMethod(method_type);
-    if (is_generator || is_async || type == kAccessorProperty) {
+    if (is_generator || is_async || type == PropertyKind::kAccessorProperty) {
       MessageTemplate::Template msg =
           is_generator ? MessageTemplate::kConstructorIsGenerator
                        : is_async ? MessageTemplate::kConstructorIsAsync
diff --git a/src/parsing/parser.cc b/src/parsing/parser.cc
index cfc2de8..7b88695 100644
--- a/src/parsing/parser.cc
+++ b/src/parsing/parser.cc
@@ -15,6 +15,7 @@
 #include "src/base/platform/platform.h"
 #include "src/char-predicates-inl.h"
 #include "src/messages.h"
+#include "src/parsing/duplicate-finder.h"
 #include "src/parsing/parameter-initializer-rewriter.h"
 #include "src/parsing/parse-info.h"
 #include "src/parsing/rewriter.h"
@@ -121,12 +122,20 @@
     if (use_temp_zone) {
       parser_->fni_ = &fni_;
       parser_->zone_ = temp_zone;
+      if (parser_->reusable_preparser_ != nullptr) {
+        parser_->reusable_preparser_->zone_ = temp_zone;
+      }
     }
   }
-  ~DiscardableZoneScope() {
+  void Reset() {
     parser_->fni_ = prev_fni_;
     parser_->zone_ = prev_zone_;
+    if (parser_->reusable_preparser_ != nullptr) {
+      parser_->reusable_preparser_->zone_ = prev_zone_;
+    }
+    ast_node_factory_scope_.Reset();
   }
+  ~DiscardableZoneScope() { Reset(); }
 
  private:
   AstNodeFactory::BodyScope ast_node_factory_scope_;
@@ -149,9 +158,64 @@
   }
 }
 
+Expression* Parser::CallClassFieldInitializer(Scope* scope,
+                                              Expression* this_expr) {
+  // This produces the expression
+  // `.class_field_intializer(this_expr)`, where '.class_field_intializer' is
+  // the name
+  // of a synthetic variable.
+  // 'this_expr' will be 'this' in a base constructor and the result of calling
+  // 'super' in a derived one.
+  const AstRawString* init_fn_name =
+      ast_value_factory()->dot_class_field_init_string();
+  VariableProxy* init_fn_proxy = scope->NewUnresolved(factory(), init_fn_name);
+  ZoneList<Expression*>* args = new (zone()) ZoneList<Expression*>(2, zone());
+  args->Add(init_fn_proxy, zone());
+  args->Add(this_expr, zone());
+  return factory()->NewCallRuntime(Runtime::kInlineCall, args,
+                                   kNoSourcePosition);
+}
+
+Expression* Parser::RewriteSuperCall(Expression* super_call) {
+  // TODO(bakkot) find a way to avoid this for classes without fields.
+  if (!allow_harmony_class_fields()) {
+    return super_call;
+  }
+  // This turns a super call `super()` into a do expression of the form
+  // do {
+  //   tmp x = super();
+  //   if (.class-field-init)
+  //     .class-field-init(x)
+  //   x; // This isn't actually present; our do-expression representation
+  // allows specifying that the expression returns x directly.
+  // }
+  Variable* var_tmp =
+      scope()->NewTemporary(ast_value_factory()->empty_string());
+  Block* block = factory()->NewBlock(nullptr, 1, false, kNoSourcePosition);
+  Assignment* assignment = factory()->NewAssignment(
+      Token::ASSIGN, factory()->NewVariableProxy(var_tmp), super_call,
+      kNoSourcePosition);
+  block->statements()->Add(
+      factory()->NewExpressionStatement(assignment, kNoSourcePosition), zone());
+  const AstRawString* init_fn_name =
+      ast_value_factory()->dot_class_field_init_string();
+  VariableProxy* init_fn_proxy =
+      scope()->NewUnresolved(factory(), init_fn_name);
+  Expression* condition = init_fn_proxy;
+  Statement* initialize = factory()->NewExpressionStatement(
+      CallClassFieldInitializer(scope(), factory()->NewVariableProxy(var_tmp)),
+      kNoSourcePosition);
+  IfStatement* if_statement = factory()->NewIfStatement(
+      condition, initialize, factory()->NewEmptyStatement(kNoSourcePosition),
+      kNoSourcePosition);
+  block->statements()->Add(if_statement, zone());
+  return factory()->NewDoExpression(block, var_tmp, kNoSourcePosition);
+}
+
 FunctionLiteral* Parser::DefaultConstructor(const AstRawString* name,
-                                            bool call_super, int pos,
-                                            int end_pos,
+                                            bool call_super,
+                                            bool requires_class_field_init,
+                                            int pos, int end_pos,
                                             LanguageMode language_mode) {
   int materialized_literal_count = -1;
   int expected_property_count = -1;
@@ -170,7 +234,7 @@
 
   {
     FunctionState function_state(&function_state_, &scope_state_,
-                                 function_scope, kind);
+                                 function_scope);
 
     body = new (zone()) ZoneList<Statement*>(call_super ? 2 : 1, zone());
     if (call_super) {
@@ -204,8 +268,11 @@
       VariableProxy* new_target_proxy =
           NewUnresolved(ast_value_factory()->new_target_string(), pos);
       args->Add(new_target_proxy, zone());
-      CallRuntime* call = factory()->NewCallRuntime(
+      Expression* call = factory()->NewCallRuntime(
           Context::REFLECT_CONSTRUCT_INDEX, args, pos);
+      if (requires_class_field_init) {
+        call = CallClassFieldInitializer(scope(), call);
+      }
       body->Add(factory()->NewReturnStatement(call, pos), zone());
     }
 
@@ -218,7 +285,9 @@
       expected_property_count, parameter_count,
       FunctionLiteral::kNoDuplicateParameters,
       FunctionLiteral::kAnonymousExpression,
-      FunctionLiteral::kShouldLazyCompile, kind, pos);
+      FunctionLiteral::kShouldLazyCompile, pos);
+
+  function_literal->set_requires_class_field_init(requires_class_field_init);
 
   return function_literal;
 }
@@ -230,41 +299,39 @@
 // 'continue' statement targets). Upon construction, a new target is
 // added; it is removed upon destruction.
 
-class Target BASE_EMBEDDED {
+class ParserTarget BASE_EMBEDDED {
  public:
-  Target(Target** variable, BreakableStatement* statement)
-      : variable_(variable), statement_(statement), previous_(*variable) {
-    *variable = this;
+  ParserTarget(ParserBase<Parser>* parser, BreakableStatement* statement)
+      : variable_(&parser->impl()->target_stack_),
+        statement_(statement),
+        previous_(parser->impl()->target_stack_) {
+    parser->impl()->target_stack_ = this;
   }
 
-  ~Target() {
-    *variable_ = previous_;
-  }
+  ~ParserTarget() { *variable_ = previous_; }
 
-  Target* previous() { return previous_; }
+  ParserTarget* previous() { return previous_; }
   BreakableStatement* statement() { return statement_; }
 
  private:
-  Target** variable_;
+  ParserTarget** variable_;
   BreakableStatement* statement_;
-  Target* previous_;
+  ParserTarget* previous_;
 };
 
-
-class TargetScope BASE_EMBEDDED {
+class ParserTargetScope BASE_EMBEDDED {
  public:
-  explicit TargetScope(Target** variable)
-      : variable_(variable), previous_(*variable) {
-    *variable = NULL;
+  explicit ParserTargetScope(ParserBase<Parser>* parser)
+      : variable_(&parser->impl()->target_stack_),
+        previous_(parser->impl()->target_stack_) {
+    parser->impl()->target_stack_ = nullptr;
   }
 
-  ~TargetScope() {
-    *variable_ = previous_;
-  }
+  ~ParserTargetScope() { *variable_ = previous_; }
 
  private:
-  Target** variable_;
-  Target* previous_;
+  ParserTarget** variable_;
+  ParserTarget* previous_;
 };
 
 
@@ -276,17 +343,14 @@
 // thus it must never be used where only a single statement
 // is correct (e.g. an if statement branch w/o braces)!
 
-#define CHECK_OK  ok);      \
-  if (!*ok) return nullptr; \
+#define CHECK_OK_VALUE(x) ok); \
+  if (!*ok) return x;          \
   ((void)0
 #define DUMMY )  // to make indentation work
 #undef DUMMY
 
-#define CHECK_OK_VOID  ok); \
-  if (!*ok) return;         \
-  ((void)0
-#define DUMMY )  // to make indentation work
-#undef DUMMY
+#define CHECK_OK CHECK_OK_VALUE(nullptr)
+#define CHECK_OK_VOID CHECK_OK_VALUE(this->Void())
 
 #define CHECK_FAILED /**/); \
   if (failed_) return nullptr;  \
@@ -297,76 +361,9 @@
 // ----------------------------------------------------------------------------
 // Implementation of Parser
 
-bool ParserBaseTraits<Parser>::IsEval(const AstRawString* identifier) const {
-  return identifier == delegate()->ast_value_factory()->eval_string();
-}
-
-bool ParserBaseTraits<Parser>::IsArguments(
-    const AstRawString* identifier) const {
-  return identifier == delegate()->ast_value_factory()->arguments_string();
-}
-
-bool ParserBaseTraits<Parser>::IsEvalOrArguments(
-    const AstRawString* identifier) const {
-  return IsEval(identifier) || IsArguments(identifier);
-}
-
-bool ParserBaseTraits<Parser>::IsUndefined(
-    const AstRawString* identifier) const {
-  return identifier == delegate()->ast_value_factory()->undefined_string();
-}
-
-bool ParserBaseTraits<Parser>::IsPrototype(
-    const AstRawString* identifier) const {
-  return identifier == delegate()->ast_value_factory()->prototype_string();
-}
-
-bool ParserBaseTraits<Parser>::IsConstructor(
-    const AstRawString* identifier) const {
-  return identifier == delegate()->ast_value_factory()->constructor_string();
-}
-
-bool ParserBaseTraits<Parser>::IsThisProperty(Expression* expression) {
-  DCHECK(expression != NULL);
-  Property* property = expression->AsProperty();
-  return property != NULL && property->obj()->IsVariableProxy() &&
-         property->obj()->AsVariableProxy()->is_this();
-}
-
-bool ParserBaseTraits<Parser>::IsIdentifier(Expression* expression) {
-  VariableProxy* operand = expression->AsVariableProxy();
-  return operand != NULL && !operand->is_this();
-}
-
-void ParserBaseTraits<Parser>::PushPropertyName(FuncNameInferrer* fni,
-                                                Expression* expression) {
-  if (expression->IsPropertyName()) {
-    fni->PushLiteralName(expression->AsLiteral()->AsRawPropertyName());
-  } else {
-    fni->PushLiteralName(
-        delegate()->ast_value_factory()->anonymous_function_string());
-  }
-}
-
-void ParserBaseTraits<Parser>::CheckAssigningFunctionLiteralToProperty(
-    Expression* left, Expression* right) {
-  DCHECK(left != NULL);
-  if (left->IsProperty() && right->IsFunctionLiteral()) {
-    right->AsFunctionLiteral()->set_pretenure();
-  }
-}
-
-Expression* ParserBaseTraits<Parser>::MarkExpressionAsAssigned(
-    Expression* expression) {
-  VariableProxy* proxy =
-      expression != NULL ? expression->AsVariableProxy() : NULL;
-  if (proxy != NULL) proxy->set_is_assigned();
-  return expression;
-}
-
-bool ParserBaseTraits<Parser>::ShortcutNumericLiteralBinaryExpression(
-    Expression** x, Expression* y, Token::Value op, int pos,
-    AstNodeFactory* factory) {
+bool Parser::ShortcutNumericLiteralBinaryExpression(Expression** x,
+                                                    Expression* y,
+                                                    Token::Value op, int pos) {
   if ((*x)->AsLiteral() && (*x)->AsLiteral()->raw_value()->IsNumber() &&
       y->AsLiteral() && y->AsLiteral()->raw_value()->IsNumber()) {
     double x_val = (*x)->AsLiteral()->raw_value()->AsNumber();
@@ -376,53 +373,53 @@
     bool has_dot = x_has_dot || y_has_dot;
     switch (op) {
       case Token::ADD:
-        *x = factory->NewNumberLiteral(x_val + y_val, pos, has_dot);
+        *x = factory()->NewNumberLiteral(x_val + y_val, pos, has_dot);
         return true;
       case Token::SUB:
-        *x = factory->NewNumberLiteral(x_val - y_val, pos, has_dot);
+        *x = factory()->NewNumberLiteral(x_val - y_val, pos, has_dot);
         return true;
       case Token::MUL:
-        *x = factory->NewNumberLiteral(x_val * y_val, pos, has_dot);
+        *x = factory()->NewNumberLiteral(x_val * y_val, pos, has_dot);
         return true;
       case Token::DIV:
-        *x = factory->NewNumberLiteral(x_val / y_val, pos, has_dot);
+        *x = factory()->NewNumberLiteral(x_val / y_val, pos, has_dot);
         return true;
       case Token::BIT_OR: {
         int value = DoubleToInt32(x_val) | DoubleToInt32(y_val);
-        *x = factory->NewNumberLiteral(value, pos, has_dot);
+        *x = factory()->NewNumberLiteral(value, pos, has_dot);
         return true;
       }
       case Token::BIT_AND: {
         int value = DoubleToInt32(x_val) & DoubleToInt32(y_val);
-        *x = factory->NewNumberLiteral(value, pos, has_dot);
+        *x = factory()->NewNumberLiteral(value, pos, has_dot);
         return true;
       }
       case Token::BIT_XOR: {
         int value = DoubleToInt32(x_val) ^ DoubleToInt32(y_val);
-        *x = factory->NewNumberLiteral(value, pos, has_dot);
+        *x = factory()->NewNumberLiteral(value, pos, has_dot);
         return true;
       }
       case Token::SHL: {
         int value = DoubleToInt32(x_val) << (DoubleToInt32(y_val) & 0x1f);
-        *x = factory->NewNumberLiteral(value, pos, has_dot);
+        *x = factory()->NewNumberLiteral(value, pos, has_dot);
         return true;
       }
       case Token::SHR: {
         uint32_t shift = DoubleToInt32(y_val) & 0x1f;
         uint32_t value = DoubleToUint32(x_val) >> shift;
-        *x = factory->NewNumberLiteral(value, pos, has_dot);
+        *x = factory()->NewNumberLiteral(value, pos, has_dot);
         return true;
       }
       case Token::SAR: {
         uint32_t shift = DoubleToInt32(y_val) & 0x1f;
         int value = ArithmeticShiftRight(DoubleToInt32(x_val), shift);
-        *x = factory->NewNumberLiteral(value, pos, has_dot);
+        *x = factory()->NewNumberLiteral(value, pos, has_dot);
         return true;
       }
       case Token::EXP: {
         double value = Pow(x_val, y_val);
         int int_value = static_cast<int>(value);
-        *x = factory->NewNumberLiteral(
+        *x = factory()->NewNumberLiteral(
             int_value == value && value != -0.0 ? int_value : value, pos,
             has_dot);
         return true;
@@ -434,15 +431,15 @@
   return false;
 }
 
-Expression* ParserBaseTraits<Parser>::BuildUnaryExpression(
-    Expression* expression, Token::Value op, int pos, AstNodeFactory* factory) {
+Expression* Parser::BuildUnaryExpression(Expression* expression,
+                                         Token::Value op, int pos) {
   DCHECK(expression != NULL);
   if (expression->IsLiteral()) {
     const AstValue* literal = expression->AsLiteral()->raw_value();
     if (op == Token::NOT) {
       // Convert the literal to a boolean condition and negate it.
       bool condition = literal->BooleanValue();
-      return factory->NewBooleanLiteral(!condition, pos);
+      return factory()->NewBooleanLiteral(!condition, pos);
     } else if (literal->IsNumber()) {
       // Compute some expressions involving only number literals.
       double value = literal->AsNumber();
@@ -451,9 +448,10 @@
         case Token::ADD:
           return expression;
         case Token::SUB:
-          return factory->NewNumberLiteral(-value, pos, has_dot);
+          return factory()->NewNumberLiteral(-value, pos, has_dot);
         case Token::BIT_NOT:
-          return factory->NewNumberLiteral(~DoubleToInt32(value), pos, has_dot);
+          return factory()->NewNumberLiteral(~DoubleToInt32(value), pos,
+                                             has_dot);
         default:
           break;
       }
@@ -461,53 +459,33 @@
   }
   // Desugar '+foo' => 'foo*1'
   if (op == Token::ADD) {
-    return factory->NewBinaryOperation(
-        Token::MUL, expression, factory->NewNumberLiteral(1, pos, true), pos);
+    return factory()->NewBinaryOperation(
+        Token::MUL, expression, factory()->NewNumberLiteral(1, pos, true), pos);
   }
   // The same idea for '-foo' => 'foo*(-1)'.
   if (op == Token::SUB) {
-    return factory->NewBinaryOperation(
-        Token::MUL, expression, factory->NewNumberLiteral(-1, pos), pos);
+    return factory()->NewBinaryOperation(
+        Token::MUL, expression, factory()->NewNumberLiteral(-1, pos), pos);
   }
   // ...and one more time for '~foo' => 'foo^(~0)'.
   if (op == Token::BIT_NOT) {
-    return factory->NewBinaryOperation(
-        Token::BIT_XOR, expression, factory->NewNumberLiteral(~0, pos), pos);
+    return factory()->NewBinaryOperation(
+        Token::BIT_XOR, expression, factory()->NewNumberLiteral(~0, pos), pos);
   }
-  return factory->NewUnaryOperation(op, expression, pos);
+  return factory()->NewUnaryOperation(op, expression, pos);
 }
 
-Expression* ParserBaseTraits<Parser>::BuildIteratorResult(Expression* value,
-                                                          bool done) {
+Expression* Parser::BuildIteratorResult(Expression* value, bool done) {
   int pos = kNoSourcePosition;
-  AstNodeFactory* factory = delegate()->factory();
-  Zone* zone = delegate()->zone();
 
-  if (value == nullptr) value = factory->NewUndefinedLiteral(pos);
+  if (value == nullptr) value = factory()->NewUndefinedLiteral(pos);
 
-  auto args = new (zone) ZoneList<Expression*>(2, zone);
-  args->Add(value, zone);
-  args->Add(factory->NewBooleanLiteral(done, pos), zone);
+  auto args = new (zone()) ZoneList<Expression*>(2, zone());
+  args->Add(value, zone());
+  args->Add(factory()->NewBooleanLiteral(done, pos), zone());
 
-  return factory->NewCallRuntime(Runtime::kInlineCreateIterResultObject, args,
-                                 pos);
-}
-
-Expression* ParserBaseTraits<Parser>::NewThrowReferenceError(
-    MessageTemplate::Template message, int pos) {
-  return delegate()->NewThrowError(
-      Runtime::kNewReferenceError, message,
-      delegate()->ast_value_factory()->empty_string(), pos);
-}
-
-Expression* ParserBaseTraits<Parser>::NewThrowSyntaxError(
-    MessageTemplate::Template message, const AstRawString* arg, int pos) {
-  return delegate()->NewThrowError(Runtime::kNewSyntaxError, message, arg, pos);
-}
-
-Expression* ParserBaseTraits<Parser>::NewThrowTypeError(
-    MessageTemplate::Template message, const AstRawString* arg, int pos) {
-  return delegate()->NewThrowError(Runtime::kNewTypeError, message, arg, pos);
+  return factory()->NewCallRuntime(Runtime::kInlineCreateIterResultObject, args,
+                                   pos);
 }
 
 Expression* Parser::NewThrowError(Runtime::FunctionId id,
@@ -520,124 +498,62 @@
   return factory()->NewThrow(call_constructor, pos);
 }
 
-void ParserBaseTraits<Parser>::ReportMessageAt(
-    Scanner::Location source_location, MessageTemplate::Template message,
-    const char* arg, ParseErrorType error_type) {
-  if (delegate()->stack_overflow()) {
-    // Suppress the error message (syntax error or such) in the presence of a
-    // stack overflow. The isolate allows only one pending exception at at time
-    // and we want to report the stack overflow later.
-    return;
-  }
-  delegate()->pending_error_handler_.ReportMessageAt(source_location.beg_pos,
-                                                     source_location.end_pos,
-                                                     message, arg, error_type);
-}
-
-void ParserBaseTraits<Parser>::ReportMessageAt(
-    Scanner::Location source_location, MessageTemplate::Template message,
-    const AstRawString* arg, ParseErrorType error_type) {
-  if (delegate()->stack_overflow()) {
-    // Suppress the error message (syntax error or such) in the presence of a
-    // stack overflow. The isolate allows only one pending exception at at time
-    // and we want to report the stack overflow later.
-    return;
-  }
-  delegate()->pending_error_handler_.ReportMessageAt(source_location.beg_pos,
-                                                     source_location.end_pos,
-                                                     message, arg, error_type);
-}
-
-const AstRawString* ParserBaseTraits<Parser>::GetSymbol(
-    Scanner* scanner) const {
-  const AstRawString* result =
-      delegate()->scanner()->CurrentSymbol(delegate()->ast_value_factory());
-  DCHECK(result != NULL);
-  return result;
-}
-
-const AstRawString* ParserBaseTraits<Parser>::GetNumberAsSymbol(
-    Scanner* scanner) const {
-  double double_value = delegate()->scanner()->DoubleValue();
-  char array[100];
-  const char* string = DoubleToCString(double_value, ArrayVector(array));
-  return delegate()->ast_value_factory()->GetOneByteString(string);
-}
-
-const AstRawString* ParserBaseTraits<Parser>::GetNextSymbol(
-    Scanner* scanner) const {
-  return delegate()->scanner()->NextSymbol(delegate()->ast_value_factory());
-}
-
-Expression* ParserBaseTraits<Parser>::ThisExpression(int pos) {
-  return delegate()->NewUnresolved(
-      delegate()->ast_value_factory()->this_string(), pos, pos + 4,
-      Variable::THIS);
-}
-
-Expression* ParserBaseTraits<Parser>::NewSuperPropertyReference(
-    AstNodeFactory* factory, int pos) {
+Expression* Parser::NewSuperPropertyReference(int pos) {
   // this_function[home_object_symbol]
-  VariableProxy* this_function_proxy = delegate()->NewUnresolved(
-      delegate()->ast_value_factory()->this_function_string(), pos);
+  VariableProxy* this_function_proxy =
+      NewUnresolved(ast_value_factory()->this_function_string(), pos);
   Expression* home_object_symbol_literal =
-      factory->NewSymbolLiteral("home_object_symbol", kNoSourcePosition);
-  Expression* home_object = factory->NewProperty(
+      factory()->NewSymbolLiteral("home_object_symbol", kNoSourcePosition);
+  Expression* home_object = factory()->NewProperty(
       this_function_proxy, home_object_symbol_literal, pos);
-  return factory->NewSuperPropertyReference(
+  return factory()->NewSuperPropertyReference(
       ThisExpression(pos)->AsVariableProxy(), home_object, pos);
 }
 
-Expression* ParserBaseTraits<Parser>::NewSuperCallReference(
-    AstNodeFactory* factory, int pos) {
-  VariableProxy* new_target_proxy = delegate()->NewUnresolved(
-      delegate()->ast_value_factory()->new_target_string(), pos);
-  VariableProxy* this_function_proxy = delegate()->NewUnresolved(
-      delegate()->ast_value_factory()->this_function_string(), pos);
-  return factory->NewSuperCallReference(ThisExpression(pos)->AsVariableProxy(),
-                                        new_target_proxy, this_function_proxy,
-                                        pos);
+Expression* Parser::NewSuperCallReference(int pos) {
+  VariableProxy* new_target_proxy =
+      NewUnresolved(ast_value_factory()->new_target_string(), pos);
+  VariableProxy* this_function_proxy =
+      NewUnresolved(ast_value_factory()->this_function_string(), pos);
+  return factory()->NewSuperCallReference(
+      ThisExpression(pos)->AsVariableProxy(), new_target_proxy,
+      this_function_proxy, pos);
 }
 
-Expression* ParserBaseTraits<Parser>::NewTargetExpression(int pos) {
+Expression* Parser::NewTargetExpression(int pos) {
   static const int kNewTargetStringLength = 10;
-  auto proxy = delegate()->NewUnresolved(
-      delegate()->ast_value_factory()->new_target_string(), pos,
-      pos + kNewTargetStringLength);
+  auto proxy = NewUnresolved(ast_value_factory()->new_target_string(), pos,
+                             pos + kNewTargetStringLength);
   proxy->set_is_new_target();
   return proxy;
 }
 
-Expression* ParserBaseTraits<Parser>::FunctionSentExpression(
-    AstNodeFactory* factory, int pos) const {
+Expression* Parser::FunctionSentExpression(int pos) {
   // We desugar function.sent into %_GeneratorGetInputOrDebugPos(generator).
-  Zone* zone = delegate()->zone();
-  ZoneList<Expression*>* args = new (zone) ZoneList<Expression*>(1, zone);
-  VariableProxy* generator = factory->NewVariableProxy(
-      delegate()->function_state_->generator_object_variable());
-  args->Add(generator, zone);
-  return factory->NewCallRuntime(Runtime::kInlineGeneratorGetInputOrDebugPos,
-                                 args, pos);
+  ZoneList<Expression*>* args = new (zone()) ZoneList<Expression*>(1, zone());
+  VariableProxy* generator =
+      factory()->NewVariableProxy(function_state_->generator_object_variable());
+  args->Add(generator, zone());
+  return factory()->NewCallRuntime(Runtime::kInlineGeneratorGetInputOrDebugPos,
+                                   args, pos);
 }
 
-Literal* ParserBaseTraits<Parser>::ExpressionFromLiteral(
-    Token::Value token, int pos, Scanner* scanner,
-    AstNodeFactory* factory) const {
+Literal* Parser::ExpressionFromLiteral(Token::Value token, int pos) {
   switch (token) {
     case Token::NULL_LITERAL:
-      return factory->NewNullLiteral(pos);
+      return factory()->NewNullLiteral(pos);
     case Token::TRUE_LITERAL:
-      return factory->NewBooleanLiteral(true, pos);
+      return factory()->NewBooleanLiteral(true, pos);
     case Token::FALSE_LITERAL:
-      return factory->NewBooleanLiteral(false, pos);
+      return factory()->NewBooleanLiteral(false, pos);
     case Token::SMI: {
-      int value = scanner->smi_value();
-      return factory->NewSmiLiteral(value, pos);
+      int value = scanner()->smi_value();
+      return factory()->NewSmiLiteral(value, pos);
     }
     case Token::NUMBER: {
-      bool has_dot = scanner->ContainsDot();
-      double value = scanner->DoubleValue();
-      return factory->NewNumberLiteral(value, pos, has_dot);
+      bool has_dot = scanner()->ContainsDot();
+      double value = scanner()->DoubleValue();
+      return factory()->NewNumberLiteral(value, pos, has_dot);
     }
     default:
       DCHECK(false);
@@ -645,43 +561,74 @@
   return NULL;
 }
 
-Expression* ParserBaseTraits<Parser>::ExpressionFromIdentifier(
-    const AstRawString* name, int start_position, int end_position,
-    InferName infer) {
-  if (infer == InferName::kYes && delegate()->fni_ != NULL) {
-    delegate()->fni_->PushVariableName(name);
-  }
-  return delegate()->NewUnresolved(name, start_position, end_position);
-}
-
-Expression* ParserBaseTraits<Parser>::ExpressionFromString(
-    int pos, Scanner* scanner, AstNodeFactory* factory) const {
-  const AstRawString* symbol = GetSymbol(scanner);
-  if (delegate()->fni_ != NULL) delegate()->fni_->PushLiteralName(symbol);
-  return factory->NewStringLiteral(symbol, pos);
-}
-
-Expression* ParserBaseTraits<Parser>::GetIterator(Expression* iterable,
-                                                  AstNodeFactory* factory,
-                                                  int pos) {
+Expression* Parser::GetIterator(Expression* iterable, int pos) {
   Expression* iterator_symbol_literal =
-      factory->NewSymbolLiteral("iterator_symbol", kNoSourcePosition);
+      factory()->NewSymbolLiteral("iterator_symbol", kNoSourcePosition);
   Expression* prop =
-      factory->NewProperty(iterable, iterator_symbol_literal, pos);
-  Zone* zone = delegate()->zone();
-  ZoneList<Expression*>* args = new (zone) ZoneList<Expression*>(0, zone);
-  return factory->NewCall(prop, args, pos);
-}
-
-Literal* ParserBaseTraits<Parser>::GetLiteralTheHole(
-    int position, AstNodeFactory* factory) const {
-  return factory->NewTheHoleLiteral(kNoSourcePosition);
+      factory()->NewProperty(iterable, iterator_symbol_literal, pos);
+  ZoneList<Expression*>* args = new (zone()) ZoneList<Expression*>(0, zone());
+  return factory()->NewCall(prop, args, pos);
 }
 
 void Parser::MarkTailPosition(Expression* expression) {
   expression->MarkTail();
 }
 
+Expression* Parser::NewV8Intrinsic(const AstRawString* name,
+                                   ZoneList<Expression*>* args, int pos,
+                                   bool* ok) {
+  if (extension_ != nullptr) {
+    // The extension structures are only accessible while parsing the
+    // very first time, not when reparsing because of lazy compilation.
+    GetClosureScope()->ForceEagerCompilation();
+  }
+
+  DCHECK(name->is_one_byte());
+  const Runtime::Function* function =
+      Runtime::FunctionForName(name->raw_data(), name->length());
+
+  if (function != nullptr) {
+    // Check for possible name clash.
+    DCHECK_EQ(Context::kNotFound,
+              Context::IntrinsicIndexForName(name->raw_data(), name->length()));
+    // Check for built-in IS_VAR macro.
+    if (function->function_id == Runtime::kIS_VAR) {
+      DCHECK_EQ(Runtime::RUNTIME, function->intrinsic_type);
+      // %IS_VAR(x) evaluates to x if x is a variable,
+      // leads to a parse error otherwise.  Could be implemented as an
+      // inline function %_IS_VAR(x) to eliminate this special case.
+      if (args->length() == 1 && args->at(0)->AsVariableProxy() != nullptr) {
+        return args->at(0);
+      } else {
+        ReportMessage(MessageTemplate::kNotIsvar);
+        *ok = false;
+        return nullptr;
+      }
+    }
+
+    // Check that the expected number of arguments are being passed.
+    if (function->nargs != -1 && function->nargs != args->length()) {
+      ReportMessage(MessageTemplate::kRuntimeWrongNumArgs);
+      *ok = false;
+      return nullptr;
+    }
+
+    return factory()->NewCallRuntime(function, args, pos);
+  }
+
+  int context_index =
+      Context::IntrinsicIndexForName(name->raw_data(), name->length());
+
+  // Check that the function is defined.
+  if (context_index == Context::kNotFound) {
+    ReportMessage(MessageTemplate::kNotDefined, name);
+    *ok = false;
+    return nullptr;
+  }
+
+  return factory()->NewCallRuntime(context_index, args, pos);
+}
+
 Parser::Parser(ParseInfo* info)
     : ParserBase<Parser>(info->zone(), &scanner_, info->stack_limit(),
                          info->extension(), info->ast_value_factory(), NULL),
@@ -699,7 +646,8 @@
   // ParseInfo during background parsing.
   DCHECK(!info->script().is_null() || info->source_stream() != nullptr ||
          info->character_stream() != nullptr);
-  set_allow_lazy(info->allow_lazy_parsing());
+  set_allow_lazy(FLAG_lazy && info->allow_lazy_parsing() &&
+                 !info->is_native() && info->extension() == nullptr);
   set_allow_natives(FLAG_allow_natives_syntax || info->is_native());
   set_allow_tailcalls(FLAG_harmony_tailcalls && !info->is_native() &&
                       info->isolate()->is_tail_call_elimination_enabled());
@@ -711,6 +659,7 @@
   set_allow_harmony_async_await(FLAG_harmony_async_await);
   set_allow_harmony_restrictive_generators(FLAG_harmony_restrictive_generators);
   set_allow_harmony_trailing_commas(FLAG_harmony_trailing_commas);
+  set_allow_harmony_class_fields(FLAG_harmony_class_fields);
   for (int feature = 0; feature < v8::Isolate::kUseCounterFeatureCount;
        ++feature) {
     use_counts_[feature] = 0;
@@ -725,29 +674,19 @@
 }
 
 void Parser::DeserializeScopeChain(
-    ParseInfo* info, Handle<Context> context,
-    Scope::DeserializationMode deserialization_mode) {
+    ParseInfo* info, MaybeHandle<ScopeInfo> maybe_outer_scope_info) {
   DCHECK(ThreadId::Current().Equals(info->isolate()->thread_id()));
   // TODO(wingo): Add an outer SCRIPT_SCOPE corresponding to the native
   // context, which will have the "this" binding for script scopes.
   DeclarationScope* script_scope = NewScriptScope();
   info->set_script_scope(script_scope);
   Scope* scope = script_scope;
-  if (!context.is_null() && !context->IsNativeContext()) {
-    scope = Scope::DeserializeScopeChain(info->isolate(), zone(), *context,
-                                         script_scope, ast_value_factory(),
-                                         deserialization_mode);
-    if (info->context().is_null()) {
-      DCHECK(deserialization_mode ==
-             Scope::DeserializationMode::kDeserializeOffHeap);
-    } else {
-      // The Scope is backed up by ScopeInfo (which is in the V8 heap); this
-      // means the Parser cannot operate independent of the V8 heap. Tell the
-      // string table to internalize strings and values right after they're
-      // created. This kind of parsing can only be done in the main thread.
-      DCHECK(parsing_on_main_thread_);
-      ast_value_factory()->Internalize(info->isolate());
-    }
+  Handle<ScopeInfo> outer_scope_info;
+  if (maybe_outer_scope_info.ToHandle(&outer_scope_info)) {
+    scope = Scope::DeserializeScopeChain(
+        info->isolate(), zone(), *outer_scope_info, script_scope,
+        ast_value_factory(), Scope::DeserializationMode::kScopesOnly);
+    DCHECK(!info->is_module() || scope->is_module_scope());
   }
   original_scope_ = scope;
 }
@@ -762,8 +701,7 @@
 
   HistogramTimerScope timer_scope(isolate->counters()->parse(), true);
   RuntimeCallTimerScope runtime_timer(isolate, &RuntimeCallStats::Parse);
-  TRACE_EVENT_RUNTIME_CALL_STATS_TRACING_SCOPED(
-      isolate, &tracing::TraceEventStatsTable::Parse);
+  TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"), "V8.Parse");
   Handle<String> source(String::cast(info->script()->source()));
   isolate->counters()->total_parse_size()->Increment(source->length());
   base::ElapsedTimer timer;
@@ -781,24 +719,13 @@
     cached_parse_data_->Initialize();
   }
 
-  DeserializeScopeChain(info, info->context(),
-                        Scope::DeserializationMode::kKeepScopeInfo);
+  DeserializeScopeChain(info, info->maybe_outer_scope_info());
 
   source = String::Flatten(source);
   FunctionLiteral* result;
 
   {
-    std::unique_ptr<Utf16CharacterStream> stream;
-    if (source->IsExternalTwoByteString()) {
-      stream.reset(new ExternalTwoByteStringUtf16CharacterStream(
-          Handle<ExternalTwoByteString>::cast(source), 0, source->length()));
-    } else if (source->IsExternalOneByteString()) {
-      stream.reset(new ExternalOneByteStringUtf16CharacterStream(
-          Handle<ExternalOneByteString>::cast(source), 0, source->length()));
-    } else {
-      stream.reset(
-          new GenericStringUtf16CharacterStream(source, 0, source->length()));
-    }
+    std::unique_ptr<Utf16CharacterStream> stream(ScannerStream::For(source));
     scanner_.Initialize(stream.get());
     result = DoParseProgram(info);
   }
@@ -835,27 +762,25 @@
   DCHECK_NULL(scope_state_);
   DCHECK_NULL(target_stack_);
 
-  Mode parsing_mode = FLAG_lazy && allow_lazy() ? PARSE_LAZILY : PARSE_EAGERLY;
-  if (allow_natives() || extension_ != NULL) parsing_mode = PARSE_EAGERLY;
+  Mode parsing_mode = allow_lazy() ? PARSE_LAZILY : PARSE_EAGERLY;
 
   FunctionLiteral* result = NULL;
   {
     Scope* outer = original_scope_;
-    // If there's a chance that there's a reference to global 'this', predeclare
-    // it as a dynamic global on the script scope.
-    if (outer->GetReceiverScope()->is_script_scope()) {
-      info->script_scope()->DeclareDynamicGlobal(
-          ast_value_factory()->this_string(), Variable::THIS);
-    }
-    DCHECK(outer);
+    DCHECK_NOT_NULL(outer);
+    parsing_module_ = info->is_module();
     if (info->is_eval()) {
       if (!outer->is_script_scope() || is_strict(info->language_mode())) {
         parsing_mode = PARSE_EAGERLY;
       }
       outer = NewEvalScope(outer);
-    } else if (info->is_module()) {
+    } else if (parsing_module_) {
       DCHECK_EQ(outer, info->script_scope());
       outer = NewModuleScope(info->script_scope());
+      // Never do lazy parsing in modules.  If we want to support this in the
+      // future, we must force context-allocation for all variables that are
+      // declared at the module level but not MODULE-allocated.
+      parsing_mode = PARSE_EAGERLY;
     }
 
     DeclarationScope* scope = outer->AsDeclarationScope();
@@ -864,14 +789,29 @@
 
     // Enter 'scope' with the given parsing mode.
     ParsingModeScope parsing_mode_scope(this, parsing_mode);
-    FunctionState function_state(&function_state_, &scope_state_, scope,
-                                 kNormalFunction);
+    FunctionState function_state(&function_state_, &scope_state_, scope);
 
     ZoneList<Statement*>* body = new(zone()) ZoneList<Statement*>(16, zone());
     bool ok = true;
     int beg_pos = scanner()->location().beg_pos;
-    parsing_module_ = info->is_module();
     if (parsing_module_) {
+      // Declare the special module parameter.
+      auto name = ast_value_factory()->empty_string();
+      bool is_duplicate;
+      bool is_rest = false;
+      bool is_optional = false;
+      auto var = scope->DeclareParameter(name, VAR, is_optional, is_rest,
+                                         &is_duplicate, ast_value_factory());
+      DCHECK(!is_duplicate);
+      var->AllocateTo(VariableLocation::PARAMETER, 0);
+
+      PrepareGeneratorVariables(&function_state);
+      Expression* initial_yield =
+          BuildInitialYield(kNoSourcePosition, kGeneratorFunction);
+      body->Add(
+          factory()->NewExpressionStatement(initial_yield, kNoSourcePosition),
+          zone());
+
       ParseModuleItemList(body, &ok);
       ok = ok &&
            module()->Validate(this->scope()->AsModuleScope(),
@@ -889,7 +829,7 @@
 
     if (ok && is_strict(language_mode())) {
       CheckStrictOctalLiteral(beg_pos, scanner()->location().end_pos, &ok);
-      CheckDecimalLiteralWithLeadingZero(use_counts_, beg_pos,
+      CheckDecimalLiteralWithLeadingZero(beg_pos,
                                          scanner()->location().end_pos);
     }
     if (ok && is_sloppy(language_mode())) {
@@ -897,7 +837,7 @@
       // pre-existing bindings should be made writable, enumerable and
       // nonconfigurable if possible, whereas this code will leave attributes
       // unchanged if the property already exists.
-      InsertSloppyBlockFunctionVarBindings(scope, nullptr, &ok);
+      InsertSloppyBlockFunctionVarBindings(scope);
     }
     if (ok) {
       CheckConflictingVarDeclarations(scope, &ok);
@@ -915,9 +855,10 @@
 
     if (ok) {
       RewriteDestructuringAssignments();
+      int parameter_count = parsing_module_ ? 1 : 0;
       result = factory()->NewScriptOrEvalFunctionLiteral(
           scope, body, function_state.materialized_literal_count(),
-          function_state.expected_property_count());
+          function_state.expected_property_count(), parameter_count);
     }
   }
 
@@ -934,8 +875,7 @@
   DCHECK(parsing_on_main_thread_);
   RuntimeCallTimerScope runtime_timer(isolate, &RuntimeCallStats::ParseLazy);
   HistogramTimerScope timer_scope(isolate->counters()->parse_lazy());
-  TRACE_EVENT_RUNTIME_CALL_STATS_TRACING_SCOPED(
-      isolate, &tracing::TraceEventStatsTable::ParseLazy);
+  TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"), "V8.ParseLazy");
   Handle<String> source(String::cast(info->script()->source()));
   isolate->counters()->total_parse_size()->Increment(source->length());
   base::ElapsedTimer timer;
@@ -943,26 +883,14 @@
     timer.Start();
   }
   Handle<SharedFunctionInfo> shared_info = info->shared_info();
-  DeserializeScopeChain(info, info->context(),
-                        Scope::DeserializationMode::kKeepScopeInfo);
+  DeserializeScopeChain(info, info->maybe_outer_scope_info());
 
   // Initialize parser state.
   source = String::Flatten(source);
   FunctionLiteral* result;
   {
-    std::unique_ptr<Utf16CharacterStream> stream;
-    if (source->IsExternalTwoByteString()) {
-      stream.reset(new ExternalTwoByteStringUtf16CharacterStream(
-          Handle<ExternalTwoByteString>::cast(source),
-          shared_info->start_position(), shared_info->end_position()));
-    } else if (source->IsExternalOneByteString()) {
-      stream.reset(new ExternalOneByteStringUtf16CharacterStream(
-          Handle<ExternalOneByteString>::cast(source),
-          shared_info->start_position(), shared_info->end_position()));
-    } else {
-      stream.reset(new GenericStringUtf16CharacterStream(
-          source, shared_info->start_position(), shared_info->end_position()));
-    }
+    std::unique_ptr<Utf16CharacterStream> stream(ScannerStream::For(
+        source, shared_info->start_position(), shared_info->end_position()));
     Handle<String> name(String::cast(shared_info->name()));
     result =
         DoParseLazy(info, ast_value_factory()->GetString(name), stream.get());
@@ -974,6 +902,8 @@
 
   if (FLAG_trace_parse && result != NULL) {
     double ms = timer.Elapsed().InMillisecondsF();
+    // We need to make sure that the debug-name is available.
+    ast_value_factory()->Internalize(isolate);
     std::unique_ptr<char[]> name_chars = result->debug_name()->ToCString();
     PrintF("[parsing function: %s - took %0.3f ms]\n", name_chars.get(), ms);
   }
@@ -1010,24 +940,20 @@
 
   {
     // Parse the function literal.
-    Scope* scope = original_scope_;
-    DCHECK(scope);
-    // If there's a chance that there's a reference to global 'this', predeclare
-    // it as a dynamic global on the script scope.
-    if (info->is_arrow() && scope->GetReceiverScope()->is_script_scope()) {
-      info->script_scope()->DeclareDynamicGlobal(
-          ast_value_factory()->this_string(), Variable::THIS);
-    }
-    FunctionState function_state(&function_state_, &scope_state_, scope,
-                                 info->function_kind());
-    DCHECK(is_sloppy(scope->language_mode()) ||
+    Scope* outer = original_scope_;
+    DeclarationScope* outer_function = outer->GetClosureScope();
+    DCHECK(outer);
+    FunctionState function_state(&function_state_, &scope_state_,
+                                 outer_function);
+    BlockState block_state(&scope_state_, outer);
+    DCHECK(is_sloppy(outer->language_mode()) ||
            is_strict(info->language_mode()));
     FunctionLiteral::FunctionType function_type = ComputeFunctionType(info);
+    FunctionKind kind = info->function_kind();
     bool ok = true;
 
-    if (info->is_arrow()) {
-      bool is_async = allow_harmony_async_await() && info->is_async();
-      if (is_async) {
+    if (IsArrowFunction(kind)) {
+      if (allow_harmony_async_await() && IsAsyncFunction(kind)) {
         DCHECK(!scanner()->HasAnyLineTerminatorAfterNext());
         if (!Check(Token::ASYNC)) {
           CHECK(stack_overflow());
@@ -1040,7 +966,7 @@
       }
 
       // TODO(adamk): We should construct this scope from the ScopeInfo.
-      DeclarationScope* scope = NewFunctionScope(FunctionKind::kArrowFunction);
+      DeclarationScope* scope = NewFunctionScope(kind);
 
       // These two bits only need to be explicitly set because we're
       // not passing the ScopeInfo to the Scope constructor.
@@ -1062,15 +988,12 @@
         BlockState block_state(&scope_state_, scope);
         if (Check(Token::LPAREN)) {
           // '(' StrictFormalParameters ')'
-          ParseFormalParameterList(&formals, &formals_classifier, &ok);
+          ParseFormalParameterList(&formals, &ok);
           if (ok) ok = Check(Token::RPAREN);
         } else {
           // BindingIdentifier
-          ParseFormalParameter(&formals, &formals_classifier, &ok);
-          if (ok) {
-            DeclareFormalParameter(formals.scope, formals.at(0),
-                                   &formals_classifier);
-          }
+          ParseFormalParameter(&formals, &ok);
+          if (ok) DeclareFormalParameter(formals.scope, formals.at(0));
         }
       }
 
@@ -1078,8 +1001,7 @@
         checkpoint.Restore(&formals.materialized_literals_count);
         // Pass `accept_IN=true` to ParseArrowFunctionLiteral --- This should
         // not be observable, or else the preparser would have failed.
-        Expression* expression = ParseArrowFunctionLiteral(
-            true, formals, is_async, formals_classifier, &ok);
+        Expression* expression = ParseArrowFunctionLiteral(true, formals, &ok);
         if (ok) {
           // Scanning must end at the same position that was recorded
           // previously. If not, parsing has been interrupted due to a stack
@@ -1097,16 +1019,31 @@
           }
         }
       }
-    } else if (info->is_default_constructor()) {
-      DCHECK_EQ(this->scope(), scope);
+    } else if (IsDefaultConstructor(kind)) {
+      DCHECK_EQ(scope(), outer);
+      bool is_subclass_constructor = IsSubclassConstructor(kind);
       result = DefaultConstructor(
-          raw_name, IsSubclassConstructor(info->function_kind()),
+          raw_name, is_subclass_constructor, info->requires_class_field_init(),
           info->start_position(), info->end_position(), info->language_mode());
+      if (!is_subclass_constructor && info->requires_class_field_init()) {
+        result = InsertClassFieldInitializer(result);
+      }
+    } else if (info->is_class_field_initializer()) {
+      Handle<SharedFunctionInfo> shared_info = info->shared_info();
+      DCHECK(!shared_info.is_null());
+      if (shared_info->length() == 0) {
+        result = ParseClassFieldForInitializer(
+            info->start_position() != info->end_position(), &ok);
+      } else {
+        result = SynthesizeClassFieldInitializer(shared_info->length());
+      }
     } else {
-      result = ParseFunctionLiteral(raw_name, Scanner::Location::invalid(),
-                                    kSkipFunctionNameCheck,
-                                    info->function_kind(), kNoSourcePosition,
-                                    function_type, info->language_mode(), &ok);
+      result = ParseFunctionLiteral(
+          raw_name, Scanner::Location::invalid(), kSkipFunctionNameCheck, kind,
+          kNoSourcePosition, function_type, info->language_mode(), &ok);
+      if (info->requires_class_field_init()) {
+        result = InsertClassFieldInitializer(result);
+      }
     }
     // Make sure the results agree.
     DCHECK(ok == (result != nullptr));
@@ -1117,131 +1054,6 @@
   return result;
 }
 
-
-void Parser::ParseStatementList(ZoneList<Statement*>* body, int end_token,
-                                bool* ok) {
-  // StatementList ::
-  //   (StatementListItem)* <end_token>
-
-  // Allocate a target stack to use for this set of source
-  // elements. This way, all scripts and functions get their own
-  // target stack thus avoiding illegal breaks and continues across
-  // functions.
-  TargetScope scope(&this->target_stack_);
-
-  DCHECK(body != NULL);
-  bool directive_prologue = true;     // Parsing directive prologue.
-
-  while (peek() != end_token) {
-    if (directive_prologue && peek() != Token::STRING) {
-      directive_prologue = false;
-    }
-
-    Scanner::Location token_loc = scanner()->peek_location();
-    Statement* stat = ParseStatementListItem(CHECK_OK_VOID);
-    if (stat == NULL || stat->IsEmpty()) {
-      directive_prologue = false;   // End of directive prologue.
-      continue;
-    }
-
-    if (directive_prologue) {
-      // A shot at a directive.
-      ExpressionStatement* e_stat;
-      Literal* literal;
-      // Still processing directive prologue?
-      if ((e_stat = stat->AsExpressionStatement()) != NULL &&
-          (literal = e_stat->expression()->AsLiteral()) != NULL &&
-          literal->raw_value()->IsString()) {
-        // Check "use strict" directive (ES5 14.1), "use asm" directive.
-        bool use_strict_found =
-            literal->raw_value()->AsString() ==
-                ast_value_factory()->use_strict_string() &&
-            token_loc.end_pos - token_loc.beg_pos ==
-                ast_value_factory()->use_strict_string()->length() + 2;
-        if (use_strict_found) {
-          if (is_sloppy(language_mode())) {
-            RaiseLanguageMode(STRICT);
-          }
-
-          if (!this->scope()->HasSimpleParameters()) {
-            // TC39 deemed "use strict" directives to be an error when occurring
-            // in the body of a function with non-simple parameter list, on
-            // 29/7/2015. https://goo.gl/ueA7Ln
-            const AstRawString* string = literal->raw_value()->AsString();
-            ReportMessageAt(token_loc,
-                            MessageTemplate::kIllegalLanguageModeDirective,
-                            string);
-            *ok = false;
-            return;
-          }
-          // Because declarations in strict eval code don't leak into the scope
-          // of the eval call, it is likely that functions declared in strict
-          // eval code will be used within the eval code, so lazy parsing is
-          // probably not a win.
-          if (this->scope()->is_eval_scope()) mode_ = PARSE_EAGERLY;
-        } else if (literal->raw_value()->AsString() ==
-                       ast_value_factory()->use_asm_string() &&
-                   token_loc.end_pos - token_loc.beg_pos ==
-                       ast_value_factory()->use_asm_string()->length() + 2) {
-          // Store the usage count; The actual use counter on the isolate is
-          // incremented after parsing is done.
-          ++use_counts_[v8::Isolate::kUseAsm];
-          DCHECK(this->scope()->is_declaration_scope());
-          this->scope()->AsDeclarationScope()->set_asm_module();
-        } else {
-          // Should not change mode, but will increment UseCounter
-          // if appropriate. Ditto usages below.
-          RaiseLanguageMode(SLOPPY);
-        }
-      } else {
-        // End of the directive prologue.
-        directive_prologue = false;
-        RaiseLanguageMode(SLOPPY);
-      }
-    } else {
-      RaiseLanguageMode(SLOPPY);
-    }
-
-    body->Add(stat, zone());
-  }
-}
-
-
-Statement* Parser::ParseStatementListItem(bool* ok) {
-  // (Ecma 262 6th Edition, 13.1):
-  // StatementListItem:
-  //    Statement
-  //    Declaration
-  const Token::Value peeked = peek();
-  switch (peeked) {
-    case Token::FUNCTION:
-      return ParseHoistableDeclaration(NULL, false, ok);
-    case Token::CLASS:
-      Consume(Token::CLASS);
-      return ParseClassDeclaration(NULL, false, ok);
-    case Token::CONST:
-      return ParseVariableStatement(kStatementListItem, NULL, ok);
-    case Token::VAR:
-      return ParseVariableStatement(kStatementListItem, NULL, ok);
-    case Token::LET:
-      if (IsNextLetKeyword()) {
-        return ParseVariableStatement(kStatementListItem, NULL, ok);
-      }
-      break;
-    case Token::ASYNC:
-      if (allow_harmony_async_await() && PeekAhead() == Token::FUNCTION &&
-          !scanner()->HasAnyLineTerminatorAfterNext()) {
-        Consume(Token::ASYNC);
-        return ParseAsyncFunctionDeclaration(NULL, false, ok);
-      }
-    /* falls through */
-    default:
-      break;
-  }
-  return ParseStatement(NULL, kAllowLabelledFunctionStatement, ok);
-}
-
-
 Statement* Parser::ParseModuleItem(bool* ok) {
   // ecma262/#prod-ModuleItem
   // ModuleItem :
@@ -1285,7 +1097,7 @@
   //    StringLiteral
 
   Expect(Token::STRING, CHECK_OK);
-  return GetSymbol(scanner());
+  return GetSymbol();
 }
 
 
@@ -1413,7 +1225,7 @@
   if (tok == Token::STRING) {
     const AstRawString* module_specifier = ParseModuleSpecifier(CHECK_OK_VOID);
     ExpectSemicolon(CHECK_OK_VOID);
-    module()->AddEmptyImport(module_specifier, scanner()->location(), zone());
+    module()->AddEmptyImport(module_specifier);
     return;
   }
 
@@ -1481,7 +1293,7 @@
 
   if (named_imports != nullptr) {
     if (named_imports->length() == 0) {
-      module()->AddEmptyImport(module_specifier, scanner()->location(), zone());
+      module()->AddEmptyImport(module_specifier);
     } else {
       for (int i = 0; i < named_imports->length(); ++i) {
         const NamedImport* import = named_imports->at(i);
@@ -1526,9 +1338,8 @@
     default: {
       int pos = position();
       ExpressionClassifier classifier(this);
-      Expression* value =
-          ParseAssignmentExpression(true, &classifier, CHECK_OK);
-      RewriteNonPattern(&classifier, CHECK_OK);
+      Expression* value = ParseAssignmentExpression(true, CHECK_OK);
+      RewriteNonPattern(CHECK_OK);
       SetFunctionName(value, ast_value_factory()->default_string());
 
       const AstRawString* local_name =
@@ -1621,8 +1432,7 @@
                               export_locations[i], zone());
         }
       } else if (length == 0) {
-        module()->AddEmptyImport(module_specifier, scanner()->location(),
-                                 zone());
+        module()->AddEmptyImport(module_specifier);
       } else {
         for (int i = 0; i < length; ++i) {
           module()->AddExport(original_names[i], export_names[i],
@@ -1673,141 +1483,8 @@
   return result;
 }
 
-Statement* Parser::ParseStatement(ZoneList<const AstRawString*>* labels,
-                                  AllowLabelledFunctionStatement allow_function,
-                                  bool* ok) {
-  // Statement ::
-  //   EmptyStatement
-  //   ...
-
-  if (peek() == Token::SEMICOLON) {
-    Next();
-    return factory()->NewEmptyStatement(kNoSourcePosition);
-  }
-  return ParseSubStatement(labels, allow_function, ok);
-}
-
-Statement* Parser::ParseSubStatement(
-    ZoneList<const AstRawString*>* labels,
-    AllowLabelledFunctionStatement allow_function, bool* ok) {
-  // Statement ::
-  //   Block
-  //   VariableStatement
-  //   EmptyStatement
-  //   ExpressionStatement
-  //   IfStatement
-  //   IterationStatement
-  //   ContinueStatement
-  //   BreakStatement
-  //   ReturnStatement
-  //   WithStatement
-  //   LabelledStatement
-  //   SwitchStatement
-  //   ThrowStatement
-  //   TryStatement
-  //   DebuggerStatement
-
-  // Note: Since labels can only be used by 'break' and 'continue'
-  // statements, which themselves are only valid within blocks,
-  // iterations or 'switch' statements (i.e., BreakableStatements),
-  // labels can be simply ignored in all other cases; except for
-  // trivial labeled break statements 'label: break label' which is
-  // parsed into an empty statement.
-  switch (peek()) {
-    case Token::LBRACE:
-      return ParseBlock(labels, ok);
-
-    case Token::SEMICOLON:
-      Next();
-      return factory()->NewEmptyStatement(kNoSourcePosition);
-
-    case Token::IF:
-      return ParseIfStatement(labels, ok);
-
-    case Token::DO:
-      return ParseDoWhileStatement(labels, ok);
-
-    case Token::WHILE:
-      return ParseWhileStatement(labels, ok);
-
-    case Token::FOR:
-      return ParseForStatement(labels, ok);
-
-    case Token::CONTINUE:
-    case Token::BREAK:
-    case Token::RETURN:
-    case Token::THROW:
-    case Token::TRY: {
-      // These statements must have their labels preserved in an enclosing
-      // block
-      if (labels == NULL) {
-        return ParseStatementAsUnlabelled(labels, ok);
-      } else {
-        Block* result =
-            factory()->NewBlock(labels, 1, false, kNoSourcePosition);
-        Target target(&this->target_stack_, result);
-        Statement* statement = ParseStatementAsUnlabelled(labels, CHECK_OK);
-        if (result) result->statements()->Add(statement, zone());
-        return result;
-      }
-    }
-
-    case Token::WITH:
-      return ParseWithStatement(labels, ok);
-
-    case Token::SWITCH:
-      return ParseSwitchStatement(labels, ok);
-
-    case Token::FUNCTION:
-      // FunctionDeclaration only allowed as a StatementListItem, not in
-      // an arbitrary Statement position. Exceptions such as
-      // ES#sec-functiondeclarations-in-ifstatement-statement-clauses
-      // are handled by calling ParseScopedStatement rather than
-      // ParseSubStatement directly.
-      ReportMessageAt(scanner()->peek_location(),
-                      is_strict(language_mode())
-                          ? MessageTemplate::kStrictFunction
-                          : MessageTemplate::kSloppyFunction);
-      *ok = false;
-      return nullptr;
-
-    case Token::DEBUGGER:
-      return ParseDebuggerStatement(ok);
-
-    case Token::VAR:
-      return ParseVariableStatement(kStatement, NULL, ok);
-
-    default:
-      return ParseExpressionOrLabelledStatement(labels, allow_function, ok);
-  }
-}
-
-Statement* Parser::ParseStatementAsUnlabelled(
-    ZoneList<const AstRawString*>* labels, bool* ok) {
-  switch (peek()) {
-    case Token::CONTINUE:
-      return ParseContinueStatement(ok);
-
-    case Token::BREAK:
-      return ParseBreakStatement(labels, ok);
-
-    case Token::RETURN:
-      return ParseReturnStatement(ok);
-
-    case Token::THROW:
-      return ParseThrowStatement(ok);
-
-    case Token::TRY:
-      return ParseTryStatement(ok);
-
-    default:
-      UNREACHABLE();
-      return NULL;
-  }
-}
-
 VariableProxy* Parser::NewUnresolved(const AstRawString* name, int begin_pos,
-                                     int end_pos, Variable::Kind kind) {
+                                     int end_pos, VariableKind kind) {
   return scope()->NewUnresolved(factory(), name, begin_pos, end_pos, kind);
 }
 
@@ -1816,25 +1493,19 @@
                                 scanner()->location().end_pos);
 }
 
-InitializationFlag Parser::DefaultInitializationFlag(VariableMode mode) {
-  DCHECK(IsDeclaredVariableMode(mode));
-  return mode == VAR ? kCreatedInitialized : kNeedsInitialization;
-}
-
 Declaration* Parser::DeclareVariable(const AstRawString* name,
                                      VariableMode mode, int pos, bool* ok) {
-  return DeclareVariable(name, mode, DefaultInitializationFlag(mode), pos, ok);
+  return DeclareVariable(name, mode, Variable::DefaultInitializationFlag(mode),
+                         pos, ok);
 }
 
 Declaration* Parser::DeclareVariable(const AstRawString* name,
                                      VariableMode mode, InitializationFlag init,
                                      int pos, bool* ok) {
   DCHECK_NOT_NULL(name);
-  Scope* scope =
-      IsLexicalVariableMode(mode) ? this->scope() : GetDeclarationScope();
-  VariableProxy* proxy =
-      scope->NewUnresolved(factory(), name, scanner()->location().beg_pos,
-                           scanner()->location().end_pos);
+  VariableProxy* proxy = factory()->NewVariableProxy(
+      name, NORMAL_VARIABLE, scanner()->location().beg_pos,
+      scanner()->location().end_pos);
   Declaration* declaration =
       factory()->NewVariableDeclaration(proxy, this->scope(), pos);
   Declare(declaration, DeclarationDescriptor::NORMAL, mode, init, CHECK_OK);
@@ -1845,132 +1516,99 @@
                           DeclarationDescriptor::Kind declaration_kind,
                           VariableMode mode, InitializationFlag init, bool* ok,
                           Scope* scope) {
-  DCHECK(IsDeclaredVariableMode(mode) && mode != CONST_LEGACY);
-
-  VariableProxy* proxy = declaration->proxy();
-  DCHECK(proxy->raw_name() != NULL);
-  const AstRawString* name = proxy->raw_name();
-
-  if (scope == nullptr) scope = this->scope();
-  if (mode == VAR) scope = scope->GetDeclarationScope();
-  DCHECK(!scope->is_catch_scope());
-  DCHECK(!scope->is_with_scope());
-  DCHECK(scope->is_declaration_scope() ||
-         (IsLexicalVariableMode(mode) && scope->is_block_scope()));
-
-  bool is_function_declaration = declaration->IsFunctionDeclaration();
-
-  Variable* var = NULL;
-  if (scope->is_eval_scope() && is_sloppy(scope->language_mode()) &&
-      mode == VAR) {
-    // In a var binding in a sloppy direct eval, pollute the enclosing scope
-    // with this new binding by doing the following:
-    // The proxy is bound to a lookup variable to force a dynamic declaration
-    // using the DeclareEvalVar or DeclareEvalFunction runtime functions.
-    Variable::Kind kind = Variable::NORMAL;
-    // TODO(sigurds) figure out if kNotAssigned is OK here
-    var = new (zone()) Variable(scope, name, mode, kind, init, kNotAssigned);
-    var->AllocateTo(VariableLocation::LOOKUP, -1);
-  } else {
-    // Declare the variable in the declaration scope.
-    var = scope->LookupLocal(name);
-    if (var == NULL) {
-      // Declare the name.
-      Variable::Kind kind = Variable::NORMAL;
-      if (is_function_declaration) {
-        kind = Variable::FUNCTION;
-      }
-      var = scope->DeclareLocal(name, mode, init, kind, kNotAssigned);
-    } else if (IsLexicalVariableMode(mode) ||
-               IsLexicalVariableMode(var->mode())) {
-      // Allow duplicate function decls for web compat, see bug 4693.
-      bool duplicate_allowed = false;
-      if (is_sloppy(scope->language_mode()) && is_function_declaration &&
-          var->is_function()) {
-        DCHECK(IsLexicalVariableMode(mode) &&
-               IsLexicalVariableMode(var->mode()));
-        // If the duplication is allowed, then the var will show up
-        // in the SloppyBlockFunctionMap and the new FunctionKind
-        // will be a permitted duplicate.
-        FunctionKind function_kind =
-            declaration->AsFunctionDeclaration()->fun()->kind();
-        duplicate_allowed =
-            scope->GetDeclarationScope()->sloppy_block_function_map()->Lookup(
-                const_cast<AstRawString*>(name), name->hash()) != nullptr &&
-            !IsAsyncFunction(function_kind) &&
-            !(allow_harmony_restrictive_generators() &&
-              IsGeneratorFunction(function_kind));
-      }
-      if (duplicate_allowed) {
-        ++use_counts_[v8::Isolate::kSloppyModeBlockScopedFunctionRedefinition];
-      } else {
-        // The name was declared in this scope before; check for conflicting
-        // re-declarations. We have a conflict if either of the declarations
-        // is not a var (in script scope, we also have to ignore legacy const
-        // for compatibility). There is similar code in runtime.cc in the
-        // Declare functions. The function CheckConflictingVarDeclarations
-        // checks for var and let bindings from different scopes whereas this
-        // is a check for conflicting declarations within the same scope. This
-        // check also covers the special case
-        //
-        // function () { let x; { var x; } }
-        //
-        // because the var declaration is hoisted to the function scope where
-        // 'x' is already bound.
-        DCHECK(IsDeclaredVariableMode(var->mode()));
-        // In harmony we treat re-declarations as early errors. See
-        // ES5 16 for a definition of early errors.
-        if (declaration_kind == DeclarationDescriptor::NORMAL) {
-          ReportMessage(MessageTemplate::kVarRedeclaration, name);
-        } else {
-          ReportMessage(MessageTemplate::kParamDupe);
-        }
-        *ok = false;
-        return nullptr;
-      }
-    } else if (mode == VAR) {
-      var->set_maybe_assigned();
-    }
+  if (scope == nullptr) {
+    scope = this->scope();
   }
-  DCHECK_NOT_NULL(var);
-
-  // We add a declaration node for every declaration. The compiler
-  // will only generate code if necessary. In particular, declarations
-  // for inner local variables that do not represent functions won't
-  // result in any generated code.
-  //
-  // This will lead to multiple declaration nodes for the
-  // same variable if it is declared several times. This is not a
-  // semantic issue, but it may be a performance issue since it may
-  // lead to repeated DeclareEvalVar or DeclareEvalFunction calls.
-  scope->AddDeclaration(declaration);
-  proxy->BindTo(var);
-  return var;
+  bool sloppy_mode_block_scope_function_redefinition = false;
+  Variable* variable = scope->DeclareVariable(
+      declaration, mode, init, allow_harmony_restrictive_generators(),
+      &sloppy_mode_block_scope_function_redefinition, ok);
+  if (!*ok) {
+    if (declaration_kind == DeclarationDescriptor::NORMAL) {
+      ReportMessage(MessageTemplate::kVarRedeclaration,
+                    declaration->proxy()->raw_name());
+    } else {
+      ReportMessage(MessageTemplate::kParamDupe);
+    }
+    return nullptr;
+  }
+  if (sloppy_mode_block_scope_function_redefinition) {
+    ++use_counts_[v8::Isolate::kSloppyModeBlockScopedFunctionRedefinition];
+  }
+  return variable;
 }
 
-
-// Language extension which is only enabled for source files loaded
-// through the API's extension mechanism.  A native function
-// declaration is resolved by looking up the function through a
-// callback provided by the extension.
-Statement* Parser::ParseNativeDeclaration(bool* ok) {
-  int pos = peek_position();
-  Expect(Token::FUNCTION, CHECK_OK);
-  // Allow "eval" or "arguments" for backward compatibility.
-  const AstRawString* name =
-      ParseIdentifier(kAllowRestrictedIdentifiers, CHECK_OK);
-  Expect(Token::LPAREN, CHECK_OK);
-  bool done = (peek() == Token::RPAREN);
-  while (!done) {
-    ParseIdentifier(kAllowRestrictedIdentifiers, CHECK_OK);
-    done = (peek() == Token::RPAREN);
-    if (!done) {
-      Expect(Token::COMMA, CHECK_OK);
-    }
+Block* Parser::BuildInitializationBlock(
+    DeclarationParsingResult* parsing_result,
+    ZoneList<const AstRawString*>* names, bool* ok) {
+  Block* result = factory()->NewBlock(
+      NULL, 1, true, parsing_result->descriptor.declaration_pos);
+  for (auto declaration : parsing_result->declarations) {
+    PatternRewriter::DeclareAndInitializeVariables(
+        this, result, &(parsing_result->descriptor), &declaration, names,
+        CHECK_OK);
   }
-  Expect(Token::RPAREN, CHECK_OK);
-  Expect(Token::SEMICOLON, CHECK_OK);
+  return result;
+}
 
+void Parser::DeclareAndInitializeVariables(
+    Block* block, const DeclarationDescriptor* declaration_descriptor,
+    const DeclarationParsingResult::Declaration* declaration,
+    ZoneList<const AstRawString*>* names, bool* ok) {
+  DCHECK_NOT_NULL(block);
+  PatternRewriter::DeclareAndInitializeVariables(
+      this, block, declaration_descriptor, declaration, names, ok);
+}
+
+Statement* Parser::DeclareFunction(const AstRawString* variable_name,
+                                   FunctionLiteral* function, int pos,
+                                   bool is_generator, bool is_async,
+                                   ZoneList<const AstRawString*>* names,
+                                   bool* ok) {
+  // In ES6, a function behaves as a lexical binding, except in
+  // a script scope, or the initial scope of eval or another function.
+  VariableMode mode =
+      (!scope()->is_declaration_scope() || scope()->is_module_scope()) ? LET
+                                                                       : VAR;
+  VariableProxy* proxy =
+      factory()->NewVariableProxy(variable_name, NORMAL_VARIABLE);
+  Declaration* declaration =
+      factory()->NewFunctionDeclaration(proxy, function, scope(), pos);
+  Declare(declaration, DeclarationDescriptor::NORMAL, mode, kCreatedInitialized,
+          CHECK_OK);
+  if (names) names->Add(variable_name, zone());
+  // Async functions don't undergo sloppy mode block scoped hoisting, and don't
+  // allow duplicates in a block. Both are represented by the
+  // sloppy_block_function_map. Don't add them to the map for async functions.
+  // Generators are also supposed to be prohibited; currently doing this behind
+  // a flag and UseCounting violations to assess web compatibility.
+  if (is_sloppy(language_mode()) && !scope()->is_declaration_scope() &&
+      !is_async && !(allow_harmony_restrictive_generators() && is_generator)) {
+    SloppyBlockFunctionStatement* delegate =
+        factory()->NewSloppyBlockFunctionStatement(scope());
+    DeclarationScope* target_scope = GetDeclarationScope();
+    target_scope->DeclareSloppyBlockFunction(variable_name, delegate);
+    return delegate;
+  }
+  return factory()->NewEmptyStatement(kNoSourcePosition);
+}
+
+Statement* Parser::DeclareClass(const AstRawString* variable_name,
+                                Expression* value,
+                                ZoneList<const AstRawString*>* names,
+                                int class_token_pos, int end_pos, bool* ok) {
+  Declaration* decl =
+      DeclareVariable(variable_name, LET, class_token_pos, CHECK_OK);
+  decl->proxy()->var()->set_initializer_position(end_pos);
+  Assignment* assignment = factory()->NewAssignment(Token::INIT, decl->proxy(),
+                                                    value, class_token_pos);
+  Statement* assignment_statement =
+      factory()->NewExpressionStatement(assignment, kNoSourcePosition);
+  if (names) names->Add(variable_name, zone());
+  return assignment_statement;
+}
+
+Statement* Parser::DeclareNative(const AstRawString* name, int pos, bool* ok) {
   // Make sure that the function containing the native declaration
   // isn't lazily compiled. The extension structures are only
   // accessible while parsing the first time not when reparsing
@@ -1989,734 +1627,102 @@
       pos);
 }
 
-Statement* Parser::ParseHoistableDeclaration(
-    ZoneList<const AstRawString*>* names, bool default_export, bool* ok) {
-  Expect(Token::FUNCTION, CHECK_OK);
-  int pos = position();
-  ParseFunctionFlags flags = ParseFunctionFlags::kIsNormal;
-  if (Check(Token::MUL)) {
-    flags |= ParseFunctionFlags::kIsGenerator;
-  }
-  return ParseHoistableDeclaration(pos, flags, names, default_export, ok);
-}
-
-Statement* Parser::ParseAsyncFunctionDeclaration(
-    ZoneList<const AstRawString*>* names, bool default_export, bool* ok) {
-  DCHECK_EQ(scanner()->current_token(), Token::ASYNC);
-  int pos = position();
-  if (scanner()->HasAnyLineTerminatorBeforeNext()) {
+ZoneList<const AstRawString*>* Parser::DeclareLabel(
+    ZoneList<const AstRawString*>* labels, VariableProxy* var, bool* ok) {
+  const AstRawString* label = var->raw_name();
+  // TODO(1240780): We don't check for redeclaration of labels
+  // during preparsing since keeping track of the set of active
+  // labels requires nontrivial changes to the way scopes are
+  // structured.  However, these are probably changes we want to
+  // make later anyway so we should go back and fix this then.
+  if (ContainsLabel(labels, label) || TargetStackContainsLabel(label)) {
+    ReportMessage(MessageTemplate::kLabelRedeclaration, label);
     *ok = false;
-    ReportUnexpectedToken(scanner()->current_token());
     return nullptr;
   }
-  Expect(Token::FUNCTION, CHECK_OK);
-  ParseFunctionFlags flags = ParseFunctionFlags::kIsAsync;
-  return ParseHoistableDeclaration(pos, flags, names, default_export, ok);
+  if (labels == nullptr) {
+    labels = new (zone()) ZoneList<const AstRawString*>(1, zone());
+  }
+  labels->Add(label, zone());
+  // Remove the "ghost" variable that turned out to be a label
+  // from the top scope. This way, we don't try to resolve it
+  // during the scope processing.
+  scope()->RemoveUnresolved(var);
+  return labels;
 }
 
-Statement* Parser::ParseHoistableDeclaration(
-    int pos, ParseFunctionFlags flags, ZoneList<const AstRawString*>* names,
-    bool default_export, bool* ok) {
-  // FunctionDeclaration ::
-  //   'function' Identifier '(' FormalParameters ')' '{' FunctionBody '}'
-  //   'function' '(' FormalParameters ')' '{' FunctionBody '}'
-  // GeneratorDeclaration ::
-  //   'function' '*' Identifier '(' FormalParameters ')' '{' FunctionBody '}'
-  //   'function' '*' '(' FormalParameters ')' '{' FunctionBody '}'
-  //
-  // The anonymous forms are allowed iff [default_export] is true.
-  //
-  // 'function' and '*' (if present) have been consumed by the caller.
-
-  const bool is_generator = flags & ParseFunctionFlags::kIsGenerator;
-  const bool is_async = flags & ParseFunctionFlags::kIsAsync;
-  DCHECK(!is_generator || !is_async);
-
-  const AstRawString* name;
-  FunctionNameValidity name_validity;
-  const AstRawString* variable_name;
-  if (default_export && peek() == Token::LPAREN) {
-    name = ast_value_factory()->default_string();
-    name_validity = kSkipFunctionNameCheck;
-    variable_name = ast_value_factory()->star_default_star_string();
-  } else {
-    bool is_strict_reserved;
-    name = ParseIdentifierOrStrictReservedWord(&is_strict_reserved, CHECK_OK);
-    name_validity = is_strict_reserved ? kFunctionNameIsStrictReserved
-                                       : kFunctionNameValidityUnknown;
-    variable_name = name;
-  }
-
-  FuncNameInferrer::State fni_state(fni_);
-  if (fni_ != NULL) fni_->PushEnclosingName(name);
-  FunctionLiteral* fun = ParseFunctionLiteral(
-      name, scanner()->location(), name_validity,
-      is_generator ? FunctionKind::kGeneratorFunction
-                   : is_async ? FunctionKind::kAsyncFunction
-                              : FunctionKind::kNormalFunction,
-      pos, FunctionLiteral::kDeclaration, language_mode(), CHECK_OK);
-
-  // In ES6, a function behaves as a lexical binding, except in
-  // a script scope, or the initial scope of eval or another function.
-  VariableMode mode =
-      (!scope()->is_declaration_scope() || scope()->is_module_scope()) ? LET
-                                                                       : VAR;
-  VariableProxy* proxy = NewUnresolved(variable_name);
-  Declaration* declaration =
-      factory()->NewFunctionDeclaration(proxy, fun, scope(), pos);
-  Declare(declaration, DeclarationDescriptor::NORMAL, mode, kCreatedInitialized,
-          CHECK_OK);
-  if (names) names->Add(variable_name, zone());
-  EmptyStatement* empty = factory()->NewEmptyStatement(kNoSourcePosition);
-  // Async functions don't undergo sloppy mode block scoped hoisting, and don't
-  // allow duplicates in a block. Both are represented by the
-  // sloppy_block_function_map. Don't add them to the map for async functions.
-  // Generators are also supposed to be prohibited; currently doing this behind
-  // a flag and UseCounting violations to assess web compatibility.
-  if (is_sloppy(language_mode()) && !scope()->is_declaration_scope() &&
-      !is_async && !(allow_harmony_restrictive_generators() && is_generator)) {
-    SloppyBlockFunctionStatement* delegate =
-        factory()->NewSloppyBlockFunctionStatement(empty, scope());
-    DeclarationScope* target_scope = GetDeclarationScope();
-    target_scope->DeclareSloppyBlockFunction(variable_name, delegate);
-    return delegate;
-  }
-  return empty;
-}
-
-Statement* Parser::ParseClassDeclaration(ZoneList<const AstRawString*>* names,
-                                         bool default_export, bool* ok) {
-  // ClassDeclaration ::
-  //   'class' Identifier ('extends' LeftHandExpression)? '{' ClassBody '}'
-  //   'class' ('extends' LeftHandExpression)? '{' ClassBody '}'
-  //
-  // The anonymous form is allowed iff [default_export] is true.
-  //
-  // 'class' is expected to be consumed by the caller.
-  //
-  // A ClassDeclaration
-  //
-  //   class C { ... }
-  //
-  // has the same semantics as:
-  //
-  //   let C = class C { ... };
-  //
-  // so rewrite it as such.
-
-  int pos = position();
-
-  const AstRawString* name;
-  bool is_strict_reserved;
-  const AstRawString* variable_name;
-  if (default_export && (peek() == Token::EXTENDS || peek() == Token::LBRACE)) {
-    name = ast_value_factory()->default_string();
-    is_strict_reserved = false;
-    variable_name = ast_value_factory()->star_default_star_string();
-  } else {
-    name = ParseIdentifierOrStrictReservedWord(&is_strict_reserved, CHECK_OK);
-    variable_name = name;
-  }
-
-  Expression* value = ParseClassLiteral(nullptr, name, scanner()->location(),
-                                        is_strict_reserved, pos, CHECK_OK);
-
-  Declaration* decl = DeclareVariable(variable_name, LET, pos, CHECK_OK);
-  decl->proxy()->var()->set_initializer_position(position());
-  Assignment* assignment =
-      factory()->NewAssignment(Token::INIT, decl->proxy(), value, pos);
-  Statement* assignment_statement =
-      factory()->NewExpressionStatement(assignment, kNoSourcePosition);
-  if (names) names->Add(variable_name, zone());
-  return assignment_statement;
-}
-
-Block* Parser::ParseBlock(ZoneList<const AstRawString*>* labels, bool* ok) {
-  // The harmony mode uses block elements instead of statements.
-  //
-  // Block ::
-  //   '{' StatementList '}'
-
-  // Construct block expecting 16 statements.
-  Block* body = factory()->NewBlock(labels, 16, false, kNoSourcePosition);
-
-  // Parse the statements and collect escaping labels.
-  Expect(Token::LBRACE, CHECK_OK);
-  {
-    BlockState block_state(&scope_state_);
-    block_state.set_start_position(scanner()->location().beg_pos);
-    Target target(&this->target_stack_, body);
-
-    while (peek() != Token::RBRACE) {
-      Statement* stat = ParseStatementListItem(CHECK_OK);
-      if (stat && !stat->IsEmpty()) {
-        body->statements()->Add(stat, zone());
-      }
-    }
-
-    Expect(Token::RBRACE, CHECK_OK);
-    block_state.set_end_position(scanner()->location().end_pos);
-    body->set_scope(block_state.FinalizedBlockScope());
-  }
-  return body;
-}
-
-
-Block* Parser::DeclarationParsingResult::BuildInitializationBlock(
-    ZoneList<const AstRawString*>* names, bool* ok) {
-  Block* result = descriptor.parser->factory()->NewBlock(
-      NULL, 1, true, descriptor.declaration_pos);
-  for (auto declaration : declarations) {
-    PatternRewriter::DeclareAndInitializeVariables(
-        result, &descriptor, &declaration, names, CHECK_OK);
-  }
-  return result;
-}
-
-
-Block* Parser::ParseVariableStatement(VariableDeclarationContext var_context,
-                                      ZoneList<const AstRawString*>* names,
-                                      bool* ok) {
-  // VariableStatement ::
-  //   VariableDeclarations ';'
-
-  // The scope of a var declared variable anywhere inside a function
-  // is the entire function (ECMA-262, 3rd, 10.1.3, and 12.2). Thus we can
-  // transform a source-level var declaration into a (Function) Scope
-  // declaration, and rewrite the source-level initialization into an assignment
-  // statement. We use a block to collect multiple assignments.
-  //
-  // We mark the block as initializer block because we don't want the
-  // rewriter to add a '.result' assignment to such a block (to get compliant
-  // behavior for code such as print(eval('var x = 7')), and for cosmetic
-  // reasons when pretty-printing. Also, unless an assignment (initialization)
-  // is inside an initializer block, it is ignored.
-
-  DeclarationParsingResult parsing_result;
-  Block* result =
-      ParseVariableDeclarations(var_context, &parsing_result, names, CHECK_OK);
-  ExpectSemicolon(CHECK_OK);
-  return result;
-}
-
-Block* Parser::ParseVariableDeclarations(
-    VariableDeclarationContext var_context,
-    DeclarationParsingResult* parsing_result,
-    ZoneList<const AstRawString*>* names, bool* ok) {
-  // VariableDeclarations ::
-  //   ('var' | 'const' | 'let') (Identifier ('=' AssignmentExpression)?)+[',']
-  //
-  // The ES6 Draft Rev3 specifies the following grammar for const declarations
-  //
-  // ConstDeclaration ::
-  //   const ConstBinding (',' ConstBinding)* ';'
-  // ConstBinding ::
-  //   Identifier '=' AssignmentExpression
-  //
-  // TODO(ES6):
-  // ConstBinding ::
-  //   BindingPattern '=' AssignmentExpression
-
-  parsing_result->descriptor.parser = this;
-  parsing_result->descriptor.declaration_kind = DeclarationDescriptor::NORMAL;
-  parsing_result->descriptor.declaration_pos = peek_position();
-  parsing_result->descriptor.initialization_pos = peek_position();
-  parsing_result->descriptor.mode = VAR;
-
-  Block* init_block = nullptr;
-  if (var_context != kForStatement) {
-    init_block = factory()->NewBlock(
-        NULL, 1, true, parsing_result->descriptor.declaration_pos);
-  }
-
-  if (peek() == Token::VAR) {
-    Consume(Token::VAR);
-  } else if (peek() == Token::CONST) {
-    Consume(Token::CONST);
-    DCHECK(var_context != kStatement);
-    parsing_result->descriptor.mode = CONST;
-  } else if (peek() == Token::LET) {
-    Consume(Token::LET);
-    DCHECK(var_context != kStatement);
-    parsing_result->descriptor.mode = LET;
-  } else {
-    UNREACHABLE();  // by current callers
-  }
-
-  parsing_result->descriptor.scope = scope();
-  parsing_result->descriptor.hoist_scope = nullptr;
-
-
-  bool first_declaration = true;
-  int bindings_start = peek_position();
-  do {
-    FuncNameInferrer::State fni_state(fni_);
-
-    // Parse name.
-    if (!first_declaration) Consume(Token::COMMA);
-
-    Expression* pattern;
-    int decl_pos = peek_position();
-    {
-      ExpressionClassifier pattern_classifier(this);
-      pattern = ParsePrimaryExpression(&pattern_classifier, CHECK_OK);
-      ValidateBindingPattern(&pattern_classifier, CHECK_OK);
-      if (IsLexicalVariableMode(parsing_result->descriptor.mode)) {
-        ValidateLetPattern(&pattern_classifier, CHECK_OK);
-      }
-    }
-
-    Scanner::Location variable_loc = scanner()->location();
-    const AstRawString* single_name =
-        pattern->IsVariableProxy() ? pattern->AsVariableProxy()->raw_name()
-                                   : nullptr;
-    if (single_name != nullptr) {
-      if (fni_ != NULL) fni_->PushVariableName(single_name);
-    }
-
-    Expression* value = NULL;
-    int initializer_position = kNoSourcePosition;
-    if (Check(Token::ASSIGN)) {
-      ExpressionClassifier classifier(this);
-      value = ParseAssignmentExpression(var_context != kForStatement,
-                                        &classifier, CHECK_OK);
-      RewriteNonPattern(&classifier, CHECK_OK);
-      variable_loc.end_pos = scanner()->location().end_pos;
-
-      if (!parsing_result->first_initializer_loc.IsValid()) {
-        parsing_result->first_initializer_loc = variable_loc;
-      }
-
-      // Don't infer if it is "a = function(){...}();"-like expression.
-      if (single_name) {
-        if (fni_ != NULL && value->AsCall() == NULL &&
-            value->AsCallNew() == NULL) {
-          fni_->Infer();
-        } else {
-          fni_->RemoveLastFunction();
-        }
-      }
-
-      ParserBaseTraits<Parser>::SetFunctionNameFromIdentifierRef(value,
-                                                                 pattern);
-
-      // End position of the initializer is after the assignment expression.
-      initializer_position = scanner()->location().end_pos;
-    } else {
-      // Initializers may be either required or implied unless this is a
-      // for-in/of iteration variable.
-      if (var_context != kForStatement || !PeekInOrOf()) {
-        // ES6 'const' and binding patterns require initializers.
-        if (parsing_result->descriptor.mode == CONST ||
-            !pattern->IsVariableProxy()) {
-          ReportMessageAt(
-              Scanner::Location(decl_pos, scanner()->location().end_pos),
-              MessageTemplate::kDeclarationMissingInitializer,
-              !pattern->IsVariableProxy() ? "destructuring" : "const");
-          *ok = false;
-          return nullptr;
-        }
-
-        // 'let x' initializes 'x' to undefined.
-        if (parsing_result->descriptor.mode == LET) {
-          value = GetLiteralUndefined(position());
-        }
-      }
-
-      // End position of the initializer is after the variable.
-      initializer_position = position();
-    }
-
-    DeclarationParsingResult::Declaration decl(pattern, initializer_position,
-                                               value);
-    if (var_context == kForStatement) {
-      // Save the declaration for further handling in ParseForStatement.
-      parsing_result->declarations.Add(decl);
-    } else {
-      // Immediately declare the variable otherwise. This avoids O(N^2)
-      // behavior (where N is the number of variables in a single
-      // declaration) in the PatternRewriter having to do with removing
-      // and adding VariableProxies to the Scope (see bug 4699).
-      DCHECK_NOT_NULL(init_block);
-      PatternRewriter::DeclareAndInitializeVariables(
-          init_block, &parsing_result->descriptor, &decl, names, CHECK_OK);
-    }
-    first_declaration = false;
-  } while (peek() == Token::COMMA);
-
-  parsing_result->bindings_loc =
-      Scanner::Location(bindings_start, scanner()->location().end_pos);
-
-  DCHECK(*ok);
-  return init_block;
-}
-
-
-static bool ContainsLabel(ZoneList<const AstRawString*>* labels,
-                          const AstRawString* label) {
-  DCHECK(label != NULL);
-  if (labels != NULL) {
-    for (int i = labels->length(); i-- > 0; ) {
-      if (labels->at(i) == label) {
-        return true;
-      }
+bool Parser::ContainsLabel(ZoneList<const AstRawString*>* labels,
+                           const AstRawString* label) {
+  DCHECK_NOT_NULL(label);
+  if (labels != nullptr) {
+    for (int i = labels->length(); i-- > 0;) {
+      if (labels->at(i) == label) return true;
     }
   }
   return false;
 }
 
-Statement* Parser::ParseFunctionDeclaration(bool* ok) {
-  Consume(Token::FUNCTION);
-  int pos = position();
-  ParseFunctionFlags flags = ParseFunctionFlags::kIsNormal;
-  if (Check(Token::MUL)) {
-    flags |= ParseFunctionFlags::kIsGenerator;
-    if (allow_harmony_restrictive_declarations()) {
-      ReportMessageAt(scanner()->location(),
-                      MessageTemplate::kGeneratorInLegacyContext);
-      *ok = false;
-      return nullptr;
-    }
+Expression* Parser::RewriteReturn(Expression* return_value, int pos) {
+  if (IsSubclassConstructor(function_state_->kind())) {
+    // For subclass constructors we need to return this in case of undefined
+    // return a Smi (transformed into an exception in the ConstructStub)
+    // for a non object.
+    //
+    //   return expr;
+    //
+    // Is rewritten as:
+    //
+    //   return (temp = expr) === undefined ? this :
+    //       %_IsJSReceiver(temp) ? temp : 1;
+
+    // temp = expr
+    Variable* temp = NewTemporary(ast_value_factory()->empty_string());
+    Assignment* assign = factory()->NewAssignment(
+        Token::ASSIGN, factory()->NewVariableProxy(temp), return_value, pos);
+
+    // %_IsJSReceiver(temp)
+    ZoneList<Expression*>* is_spec_object_args =
+        new (zone()) ZoneList<Expression*>(1, zone());
+    is_spec_object_args->Add(factory()->NewVariableProxy(temp), zone());
+    Expression* is_spec_object_call = factory()->NewCallRuntime(
+        Runtime::kInlineIsJSReceiver, is_spec_object_args, pos);
+
+    // %_IsJSReceiver(temp) ? temp : 1;
+    Expression* is_object_conditional = factory()->NewConditional(
+        is_spec_object_call, factory()->NewVariableProxy(temp),
+        factory()->NewSmiLiteral(1, pos), pos);
+
+    // temp === undefined
+    Expression* is_undefined = factory()->NewCompareOperation(
+        Token::EQ_STRICT, assign,
+        factory()->NewUndefinedLiteral(kNoSourcePosition), pos);
+
+    // is_undefined ? this : is_object_conditional
+    return_value = factory()->NewConditional(is_undefined, ThisExpression(pos),
+                                             is_object_conditional, pos);
   }
-
-  return ParseHoistableDeclaration(pos, flags, nullptr, false, CHECK_OK);
-}
-
-Statement* Parser::ParseExpressionOrLabelledStatement(
-    ZoneList<const AstRawString*>* labels,
-    AllowLabelledFunctionStatement allow_function, bool* ok) {
-  // ExpressionStatement | LabelledStatement ::
-  //   Expression ';'
-  //   Identifier ':' Statement
-  //
-  // ExpressionStatement[Yield] :
-  //   [lookahead ∉ {{, function, class, let [}] Expression[In, ?Yield] ;
-
-  int pos = peek_position();
-
-  switch (peek()) {
-    case Token::FUNCTION:
-    case Token::LBRACE:
-      UNREACHABLE();  // Always handled by the callers.
-    case Token::CLASS:
-      ReportUnexpectedToken(Next());
-      *ok = false;
-      return nullptr;
-    default:
-      break;
-  }
-
-  bool starts_with_idenfifier = peek_any_identifier();
-  Expression* expr = ParseExpression(true, CHECK_OK);
-  if (peek() == Token::COLON && starts_with_idenfifier && expr != NULL &&
-      expr->AsVariableProxy() != NULL &&
-      !expr->AsVariableProxy()->is_this()) {
-    // Expression is a single identifier, and not, e.g., a parenthesized
-    // identifier.
-    VariableProxy* var = expr->AsVariableProxy();
-    const AstRawString* label = var->raw_name();
-    // TODO(1240780): We don't check for redeclaration of labels
-    // during preparsing since keeping track of the set of active
-    // labels requires nontrivial changes to the way scopes are
-    // structured.  However, these are probably changes we want to
-    // make later anyway so we should go back and fix this then.
-    if (ContainsLabel(labels, label) || TargetStackContainsLabel(label)) {
-      ReportMessage(MessageTemplate::kLabelRedeclaration, label);
-      *ok = false;
-      return NULL;
-    }
-    if (labels == NULL) {
-      labels = new(zone()) ZoneList<const AstRawString*>(4, zone());
-    }
-    labels->Add(label, zone());
-    // Remove the "ghost" variable that turned out to be a label
-    // from the top scope. This way, we don't try to resolve it
-    // during the scope processing.
-    scope()->RemoveUnresolved(var);
-    Expect(Token::COLON, CHECK_OK);
-    // ES#sec-labelled-function-declarations Labelled Function Declarations
-    if (peek() == Token::FUNCTION && is_sloppy(language_mode())) {
-      if (allow_function == kAllowLabelledFunctionStatement) {
-        return ParseFunctionDeclaration(ok);
-      } else {
-        return ParseScopedStatement(labels, true, ok);
-      }
-    }
-    return ParseStatement(labels, kDisallowLabelledFunctionStatement, ok);
-  }
-
-  // If we have an extension, we allow a native function declaration.
-  // A native function declaration starts with "native function" with
-  // no line-terminator between the two words.
-  if (extension_ != NULL && peek() == Token::FUNCTION &&
-      !scanner()->HasAnyLineTerminatorBeforeNext() && expr != NULL &&
-      expr->AsVariableProxy() != NULL &&
-      expr->AsVariableProxy()->raw_name() ==
-          ast_value_factory()->native_string() &&
-      !scanner()->literal_contains_escapes()) {
-    return ParseNativeDeclaration(ok);
-  }
-
-  // Parsed expression statement, followed by semicolon.
-  ExpectSemicolon(CHECK_OK);
-  return factory()->NewExpressionStatement(expr, pos);
-}
-
-
-IfStatement* Parser::ParseIfStatement(ZoneList<const AstRawString*>* labels,
-                                      bool* ok) {
-  // IfStatement ::
-  //   'if' '(' Expression ')' Statement ('else' Statement)?
-
-  int pos = peek_position();
-  Expect(Token::IF, CHECK_OK);
-  Expect(Token::LPAREN, CHECK_OK);
-  Expression* condition = ParseExpression(true, CHECK_OK);
-  Expect(Token::RPAREN, CHECK_OK);
-  Statement* then_statement = ParseScopedStatement(labels, false, CHECK_OK);
-  Statement* else_statement = NULL;
-  if (peek() == Token::ELSE) {
-    Next();
-    else_statement = ParseScopedStatement(labels, false, CHECK_OK);
-  } else {
-    else_statement = factory()->NewEmptyStatement(kNoSourcePosition);
-  }
-  return factory()->NewIfStatement(
-      condition, then_statement, else_statement, pos);
-}
-
-
-Statement* Parser::ParseContinueStatement(bool* ok) {
-  // ContinueStatement ::
-  //   'continue' Identifier? ';'
-
-  int pos = peek_position();
-  Expect(Token::CONTINUE, CHECK_OK);
-  const AstRawString* label = NULL;
-  Token::Value tok = peek();
-  if (!scanner()->HasAnyLineTerminatorBeforeNext() &&
-      tok != Token::SEMICOLON && tok != Token::RBRACE && tok != Token::EOS) {
-    // ECMA allows "eval" or "arguments" as labels even in strict mode.
-    label = ParseIdentifier(kAllowRestrictedIdentifiers, CHECK_OK);
-  }
-  IterationStatement* target = LookupContinueTarget(label, CHECK_OK);
-  if (target == NULL) {
-    // Illegal continue statement.
-    MessageTemplate::Template message = MessageTemplate::kIllegalContinue;
-    if (label != NULL) {
-      message = MessageTemplate::kUnknownLabel;
-    }
-    ReportMessage(message, label);
-    *ok = false;
-    return NULL;
-  }
-  ExpectSemicolon(CHECK_OK);
-  return factory()->NewContinueStatement(target, pos);
-}
-
-
-Statement* Parser::ParseBreakStatement(ZoneList<const AstRawString*>* labels,
-                                       bool* ok) {
-  // BreakStatement ::
-  //   'break' Identifier? ';'
-
-  int pos = peek_position();
-  Expect(Token::BREAK, CHECK_OK);
-  const AstRawString* label = NULL;
-  Token::Value tok = peek();
-  if (!scanner()->HasAnyLineTerminatorBeforeNext() &&
-      tok != Token::SEMICOLON && tok != Token::RBRACE && tok != Token::EOS) {
-    // ECMA allows "eval" or "arguments" as labels even in strict mode.
-    label = ParseIdentifier(kAllowRestrictedIdentifiers, CHECK_OK);
-  }
-  // Parse labeled break statements that target themselves into
-  // empty statements, e.g. 'l1: l2: l3: break l2;'
-  if (label != NULL && ContainsLabel(labels, label)) {
-    ExpectSemicolon(CHECK_OK);
-    return factory()->NewEmptyStatement(pos);
-  }
-  BreakableStatement* target = NULL;
-  target = LookupBreakTarget(label, CHECK_OK);
-  if (target == NULL) {
-    // Illegal break statement.
-    MessageTemplate::Template message = MessageTemplate::kIllegalBreak;
-    if (label != NULL) {
-      message = MessageTemplate::kUnknownLabel;
-    }
-    ReportMessage(message, label);
-    *ok = false;
-    return NULL;
-  }
-  ExpectSemicolon(CHECK_OK);
-  return factory()->NewBreakStatement(target, pos);
-}
-
-
-Statement* Parser::ParseReturnStatement(bool* ok) {
-  // ReturnStatement ::
-  //   'return' Expression? ';'
-
-  // Consume the return token. It is necessary to do that before
-  // reporting any errors on it, because of the way errors are
-  // reported (underlining).
-  Expect(Token::RETURN, CHECK_OK);
-  Scanner::Location loc = scanner()->location();
-
-  Token::Value tok = peek();
-  Statement* result;
-  Expression* return_value;
-  if (scanner()->HasAnyLineTerminatorBeforeNext() ||
-      tok == Token::SEMICOLON ||
-      tok == Token::RBRACE ||
-      tok == Token::EOS) {
-    if (IsSubclassConstructor(function_state_->kind())) {
-      return_value = ThisExpression(loc.beg_pos);
-    } else {
-      return_value = GetLiteralUndefined(position());
-    }
-  } else {
-    int pos = peek_position();
-
-    if (IsSubclassConstructor(function_state_->kind())) {
-      // Because of the return code rewriting that happens in case of a subclass
-      // constructor we don't want to accept tail calls, therefore we don't set
-      // ReturnExprScope to kInsideValidReturnStatement here.
-      return_value = ParseExpression(true, CHECK_OK);
-
-      // For subclass constructors we need to return this in case of undefined
-      // return a Smi (transformed into an exception in the ConstructStub)
-      // for a non object.
-      //
-      //   return expr;
-      //
-      // Is rewritten as:
-      //
-      //   return (temp = expr) === undefined ? this :
-      //       %_IsJSReceiver(temp) ? temp : 1;
-
-      // temp = expr
-      Variable* temp = NewTemporary(ast_value_factory()->empty_string());
-      Assignment* assign = factory()->NewAssignment(
-          Token::ASSIGN, factory()->NewVariableProxy(temp), return_value, pos);
-
-      // %_IsJSReceiver(temp)
-      ZoneList<Expression*>* is_spec_object_args =
-          new (zone()) ZoneList<Expression*>(1, zone());
-      is_spec_object_args->Add(factory()->NewVariableProxy(temp), zone());
-      Expression* is_spec_object_call = factory()->NewCallRuntime(
-          Runtime::kInlineIsJSReceiver, is_spec_object_args, pos);
-
-      // %_IsJSReceiver(temp) ? temp : 1;
-      Expression* is_object_conditional = factory()->NewConditional(
-          is_spec_object_call, factory()->NewVariableProxy(temp),
-          factory()->NewSmiLiteral(1, pos), pos);
-
-      // temp === undefined
-      Expression* is_undefined = factory()->NewCompareOperation(
-          Token::EQ_STRICT, assign,
-          factory()->NewUndefinedLiteral(kNoSourcePosition), pos);
-
-      // is_undefined ? this : is_object_conditional
-      return_value = factory()->NewConditional(
-          is_undefined, ThisExpression(pos), is_object_conditional, pos);
-    } else {
-      ReturnExprScope maybe_allow_tail_calls(
-          function_state_, ReturnExprContext::kInsideValidReturnStatement);
-      return_value = ParseExpression(true, CHECK_OK);
-
-      if (allow_tailcalls() && !is_sloppy(language_mode()) && !is_resumable()) {
-        // ES6 14.6.1 Static Semantics: IsInTailPosition
-        function_state_->AddImplicitTailCallExpression(return_value);
-      }
-    }
-  }
-  ExpectSemicolon(CHECK_OK);
-
   if (is_generator()) {
     return_value = BuildIteratorResult(return_value, true);
   } else if (is_async_function()) {
-    return_value = BuildPromiseResolve(return_value, return_value->position());
+    return_value = BuildResolvePromise(return_value, return_value->position());
   }
+  return return_value;
+}
 
-  result = factory()->NewReturnStatement(return_value, loc.beg_pos);
-
-  DeclarationScope* decl_scope = GetDeclarationScope();
-  if (decl_scope->is_script_scope() || decl_scope->is_eval_scope()) {
-    ReportMessageAt(loc, MessageTemplate::kIllegalReturn);
+Expression* Parser::RewriteDoExpression(Block* body, int pos, bool* ok) {
+  Variable* result = NewTemporary(ast_value_factory()->dot_result_string());
+  DoExpression* expr = factory()->NewDoExpression(body, result, pos);
+  if (!Rewriter::Rewrite(this, GetClosureScope(), expr, ast_value_factory())) {
     *ok = false;
-    return NULL;
+    return nullptr;
   }
-  return result;
+  return expr;
 }
 
-
-Statement* Parser::ParseWithStatement(ZoneList<const AstRawString*>* labels,
-                                      bool* ok) {
-  // WithStatement ::
-  //   'with' '(' Expression ')' Statement
-
-  Expect(Token::WITH, CHECK_OK);
-  int pos = position();
-
-  if (is_strict(language_mode())) {
-    ReportMessage(MessageTemplate::kStrictWith);
-    *ok = false;
-    return NULL;
-  }
-
-  Expect(Token::LPAREN, CHECK_OK);
-  Expression* expr = ParseExpression(true, CHECK_OK);
-  Expect(Token::RPAREN, CHECK_OK);
-
-  Scope* with_scope = NewScope(WITH_SCOPE);
-  Statement* body;
-  {
-    BlockState block_state(&scope_state_, with_scope);
-    with_scope->set_start_position(scanner()->peek_location().beg_pos);
-    body = ParseScopedStatement(labels, true, CHECK_OK);
-    with_scope->set_end_position(scanner()->location().end_pos);
-  }
-  return factory()->NewWithStatement(with_scope, expr, body, pos);
-}
-
-
-CaseClause* Parser::ParseCaseClause(bool* default_seen_ptr, bool* ok) {
-  // CaseClause ::
-  //   'case' Expression ':' StatementList
-  //   'default' ':' StatementList
-
-  Expression* label = NULL;  // NULL expression indicates default case
-  if (peek() == Token::CASE) {
-    Expect(Token::CASE, CHECK_OK);
-    label = ParseExpression(true, CHECK_OK);
-  } else {
-    Expect(Token::DEFAULT, CHECK_OK);
-    if (*default_seen_ptr) {
-      ReportMessage(MessageTemplate::kMultipleDefaultsInSwitch);
-      *ok = false;
-      return NULL;
-    }
-    *default_seen_ptr = true;
-  }
-  Expect(Token::COLON, CHECK_OK);
-  int pos = position();
-  ZoneList<Statement*>* statements =
-      new(zone()) ZoneList<Statement*>(5, zone());
-  Statement* stat = NULL;
-  while (peek() != Token::CASE &&
-         peek() != Token::DEFAULT &&
-         peek() != Token::RBRACE) {
-    stat = ParseStatementListItem(CHECK_OK);
-    statements->Add(stat, zone());
-  }
-  return factory()->NewCaseClause(label, statements, pos);
-}
-
-
-Statement* Parser::ParseSwitchStatement(ZoneList<const AstRawString*>* labels,
-                                        bool* ok) {
-  // SwitchStatement ::
-  //   'switch' '(' Expression ')' '{' CaseClause* '}'
+Statement* Parser::RewriteSwitchStatement(Expression* tag,
+                                          SwitchStatement* switch_statement,
+                                          ZoneList<CaseClause*>* cases,
+                                          Scope* scope) {
   // In order to get the CaseClauses to execute in their own lexical scope,
   // but without requiring downstream code to have special scope handling
   // code for switch statements, desugar into blocks as follows:
@@ -2728,12 +1734,6 @@
   // }
 
   Block* switch_block = factory()->NewBlock(NULL, 2, false, kNoSourcePosition);
-  int switch_pos = peek_position();
-
-  Expect(Token::SWITCH, CHECK_OK);
-  Expect(Token::LPAREN, CHECK_OK);
-  Expression* tag = ParseExpression(true, CHECK_OK);
-  Expect(Token::RPAREN, CHECK_OK);
 
   Variable* tag_variable =
       NewTemporary(ast_value_factory()->dot_switch_tag_string());
@@ -2752,301 +1752,112 @@
           factory()->NewUndefinedLiteral(kNoSourcePosition), kNoSourcePosition),
       zone());
 
+  Expression* tag_read = factory()->NewVariableProxy(tag_variable);
+  switch_statement->Initialize(tag_read, cases);
   Block* cases_block = factory()->NewBlock(NULL, 1, false, kNoSourcePosition);
-
-  SwitchStatement* switch_statement =
-      factory()->NewSwitchStatement(labels, switch_pos);
-
-  {
-    BlockState cases_block_state(&scope_state_);
-    cases_block_state.set_start_position(scanner()->location().beg_pos);
-    cases_block_state.SetNonlinear();
-    Target target(&this->target_stack_, switch_statement);
-
-    Expression* tag_read = factory()->NewVariableProxy(tag_variable);
-
-    bool default_seen = false;
-    ZoneList<CaseClause*>* cases =
-        new (zone()) ZoneList<CaseClause*>(4, zone());
-    Expect(Token::LBRACE, CHECK_OK);
-    while (peek() != Token::RBRACE) {
-      CaseClause* clause = ParseCaseClause(&default_seen, CHECK_OK);
-      cases->Add(clause, zone());
-    }
-    switch_statement->Initialize(tag_read, cases);
-    cases_block->statements()->Add(switch_statement, zone());
-    Expect(Token::RBRACE, CHECK_OK);
-
-    cases_block_state.set_end_position(scanner()->location().end_pos);
-    cases_block->set_scope(cases_block_state.FinalizedBlockScope());
-  }
-
+  cases_block->statements()->Add(switch_statement, zone());
+  cases_block->set_scope(scope);
   switch_block->statements()->Add(cases_block, zone());
-
   return switch_block;
 }
 
-
-Statement* Parser::ParseThrowStatement(bool* ok) {
-  // ThrowStatement ::
-  //   'throw' Expression ';'
-
-  Expect(Token::THROW, CHECK_OK);
-  int pos = position();
-  if (scanner()->HasAnyLineTerminatorBeforeNext()) {
-    ReportMessage(MessageTemplate::kNewlineAfterThrow);
-    *ok = false;
-    return NULL;
+void Parser::RewriteCatchPattern(CatchInfo* catch_info, bool* ok) {
+  if (catch_info->name == nullptr) {
+    DCHECK_NOT_NULL(catch_info->pattern);
+    catch_info->name = ast_value_factory()->dot_catch_string();
   }
-  Expression* exception = ParseExpression(true, CHECK_OK);
-  ExpectSemicolon(CHECK_OK);
+  catch_info->variable = catch_info->scope->DeclareLocal(
+      catch_info->name, VAR, kCreatedInitialized, NORMAL_VARIABLE);
+  if (catch_info->pattern != nullptr) {
+    DeclarationDescriptor descriptor;
+    descriptor.declaration_kind = DeclarationDescriptor::NORMAL;
+    descriptor.scope = scope();
+    descriptor.hoist_scope = nullptr;
+    descriptor.mode = LET;
+    descriptor.declaration_pos = catch_info->pattern->position();
+    descriptor.initialization_pos = catch_info->pattern->position();
 
-  return factory()->NewExpressionStatement(
-      factory()->NewThrow(exception, pos), pos);
+    // Initializer position for variables declared by the pattern.
+    const int initializer_position = position();
+
+    DeclarationParsingResult::Declaration decl(
+        catch_info->pattern, initializer_position,
+        factory()->NewVariableProxy(catch_info->variable));
+
+    catch_info->init_block =
+        factory()->NewBlock(nullptr, 8, true, kNoSourcePosition);
+    PatternRewriter::DeclareAndInitializeVariables(
+        this, catch_info->init_block, &descriptor, &decl,
+        &catch_info->bound_names, ok);
+  } else {
+    catch_info->bound_names.Add(catch_info->name, zone());
+  }
 }
 
-
-TryStatement* Parser::ParseTryStatement(bool* ok) {
-  // TryStatement ::
-  //   'try' Block Catch
-  //   'try' Block Finally
-  //   'try' Block Catch Finally
-  //
-  // Catch ::
-  //   'catch' '(' Identifier ')' Block
-  //
-  // Finally ::
-  //   'finally' Block
-
-  Expect(Token::TRY, CHECK_OK);
-  int pos = position();
-
-  Block* try_block;
-  {
-    ReturnExprScope no_tail_calls(function_state_,
-                                  ReturnExprContext::kInsideTryBlock);
-    try_block = ParseBlock(NULL, CHECK_OK);
-  }
-
-  Token::Value tok = peek();
-
-  bool catch_for_promise_reject = false;
-  if (allow_natives() && tok == Token::MOD) {
-    Consume(Token::MOD);
-    catch_for_promise_reject = true;
-    tok = peek();
-  }
-
-  if (tok != Token::CATCH && tok != Token::FINALLY) {
-    ReportMessage(MessageTemplate::kNoCatchOrFinally);
-    *ok = false;
-    return NULL;
-  }
-
-  Scope* catch_scope = NULL;
-  Variable* catch_variable = NULL;
-  Block* catch_block = NULL;
-  TailCallExpressionList tail_call_expressions_in_catch_block(zone());
-  if (tok == Token::CATCH) {
-    Consume(Token::CATCH);
-
-    Expect(Token::LPAREN, CHECK_OK);
-    catch_scope = NewScope(CATCH_SCOPE);
-    catch_scope->set_start_position(scanner()->location().beg_pos);
-
-    {
-      CollectExpressionsInTailPositionToListScope
-          collect_tail_call_expressions_scope(
-              function_state_, &tail_call_expressions_in_catch_block);
-      BlockState block_state(&scope_state_, catch_scope);
-
-      catch_block = factory()->NewBlock(nullptr, 16, false, kNoSourcePosition);
-
-      // Create a block scope to hold any lexical declarations created
-      // as part of destructuring the catch parameter.
-      {
-        BlockState block_state(&scope_state_);
-        block_state.set_start_position(scanner()->location().beg_pos);
-        Target target(&this->target_stack_, catch_block);
-
-        const AstRawString* name = ast_value_factory()->dot_catch_string();
-        Expression* pattern = nullptr;
-        if (peek_any_identifier()) {
-          name = ParseIdentifier(kDontAllowRestrictedIdentifiers, CHECK_OK);
-        } else {
-          ExpressionClassifier pattern_classifier(this);
-          pattern = ParsePrimaryExpression(&pattern_classifier, CHECK_OK);
-          ValidateBindingPattern(&pattern_classifier, CHECK_OK);
-        }
-        catch_variable = catch_scope->DeclareLocal(
-            name, VAR, kCreatedInitialized, Variable::NORMAL);
-
-        Expect(Token::RPAREN, CHECK_OK);
-
-        ZoneList<const AstRawString*> bound_names(1, zone());
-        if (pattern != nullptr) {
-          DeclarationDescriptor descriptor;
-          descriptor.declaration_kind = DeclarationDescriptor::NORMAL;
-          descriptor.parser = this;
-          descriptor.scope = scope();
-          descriptor.hoist_scope = nullptr;
-          descriptor.mode = LET;
-          descriptor.declaration_pos = pattern->position();
-          descriptor.initialization_pos = pattern->position();
-
-          // Initializer position for variables declared by the pattern.
-          const int initializer_position = position();
-
-          DeclarationParsingResult::Declaration decl(
-              pattern, initializer_position,
-              factory()->NewVariableProxy(catch_variable));
-
-          Block* init_block =
-              factory()->NewBlock(nullptr, 8, true, kNoSourcePosition);
-          PatternRewriter::DeclareAndInitializeVariables(
-              init_block, &descriptor, &decl, &bound_names, CHECK_OK);
-          catch_block->statements()->Add(init_block, zone());
-        } else {
-          bound_names.Add(name, zone());
-        }
-
-        Block* inner_block = ParseBlock(nullptr, CHECK_OK);
-        catch_block->statements()->Add(inner_block, zone());
-
-        // Check for `catch(e) { let e; }` and similar errors.
-        Scope* inner_block_scope = inner_block->scope();
-        if (inner_block_scope != nullptr) {
-          Declaration* decl =
-              inner_block_scope->CheckLexDeclarationsConflictingWith(
-                  bound_names);
-          if (decl != nullptr) {
-            const AstRawString* name = decl->proxy()->raw_name();
-            int position = decl->proxy()->position();
-            Scanner::Location location =
-                position == kNoSourcePosition
-                    ? Scanner::Location::invalid()
-                    : Scanner::Location(position, position + 1);
-            ReportMessageAt(location, MessageTemplate::kVarRedeclaration, name);
-            *ok = false;
-            return nullptr;
-          }
-        }
-        block_state.set_end_position(scanner()->location().end_pos);
-        catch_block->set_scope(block_state.FinalizedBlockScope());
-      }
+void Parser::ValidateCatchBlock(const CatchInfo& catch_info, bool* ok) {
+  // Check for `catch(e) { let e; }` and similar errors.
+  Scope* inner_block_scope = catch_info.inner_block->scope();
+  if (inner_block_scope != nullptr) {
+    Declaration* decl = inner_block_scope->CheckLexDeclarationsConflictingWith(
+        catch_info.bound_names);
+    if (decl != nullptr) {
+      const AstRawString* name = decl->proxy()->raw_name();
+      int position = decl->proxy()->position();
+      Scanner::Location location =
+          position == kNoSourcePosition
+              ? Scanner::Location::invalid()
+              : Scanner::Location(position, position + 1);
+      ReportMessageAt(location, MessageTemplate::kVarRedeclaration, name);
+      *ok = false;
     }
-
-    catch_scope->set_end_position(scanner()->location().end_pos);
-    tok = peek();
   }
+}
 
-  Block* finally_block = NULL;
-  DCHECK(tok == Token::FINALLY || catch_block != NULL);
-  if (tok == Token::FINALLY) {
-    Consume(Token::FINALLY);
-    finally_block = ParseBlock(NULL, CHECK_OK);
-  }
-
+Statement* Parser::RewriteTryStatement(Block* try_block, Block* catch_block,
+                                       Block* finally_block,
+                                       const CatchInfo& catch_info, int pos) {
   // Simplify the AST nodes by converting:
   //   'try B0 catch B1 finally B2'
   // to:
   //   'try { try B0 catch B1 } finally B2'
 
-  if (catch_block != NULL && finally_block != NULL) {
+  if (catch_block != nullptr && finally_block != nullptr) {
     // If we have both, create an inner try/catch.
-    DCHECK(catch_scope != NULL && catch_variable != NULL);
+    DCHECK_NOT_NULL(catch_info.scope);
+    DCHECK_NOT_NULL(catch_info.variable);
     TryCatchStatement* statement;
-    if (catch_for_promise_reject) {
+    if (catch_info.for_promise_reject) {
       statement = factory()->NewTryCatchStatementForPromiseReject(
-          try_block, catch_scope, catch_variable, catch_block,
+          try_block, catch_info.scope, catch_info.variable, catch_block,
           kNoSourcePosition);
     } else {
-      statement = factory()->NewTryCatchStatement(try_block, catch_scope,
-                                                  catch_variable, catch_block,
-                                                  kNoSourcePosition);
+      statement = factory()->NewTryCatchStatement(
+          try_block, catch_info.scope, catch_info.variable, catch_block,
+          kNoSourcePosition);
     }
 
-    try_block = factory()->NewBlock(NULL, 1, false, kNoSourcePosition);
+    try_block = factory()->NewBlock(nullptr, 1, false, kNoSourcePosition);
     try_block->statements()->Add(statement, zone());
-    catch_block = NULL;  // Clear to indicate it's been handled.
+    catch_block = nullptr;  // Clear to indicate it's been handled.
   }
 
-  TryStatement* result = NULL;
-  if (catch_block != NULL) {
+  if (catch_block != nullptr) {
     // For a try-catch construct append return expressions from the catch block
     // to the list of return expressions.
     function_state_->tail_call_expressions().Append(
-        tail_call_expressions_in_catch_block);
+        catch_info.tail_call_expressions);
 
-    DCHECK(finally_block == NULL);
-    DCHECK(catch_scope != NULL && catch_variable != NULL);
-    result = factory()->NewTryCatchStatement(try_block, catch_scope,
-                                             catch_variable, catch_block, pos);
+    DCHECK_NULL(finally_block);
+    DCHECK_NOT_NULL(catch_info.scope);
+    DCHECK_NOT_NULL(catch_info.variable);
+    return factory()->NewTryCatchStatement(
+        try_block, catch_info.scope, catch_info.variable, catch_block, pos);
   } else {
-    if (FLAG_harmony_explicit_tailcalls &&
-        tail_call_expressions_in_catch_block.has_explicit_tail_calls()) {
-      // TODO(ishell): update chapter number.
-      // ES8 XX.YY.ZZ
-      ReportMessageAt(tail_call_expressions_in_catch_block.location(),
-                      MessageTemplate::kUnexpectedTailCallInCatchBlock);
-      *ok = false;
-      return NULL;
-    }
-    DCHECK(finally_block != NULL);
-    result = factory()->NewTryFinallyStatement(try_block, finally_block, pos);
+    DCHECK_NOT_NULL(finally_block);
+    return factory()->NewTryFinallyStatement(try_block, finally_block, pos);
   }
-
-  return result;
 }
 
-
-DoWhileStatement* Parser::ParseDoWhileStatement(
-    ZoneList<const AstRawString*>* labels, bool* ok) {
-  // DoStatement ::
-  //   'do' Statement 'while' '(' Expression ')' ';'
-
-  DoWhileStatement* loop =
-      factory()->NewDoWhileStatement(labels, peek_position());
-  Target target(&this->target_stack_, loop);
-
-  Expect(Token::DO, CHECK_OK);
-  Statement* body = ParseScopedStatement(NULL, true, CHECK_OK);
-  Expect(Token::WHILE, CHECK_OK);
-  Expect(Token::LPAREN, CHECK_OK);
-
-  Expression* cond = ParseExpression(true, CHECK_OK);
-  Expect(Token::RPAREN, CHECK_OK);
-
-  // Allow do-statements to be terminated with and without
-  // semi-colons. This allows code such as 'do;while(0)return' to
-  // parse, which would not be the case if we had used the
-  // ExpectSemicolon() functionality here.
-  if (peek() == Token::SEMICOLON) Consume(Token::SEMICOLON);
-
-  if (loop != NULL) loop->Initialize(cond, body);
-  return loop;
-}
-
-
-WhileStatement* Parser::ParseWhileStatement(
-    ZoneList<const AstRawString*>* labels, bool* ok) {
-  // WhileStatement ::
-  //   'while' '(' Expression ')' Statement
-
-  WhileStatement* loop = factory()->NewWhileStatement(labels, peek_position());
-  Target target(&this->target_stack_, loop);
-
-  Expect(Token::WHILE, CHECK_OK);
-  Expect(Token::LPAREN, CHECK_OK);
-  Expression* cond = ParseExpression(true, CHECK_OK);
-  Expect(Token::RPAREN, CHECK_OK);
-  Statement* body = ParseScopedStatement(NULL, true, CHECK_OK);
-
-  if (loop != NULL) loop->Initialize(cond, body);
-  return loop;
-}
-
-
 // !%_IsJSReceiver(result = iterator.next()) &&
 //     %ThrowIteratorResultNotAnObject(result)
 Expression* Parser::BuildIteratorNextResult(Expression* iterator,
@@ -3115,6 +1926,138 @@
   return stmt;
 }
 
+// Special case for legacy for
+//
+//    for (var x = initializer in enumerable) body
+//
+// An initialization block of the form
+//
+//    {
+//      x = initializer;
+//    }
+//
+// is returned in this case.  It has reserved space for two statements,
+// so that (later on during parsing), the equivalent of
+//
+//   for (x in enumerable) body
+//
+// is added as a second statement to it.
+Block* Parser::RewriteForVarInLegacy(const ForInfo& for_info) {
+  const DeclarationParsingResult::Declaration& decl =
+      for_info.parsing_result.declarations[0];
+  if (!IsLexicalVariableMode(for_info.parsing_result.descriptor.mode) &&
+      decl.pattern->IsVariableProxy() && decl.initializer != nullptr) {
+    DCHECK(!allow_harmony_for_in());
+    ++use_counts_[v8::Isolate::kForInInitializer];
+    const AstRawString* name = decl.pattern->AsVariableProxy()->raw_name();
+    VariableProxy* single_var = NewUnresolved(name);
+    Block* init_block = factory()->NewBlock(
+        nullptr, 2, true, for_info.parsing_result.descriptor.declaration_pos);
+    init_block->statements()->Add(
+        factory()->NewExpressionStatement(
+            factory()->NewAssignment(Token::ASSIGN, single_var,
+                                     decl.initializer, kNoSourcePosition),
+            kNoSourcePosition),
+        zone());
+    return init_block;
+  }
+  return nullptr;
+}
+
+// Rewrite a for-in/of statement of the form
+//
+//   for (let/const/var x in/of e) b
+//
+// into
+//
+//   {
+//     <let x' be a temporary variable>
+//     for (x' in/of e) {
+//       let/const/var x;
+//       x = x';
+//       b;
+//     }
+//     let x;  // for TDZ
+//   }
+void Parser::DesugarBindingInForEachStatement(ForInfo* for_info,
+                                              Block** body_block,
+                                              Expression** each_variable,
+                                              bool* ok) {
+  DeclarationParsingResult::Declaration& decl =
+      for_info->parsing_result.declarations[0];
+  Variable* temp = NewTemporary(ast_value_factory()->dot_for_string());
+  auto each_initialization_block =
+      factory()->NewBlock(nullptr, 1, true, kNoSourcePosition);
+  {
+    auto descriptor = for_info->parsing_result.descriptor;
+    descriptor.declaration_pos = kNoSourcePosition;
+    descriptor.initialization_pos = kNoSourcePosition;
+    decl.initializer = factory()->NewVariableProxy(temp);
+
+    bool is_for_var_of =
+        for_info->mode == ForEachStatement::ITERATE &&
+        for_info->parsing_result.descriptor.mode == VariableMode::VAR;
+
+    PatternRewriter::DeclareAndInitializeVariables(
+        this, each_initialization_block, &descriptor, &decl,
+        (IsLexicalVariableMode(for_info->parsing_result.descriptor.mode) ||
+         is_for_var_of)
+            ? &for_info->bound_names
+            : nullptr,
+        CHECK_OK_VOID);
+
+    // Annex B.3.5 prohibits the form
+    // `try {} catch(e) { for (var e of {}); }`
+    // So if we are parsing a statement like `for (var ... of ...)`
+    // we need to walk up the scope chain and look for catch scopes
+    // which have a simple binding, then compare their binding against
+    // all of the names declared in the init of the for-of we're
+    // parsing.
+    if (is_for_var_of) {
+      Scope* catch_scope = scope();
+      while (catch_scope != nullptr && !catch_scope->is_declaration_scope()) {
+        if (catch_scope->is_catch_scope()) {
+          auto name = catch_scope->catch_variable_name();
+          // If it's a simple binding and the name is declared in the for loop.
+          if (name != ast_value_factory()->dot_catch_string() &&
+              for_info->bound_names.Contains(name)) {
+            ReportMessageAt(for_info->parsing_result.bindings_loc,
+                            MessageTemplate::kVarRedeclaration, name);
+            *ok = false;
+            return;
+          }
+        }
+        catch_scope = catch_scope->outer_scope();
+      }
+    }
+  }
+
+  *body_block = factory()->NewBlock(nullptr, 3, false, kNoSourcePosition);
+  (*body_block)->statements()->Add(each_initialization_block, zone());
+  *each_variable = factory()->NewVariableProxy(temp, for_info->each_loc.beg_pos,
+                                               for_info->each_loc.end_pos);
+}
+
+// Create a TDZ for any lexically-bound names in for in/of statements.
+Block* Parser::CreateForEachStatementTDZ(Block* init_block,
+                                         const ForInfo& for_info, bool* ok) {
+  if (IsLexicalVariableMode(for_info.parsing_result.descriptor.mode)) {
+    DCHECK_NULL(init_block);
+
+    init_block = factory()->NewBlock(nullptr, 1, false, kNoSourcePosition);
+
+    for (int i = 0; i < for_info.bound_names.length(); ++i) {
+      // TODO(adamk): This needs to be some sort of special
+      // INTERNAL variable that's invisible to the debugger
+      // but visible to everything else.
+      Declaration* tdz_decl = DeclareVariable(for_info.bound_names[i], LET,
+                                              kNoSourcePosition, CHECK_OK);
+      tdz_decl->proxy()->var()->set_initializer_position(position());
+    }
+  }
+  return init_block;
+}
+
 Statement* Parser::InitializeForOfStatement(ForOfStatement* for_of,
                                             Expression* each,
                                             Expression* iterable,
@@ -3138,8 +2081,7 @@
   {
     assign_iterator = factory()->NewAssignment(
         Token::ASSIGN, factory()->NewVariableProxy(iterator),
-        GetIterator(iterable, factory(), iterable->position()),
-        iterable->position());
+        GetIterator(iterable, iterable->position()), iterable->position());
   }
 
   // !%_IsJSReceiver(result = iterator.next()) &&
@@ -3240,9 +2182,8 @@
 }
 
 Statement* Parser::DesugarLexicalBindingsInForStatement(
-    Scope* inner_scope, VariableMode mode, ZoneList<const AstRawString*>* names,
     ForStatement* loop, Statement* init, Expression* cond, Statement* next,
-    Statement* body, bool* ok) {
+    Statement* body, Scope* inner_scope, const ForInfo& for_info, bool* ok) {
   // ES6 13.7.4.8 specifies that on each loop iteration the let variables are
   // copied into a new environment.  Moreover, the "next" statement must be
   // evaluated not in the environment of the just completed iteration but in
@@ -3280,11 +2221,11 @@
   //    }
   //  }
 
-  DCHECK(names->length() > 0);
-  ZoneList<Variable*> temps(names->length(), zone());
+  DCHECK(for_info.bound_names.length() > 0);
+  ZoneList<Variable*> temps(for_info.bound_names.length(), zone());
 
-  Block* outer_block =
-      factory()->NewBlock(NULL, names->length() + 4, false, kNoSourcePosition);
+  Block* outer_block = factory()->NewBlock(
+      nullptr, for_info.bound_names.length() + 4, false, kNoSourcePosition);
 
   // Add statement: let/const x = i.
   outer_block->statements()->Add(init, zone());
@@ -3293,8 +2234,8 @@
 
   // For each lexical variable x:
   //   make statement: temp_x = x.
-  for (int i = 0; i < names->length(); i++) {
-    VariableProxy* proxy = NewUnresolved(names->at(i));
+  for (int i = 0; i < for_info.bound_names.length(); i++) {
+    VariableProxy* proxy = NewUnresolved(for_info.bound_names[i]);
     Variable* temp = NewTemporary(temp_name);
     VariableProxy* temp_proxy = factory()->NewVariableProxy(temp);
     Assignment* assignment = factory()->NewAssignment(Token::ASSIGN, temp_proxy,
@@ -3338,14 +2279,15 @@
   {
     BlockState block_state(&scope_state_, inner_scope);
 
-    Block* ignore_completion_block =
-        factory()->NewBlock(NULL, names->length() + 3, true, kNoSourcePosition);
-    ZoneList<Variable*> inner_vars(names->length(), zone());
+    Block* ignore_completion_block = factory()->NewBlock(
+        nullptr, for_info.bound_names.length() + 3, true, kNoSourcePosition);
+    ZoneList<Variable*> inner_vars(for_info.bound_names.length(), zone());
     // For each let variable x:
     //    make statement: let/const x = temp_x.
-    for (int i = 0; i < names->length(); i++) {
-      Declaration* decl =
-          DeclareVariable(names->at(i), mode, kNoSourcePosition, CHECK_OK);
+    for (int i = 0; i < for_info.bound_names.length(); i++) {
+      Declaration* decl = DeclareVariable(
+          for_info.bound_names[i], for_info.parsing_result.descriptor.mode,
+          kNoSourcePosition, CHECK_OK);
       inner_vars.Add(decl->proxy()->var(), zone());
       VariableProxy* temp_proxy = factory()->NewVariableProxy(temps.at(i));
       Assignment* assignment = factory()->NewAssignment(
@@ -3429,7 +2371,7 @@
 
       // Make the comma-separated list of temp_x = x assignments.
       int inner_var_proxy_pos = scanner()->location().beg_pos;
-      for (int i = 0; i < names->length(); i++) {
+      for (int i = 0; i < for_info.bound_names.length(); i++) {
         VariableProxy* temp_proxy = factory()->NewVariableProxy(temps.at(i));
         VariableProxy* proxy =
             factory()->NewVariableProxy(inner_vars.at(i), inner_var_proxy_pos);
@@ -3479,433 +2421,7 @@
   return outer_block;
 }
 
-Statement* Parser::ParseScopedStatement(ZoneList<const AstRawString*>* labels,
-                                        bool legacy, bool* ok) {
-  if (is_strict(language_mode()) || peek() != Token::FUNCTION ||
-      (legacy && allow_harmony_restrictive_declarations())) {
-    return ParseSubStatement(labels, kDisallowLabelledFunctionStatement, ok);
-  } else {
-    if (legacy) {
-      ++use_counts_[v8::Isolate::kLegacyFunctionDeclaration];
-    }
-    // Make a block around the statement for a lexical binding
-    // is introduced by a FunctionDeclaration.
-    BlockState block_state(&scope_state_);
-    block_state.set_start_position(scanner()->location().beg_pos);
-    Block* block = factory()->NewBlock(NULL, 1, false, kNoSourcePosition);
-    Statement* body = ParseFunctionDeclaration(CHECK_OK);
-    block->statements()->Add(body, zone());
-    block_state.set_end_position(scanner()->location().end_pos);
-    block->set_scope(block_state.FinalizedBlockScope());
-    return block;
-  }
-}
-
-Statement* Parser::ParseForStatement(ZoneList<const AstRawString*>* labels,
-                                     bool* ok) {
-  int stmt_pos = peek_position();
-  Statement* init = NULL;
-  ZoneList<const AstRawString*> bound_names(1, zone());
-  bool bound_names_are_lexical = false;
-
-  // Create an in-between scope for let-bound iteration variables.
-  BlockState for_state(&scope_state_);
-  Expect(Token::FOR, CHECK_OK);
-  Expect(Token::LPAREN, CHECK_OK);
-  for_state.set_start_position(scanner()->location().beg_pos);
-  for_state.set_is_hidden();
-  DeclarationParsingResult parsing_result;
-  if (peek() != Token::SEMICOLON) {
-    if (peek() == Token::VAR || peek() == Token::CONST ||
-        (peek() == Token::LET && IsNextLetKeyword())) {
-      ParseVariableDeclarations(kForStatement, &parsing_result, nullptr,
-                                CHECK_OK);
-
-      ForEachStatement::VisitMode mode = ForEachStatement::ENUMERATE;
-      int each_beg_pos = scanner()->location().beg_pos;
-      int each_end_pos = scanner()->location().end_pos;
-
-      if (CheckInOrOf(&mode, ok)) {
-        if (!*ok) return nullptr;
-        if (parsing_result.declarations.length() != 1) {
-          ReportMessageAt(parsing_result.bindings_loc,
-                          MessageTemplate::kForInOfLoopMultiBindings,
-                          ForEachStatement::VisitModeString(mode));
-          *ok = false;
-          return nullptr;
-        }
-        DeclarationParsingResult::Declaration& decl =
-            parsing_result.declarations[0];
-        if (parsing_result.first_initializer_loc.IsValid() &&
-            (is_strict(language_mode()) || mode == ForEachStatement::ITERATE ||
-             IsLexicalVariableMode(parsing_result.descriptor.mode) ||
-             !decl.pattern->IsVariableProxy() || allow_harmony_for_in())) {
-          // Only increment the use count if we would have let this through
-          // without the flag.
-          if (allow_harmony_for_in()) {
-            ++use_counts_[v8::Isolate::kForInInitializer];
-          }
-          ReportMessageAt(parsing_result.first_initializer_loc,
-                          MessageTemplate::kForInOfLoopInitializer,
-                          ForEachStatement::VisitModeString(mode));
-          *ok = false;
-          return nullptr;
-        }
-
-        Block* init_block = nullptr;
-        bound_names_are_lexical =
-            IsLexicalVariableMode(parsing_result.descriptor.mode);
-
-        // special case for legacy for (var ... = ... in ...)
-        if (!bound_names_are_lexical && decl.pattern->IsVariableProxy() &&
-            decl.initializer != nullptr) {
-          DCHECK(!allow_harmony_for_in());
-          ++use_counts_[v8::Isolate::kForInInitializer];
-          const AstRawString* name =
-              decl.pattern->AsVariableProxy()->raw_name();
-          VariableProxy* single_var = NewUnresolved(name);
-          init_block = factory()->NewBlock(
-              nullptr, 2, true, parsing_result.descriptor.declaration_pos);
-          init_block->statements()->Add(
-              factory()->NewExpressionStatement(
-                  factory()->NewAssignment(Token::ASSIGN, single_var,
-                                           decl.initializer, kNoSourcePosition),
-                  kNoSourcePosition),
-              zone());
-        }
-
-        // Rewrite a for-in/of statement of the form
-        //
-        //   for (let/const/var x in/of e) b
-        //
-        // into
-        //
-        //   {
-        //     <let x' be a temporary variable>
-        //     for (x' in/of e) {
-        //       let/const/var x;
-        //       x = x';
-        //       b;
-        //     }
-        //     let x;  // for TDZ
-        //   }
-
-        Variable* temp = NewTemporary(ast_value_factory()->dot_for_string());
-        ForEachStatement* loop =
-            factory()->NewForEachStatement(mode, labels, stmt_pos);
-        Target target(&this->target_stack_, loop);
-
-        int each_keyword_position = scanner()->location().beg_pos;
-
-        Expression* enumerable;
-        if (mode == ForEachStatement::ITERATE) {
-          ExpressionClassifier classifier(this);
-          enumerable = ParseAssignmentExpression(true, &classifier, CHECK_OK);
-          RewriteNonPattern(&classifier, CHECK_OK);
-        } else {
-          enumerable = ParseExpression(true, CHECK_OK);
-        }
-
-        Expect(Token::RPAREN, CHECK_OK);
-
-
-        Block* body_block =
-            factory()->NewBlock(NULL, 3, false, kNoSourcePosition);
-
-        Statement* final_loop;
-        {
-          ReturnExprScope no_tail_calls(function_state_,
-                                        ReturnExprContext::kInsideForInOfBody);
-          BlockState block_state(&scope_state_);
-          block_state.set_start_position(scanner()->location().beg_pos);
-
-          Statement* body = ParseScopedStatement(NULL, true, CHECK_OK);
-
-          auto each_initialization_block =
-              factory()->NewBlock(nullptr, 1, true, kNoSourcePosition);
-          {
-            auto descriptor = parsing_result.descriptor;
-            descriptor.declaration_pos = kNoSourcePosition;
-            descriptor.initialization_pos = kNoSourcePosition;
-            decl.initializer = factory()->NewVariableProxy(temp);
-
-            bool is_for_var_of =
-                mode == ForEachStatement::ITERATE &&
-                parsing_result.descriptor.mode == VariableMode::VAR;
-
-            PatternRewriter::DeclareAndInitializeVariables(
-                each_initialization_block, &descriptor, &decl,
-                bound_names_are_lexical || is_for_var_of ? &bound_names
-                                                         : nullptr,
-                CHECK_OK);
-
-            // Annex B.3.5 prohibits the form
-            // `try {} catch(e) { for (var e of {}); }`
-            // So if we are parsing a statement like `for (var ... of ...)`
-            // we need to walk up the scope chain and look for catch scopes
-            // which have a simple binding, then compare their binding against
-            // all of the names declared in the init of the for-of we're
-            // parsing.
-            if (is_for_var_of) {
-              Scope* catch_scope = scope();
-              while (catch_scope != nullptr &&
-                     !catch_scope->is_declaration_scope()) {
-                if (catch_scope->is_catch_scope()) {
-                  auto name = catch_scope->catch_variable_name();
-                  if (name !=
-                      ast_value_factory()
-                          ->dot_catch_string()) {  // i.e. is a simple binding
-                    if (bound_names.Contains(name)) {
-                      ReportMessageAt(parsing_result.bindings_loc,
-                                      MessageTemplate::kVarRedeclaration, name);
-                      *ok = false;
-                      return nullptr;
-                    }
-                  }
-                }
-                catch_scope = catch_scope->outer_scope();
-              }
-            }
-          }
-
-          body_block->statements()->Add(each_initialization_block, zone());
-          body_block->statements()->Add(body, zone());
-          VariableProxy* temp_proxy =
-              factory()->NewVariableProxy(temp, each_beg_pos, each_end_pos);
-          final_loop = InitializeForEachStatement(
-              loop, temp_proxy, enumerable, body_block, each_keyword_position);
-          block_state.set_end_position(scanner()->location().end_pos);
-          body_block->set_scope(block_state.FinalizedBlockScope());
-        }
-
-        // Create a TDZ for any lexically-bound names.
-        if (bound_names_are_lexical) {
-          DCHECK_NULL(init_block);
-
-          init_block =
-              factory()->NewBlock(nullptr, 1, false, kNoSourcePosition);
-
-          for (int i = 0; i < bound_names.length(); ++i) {
-            // TODO(adamk): This needs to be some sort of special
-            // INTERNAL variable that's invisible to the debugger
-            // but visible to everything else.
-            Declaration* tdz_decl = DeclareVariable(
-                bound_names[i], LET, kNoSourcePosition, CHECK_OK);
-            tdz_decl->proxy()->var()->set_initializer_position(position());
-          }
-        }
-
-        for_state.set_end_position(scanner()->location().end_pos);
-        Scope* for_scope = for_state.FinalizedBlockScope();
-        // Parsed for-in loop w/ variable declarations.
-        if (init_block != nullptr) {
-          init_block->statements()->Add(final_loop, zone());
-          init_block->set_scope(for_scope);
-          return init_block;
-        } else {
-          DCHECK_NULL(for_scope);
-          return final_loop;
-        }
-      } else {
-        bound_names_are_lexical =
-            IsLexicalVariableMode(parsing_result.descriptor.mode);
-        init = parsing_result.BuildInitializationBlock(
-            bound_names_are_lexical ? &bound_names : nullptr, CHECK_OK);
-      }
-    } else {
-      int lhs_beg_pos = peek_position();
-      ExpressionClassifier classifier(this);
-      Expression* expression = ParseExpression(false, &classifier, CHECK_OK);
-      int lhs_end_pos = scanner()->location().end_pos;
-      ForEachStatement::VisitMode mode = ForEachStatement::ENUMERATE;
-
-      bool is_for_each = CheckInOrOf(&mode, CHECK_OK);
-      bool is_destructuring = is_for_each && (expression->IsArrayLiteral() ||
-                                              expression->IsObjectLiteral());
-
-      if (is_destructuring) {
-        ValidateAssignmentPattern(&classifier, CHECK_OK);
-      } else {
-        RewriteNonPattern(&classifier, CHECK_OK);
-      }
-
-      if (is_for_each) {
-        if (!is_destructuring) {
-          expression = this->CheckAndRewriteReferenceExpression(
-              expression, lhs_beg_pos, lhs_end_pos,
-              MessageTemplate::kInvalidLhsInFor, kSyntaxError, CHECK_OK);
-        }
-
-        ForEachStatement* loop =
-            factory()->NewForEachStatement(mode, labels, stmt_pos);
-        Target target(&this->target_stack_, loop);
-
-        int each_keyword_position = scanner()->location().beg_pos;
-
-        Expression* enumerable;
-        if (mode == ForEachStatement::ITERATE) {
-          ExpressionClassifier classifier(this);
-          enumerable = ParseAssignmentExpression(true, &classifier, CHECK_OK);
-          RewriteNonPattern(&classifier, CHECK_OK);
-        } else {
-          enumerable = ParseExpression(true, CHECK_OK);
-        }
-
-        Expect(Token::RPAREN, CHECK_OK);
-
-        // For legacy compat reasons, give for loops similar treatment to
-        // if statements in allowing a function declaration for a body
-        Statement* body = ParseScopedStatement(NULL, true, CHECK_OK);
-        Statement* final_loop = InitializeForEachStatement(
-            loop, expression, enumerable, body, each_keyword_position);
-
-        DCHECK_NULL(for_state.FinalizedBlockScope());
-        return final_loop;
-
-      } else {
-        init = factory()->NewExpressionStatement(expression, lhs_beg_pos);
-      }
-    }
-  }
-
-  // Standard 'for' loop
-  ForStatement* loop = factory()->NewForStatement(labels, stmt_pos);
-  Target target(&this->target_stack_, loop);
-
-  // Parsed initializer at this point.
-  Expect(Token::SEMICOLON, CHECK_OK);
-
-  Expression* cond = NULL;
-  Statement* next = NULL;
-  Statement* body = NULL;
-
-  // If there are let bindings, then condition and the next statement of the
-  // for loop must be parsed in a new scope.
-  Scope* inner_scope = scope();
-  // TODO(verwaest): Allocate this through a ScopeState as well.
-  if (bound_names_are_lexical && bound_names.length() > 0) {
-    inner_scope = NewScopeWithParent(inner_scope, BLOCK_SCOPE);
-    inner_scope->set_start_position(scanner()->location().beg_pos);
-  }
-  {
-    BlockState block_state(&scope_state_, inner_scope);
-
-    if (peek() != Token::SEMICOLON) {
-      cond = ParseExpression(true, CHECK_OK);
-    }
-    Expect(Token::SEMICOLON, CHECK_OK);
-
-    if (peek() != Token::RPAREN) {
-      Expression* exp = ParseExpression(true, CHECK_OK);
-      next = factory()->NewExpressionStatement(exp, exp->position());
-    }
-    Expect(Token::RPAREN, CHECK_OK);
-
-    body = ParseScopedStatement(NULL, true, CHECK_OK);
-  }
-
-  Statement* result = NULL;
-  if (bound_names_are_lexical && bound_names.length() > 0) {
-    result = DesugarLexicalBindingsInForStatement(
-        inner_scope, parsing_result.descriptor.mode, &bound_names, loop, init,
-        cond, next, body, CHECK_OK);
-    for_state.set_end_position(scanner()->location().end_pos);
-  } else {
-    for_state.set_end_position(scanner()->location().end_pos);
-    Scope* for_scope = for_state.FinalizedBlockScope();
-    if (for_scope) {
-      // Rewrite a for statement of the form
-      //   for (const x = i; c; n) b
-      //
-      // into
-      //
-      //   {
-      //     const x = i;
-      //     for (; c; n) b
-      //   }
-      //
-      // or, desugar
-      //   for (; c; n) b
-      // into
-      //   {
-      //     for (; c; n) b
-      //   }
-      // just in case b introduces a lexical binding some other way, e.g., if b
-      // is a FunctionDeclaration.
-      Block* block = factory()->NewBlock(NULL, 2, false, kNoSourcePosition);
-      if (init != nullptr) {
-        block->statements()->Add(init, zone());
-      }
-      block->statements()->Add(loop, zone());
-      block->set_scope(for_scope);
-      loop->Initialize(NULL, cond, next, body);
-      result = block;
-    } else {
-      loop->Initialize(init, cond, next, body);
-      result = loop;
-    }
-  }
-  return result;
-}
-
-
-DebuggerStatement* Parser::ParseDebuggerStatement(bool* ok) {
-  // In ECMA-262 'debugger' is defined as a reserved keyword. In some browser
-  // contexts this is used as a statement which invokes the debugger as i a
-  // break point is present.
-  // DebuggerStatement ::
-  //   'debugger' ';'
-
-  int pos = peek_position();
-  Expect(Token::DEBUGGER, CHECK_OK);
-  ExpectSemicolon(CHECK_OK);
-  return factory()->NewDebuggerStatement(pos);
-}
-
-
-bool CompileTimeValue::IsCompileTimeValue(Expression* expression) {
-  if (expression->IsLiteral()) return true;
-  MaterializedLiteral* lit = expression->AsMaterializedLiteral();
-  return lit != NULL && lit->is_simple();
-}
-
-
-Handle<FixedArray> CompileTimeValue::GetValue(Isolate* isolate,
-                                              Expression* expression) {
-  Factory* factory = isolate->factory();
-  DCHECK(IsCompileTimeValue(expression));
-  Handle<FixedArray> result = factory->NewFixedArray(2, TENURED);
-  ObjectLiteral* object_literal = expression->AsObjectLiteral();
-  if (object_literal != NULL) {
-    DCHECK(object_literal->is_simple());
-    if (object_literal->fast_elements()) {
-      result->set(kLiteralTypeSlot, Smi::FromInt(OBJECT_LITERAL_FAST_ELEMENTS));
-    } else {
-      result->set(kLiteralTypeSlot, Smi::FromInt(OBJECT_LITERAL_SLOW_ELEMENTS));
-    }
-    result->set(kElementsSlot, *object_literal->constant_properties());
-  } else {
-    ArrayLiteral* array_literal = expression->AsArrayLiteral();
-    DCHECK(array_literal != NULL && array_literal->is_simple());
-    result->set(kLiteralTypeSlot, Smi::FromInt(ARRAY_LITERAL));
-    result->set(kElementsSlot, *array_literal->constant_elements());
-  }
-  return result;
-}
-
-
-CompileTimeValue::LiteralType CompileTimeValue::GetLiteralType(
-    Handle<FixedArray> value) {
-  Smi* literal_type = Smi::cast(value->get(kLiteralTypeSlot));
-  return static_cast<LiteralType>(literal_type->value());
-}
-
-
-Handle<FixedArray> CompileTimeValue::GetElements(Handle<FixedArray> value) {
-  return Handle<FixedArray>(FixedArray::cast(value->get(kElementsSlot)));
-}
-
-void Parser::ParseArrowFunctionFormalParameters(
+void Parser::AddArrowFunctionFormalParameters(
     ParserFormalParameters* parameters, Expression* expr, int end_pos,
     bool* ok) {
   // ArrowFunctionFormals ::
@@ -3929,8 +2445,8 @@
     Expression* left = binop->left();
     Expression* right = binop->right();
     int comma_pos = binop->position();
-    ParseArrowFunctionFormalParameters(parameters, left, comma_pos,
-                                       CHECK_OK_VOID);
+    AddArrowFunctionFormalParameters(parameters, left, comma_pos,
+                                     CHECK_OK_VOID);
     // LHS of comma expression should be unparenthesized.
     expr = right;
   }
@@ -3958,80 +2474,14 @@
   AddFormalParameter(parameters, expr, initializer, end_pos, is_rest);
 }
 
-void Parser::DesugarAsyncFunctionBody(const AstRawString* function_name,
-                                      Scope* scope, ZoneList<Statement*>* body,
-                                      ExpressionClassifier* classifier,
-                                      FunctionKind kind,
-                                      FunctionBodyType body_type,
-                                      bool accept_IN, int pos, bool* ok) {
-  // function async_function() {
-  //   try {
-  //     .generator_object = %CreateGeneratorObject();
-  //     ... function body ...
-  //   } catch (e) {
-  //     return Promise.reject(e);
-  //   }
-  // }
-  scope->ForceContextAllocation();
-  Variable* temp =
-      NewTemporary(ast_value_factory()->dot_generator_object_string());
-  function_state_->set_generator_object_variable(temp);
-
-  Expression* init_generator_variable = factory()->NewAssignment(
-      Token::INIT, factory()->NewVariableProxy(temp),
-      BuildCreateJSGeneratorObject(pos, kind), kNoSourcePosition);
-  body->Add(factory()->NewExpressionStatement(init_generator_variable,
-                                              kNoSourcePosition),
-            zone());
-
-  Block* try_block = factory()->NewBlock(NULL, 8, true, kNoSourcePosition);
-
-  ZoneList<Statement*>* inner_body = try_block->statements();
-
-  Expression* return_value = nullptr;
-  if (body_type == FunctionBodyType::kNormal) {
-    ParseStatementList(inner_body, Token::RBRACE, CHECK_OK_VOID);
-    return_value = factory()->NewUndefinedLiteral(kNoSourcePosition);
-  } else {
-    return_value =
-        ParseAssignmentExpression(accept_IN, classifier, CHECK_OK_VOID);
-    RewriteNonPattern(classifier, CHECK_OK_VOID);
-  }
-
-  return_value = BuildPromiseResolve(return_value, return_value->position());
-  inner_body->Add(
-      factory()->NewReturnStatement(return_value, return_value->position()),
-      zone());
-  body->Add(BuildRejectPromiseOnException(try_block), zone());
-  scope->set_end_position(scanner()->location().end_pos);
-}
-
-DoExpression* Parser::ParseDoExpression(bool* ok) {
-  // AssignmentExpression ::
-  //     do '{' StatementList '}'
-  int pos = peek_position();
-
-  Expect(Token::DO, CHECK_OK);
-  Variable* result = NewTemporary(ast_value_factory()->dot_result_string());
-  Block* block = ParseBlock(nullptr, CHECK_OK);
-  DoExpression* expr = factory()->NewDoExpression(block, result, pos);
-  if (!Rewriter::Rewrite(this, GetClosureScope(), expr, ast_value_factory())) {
-    *ok = false;
-    return nullptr;
-  }
-  return expr;
-}
-
-void ParserBaseTraits<Parser>::ParseArrowFunctionFormalParameterList(
+void Parser::DeclareArrowFunctionFormalParameters(
     ParserFormalParameters* parameters, Expression* expr,
     const Scanner::Location& params_loc, Scanner::Location* duplicate_loc,
-    const Scope::Snapshot& scope_snapshot, bool* ok) {
+    bool* ok) {
   if (expr->IsEmptyParentheses()) return;
 
-  delegate()->ParseArrowFunctionFormalParameters(
-      parameters, expr, params_loc.end_pos, CHECK_OK_VOID);
-
-  scope_snapshot.Reparent(parameters->scope);
+  AddArrowFunctionFormalParameters(parameters, expr, params_loc.end_pos,
+                                   CHECK_OK_VOID);
 
   if (parameters->Arity() > Code::kMaxArguments) {
     ReportMessageAt(params_loc, MessageTemplate::kMalformedArrowFunParamList);
@@ -4039,23 +2489,25 @@
     return;
   }
 
-  Type::ExpressionClassifier classifier(delegate());
+  ExpressionClassifier classifier(this);
   if (!parameters->is_simple) {
-    classifier.RecordNonSimpleParameter();
+    this->classifier()->RecordNonSimpleParameter();
   }
   for (int i = 0; i < parameters->Arity(); ++i) {
     auto parameter = parameters->at(i);
-    DeclareFormalParameter(parameters->scope, parameter, &classifier);
-    if (!duplicate_loc->IsValid()) {
-      *duplicate_loc = classifier.duplicate_formal_parameter_error().location;
+    DeclareFormalParameter(parameters->scope, parameter);
+    if (!this->classifier()
+             ->is_valid_formal_parameter_list_without_duplicates() &&
+        !duplicate_loc->IsValid()) {
+      *duplicate_loc =
+          this->classifier()->duplicate_formal_parameter_error().location;
     }
   }
   DCHECK_EQ(parameters->is_simple, parameters->scope->has_simple_parameters());
 }
 
-void ParserBaseTraits<Parser>::ReindexLiterals(
-    const ParserFormalParameters& parameters) {
-  if (delegate()->function_state_->materialized_literal_count() > 0) {
+void Parser::ReindexLiterals(const ParserFormalParameters& parameters) {
+  if (function_state_->materialized_literal_count() > 0) {
     AstLiteralReindexer reindexer;
 
     for (const auto p : parameters.params) {
@@ -4063,11 +2515,24 @@
       if (p.initializer != nullptr) reindexer.Reindex(p.initializer);
     }
 
-    DCHECK(reindexer.count() <=
-           delegate()->function_state_->materialized_literal_count());
+    DCHECK(reindexer.count() <= function_state_->materialized_literal_count());
   }
 }
 
+void Parser::PrepareGeneratorVariables(FunctionState* function_state) {
+  // For generators, allocating variables in contexts is currently a win
+  // because it minimizes the work needed to suspend and resume an
+  // activation.  The machine code produced for generators (by full-codegen)
+  // relies on this forced context allocation, but not in an essential way.
+  scope()->ForceContextAllocation();
+
+  // Calling a generator returns a generator object.  That object is stored
+  // in a temporary variable, a definition that is used by "yield"
+  // expressions.
+  Variable* temp =
+      NewTemporary(ast_value_factory()->dot_generator_object_string());
+  function_state->set_generator_object_variable(temp);
+}
 
 FunctionLiteral* Parser::ParseFunctionLiteral(
     const AstRawString* function_name, Scanner::Location function_name_location,
@@ -4119,7 +2584,9 @@
   // These are all things we can know at this point, without looking at the
   // function itself.
 
-  // In addition, we need to distinguish between these cases:
+  // We separate between lazy parsing top level functions and lazy parsing inner
+  // functions, because the latter needs to do more work. In particular, we need
+  // to track unresolved variables to distinguish between these cases:
   // (function foo() {
   //   bar = function() { return 1; }
   //  })();
@@ -4131,17 +2598,18 @@
 
   // Now foo will be parsed eagerly and compiled eagerly (optimization: assume
   // parenthesis before the function means that it will be called
-  // immediately). The inner function *must* be parsed eagerly to resolve the
-  // possible reference to the variable in foo's scope. However, it's possible
-  // that it will be compiled lazily.
+  // immediately). bar can be parsed lazily, but we need to parse it in a mode
+  // that tracks unresolved variables.
+  DCHECK_IMPLIES(mode() == PARSE_LAZILY, FLAG_lazy);
+  DCHECK_IMPLIES(mode() == PARSE_LAZILY, allow_lazy());
+  DCHECK_IMPLIES(mode() == PARSE_LAZILY, extension_ == nullptr);
 
-  // To make this additional case work, both Parser and PreParser implement a
-  // logic where only top-level functions will be parsed lazily.
-  bool is_lazily_parsed = mode() == PARSE_LAZILY &&
-                          this->scope()->AllowsLazyParsing() &&
-                          !function_state_->next_function_is_parenthesized();
+  bool is_lazy_top_level_function =
+      mode() == PARSE_LAZILY &&
+      eager_compile_hint == FunctionLiteral::kShouldLazyCompile &&
+      scope()->AllowsLazyParsingWithoutUnresolvedVariables();
 
-  // Determine whether the function body can be discarded after parsing.
+  // Determine whether we can still lazy parse the inner function.
   // The preconditions are:
   // - Lazy compilation has to be enabled.
   // - Neither V8 natives nor native function declarations can be allowed,
@@ -4156,18 +2624,20 @@
   // - The function literal shouldn't be hinted to eagerly compile.
   // - For asm.js functions the body needs to be available when module
   //   validation is active, because we examine the entire module at once.
+
+  // Inner functions will be parsed using a temporary Zone. After parsing, we
+  // will migrate unresolved variable into a Scope in the main Zone.
+  // TODO(marja): Refactor parsing modes: simplify this.
   bool use_temp_zone =
-      !is_lazily_parsed && FLAG_lazy && !allow_natives() &&
-      extension_ == NULL && allow_lazy() &&
-      function_type == FunctionLiteral::kDeclaration &&
+      allow_lazy() && function_type == FunctionLiteral::kDeclaration &&
       eager_compile_hint != FunctionLiteral::kShouldEagerCompile &&
       !(FLAG_validate_asm && scope()->IsAsmModule());
+  bool is_lazy_inner_function =
+      use_temp_zone && FLAG_lazy_inner_functions && !is_lazy_top_level_function;
 
-  DeclarationScope* main_scope = nullptr;
-  if (use_temp_zone) {
-    // This Scope lives in the main Zone; we'll migrate data into it later.
-    main_scope = NewFunctionScope(kind);
-  }
+  // This Scope lives in the main zone. We'll migrate data into that zone later.
+  DeclarationScope* scope = NewFunctionScope(kind);
+  SetLanguageMode(scope, language_mode);
 
   ZoneList<Statement*>* body = nullptr;
   int arity = -1;
@@ -4177,6 +2647,32 @@
   bool should_be_used_once_hint = false;
   bool has_duplicate_parameters;
 
+  FunctionState function_state(&function_state_, &scope_state_, scope);
+#ifdef DEBUG
+  scope->SetScopeName(function_name);
+#endif
+
+  ExpressionClassifier formals_classifier(this, &duplicate_finder);
+
+  if (is_generator) PrepareGeneratorVariables(&function_state);
+
+  Expect(Token::LPAREN, CHECK_OK);
+  int start_position = scanner()->location().beg_pos;
+  this->scope()->set_start_position(start_position);
+  ParserFormalParameters formals(scope);
+  ParseFormalParameterList(&formals, CHECK_OK);
+  arity = formals.Arity();
+  Expect(Token::RPAREN, CHECK_OK);
+  int formals_end_position = scanner()->location().end_pos;
+
+  CheckArityRestrictions(arity, kind, formals.has_rest, start_position,
+                         formals_end_position, CHECK_OK);
+  Expect(Token::LBRACE, CHECK_OK);
+  // Don't include the rest parameter into the function's formal parameter
+  // count (esp. the SharedFunctionInfo::internal_formal_parameter_count,
+  // which says whether we need to create an arguments adaptor frame).
+  if (formals.has_rest) arity--;
+
   {
     // Temporary zones can nest. When we migrate free variables (see below), we
     // need to recreate them in the previous Zone.
@@ -4187,94 +2683,58 @@
     // new temporary zone if the preconditions are satisfied, and ensures that
     // the previous zone is always restored after parsing the body. To be able
     // to do scope analysis correctly after full parsing, we migrate needed
-    // information from scope into main_scope when the function has been parsed.
+    // information when the function is parsed.
     Zone temp_zone(zone()->allocator());
     DiscardableZoneScope zone_scope(this, &temp_zone, use_temp_zone);
-
-    DeclarationScope* scope = NewFunctionScope(kind);
-    SetLanguageMode(scope, language_mode);
-    if (!use_temp_zone) {
-      main_scope = scope;
-    } else {
-      DCHECK(main_scope->zone() != scope->zone());
-    }
-
-    FunctionState function_state(&function_state_, &scope_state_, scope, kind);
 #ifdef DEBUG
-    scope->SetScopeName(function_name);
+    if (use_temp_zone) scope->set_needs_migration();
 #endif
-    ExpressionClassifier formals_classifier(this, &duplicate_finder);
 
-    if (is_generator) {
-      // For generators, allocating variables in contexts is currently a win
-      // because it minimizes the work needed to suspend and resume an
-      // activation.  The machine code produced for generators (by full-codegen)
-      // relies on this forced context allocation, but not in an essential way.
-      this->scope()->ForceContextAllocation();
-
-      // Calling a generator returns a generator object.  That object is stored
-      // in a temporary variable, a definition that is used by "yield"
-      // expressions. This also marks the FunctionState as a generator.
-      Variable* temp =
-          NewTemporary(ast_value_factory()->dot_generator_object_string());
-      function_state.set_generator_object_variable(temp);
-    }
-
-    Expect(Token::LPAREN, CHECK_OK);
-    int start_position = scanner()->location().beg_pos;
-    this->scope()->set_start_position(start_position);
-    ParserFormalParameters formals(scope);
-    ParseFormalParameterList(&formals, &formals_classifier, CHECK_OK);
-    arity = formals.Arity();
-    Expect(Token::RPAREN, CHECK_OK);
-    int formals_end_position = scanner()->location().end_pos;
-
-    CheckArityRestrictions(arity, kind, formals.has_rest, start_position,
-                           formals_end_position, CHECK_OK);
-    Expect(Token::LBRACE, CHECK_OK);
-    // Don't include the rest parameter into the function's formal parameter
-    // count (esp. the SharedFunctionInfo::internal_formal_parameter_count,
-    // which says whether we need to create an arguments adaptor frame).
-    if (formals.has_rest) arity--;
-
-    // Eager or lazy parse?
-    // If is_lazily_parsed, we'll parse lazy. If we can set a bookmark, we'll
-    // pass it to SkipLazyFunctionBody, which may use it to abort lazy
-    // parsing if it suspect that wasn't a good idea. If so, or if we didn't
-    // try to lazy parse in the first place, we'll have to parse eagerly.
-    Scanner::BookmarkScope bookmark(scanner());
-    if (is_lazily_parsed) {
-      Scanner::BookmarkScope* maybe_bookmark =
-          bookmark.Set() ? &bookmark : nullptr;
-      SkipLazyFunctionBody(&materialized_literal_count,
-                           &expected_property_count, /*CHECK_OK*/ ok,
-                           maybe_bookmark);
+    // Eager or lazy parse? If is_lazy_top_level_function, we'll parse
+    // lazily. We'll call SkipLazyFunctionBody, which may decide to abort lazy
+    // parsing if it suspects that wasn't a good idea. If so (in which case the
+    // parser is expected to have backtracked), or if we didn't try to lazy
+    // parse in the first place, we'll have to parse eagerly.
+    if (is_lazy_top_level_function || is_lazy_inner_function) {
+      Scanner::BookmarkScope bookmark(scanner());
+      bookmark.Set();
+      LazyParsingResult result = SkipLazyFunctionBody(
+          &materialized_literal_count, &expected_property_count,
+          is_lazy_inner_function, is_lazy_top_level_function, CHECK_OK);
 
       materialized_literal_count += formals.materialized_literals_count +
                                     function_state.materialized_literal_count();
 
-      if (bookmark.HasBeenReset()) {
+      if (result == kLazyParsingAborted) {
+        DCHECK(is_lazy_top_level_function);
+        bookmark.Apply();
         // Trigger eager (re-)parsing, just below this block.
-        is_lazily_parsed = false;
+        is_lazy_top_level_function = false;
 
         // This is probably an initialization function. Inform the compiler it
         // should also eager-compile this function, and that we expect it to be
         // used once.
         eager_compile_hint = FunctionLiteral::kShouldEagerCompile;
         should_be_used_once_hint = true;
+        scope->ResetAfterPreparsing(ast_value_factory(), true);
+        zone_scope.Reset();
+        use_temp_zone = false;
       }
     }
-    if (!is_lazily_parsed) {
+
+    if (!is_lazy_top_level_function && !is_lazy_inner_function) {
       body = ParseEagerFunctionBody(function_name, pos, formals, kind,
                                     function_type, CHECK_OK);
 
       materialized_literal_count = function_state.materialized_literal_count();
       expected_property_count = function_state.expected_property_count();
-      if (use_temp_zone) {
-        // If the preconditions are correct the function body should never be
-        // accessed, but do this anyway for better behaviour if they're wrong.
-        body = nullptr;
-      }
+    }
+
+    if (use_temp_zone || is_lazy_top_level_function) {
+      // If the preconditions are correct the function body should never be
+      // accessed, but do this anyway for better behaviour if they're wrong.
+      body = nullptr;
+      scope->AnalyzePartially(&previous_zone_ast_node_factory);
     }
 
     // Parsing the body may change the language mode in our scope.
@@ -4286,13 +2746,13 @@
                       function_name_location, CHECK_OK);
     const bool allow_duplicate_parameters =
         is_sloppy(language_mode) && formals.is_simple && !IsConciseMethod(kind);
-    ValidateFormalParameters(&formals_classifier, language_mode,
-                             allow_duplicate_parameters, CHECK_OK);
+    ValidateFormalParameters(language_mode, allow_duplicate_parameters,
+                             CHECK_OK);
 
     if (is_strict(language_mode)) {
       CheckStrictOctalLiteral(scope->start_position(), scope->end_position(),
                               CHECK_OK);
-      CheckDecimalLiteralWithLeadingZero(use_counts_, scope->start_position(),
+      CheckDecimalLiteralWithLeadingZero(scope->start_position(),
                                          scope->end_position());
     }
     CheckConflictingVarDeclarations(scope, CHECK_OK);
@@ -4302,12 +2762,7 @@
       RewriteDestructuringAssignments();
     }
     has_duplicate_parameters =
-      !formals_classifier.is_valid_formal_parameter_list_without_duplicates();
-
-    if (use_temp_zone) {
-      DCHECK(main_scope != scope);
-      scope->AnalyzePartially(main_scope, &previous_zone_ast_node_factory);
-    }
+        !classifier()->is_valid_formal_parameter_list_without_duplicates();
   }  // DiscardableZoneScope goes out of scope.
 
   FunctionLiteral::ParameterFlag duplicate_parameters =
@@ -4316,53 +2771,31 @@
 
   // Note that the FunctionLiteral needs to be created in the main Zone again.
   FunctionLiteral* function_literal = factory()->NewFunctionLiteral(
-      function_name, main_scope, body, materialized_literal_count,
+      function_name, scope, body, materialized_literal_count,
       expected_property_count, arity, duplicate_parameters, function_type,
-      eager_compile_hint, kind, pos);
+      eager_compile_hint, pos);
   function_literal->set_function_token_position(function_token_pos);
   if (should_be_used_once_hint)
     function_literal->set_should_be_used_once_hint();
 
-  if (fni_ != NULL && should_infer_name) fni_->AddFunction(function_literal);
+  if (should_infer_name) {
+    DCHECK_NOT_NULL(fni_);
+    fni_->AddFunction(function_literal);
+  }
   return function_literal;
 }
 
-Expression* Parser::ParseAsyncFunctionExpression(bool* ok) {
-  // AsyncFunctionDeclaration ::
-  //   async [no LineTerminator here] function ( FormalParameters[Await] )
-  //       { AsyncFunctionBody }
-  //
-  //   async [no LineTerminator here] function BindingIdentifier[Await]
-  //       ( FormalParameters[Await] ) { AsyncFunctionBody }
-  DCHECK_EQ(scanner()->current_token(), Token::ASYNC);
-  int pos = position();
-  Expect(Token::FUNCTION, CHECK_OK);
-  bool is_strict_reserved = false;
-  const AstRawString* name = nullptr;
-  FunctionLiteral::FunctionType type = FunctionLiteral::kAnonymousExpression;
-
-  if (peek_any_identifier()) {
-    type = FunctionLiteral::kNamedExpression;
-    name = ParseIdentifierOrStrictReservedWord(FunctionKind::kAsyncFunction,
-                                               &is_strict_reserved, CHECK_OK);
-  }
-  return ParseFunctionLiteral(name, scanner()->location(),
-                              is_strict_reserved ? kFunctionNameIsStrictReserved
-                                                 : kFunctionNameValidityUnknown,
-                              FunctionKind::kAsyncFunction, pos, type,
-                              language_mode(), CHECK_OK);
-}
-
-void Parser::SkipLazyFunctionBody(int* materialized_literal_count,
-                                  int* expected_property_count, bool* ok,
-                                  Scanner::BookmarkScope* bookmark) {
-  DCHECK_IMPLIES(bookmark, bookmark->HasBeenSet());
+Parser::LazyParsingResult Parser::SkipLazyFunctionBody(
+    int* materialized_literal_count, int* expected_property_count,
+    bool is_inner_function, bool may_abort, bool* ok) {
   if (produce_cached_parse_data()) CHECK(log_);
 
   int function_block_pos = position();
-  DeclarationScope* scope = this->scope()->AsDeclarationScope();
+  DeclarationScope* scope = function_state_->scope();
   DCHECK(scope->is_function_scope());
-  if (consume_cached_parse_data() && !cached_parse_data_->rejected()) {
+  // Inner functions are not part of the cached data.
+  if (!is_inner_function && consume_cached_parse_data() &&
+      !cached_parse_data_->rejected()) {
     // If we have cached data, we use it to skip parsing the function body. The
     // data contains the information we need to construct the lazy function.
     FunctionEntry entry =
@@ -4374,14 +2807,14 @@
       scanner()->SeekForward(entry.end_pos() - 1);
 
       scope->set_end_position(entry.end_pos());
-      Expect(Token::RBRACE, CHECK_OK_VOID);
+      Expect(Token::RBRACE, CHECK_OK_VALUE(kLazyParsingComplete));
       total_preparse_skipped_ += scope->end_position() - function_block_pos;
       *materialized_literal_count = entry.literal_count();
       *expected_property_count = entry.property_count();
       SetLanguageMode(scope, entry.language_mode());
       if (entry.uses_super_property()) scope->RecordSuperPropertyUsage();
       if (entry.calls_eval()) scope->RecordEvalCall();
-      return;
+      return kLazyParsingComplete;
     }
     cached_parse_data_->Reject();
   }
@@ -4389,32 +2822,32 @@
   // AST. This gathers the data needed to build a lazy function.
   SingletonLogger logger;
   PreParser::PreParseResult result =
-      ParseLazyFunctionBodyWithPreParser(&logger, bookmark);
-  if (bookmark && bookmark->HasBeenReset()) {
-    return;  // Return immediately if pre-parser devided to abort parsing.
-  }
+      ParseLazyFunctionBodyWithPreParser(&logger, is_inner_function, may_abort);
+
+  // Return immediately if pre-parser decided to abort parsing.
+  if (result == PreParser::kPreParseAbort) return kLazyParsingAborted;
   if (result == PreParser::kPreParseStackOverflow) {
     // Propagate stack overflow.
     set_stack_overflow();
     *ok = false;
-    return;
+    return kLazyParsingComplete;
   }
   if (logger.has_error()) {
     ReportMessageAt(Scanner::Location(logger.start(), logger.end()),
                     logger.message(), logger.argument_opt(),
                     logger.error_type());
     *ok = false;
-    return;
+    return kLazyParsingComplete;
   }
   scope->set_end_position(logger.end());
-  Expect(Token::RBRACE, CHECK_OK_VOID);
+  Expect(Token::RBRACE, CHECK_OK_VALUE(kLazyParsingComplete));
   total_preparse_skipped_ += scope->end_position() - function_block_pos;
   *materialized_literal_count = logger.literals();
   *expected_property_count = logger.properties();
   SetLanguageMode(scope, logger.language_mode());
   if (logger.uses_super_property()) scope->RecordSuperPropertyUsage();
   if (logger.calls_eval()) scope->RecordEvalCall();
-  if (produce_cached_parse_data()) {
+  if (!is_inner_function && produce_cached_parse_data()) {
     DCHECK(log_);
     // Position right after terminal '}'.
     int body_end = scanner()->location().end_pos;
@@ -4422,6 +2855,7 @@
                       *expected_property_count, language_mode(),
                       scope->uses_super_property(), scope->calls_eval());
   }
+  return kLazyParsingComplete;
 }
 
 
@@ -4438,9 +2872,9 @@
           Token::EQ_STRICT, factory()->NewVariableProxy(var),
           factory()->NewNullLiteral(kNoSourcePosition), kNoSourcePosition),
       kNoSourcePosition);
-  Expression* throw_type_error = this->NewThrowTypeError(
-      MessageTemplate::kNonCoercible, ast_value_factory()->empty_string(),
-      kNoSourcePosition);
+  Expression* throw_type_error =
+      NewThrowTypeError(MessageTemplate::kNonCoercible,
+                        ast_value_factory()->empty_string(), kNoSourcePosition);
   IfStatement* if_statement = factory()->NewIfStatement(
       condition,
       factory()->NewExpressionStatement(throw_type_error, kNoSourcePosition),
@@ -4495,7 +2929,6 @@
     if (parameter.is_rest && parameter.pattern->IsVariableProxy()) break;
     DeclarationDescriptor descriptor;
     descriptor.declaration_kind = DeclarationDescriptor::PARAMETER;
-    descriptor.parser = this;
     descriptor.scope = scope();
     descriptor.hoist_scope = nullptr;
     descriptor.mode = LET;
@@ -4544,8 +2977,8 @@
     BlockState block_state(&scope_state_, param_scope);
     DeclarationParsingResult::Declaration decl(
         parameter.pattern, parameter.initializer_end_position, initial_value);
-    PatternRewriter::DeclareAndInitializeVariables(param_block, &descriptor,
-                                                   &decl, nullptr, CHECK_OK);
+    PatternRewriter::DeclareAndInitializeVariables(
+        this, param_block, &descriptor, &decl, nullptr, CHECK_OK);
 
     if (param_block != init_block) {
       param_scope = block_state.FinalizedBlockScope();
@@ -4558,28 +2991,74 @@
   return init_block;
 }
 
-Block* Parser::BuildRejectPromiseOnException(Block* block) {
-  // try { <block> } catch (error) { return Promise.reject(error); }
-  Block* try_block = block;
+Block* Parser::BuildRejectPromiseOnException(Block* inner_block, bool* ok) {
+  // .promise = %AsyncFunctionPromiseCreate();
+  // try {
+  //   <inner_block>
+  // } catch (.catch) {
+  //   %RejectPromiseNoDebugEvent(.promise, .catch);
+  //   return .promise;
+  // } finally {
+  //   %AsyncFunctionPromiseRelease(.promise);
+  // }
+  Block* result = factory()->NewBlock(nullptr, 2, true, kNoSourcePosition);
+
+  // .promise = %AsyncFunctionPromiseCreate();
+  Statement* set_promise;
+  {
+    Expression* create_promise = factory()->NewCallRuntime(
+        Context::ASYNC_FUNCTION_PROMISE_CREATE_INDEX,
+        new (zone()) ZoneList<Expression*>(0, zone()), kNoSourcePosition);
+    Assignment* assign_promise = factory()->NewAssignment(
+        Token::INIT, factory()->NewVariableProxy(PromiseVariable()),
+        create_promise, kNoSourcePosition);
+    set_promise =
+        factory()->NewExpressionStatement(assign_promise, kNoSourcePosition);
+  }
+  result->statements()->Add(set_promise, zone());
+
+  // catch (.catch) { return %RejectPromiseNoDebugEvent(.promise, .catch), .promise }
   Scope* catch_scope = NewScope(CATCH_SCOPE);
   catch_scope->set_is_hidden();
   Variable* catch_variable =
       catch_scope->DeclareLocal(ast_value_factory()->dot_catch_string(), VAR,
-                                kCreatedInitialized, Variable::NORMAL);
+                                kCreatedInitialized, NORMAL_VARIABLE);
   Block* catch_block = factory()->NewBlock(nullptr, 1, true, kNoSourcePosition);
 
-  Expression* promise_reject = BuildPromiseReject(
+  Expression* promise_reject = BuildRejectPromise(
       factory()->NewVariableProxy(catch_variable), kNoSourcePosition);
-
   ReturnStatement* return_promise_reject =
       factory()->NewReturnStatement(promise_reject, kNoSourcePosition);
   catch_block->statements()->Add(return_promise_reject, zone());
-  TryStatement* try_catch_statement = factory()->NewTryCatchStatement(
-      try_block, catch_scope, catch_variable, catch_block, kNoSourcePosition);
 
-  block = factory()->NewBlock(nullptr, 1, true, kNoSourcePosition);
-  block->statements()->Add(try_catch_statement, zone());
-  return block;
+  TryStatement* try_catch_statement =
+      factory()->NewTryCatchStatementForAsyncAwait(inner_block, catch_scope,
+                                                   catch_variable, catch_block,
+                                                   kNoSourcePosition);
+
+  // There is no TryCatchFinally node, so wrap it in an outer try/finally
+  Block* outer_try_block =
+      factory()->NewBlock(nullptr, 1, true, kNoSourcePosition);
+  outer_try_block->statements()->Add(try_catch_statement, zone());
+
+  // finally { %AsyncFunctionPromiseRelease(.promise) }
+  Block* finally_block =
+      factory()->NewBlock(nullptr, 1, true, kNoSourcePosition);
+  {
+    ZoneList<Expression*>* args = new (zone()) ZoneList<Expression*>(1, zone());
+    args->Add(factory()->NewVariableProxy(PromiseVariable()), zone());
+    Expression* call_promise_release = factory()->NewCallRuntime(
+        Context::ASYNC_FUNCTION_PROMISE_RELEASE_INDEX, args, kNoSourcePosition);
+    Statement* promise_release = factory()->NewExpressionStatement(
+        call_promise_release, kNoSourcePosition);
+    finally_block->statements()->Add(promise_release, zone());
+  }
+
+  Statement* try_finally_statement = factory()->NewTryFinallyStatement(
+      outer_try_block, finally_block, kNoSourcePosition);
+
+  result->statements()->Add(try_finally_statement, zone());
+  return result;
 }
 
 Expression* Parser::BuildCreateJSGeneratorObject(int pos, FunctionKind kind) {
@@ -4593,26 +3072,68 @@
                                    pos);
 }
 
-Expression* Parser::BuildPromiseResolve(Expression* value, int pos) {
-  ZoneList<Expression*>* args = new (zone()) ZoneList<Expression*>(1, zone());
+Expression* Parser::BuildResolvePromise(Expression* value, int pos) {
+  // %ResolvePromise(.promise, value), .promise
+  ZoneList<Expression*>* args = new (zone()) ZoneList<Expression*>(2, zone());
+  args->Add(factory()->NewVariableProxy(PromiseVariable()), zone());
   args->Add(value, zone());
-  return factory()->NewCallRuntime(Context::PROMISE_CREATE_RESOLVED_INDEX, args,
-                                   pos);
+  Expression* call_runtime =
+      factory()->NewCallRuntime(Context::PROMISE_RESOLVE_INDEX, args, pos);
+  return factory()->NewBinaryOperation(
+      Token::COMMA, call_runtime,
+      factory()->NewVariableProxy(PromiseVariable()), pos);
 }
 
-Expression* Parser::BuildPromiseReject(Expression* value, int pos) {
-  ZoneList<Expression*>* args = new (zone()) ZoneList<Expression*>(1, zone());
+Expression* Parser::BuildRejectPromise(Expression* value, int pos) {
+  // %RejectPromiseNoDebugEvent(.promise, value), .promise
+  // The NoDebugEvent variant disables the additional debug event for the
+  // rejection since a debug event already happened for the exception that got
+  // us here.
+  ZoneList<Expression*>* args = new (zone()) ZoneList<Expression*>(2, zone());
+  args->Add(factory()->NewVariableProxy(PromiseVariable()), zone());
   args->Add(value, zone());
-  return factory()->NewCallRuntime(Context::PROMISE_CREATE_REJECTED_INDEX, args,
-                                   pos);
+  Expression* call_runtime = factory()->NewCallRuntime(
+      Context::REJECT_PROMISE_NO_DEBUG_EVENT_INDEX, args, pos);
+  return factory()->NewBinaryOperation(
+      Token::COMMA, call_runtime,
+      factory()->NewVariableProxy(PromiseVariable()), pos);
+}
+
+Variable* Parser::PromiseVariable() {
+  // Based on the various compilation paths, there are many different code
+  // paths which may be the first to access the Promise temporary. Whichever
+  // comes first should create it and stash it in the FunctionState.
+  Variable* promise = function_state_->promise_variable();
+  if (function_state_->promise_variable() == nullptr) {
+    promise = scope()->NewTemporary(ast_value_factory()->empty_string());
+    function_state_->set_promise_variable(promise);
+  }
+  return promise;
+}
+
+Expression* Parser::BuildInitialYield(int pos, FunctionKind kind) {
+  Expression* allocation = BuildCreateJSGeneratorObject(pos, kind);
+  VariableProxy* init_proxy =
+      factory()->NewVariableProxy(function_state_->generator_object_variable());
+  Assignment* assignment = factory()->NewAssignment(
+      Token::INIT, init_proxy, allocation, kNoSourcePosition);
+  VariableProxy* get_proxy =
+      factory()->NewVariableProxy(function_state_->generator_object_variable());
+  // The position of the yield is important for reporting the exception
+  // caused by calling the .throw method on a generator suspended at the
+  // initial yield (i.e. right after generator instantiation).
+  return factory()->NewYield(get_proxy, assignment, scope()->start_position(),
+                             Yield::kOnExceptionThrow);
 }
 
 ZoneList<Statement*>* Parser::ParseEagerFunctionBody(
     const AstRawString* function_name, int pos,
     const ParserFormalParameters& parameters, FunctionKind kind,
     FunctionLiteral::FunctionType function_type, bool* ok) {
-  // Everything inside an eagerly parsed function will be parsed eagerly
-  // (see comment above).
+  // Everything inside an eagerly parsed function will be parsed eagerly (see
+  // comment above). Lazy inner functions are handled separately and they won't
+  // require the mode to be PARSE_LAZILY (see ParseFunctionLiteral).
+  // TODO(marja): Refactor parsing modes: remove this.
   ParsingModeScope parsing_mode(this, PARSE_EAGERLY);
   ZoneList<Statement*>* result = new(zone()) ZoneList<Statement*>(8, zone());
 
@@ -4657,26 +3178,10 @@
 
       Block* try_block =
           factory()->NewBlock(nullptr, 3, false, kNoSourcePosition);
-
-      {
-        Expression* allocation = BuildCreateJSGeneratorObject(pos, kind);
-        VariableProxy* init_proxy = factory()->NewVariableProxy(
-            function_state_->generator_object_variable());
-        Assignment* assignment = factory()->NewAssignment(
-            Token::INIT, init_proxy, allocation, kNoSourcePosition);
-        VariableProxy* get_proxy = factory()->NewVariableProxy(
-            function_state_->generator_object_variable());
-        // The position of the yield is important for reporting the exception
-        // caused by calling the .throw method on a generator suspended at the
-        // initial yield (i.e. right after generator instantiation).
-        Yield* yield = factory()->NewYield(get_proxy, assignment,
-                                           scope()->start_position(),
-                                           Yield::kOnExceptionThrow);
-        try_block->statements()->Add(
-            factory()->NewExpressionStatement(yield, kNoSourcePosition),
-            zone());
-      }
-
+      Expression* initial_yield = BuildInitialYield(pos, kind);
+      try_block->statements()->Add(
+          factory()->NewExpressionStatement(initial_yield, kNoSourcePosition),
+          zone());
       ParseStatementList(try_block->statements(), Token::RBRACE, CHECK_OK);
 
       Statement* final_return = factory()->NewReturnStatement(
@@ -4700,16 +3205,15 @@
                 zone());
     } else if (IsAsyncFunction(kind)) {
       const bool accept_IN = true;
-      DesugarAsyncFunctionBody(function_name, inner_scope, body, nullptr, kind,
-                               FunctionBodyType::kNormal, accept_IN, pos,
-                               CHECK_OK);
+      ParseAsyncFunctionBody(inner_scope, body, kind, FunctionBodyType::kNormal,
+                             accept_IN, pos, CHECK_OK);
     } else {
       ParseStatementList(body, Token::RBRACE, CHECK_OK);
     }
 
     if (IsSubclassConstructor(kind)) {
-      body->Add(factory()->NewReturnStatement(
-                    this->ThisExpression(kNoSourcePosition), kNoSourcePosition),
+      body->Add(factory()->NewReturnStatement(ThisExpression(kNoSourcePosition),
+                                              kNoSourcePosition),
                 zone());
     }
   }
@@ -4726,12 +3230,12 @@
     Block* init_block = BuildParameterInitializationBlock(parameters, CHECK_OK);
 
     if (is_sloppy(inner_scope->language_mode())) {
-      InsertSloppyBlockFunctionVarBindings(inner_scope, function_scope,
-                                           CHECK_OK);
+      InsertSloppyBlockFunctionVarBindings(inner_scope);
     }
 
+    // TODO(littledan): Merge the two rejection blocks into one
     if (IsAsyncFunction(kind)) {
-      init_block = BuildRejectPromiseOnException(init_block);
+      init_block = BuildRejectPromiseOnException(init_block, CHECK_OK);
     }
 
     DCHECK_NOT_NULL(init_block);
@@ -4748,31 +3252,42 @@
   } else {
     DCHECK_EQ(inner_scope, function_scope);
     if (is_sloppy(function_scope->language_mode())) {
-      InsertSloppyBlockFunctionVarBindings(function_scope, nullptr, CHECK_OK);
+      InsertSloppyBlockFunctionVarBindings(function_scope);
     }
   }
 
+  if (!IsArrowFunction(kind)) {
+    // Declare arguments after parsing the function since lexical 'arguments'
+    // masks the arguments object. Declare arguments before declaring the
+    // function var since the arguments object masks 'function arguments'.
+    function_scope->DeclareArguments(ast_value_factory());
+  }
+
   if (function_type == FunctionLiteral::kNamedExpression) {
-    // Now that we know the language mode, we can create the const assignment
-    // in the previously reserved spot.
-    DCHECK_EQ(function_scope, scope());
-    Variable* fvar = function_scope->DeclareFunctionVar(function_name);
-    VariableProxy* fproxy = factory()->NewVariableProxy(fvar);
-    result->Set(kFunctionNameAssignmentIndex,
-                factory()->NewExpressionStatement(
-                    factory()->NewAssignment(Token::INIT, fproxy,
-                                             factory()->NewThisFunction(pos),
-                                             kNoSourcePosition),
-                    kNoSourcePosition));
+    Statement* statement;
+    if (function_scope->LookupLocal(function_name) == nullptr) {
+      // Now that we know the language mode, we can create the const assignment
+      // in the previously reserved spot.
+      DCHECK_EQ(function_scope, scope());
+      Variable* fvar = function_scope->DeclareFunctionVar(function_name);
+      VariableProxy* fproxy = factory()->NewVariableProxy(fvar);
+      statement = factory()->NewExpressionStatement(
+          factory()->NewAssignment(Token::INIT, fproxy,
+                                   factory()->NewThisFunction(pos),
+                                   kNoSourcePosition),
+          kNoSourcePosition);
+    } else {
+      statement = factory()->NewEmptyStatement(kNoSourcePosition);
+    }
+    result->Set(kFunctionNameAssignmentIndex, statement);
   }
 
   MarkCollectedTailCallExpressions();
   return result;
 }
 
-
 PreParser::PreParseResult Parser::ParseLazyFunctionBodyWithPreParser(
-    SingletonLogger* logger, Scanner::BookmarkScope* bookmark) {
+    SingletonLogger* logger, bool is_inner_function, bool may_abort) {
   // This function may be called on a background thread too; record only the
   // main thread preparse times.
   if (pre_parse_timer_ != NULL) {
@@ -4794,209 +3309,337 @@
     SET_ALLOW(harmony_restrictive_declarations);
     SET_ALLOW(harmony_async_await);
     SET_ALLOW(harmony_trailing_commas);
+    SET_ALLOW(harmony_class_fields);
 #undef SET_ALLOW
   }
+  // Aborting inner function preparsing would leave scopes in an inconsistent
+  // state; we don't parse inner functions in the abortable mode anyway.
+  DCHECK(!is_inner_function || !may_abort);
+
+  DeclarationScope* function_scope = function_state_->scope();
   PreParser::PreParseResult result = reusable_preparser_->PreParseLazyFunction(
-      language_mode(), function_state_->kind(),
-      scope()->AsDeclarationScope()->has_simple_parameters(), parsing_module_,
-      logger, bookmark, use_counts_);
+      function_scope, parsing_module_, logger, is_inner_function, may_abort,
+      use_counts_);
   if (pre_parse_timer_ != NULL) {
     pre_parse_timer_->Stop();
   }
   return result;
 }
 
-Expression* Parser::ParseClassLiteral(ExpressionClassifier* classifier,
-                                      const AstRawString* name,
-                                      Scanner::Location class_name_location,
-                                      bool name_is_strict_reserved, int pos,
-                                      bool* ok) {
-  // All parts of a ClassDeclaration and ClassExpression are strict code.
-  if (name_is_strict_reserved) {
-    ReportMessageAt(class_name_location,
-                    MessageTemplate::kUnexpectedStrictReserved);
-    *ok = false;
-    return nullptr;
-  }
-  if (IsEvalOrArguments(name)) {
-    ReportMessageAt(class_name_location, MessageTemplate::kStrictEvalArguments);
-    *ok = false;
-    return nullptr;
-  }
+Expression* Parser::InstallHomeObject(Expression* function_literal,
+                                      Expression* home_object) {
+  Block* do_block = factory()->NewBlock(nullptr, 1, false, kNoSourcePosition);
+  Variable* result_var =
+      scope()->NewTemporary(ast_value_factory()->empty_string());
+  DoExpression* do_expr =
+      factory()->NewDoExpression(do_block, result_var, kNoSourcePosition);
+  Assignment* init = factory()->NewAssignment(
+      Token::ASSIGN, factory()->NewVariableProxy(result_var), function_literal,
+      kNoSourcePosition);
+  do_block->statements()->Add(
+      factory()->NewExpressionStatement(init, kNoSourcePosition), zone());
+  Property* home_object_property = factory()->NewProperty(
+      factory()->NewVariableProxy(result_var),
+      factory()->NewSymbolLiteral("home_object_symbol", kNoSourcePosition),
+      kNoSourcePosition);
+  Assignment* assignment = factory()->NewAssignment(
+      Token::ASSIGN, home_object_property, home_object, kNoSourcePosition);
+  do_block->statements()->Add(
+      factory()->NewExpressionStatement(assignment, kNoSourcePosition), zone());
+  return do_expr;
+}
 
-  BlockState block_state(&scope_state_);
+const AstRawString* ClassFieldVariableName(bool is_name,
+                                           AstValueFactory* ast_value_factory,
+                                           int index) {
+  std::string name =
+      ".class-field-" + std::to_string(index) + (is_name ? "-name" : "-func");
+  return ast_value_factory->GetOneByteString(name.c_str());
+}
+
+FunctionLiteral* Parser::SynthesizeClassFieldInitializer(int count) {
+  DCHECK(count > 0);
+  // Makes a function which reads the names and initializers for each class
+  // field out of deterministically named local variables and sets each property
+  // to the result of evaluating its corresponding initializer in turn.
+
+  // This produces a function which looks like
+  // function () {
+  //   this[.class-field-0-name] = .class-field-0-func();
+  //   this[.class-field-1-name] = .class-field-1-func();
+  //   [...]
+  //   this[.class-field-n-name] = .class-field-n-func();
+  //   return this;
+  // }
+  // except that it performs defineProperty, so that instead of '=' it has
+  // %DefineDataPropertyInLiteral(this, .class-field-0-name,
+  // .class-field-0-func(),
+  //   DONT_ENUM, false)
+
   RaiseLanguageMode(STRICT);
+  FunctionKind kind = FunctionKind::kConciseMethod;
+  DeclarationScope* initializer_scope = NewFunctionScope(kind);
+  SetLanguageMode(initializer_scope, language_mode());
+  initializer_scope->set_start_position(scanner()->location().end_pos);
+  initializer_scope->set_end_position(scanner()->location().end_pos);
+  FunctionState initializer_state(&function_state_, &scope_state_,
+                                  initializer_scope);
+  ZoneList<Statement*>* body = new (zone()) ZoneList<Statement*>(count, zone());
+  for (int i = 0; i < count; ++i) {
+    const AstRawString* name =
+        ClassFieldVariableName(true, ast_value_factory(), i);
+    VariableProxy* name_proxy = scope()->NewUnresolved(factory(), name);
+    const AstRawString* function_name =
+        ClassFieldVariableName(false, ast_value_factory(), i);
+    VariableProxy* function_proxy =
+        scope()->NewUnresolved(factory(), function_name);
+    ZoneList<Expression*>* args = new (zone()) ZoneList<Expression*>(2, zone());
+    args->Add(function_proxy, zone());
+    args->Add(ThisExpression(kNoSourcePosition), zone());
+    Expression* call = factory()->NewCallRuntime(Runtime::kInlineCall, args,
+                                                 kNoSourcePosition);
+    ZoneList<Expression*>* define_property_args =
+        new (zone()) ZoneList<Expression*>(5, zone());
+    define_property_args->Add(ThisExpression(kNoSourcePosition), zone());
+    define_property_args->Add(name_proxy, zone());
+    define_property_args->Add(call, zone());
+    define_property_args->Add(
+        factory()->NewNumberLiteral(DONT_ENUM, kNoSourcePosition), zone());
+    define_property_args->Add(
+        factory()->NewNumberLiteral(
+            false,  // TODO(bakkot) function name inference a la class { x =
+                    // function(){}; static y = function(){}; }
+            kNoSourcePosition),
+        zone());
+    body->Add(factory()->NewExpressionStatement(
+                  factory()->NewCallRuntime(
+                      Runtime::kDefineDataProperty,
+                      define_property_args,  // TODO(bakkot) verify that this is
+                      // the same as object_define_property
+                      kNoSourcePosition),
+                  kNoSourcePosition),
+              zone());
+  }
+  body->Add(factory()->NewReturnStatement(ThisExpression(kNoSourcePosition),
+                                          kNoSourcePosition),
+            zone());
+  FunctionLiteral* function_literal = factory()->NewFunctionLiteral(
+      ast_value_factory()->empty_string(), initializer_scope, body,
+      initializer_state.materialized_literal_count(),
+      initializer_state.expected_property_count(), 0,
+      FunctionLiteral::kNoDuplicateParameters,
+      FunctionLiteral::kAnonymousExpression,
+      FunctionLiteral::kShouldLazyCompile, initializer_scope->start_position());
+  function_literal->set_is_class_field_initializer(true);
+  function_literal->scope()->set_arity(count);
+  return function_literal;
+}
+
+FunctionLiteral* Parser::InsertClassFieldInitializer(
+    FunctionLiteral* constructor) {
+  Statement* call_initializer = factory()->NewExpressionStatement(
+      CallClassFieldInitializer(
+          constructor->scope(),
+          constructor->scope()->NewUnresolved(
+              factory(), ast_value_factory()->this_string(), kNoSourcePosition,
+              kNoSourcePosition + 4, THIS_VARIABLE)),
+      kNoSourcePosition);
+  constructor->body()->InsertAt(0, call_initializer, zone());
+  return constructor;
+}
+
+// If a class name is specified, this method declares the class variable
+// and sets class_info->proxy to point to that name.
+void Parser::DeclareClassVariable(const AstRawString* name, Scope* block_scope,
+                                  ClassInfo* class_info, int class_token_pos,
+                                  bool* ok) {
 #ifdef DEBUG
   scope()->SetScopeName(name);
 #endif
 
-  VariableProxy* proxy = nullptr;
   if (name != nullptr) {
-    proxy = NewUnresolved(name);
-    // TODO(verwaest): declare via block_state.
-    Declaration* declaration =
-        factory()->NewVariableDeclaration(proxy, block_state.scope(), pos);
+    class_info->proxy = factory()->NewVariableProxy(name, NORMAL_VARIABLE);
+    Declaration* declaration = factory()->NewVariableDeclaration(
+        class_info->proxy, block_scope, class_token_pos);
     Declare(declaration, DeclarationDescriptor::NORMAL, CONST,
-            DefaultInitializationFlag(CONST), CHECK_OK);
+            Variable::DefaultInitializationFlag(CONST), ok);
+  }
+}
+
+// This method declares a property of the given class.  It updates the
+// following fields of class_info, as appropriate:
+//   - constructor
+//   - static_initializer_var
+//   - instance_field_initializers
+//   - properties
+void Parser::DeclareClassProperty(const AstRawString* class_name,
+                                  ClassLiteralProperty* property,
+                                  ClassInfo* class_info, bool* ok) {
+  if (class_info->has_seen_constructor && class_info->constructor == nullptr) {
+    class_info->constructor = GetPropertyValue(property)->AsFunctionLiteral();
+    DCHECK_NOT_NULL(class_info->constructor);
+    class_info->constructor->set_raw_name(
+        class_name != nullptr ? class_name
+                              : ast_value_factory()->empty_string());
+    return;
   }
 
-  Expression* extends = nullptr;
-  if (Check(Token::EXTENDS)) {
-    block_state.set_start_position(scanner()->location().end_pos);
-    ExpressionClassifier extends_classifier(this);
-    extends = ParseLeftHandSideExpression(&extends_classifier, CHECK_OK);
-    CheckNoTailCallExpressions(&extends_classifier, CHECK_OK);
-    RewriteNonPattern(&extends_classifier, CHECK_OK);
-    if (classifier != nullptr) {
-      classifier->Accumulate(&extends_classifier,
-                             ExpressionClassifier::ExpressionProductions);
-    }
-  } else {
-    block_state.set_start_position(scanner()->location().end_pos);
-  }
-
-
-  ClassLiteralChecker checker(this);
-  ZoneList<ObjectLiteral::Property*>* properties = NewPropertyList(4, zone());
-  FunctionLiteral* constructor = nullptr;
-  bool has_seen_constructor = false;
-
-  Expect(Token::LBRACE, CHECK_OK);
-
-  const bool has_extends = extends != nullptr;
-  while (peek() != Token::RBRACE) {
-    if (Check(Token::SEMICOLON)) continue;
-    FuncNameInferrer::State fni_state(fni_);
-    const bool in_class = true;
-    bool is_computed_name = false;  // Classes do not care about computed
-                                    // property names here.
-    ExpressionClassifier property_classifier(this);
-    const AstRawString* property_name = nullptr;
-    ObjectLiteral::Property* property = ParsePropertyDefinition(
-        &checker, in_class, has_extends, MethodKind::kNormal, &is_computed_name,
-        &has_seen_constructor, &property_classifier, &property_name, CHECK_OK);
-    RewriteNonPattern(&property_classifier, CHECK_OK);
-    if (classifier != nullptr) {
-      classifier->Accumulate(&property_classifier,
-                             ExpressionClassifier::ExpressionProductions);
-    }
-
-    if (has_seen_constructor && constructor == nullptr) {
-      constructor = GetPropertyValue(property)->AsFunctionLiteral();
-      DCHECK_NOT_NULL(constructor);
-      constructor->set_raw_name(
-          name != nullptr ? name : ast_value_factory()->empty_string());
+  if (property->kind() == ClassLiteralProperty::FIELD) {
+    DCHECK(allow_harmony_class_fields());
+    if (property->is_static()) {
+      if (class_info->static_initializer_var == nullptr) {
+        class_info->static_initializer_var =
+            NewTemporary(ast_value_factory()->empty_string());
+      }
+      // TODO(bakkot) only do this conditionally
+      Expression* function = InstallHomeObject(
+          property->value(),
+          factory()->NewVariableProxy(class_info->static_initializer_var));
+      ZoneList<Expression*>* args =
+          new (zone()) ZoneList<Expression*>(2, zone());
+      args->Add(function, zone());
+      args->Add(factory()->NewVariableProxy(class_info->static_initializer_var),
+                zone());
+      Expression* call = factory()->NewCallRuntime(Runtime::kInlineCall, args,
+                                                   kNoSourcePosition);
+      property->set_value(call);
     } else {
-      properties->Add(property, zone());
-    }
-
-    if (fni_ != nullptr) fni_->Infer();
-
-    if (property_name != ast_value_factory()->constructor_string()) {
-      SetFunctionNameFromPropertyName(property, property_name);
+      // if (is_computed_name) { // TODO(bakkot) figure out why this is
+      // necessary for non-computed names in full-codegen
+      ZoneList<Expression*>* to_name_args =
+          new (zone()) ZoneList<Expression*>(1, zone());
+      to_name_args->Add(property->key(), zone());
+      property->set_key(factory()->NewCallRuntime(
+          Runtime::kToName, to_name_args, kNoSourcePosition));
+      //}
+      const AstRawString* name = ClassFieldVariableName(
+          true, ast_value_factory(),
+          class_info->instance_field_initializers->length());
+      VariableProxy* name_proxy =
+          factory()->NewVariableProxy(name, NORMAL_VARIABLE);
+      Declaration* name_declaration = factory()->NewVariableDeclaration(
+          name_proxy, scope(), kNoSourcePosition);
+      Variable* name_var =
+          Declare(name_declaration, DeclarationDescriptor::NORMAL, CONST,
+                  kNeedsInitialization, ok, scope());
+      DCHECK(*ok);
+      if (!*ok) return;
+      class_info->instance_field_initializers->Add(property->value(), zone());
+      property->set_value(factory()->NewVariableProxy(name_var));
     }
   }
+  class_info->properties->Add(property, zone());
+}
 
-  Expect(Token::RBRACE, CHECK_OK);
+// This method rewrites a class literal into a do-expression.
+// It uses the following fields of class_info:
+//   - constructor (if missing, it updates it with a default constructor)
+//   - proxy
+//   - extends
+//   - static_initializer_var
+//   - instance_field_initializers
+//   - properties
+Expression* Parser::RewriteClassLiteral(const AstRawString* name,
+                                        ClassInfo* class_info, int pos,
+                                        bool* ok) {
   int end_pos = scanner()->location().end_pos;
-
-  if (constructor == nullptr) {
-    constructor = DefaultConstructor(name, has_extends, pos, end_pos,
-                                     block_state.language_mode());
-  }
-
-  // Note that we do not finalize this block scope because it is
-  // used as a sentinel value indicating an anonymous class.
-  block_state.set_end_position(end_pos);
-
-  if (name != nullptr) {
-    DCHECK_NOT_NULL(proxy);
-    proxy->var()->set_initializer_position(end_pos);
-  }
-
   Block* do_block = factory()->NewBlock(nullptr, 1, false, pos);
   Variable* result_var = NewTemporary(ast_value_factory()->empty_string());
-  do_block->set_scope(block_state.FinalizedBlockScope());
   DoExpression* do_expr = factory()->NewDoExpression(do_block, result_var, pos);
 
+  bool has_extends = class_info->extends != nullptr;
+  bool has_instance_fields =
+      class_info->instance_field_initializers->length() > 0;
+  DCHECK(!has_instance_fields || allow_harmony_class_fields());
+  bool has_default_constructor = class_info->constructor == nullptr;
+  if (has_default_constructor) {
+    class_info->constructor =
+        DefaultConstructor(name, has_extends, has_instance_fields, pos, end_pos,
+                           scope()->language_mode());
+  }
+
+  if (has_instance_fields && !has_extends) {
+    class_info->constructor =
+        InsertClassFieldInitializer(class_info->constructor);
+    class_info->constructor->set_requires_class_field_init(true);
+  }  // The derived case is handled by rewriting super calls.
+
+  scope()->set_end_position(end_pos);
+
+  if (name != nullptr) {
+    DCHECK_NOT_NULL(class_info->proxy);
+    class_info->proxy->var()->set_initializer_position(end_pos);
+  }
+
   ClassLiteral* class_literal = factory()->NewClassLiteral(
-      proxy, extends, constructor, properties, pos, end_pos);
+      class_info->proxy, class_info->extends, class_info->constructor,
+      class_info->properties, pos, end_pos);
+
+  if (class_info->static_initializer_var != nullptr) {
+    class_literal->set_static_initializer_proxy(
+        factory()->NewVariableProxy(class_info->static_initializer_var));
+  }
 
   do_block->statements()->Add(
-      factory()->NewExpressionStatement(class_literal, pos), zone());
-  do_expr->set_represented_function(constructor);
-  Rewriter::Rewrite(this, GetClosureScope(), do_expr, ast_value_factory());
+      factory()->NewExpressionStatement(
+          factory()->NewAssignment(Token::ASSIGN,
+                                   factory()->NewVariableProxy(result_var),
+                                   class_literal, kNoSourcePosition),
+          pos),
+      zone());
+  if (allow_harmony_class_fields() &&
+      (has_instance_fields || (has_extends && !has_default_constructor))) {
+    // Default constructors for derived classes without fields will not try to
+    // read this variable, so there's no need to create it.
+    const AstRawString* init_fn_name =
+        ast_value_factory()->dot_class_field_init_string();
+    Variable* init_fn_var = scope()->DeclareLocal(
+        init_fn_name, CONST, kCreatedInitialized, NORMAL_VARIABLE);
+    Expression* initializer =
+        has_instance_fields
+            ? static_cast<Expression*>(SynthesizeClassFieldInitializer(
+                  class_info->instance_field_initializers->length()))
+            : factory()->NewBooleanLiteral(false, kNoSourcePosition);
+    Assignment* assignment = factory()->NewAssignment(
+        Token::INIT, factory()->NewVariableProxy(init_fn_var), initializer,
+        kNoSourcePosition);
+    do_block->statements()->Add(
+        factory()->NewExpressionStatement(assignment, kNoSourcePosition),
+        zone());
+  }
+  for (int i = 0; i < class_info->instance_field_initializers->length(); ++i) {
+    const AstRawString* function_name =
+        ClassFieldVariableName(false, ast_value_factory(), i);
+    VariableProxy* function_proxy =
+        factory()->NewVariableProxy(function_name, NORMAL_VARIABLE);
+    Declaration* function_declaration = factory()->NewVariableDeclaration(
+        function_proxy, scope(), kNoSourcePosition);
+    Variable* function_var =
+        Declare(function_declaration, DeclarationDescriptor::NORMAL, CONST,
+                kNeedsInitialization, ok, scope());
+    if (!*ok) return nullptr;
+    Property* prototype_property = factory()->NewProperty(
+        factory()->NewVariableProxy(result_var),
+        factory()->NewStringLiteral(ast_value_factory()->prototype_string(),
+                                    kNoSourcePosition),
+        kNoSourcePosition);
+    Expression* function_value = InstallHomeObject(
+        class_info->instance_field_initializers->at(i),
+        prototype_property);  // TODO(bakkot) ideally this would be conditional,
+                              // especially in trivial cases
+    Assignment* function_assignment = factory()->NewAssignment(
+        Token::INIT, factory()->NewVariableProxy(function_var), function_value,
+        kNoSourcePosition);
+    do_block->statements()->Add(factory()->NewExpressionStatement(
+                                    function_assignment, kNoSourcePosition),
+                                zone());
+  }
+  do_block->set_scope(scope()->FinalizeBlockScope());
+  do_expr->set_represented_function(class_info->constructor);
 
   return do_expr;
 }
 
-
-Expression* Parser::ParseV8Intrinsic(bool* ok) {
-  // CallRuntime ::
-  //   '%' Identifier Arguments
-
-  int pos = peek_position();
-  Expect(Token::MOD, CHECK_OK);
-  // Allow "eval" or "arguments" for backward compatibility.
-  const AstRawString* name = ParseIdentifier(kAllowRestrictedIdentifiers,
-                                             CHECK_OK);
-  Scanner::Location spread_pos;
-  ExpressionClassifier classifier(this);
-  ZoneList<Expression*>* args =
-      ParseArguments(&spread_pos, &classifier, CHECK_OK);
-
-  DCHECK(!spread_pos.IsValid());
-
-  if (extension_ != NULL) {
-    // The extension structures are only accessible while parsing the
-    // very first time not when reparsing because of lazy compilation.
-    GetClosureScope()->ForceEagerCompilation();
-  }
-
-  const Runtime::Function* function = Runtime::FunctionForName(name->string());
-
-  if (function != NULL) {
-    // Check for possible name clash.
-    DCHECK_EQ(Context::kNotFound,
-              Context::IntrinsicIndexForName(name->string()));
-    // Check for built-in IS_VAR macro.
-    if (function->function_id == Runtime::kIS_VAR) {
-      DCHECK_EQ(Runtime::RUNTIME, function->intrinsic_type);
-      // %IS_VAR(x) evaluates to x if x is a variable,
-      // leads to a parse error otherwise.  Could be implemented as an
-      // inline function %_IS_VAR(x) to eliminate this special case.
-      if (args->length() == 1 && args->at(0)->AsVariableProxy() != NULL) {
-        return args->at(0);
-      } else {
-        ReportMessage(MessageTemplate::kNotIsvar);
-        *ok = false;
-        return NULL;
-      }
-    }
-
-    // Check that the expected number of arguments are being passed.
-    if (function->nargs != -1 && function->nargs != args->length()) {
-      ReportMessage(MessageTemplate::kRuntimeWrongNumArgs);
-      *ok = false;
-      return NULL;
-    }
-
-    return factory()->NewCallRuntime(function, args, pos);
-  }
-
-  int context_index = Context::IntrinsicIndexForName(name->string());
-
-  // Check that the function is defined.
-  if (context_index == Context::kNotFound) {
-    ReportMessage(MessageTemplate::kNotDefined, name);
-    *ok = false;
-    return NULL;
-  }
-
-  return factory()->NewCallRuntime(context_index, args, pos);
-}
-
-
 Literal* Parser::GetLiteralUndefined(int position) {
   return factory()->NewUndefinedLiteral(position);
 }
@@ -5045,100 +3688,22 @@
   }
 }
 
-void Parser::InsertSloppyBlockFunctionVarBindings(DeclarationScope* scope,
-                                                  Scope* complex_params_scope,
-                                                  bool* ok) {
-  // For each variable which is used as a function declaration in a sloppy
-  // block,
-  SloppyBlockFunctionMap* map = scope->sloppy_block_function_map();
-  for (ZoneHashMap::Entry* p = map->Start(); p != nullptr; p = map->Next(p)) {
-    AstRawString* name = static_cast<AstRawString*>(p->key);
-
-    // If the variable wouldn't conflict with a lexical declaration
-    // or parameter,
-
-    // Check if there's a conflict with a parameter.
-    // This depends on the fact that functions always have a scope solely to
-    // hold complex parameters, and the names local to that scope are
-    // precisely the names of the parameters. IsDeclaredParameter(name) does
-    // not hold for names declared by complex parameters, nor are those
-    // bindings necessarily declared lexically, so we have to check for them
-    // explicitly. On the other hand, if there are not complex parameters,
-    // it is sufficient to just check IsDeclaredParameter.
-    if (complex_params_scope != nullptr) {
-      if (complex_params_scope->LookupLocal(name) != nullptr) {
-        continue;
-      }
-    } else {
-      if (scope->IsDeclaredParameter(name)) {
-        continue;
-      }
-    }
-
-    bool var_created = false;
-
-    // Write in assignments to var for each block-scoped function declaration
-    auto delegates = static_cast<SloppyBlockFunctionStatement*>(p->value);
-
-    DeclarationScope* decl_scope = scope;
-    while (decl_scope->is_eval_scope()) {
-      decl_scope = decl_scope->outer_scope()->GetDeclarationScope();
-    }
-    Scope* outer_scope = decl_scope->outer_scope();
-
-    for (SloppyBlockFunctionStatement* delegate = delegates;
-         delegate != nullptr; delegate = delegate->next()) {
-      // Check if there's a conflict with a lexical declaration
-      Scope* query_scope = delegate->scope()->outer_scope();
-      Variable* var = nullptr;
-      bool should_hoist = true;
-
-      // Note that we perform this loop for each delegate named 'name',
-      // which may duplicate work if those delegates share scopes.
-      // It is not sufficient to just do a Lookup on query_scope: for
-      // example, that does not prevent hoisting of the function in
-      // `{ let e; try {} catch (e) { function e(){} } }`
-      do {
-        var = query_scope->LookupLocal(name);
-        if (var != nullptr && IsLexicalVariableMode(var->mode())) {
-          should_hoist = false;
-          break;
-        }
-        query_scope = query_scope->outer_scope();
-      } while (query_scope != outer_scope);
-
-      if (!should_hoist) continue;
-
-      // Declare a var-style binding for the function in the outer scope
-      if (!var_created) {
-        var_created = true;
-        VariableProxy* proxy = scope->NewUnresolved(factory(), name);
-        Declaration* declaration =
-            factory()->NewVariableDeclaration(proxy, scope, kNoSourcePosition);
-        Declare(declaration, DeclarationDescriptor::NORMAL, VAR,
-                DefaultInitializationFlag(VAR), ok, scope);
-        DCHECK(ok);  // Based on the preceding check, this should not fail
-        if (!ok) return;
-      }
-
-      // Read from the local lexical scope and write to the function scope
-      VariableProxy* to = scope->NewUnresolved(factory(), name);
-      VariableProxy* from = delegate->scope()->NewUnresolved(factory(), name);
-      Expression* assignment =
-          factory()->NewAssignment(Token::ASSIGN, to, from, kNoSourcePosition);
-      Statement* statement =
-          factory()->NewExpressionStatement(assignment, kNoSourcePosition);
-      delegate->set_statement(statement);
-    }
+void Parser::InsertSloppyBlockFunctionVarBindings(DeclarationScope* scope) {
+  // For the outermost eval scope, we cannot hoist during parsing: let
+  // declarations in the surrounding scope may prevent hoisting, but the
+  // information is unaccessible during parsing. In this case, we hoist later in
+  // DeclarationScope::Analyze.
+  if (scope->is_eval_scope() && scope->outer_scope() == original_scope_) {
+    return;
   }
+  scope->HoistSloppyBlockFunctions(factory());
 }
 
-
 // ----------------------------------------------------------------------------
 // Parser support
 
 bool Parser::TargetStackContainsLabel(const AstRawString* label) {
-  for (Target* t = target_stack_; t != NULL; t = t->previous()) {
+  for (ParserTarget* t = target_stack_; t != NULL; t = t->previous()) {
     if (ContainsLabel(t->statement()->labels(), label)) return true;
   }
   return false;
@@ -5148,7 +3713,7 @@
 BreakableStatement* Parser::LookupBreakTarget(const AstRawString* label,
                                               bool* ok) {
   bool anonymous = label == NULL;
-  for (Target* t = target_stack_; t != NULL; t = t->previous()) {
+  for (ParserTarget* t = target_stack_; t != NULL; t = t->previous()) {
     BreakableStatement* stat = t->statement();
     if ((anonymous && stat->is_target_for_anonymous()) ||
         (!anonymous && ContainsLabel(stat->labels(), label))) {
@@ -5162,7 +3727,7 @@
 IterationStatement* Parser::LookupContinueTarget(const AstRawString* label,
                                                  bool* ok) {
   bool anonymous = label == NULL;
-  for (Target* t = target_stack_; t != NULL; t = t->previous()) {
+  for (ParserTarget* t = target_stack_; t != NULL; t = t->previous()) {
     IterationStatement* stat = t->statement()->AsIterationStatement();
     if (stat == NULL) continue;
 
@@ -5188,7 +3753,7 @@
 
 
 void Parser::Internalize(Isolate* isolate, Handle<Script> script, bool error) {
-  // Internalize strings.
+  // Internalize strings and values.
   ast_value_factory()->Internalize(isolate);
 
   // Error processing.
@@ -5240,12 +3805,6 @@
   DCHECK(parsing_on_main_thread_);
   Isolate* isolate = info->isolate();
   pre_parse_timer_ = isolate->counters()->pre_parse();
-  if (FLAG_trace_parse || allow_natives() || extension_ != NULL) {
-    // If intrinsics are allowed, the Parser cannot operate independent of the
-    // V8 heap because of Runtime. Tell the string table to internalize strings
-    // and values right after they're created.
-    ast_value_factory()->Internalize(isolate);
-  }
 
   if (info->is_lazy()) {
     DCHECK(!info->is_eval());
@@ -5261,7 +3820,6 @@
   info->set_literal(result);
 
   Internalize(isolate, info->script(), result == NULL);
-  DCHECK(ast_value_factory()->IsInternalized());
   return (result != NULL);
 }
 
@@ -5282,11 +3840,11 @@
     stream_ptr = info->character_stream();
   } else {
     DCHECK(info->character_stream() == nullptr);
-    stream.reset(new ExternalStreamingStream(info->source_stream(),
-                                             info->source_stream_encoding()));
+    stream.reset(ScannerStream::For(info->source_stream(),
+                                    info->source_stream_encoding()));
     stream_ptr = stream.get();
   }
-  DCHECK(info->context().is_null() || info->context()->IsNativeContext());
+  DCHECK(info->maybe_outer_scope_info().is_null());
 
   DCHECK(original_scope_);
 
@@ -5431,11 +3989,9 @@
   return running_hash;
 }
 
-
-ZoneList<v8::internal::Expression*>* Parser::PrepareSpreadArguments(
-    ZoneList<v8::internal::Expression*>* list) {
-  ZoneList<v8::internal::Expression*>* args =
-      new (zone()) ZoneList<v8::internal::Expression*>(1, zone());
+ZoneList<Expression*>* Parser::PrepareSpreadArguments(
+    ZoneList<Expression*>* list) {
+  ZoneList<Expression*>* args = new (zone()) ZoneList<Expression*>(1, zone());
   if (list->length() == 1) {
     // Spread-call with single spread argument produces an InternalArray
     // containing the values from the array.
@@ -5462,8 +4018,8 @@
     int n = list->length();
     while (i < n) {
       if (!list->at(i)->IsSpread()) {
-        ZoneList<v8::internal::Expression*>* unspread =
-            new (zone()) ZoneList<v8::internal::Expression*>(1, zone());
+        ZoneList<Expression*>* unspread =
+            new (zone()) ZoneList<Expression*>(1, zone());
 
         // Push array of unspread parameters
         while (i < n && !list->at(i)->IsSpread()) {
@@ -5478,15 +4034,15 @@
       }
 
       // Push eagerly spread argument
-      ZoneList<v8::internal::Expression*>* spread_list =
-          new (zone()) ZoneList<v8::internal::Expression*>(1, zone());
+      ZoneList<Expression*>* spread_list =
+          new (zone()) ZoneList<Expression*>(1, zone());
       spread_list->Add(list->at(i++)->AsSpread()->expression(), zone());
       args->Add(factory()->NewCallRuntime(Context::SPREAD_ITERABLE_INDEX,
                                           spread_list, kNoSourcePosition),
                 zone());
     }
 
-    list = new (zone()) ZoneList<v8::internal::Expression*>(1, zone());
+    list = new (zone()) ZoneList<Expression*>(1, zone());
     list->Add(factory()->NewCallRuntime(Context::SPREAD_ARGUMENTS_INDEX, args,
                                         kNoSourcePosition),
               zone());
@@ -5495,10 +4051,8 @@
   UNREACHABLE();
 }
 
-
 Expression* Parser::SpreadCall(Expression* function,
-                               ZoneList<v8::internal::Expression*>* args,
-                               int pos) {
+                               ZoneList<Expression*>* args, int pos) {
   if (function->IsSuperCallReference()) {
     // Super calls
     // $super_constructor = %_GetSuperConstructor(<this-function>)
@@ -5540,10 +4094,8 @@
   }
 }
 
-
 Expression* Parser::SpreadCallNew(Expression* function,
-                                  ZoneList<v8::internal::Expression*>* args,
-                                  int pos) {
+                                  ZoneList<Expression*>* args, int pos) {
   args->InsertAt(0, function, zone());
 
   return factory()->NewCallRuntime(Context::REFLECT_CONSTRUCT_INDEX, args, pos);
@@ -5562,90 +4114,141 @@
   scope->SetLanguageMode(mode);
 }
 
-
-void Parser::RaiseLanguageMode(LanguageMode mode) {
-  LanguageMode old = scope()->language_mode();
-  SetLanguageMode(scope(), old > mode ? old : mode);
+void Parser::SetAsmModule() {
+  // Store the usage count; The actual use counter on the isolate is
+  // incremented after parsing is done.
+  ++use_counts_[v8::Isolate::kUseAsm];
+  DCHECK(scope()->is_declaration_scope());
+  scope()->AsDeclarationScope()->set_asm_module();
 }
 
 void Parser::MarkCollectedTailCallExpressions() {
   const ZoneList<Expression*>& tail_call_expressions =
       function_state_->tail_call_expressions().expressions();
   for (int i = 0; i < tail_call_expressions.length(); ++i) {
-    Expression* expression = tail_call_expressions[i];
-    // If only FLAG_harmony_explicit_tailcalls is enabled then expression
-    // must be a Call expression.
-    DCHECK(FLAG_harmony_tailcalls || !FLAG_harmony_explicit_tailcalls ||
-           expression->IsCall());
-    MarkTailPosition(expression);
+    MarkTailPosition(tail_call_expressions[i]);
   }
 }
 
-Expression* ParserBaseTraits<Parser>::ExpressionListToExpression(
-    ZoneList<Expression*>* args) {
-  AstNodeFactory* factory = delegate()->factory();
+Expression* Parser::ExpressionListToExpression(ZoneList<Expression*>* args) {
   Expression* expr = args->at(0);
   for (int i = 1; i < args->length(); ++i) {
-    expr = factory->NewBinaryOperation(Token::COMMA, expr, args->at(i),
-                                       expr->position());
+    expr = factory()->NewBinaryOperation(Token::COMMA, expr, args->at(i),
+                                         expr->position());
   }
   return expr;
 }
 
+// This method intoduces the line initializing the generator object
+// when desugaring the body of async_function.
+void Parser::PrepareAsyncFunctionBody(ZoneList<Statement*>* body,
+                                      FunctionKind kind, int pos) {
+  // function async_function() {
+  //   .generator_object = %CreateGeneratorObject();
+  //   BuildRejectPromiseOnException({
+  //     ... block ...
+  //     return %ResolvePromise(.promise, expr), .promise;
+  //   })
+  // }
+
+  Variable* temp =
+      NewTemporary(ast_value_factory()->dot_generator_object_string());
+  function_state_->set_generator_object_variable(temp);
+
+  Expression* init_generator_variable = factory()->NewAssignment(
+      Token::INIT, factory()->NewVariableProxy(temp),
+      BuildCreateJSGeneratorObject(pos, kind), kNoSourcePosition);
+  body->Add(factory()->NewExpressionStatement(init_generator_variable,
+                                              kNoSourcePosition),
+            zone());
+}
+
+// This method completes the desugaring of the body of async_function.
+void Parser::RewriteAsyncFunctionBody(ZoneList<Statement*>* body, Block* block,
+                                      Expression* return_value, bool* ok) {
+  // function async_function() {
+  //   .generator_object = %CreateGeneratorObject();
+  //   BuildRejectPromiseOnException({
+  //     ... block ...
+  //     return %ResolvePromise(.promise, expr), .promise;
+  //   })
+  // }
+
+  return_value = BuildResolvePromise(return_value, return_value->position());
+  block->statements()->Add(
+      factory()->NewReturnStatement(return_value, return_value->position()),
+      zone());
+  block = BuildRejectPromiseOnException(block, CHECK_OK_VOID);
+  body->Add(block, zone());
+}
+
 Expression* Parser::RewriteAwaitExpression(Expression* value, int await_pos) {
-  // yield %AsyncFunctionAwait(.generator_object, <operand>)
+  // yield do {
+  //   tmp = <operand>;
+  //   %AsyncFunctionAwait(.generator_object, tmp, .promise);
+  //   .promise
+  // }
+  // The value of the expression is returned to the caller of the async
+  // function for the first yield statement; for this, .promise is the
+  // appropriate return value, being a Promise that will be fulfilled or
+  // rejected with the appropriate value by the desugaring. Subsequent yield
+  // occurrences will return to the AsyncFunctionNext call within the
+  // implemementation of the intermediate throwaway Promise's then handler.
+  // This handler has nothing useful to do with the value, as the Promise is
+  // ignored. If we yielded the value of the throwawayPromise that
+  // AsyncFunctionAwait creates as an intermediate, it would create a memory
+  // leak; we must return .promise instead;
+  // The operand needs to be evaluated on a separate statement in order to get
+  // a break location, and the .promise needs to be read earlier so that it
+  // doesn't insert a false location.
+  // TODO(littledan): investigate why this ordering is needed in more detail.
   Variable* generator_object_variable =
-      delegate()->function_state_->generator_object_variable();
+      function_state_->generator_object_variable();
 
   // If generator_object_variable is null,
+  // TODO(littledan): Is this necessary?
   if (!generator_object_variable) return value;
 
-  auto factory = delegate()->factory();
   const int nopos = kNoSourcePosition;
 
-  Variable* temp_var =
-      delegate()->NewTemporary(delegate()->ast_value_factory()->empty_string());
-  VariableProxy* temp_proxy = factory->NewVariableProxy(temp_var);
-  Block* do_block = factory->NewBlock(nullptr, 2, false, nopos);
+  Block* do_block = factory()->NewBlock(nullptr, 2, false, nopos);
+
+  Variable* promise = PromiseVariable();
 
   // Wrap value evaluation to provide a break location.
-  Expression* value_assignment =
-      factory->NewAssignment(Token::ASSIGN, temp_proxy, value, nopos);
+  Variable* temp_var = NewTemporary(ast_value_factory()->empty_string());
+  Expression* value_assignment = factory()->NewAssignment(
+      Token::ASSIGN, factory()->NewVariableProxy(temp_var), value, nopos);
   do_block->statements()->Add(
-      factory->NewExpressionStatement(value_assignment, value->position()),
+      factory()->NewExpressionStatement(value_assignment, value->position()),
       zone());
 
   ZoneList<Expression*>* async_function_await_args =
-      new (zone()) ZoneList<Expression*>(2, zone());
+      new (zone()) ZoneList<Expression*>(3, zone());
   Expression* generator_object =
-      factory->NewVariableProxy(generator_object_variable);
+      factory()->NewVariableProxy(generator_object_variable);
   async_function_await_args->Add(generator_object, zone());
-  async_function_await_args->Add(temp_proxy, zone());
-  Expression* async_function_await = delegate()->factory()->NewCallRuntime(
-      Context::ASYNC_FUNCTION_AWAIT_INDEX, async_function_await_args, nopos);
-  // Wrap await to provide a break location between value evaluation and yield.
-  Expression* await_assignment = factory->NewAssignment(
-      Token::ASSIGN, temp_proxy, async_function_await, nopos);
+  async_function_await_args->Add(factory()->NewVariableProxy(temp_var), zone());
+  async_function_await_args->Add(factory()->NewVariableProxy(promise), zone());
+
+  // The parser emits calls to AsyncFunctionAwaitCaught, but the
+  // AstNumberingVisitor will rewrite this to AsyncFunctionAwaitUncaught
+  // if there is no local enclosing try/catch block.
+  Expression* async_function_await =
+      factory()->NewCallRuntime(Context::ASYNC_FUNCTION_AWAIT_CAUGHT_INDEX,
+                                async_function_await_args, nopos);
   do_block->statements()->Add(
-      factory->NewExpressionStatement(await_assignment, await_pos), zone());
-  Expression* do_expr = factory->NewDoExpression(do_block, temp_var, nopos);
+      factory()->NewExpressionStatement(async_function_await, await_pos),
+      zone());
 
-  generator_object = factory->NewVariableProxy(generator_object_variable);
-  return factory->NewYield(generator_object, do_expr, nopos,
-                           Yield::kOnExceptionRethrow);
+  // Wrap await to provide a break location between value evaluation and yield.
+  Expression* do_expr = factory()->NewDoExpression(do_block, promise, nopos);
+
+  generator_object = factory()->NewVariableProxy(generator_object_variable);
+  return factory()->NewYield(generator_object, do_expr, nopos,
+                             Yield::kOnExceptionRethrow);
 }
 
-ZoneList<Expression*>* ParserBaseTraits<Parser>::GetNonPatternList() const {
-  return delegate()->function_state_->non_patterns_to_rewrite();
-}
-
-ZoneList<typename ParserBaseTraits<Parser>::Type::ExpressionClassifier::Error>*
-ParserBaseTraits<Parser>::GetReportedErrorList() const {
-  return delegate()->function_state_->GetReportedErrorList();
-}
-
-Zone* ParserBaseTraits<Parser>::zone() const { return delegate()->zone(); }
-
 class NonPatternRewriter : public AstExpressionRewriter {
  public:
   NonPatternRewriter(uintptr_t stack_limit, Parser* parser)
@@ -5674,7 +4277,7 @@
     return false;
   }
 
-  void VisitObjectLiteralProperty(ObjectLiteralProperty* property) override {
+  void VisitLiteralProperty(LiteralProperty* property) override {
     if (property == nullptr) return;
     // Do not rewrite (computed) key expressions
     AST_REWRITE_PROPERTY(Expression, property, value);
@@ -5683,11 +4286,10 @@
   Parser* parser_;
 };
 
-
-void Parser::RewriteNonPattern(ExpressionClassifier* classifier, bool* ok) {
-  ValidateExpression(classifier, CHECK_OK_VOID);
+void Parser::RewriteNonPattern(bool* ok) {
+  ValidateExpression(CHECK_OK_VOID);
   auto non_patterns_to_rewrite = function_state_->non_patterns_to_rewrite();
-  int begin = classifier->GetNonPatternBegin();
+  int begin = classifier()->GetNonPatternBegin();
   int end = non_patterns_to_rewrite->length();
   if (begin < end) {
     NonPatternRewriter rewriter(stack_limit_, this);
@@ -5711,8 +4313,11 @@
         pair.assignment->AsRewritableExpression();
     DCHECK_NOT_NULL(to_rewrite);
     if (!to_rewrite->is_rewritten()) {
-      PatternRewriter::RewriteDestructuringAssignment(this, to_rewrite,
-                                                      pair.scope);
+      // Since this function is called at the end of parsing the program,
+      // pair.scope may already have been removed by FinalizeBlockScope in the
+      // meantime.
+      Scope* scope = pair.scope->GetUnremovedScope();
+      PatternRewriter::RewriteDestructuringAssignment(this, to_rewrite, scope);
     }
   }
 }
@@ -5733,8 +4338,8 @@
 
     Expression* result;
     DCHECK_NOT_NULL(lhs->raw_name());
-    result = this->ExpressionFromIdentifier(lhs->raw_name(), lhs->position(),
-                                            lhs->end_position());
+    result = ExpressionFromIdentifier(lhs->raw_name(), lhs->position(),
+                                      lhs->end_position());
     args->Add(left, zone());
     args->Add(right, zone());
     Expression* call =
@@ -5807,8 +4412,7 @@
       // ++($R.length)
       if (!value->IsLiteral() ||
           !value->AsLiteral()->raw_value()->IsTheHole()) {
-        ZoneList<Expression*>* append_element_args =
-            NewExpressionList(2, zone());
+        ZoneList<Expression*>* append_element_args = NewExpressionList(2);
         append_element_args->Add(factory()->NewVariableProxy(result), zone());
         append_element_args->Add(value, zone());
         do_block->statements()->Add(
@@ -5837,8 +4441,7 @@
       // %AppendElement($R, each)
       Statement* append_body;
       {
-        ZoneList<Expression*>* append_element_args =
-            NewExpressionList(2, zone());
+        ZoneList<Expression*>* append_element_args = NewExpressionList(2);
         append_element_args->Add(factory()->NewVariableProxy(result), zone());
         append_element_args->Add(factory()->NewVariableProxy(each), zone());
         append_body = factory()->NewExpressionStatement(
@@ -5865,7 +4468,7 @@
 void Parser::QueueDestructuringAssignmentForRewriting(Expression* expr) {
   DCHECK(expr->IsRewritableExpression());
   function_state_->AddDestructuringAssignment(
-      DestructuringAssignment(expr, delegate()->scope()));
+      DestructuringAssignment(expr, scope()));
 }
 
 void Parser::QueueNonPatternForRewriting(Expression* expr, bool* ok) {
@@ -5873,43 +4476,38 @@
   function_state_->AddNonPatternForRewriting(expr, ok);
 }
 
-void ParserBaseTraits<Parser>::SetFunctionNameFromPropertyName(
-    ObjectLiteralProperty* property, const AstRawString* name) {
-  Expression* value = property->value();
+void Parser::AddAccessorPrefixToFunctionName(bool is_get,
+                                             FunctionLiteral* function,
+                                             const AstRawString* name) {
+  DCHECK_NOT_NULL(name);
+  const AstRawString* prefix = is_get ? ast_value_factory()->get_space_string()
+                                      : ast_value_factory()->set_space_string();
+  function->set_raw_name(ast_value_factory()->NewConsString(prefix, name));
+}
+
+void Parser::SetFunctionNameFromPropertyName(ObjectLiteralProperty* property,
+                                             const AstRawString* name) {
+  DCHECK(property->kind() != ObjectLiteralProperty::GETTER);
+  DCHECK(property->kind() != ObjectLiteralProperty::SETTER);
 
   // Computed name setting must happen at runtime.
-  if (property->is_computed_name()) return;
-
-  // Getter and setter names are handled here because their names
-  // change in ES2015, even though they are not anonymous.
-  auto function = value->AsFunctionLiteral();
-  if (function != nullptr) {
-    bool is_getter = property->kind() == ObjectLiteralProperty::GETTER;
-    bool is_setter = property->kind() == ObjectLiteralProperty::SETTER;
-    if (is_getter || is_setter) {
-      DCHECK_NOT_NULL(name);
-      const AstRawString* prefix =
-          is_getter ? delegate()->ast_value_factory()->get_space_string()
-                    : delegate()->ast_value_factory()->set_space_string();
-      function->set_raw_name(
-          delegate()->ast_value_factory()->NewConsString(prefix, name));
-      return;
-    }
-  }
+  DCHECK(!property->is_computed_name());
 
   // Ignore "__proto__" as a name when it's being used to set the [[Prototype]]
   // of an object literal.
   if (property->kind() == ObjectLiteralProperty::PROTOTYPE) return;
 
+  Expression* value = property->value();
+
   DCHECK(!value->IsAnonymousFunctionDefinition() ||
          property->kind() == ObjectLiteralProperty::COMPUTED);
-  delegate()->SetFunctionName(value, name);
+  SetFunctionName(value, name);
 }
 
-void ParserBaseTraits<Parser>::SetFunctionNameFromIdentifierRef(
-    Expression* value, Expression* identifier) {
+void Parser::SetFunctionNameFromIdentifierRef(Expression* value,
+                                              Expression* identifier) {
   if (!identifier->IsVariableProxy()) return;
-  delegate()->SetFunctionName(value, identifier->AsVariableProxy()->raw_name());
+  SetFunctionName(value, identifier->AsVariableProxy()->raw_name());
 }
 
 void Parser::SetFunctionName(Expression* value, const AstRawString* name) {
@@ -6050,7 +4648,7 @@
   Variable* var_iterator = NewTemporary(ast_value_factory()->empty_string());
   Statement* get_iterator;
   {
-    Expression* iterator = GetIterator(iterable, factory(), nopos);
+    Expression* iterator = GetIterator(iterable, nopos);
     Expression* iterator_proxy = factory()->NewVariableProxy(var_iterator);
     Expression* assignment = factory()->NewAssignment(
         Token::ASSIGN, iterator_proxy, iterator, nopos);
@@ -6155,7 +4753,7 @@
 
     Block* then = factory()->NewBlock(nullptr, 4 + 1, false, nopos);
     BuildIteratorCloseForCompletion(
-        then->statements(), var_iterator,
+        scope(), then->statements(), var_iterator,
         factory()->NewSmiLiteral(Parser::kNormalCompletion, nopos));
     then->statements()->Add(throw_call, zone());
     check_throw = factory()->NewIfStatement(
@@ -6259,7 +4857,7 @@
   // input = function.sent;
   Statement* get_input;
   {
-    Expression* function_sent = FunctionSentExpression(factory(), nopos);
+    Expression* function_sent = FunctionSentExpression(nopos);
     Expression* input_proxy = factory()->NewVariableProxy(var_input);
     Expression* assignment = factory()->NewAssignment(
         Token::ASSIGN, input_proxy, function_sent, nopos);
@@ -6313,9 +4911,8 @@
     Scope* catch_scope = NewScope(CATCH_SCOPE);
     catch_scope->set_is_hidden();
     const AstRawString* name = ast_value_factory()->dot_catch_string();
-    Variable* catch_variable =
-        catch_scope->DeclareLocal(name, VAR, kCreatedInitialized,
-                                               Variable::NORMAL);
+    Variable* catch_variable = catch_scope->DeclareLocal(
+        name, VAR, kCreatedInitialized, NORMAL_VARIABLE);
 
     try_catch = factory()->NewTryCatchStatementForDesugaring(
         try_block, catch_scope, catch_variable, catch_block, nopos);
@@ -6524,9 +5121,9 @@
   statements->Add(validate_output, zone());
 }
 
-void Parser::FinalizeIteratorUse(Variable* completion, Expression* condition,
-                                 Variable* iter, Block* iterator_use,
-                                 Block* target) {
+void Parser::FinalizeIteratorUse(Scope* use_scope, Variable* completion,
+                                 Expression* condition, Variable* iter,
+                                 Block* iterator_use, Block* target) {
   //
   // This function adds two statements to [target], corresponding to the
   // following code:
@@ -6582,7 +5179,8 @@
   {
     Block* block = factory()->NewBlock(nullptr, 2, true, nopos);
     Expression* proxy = factory()->NewVariableProxy(completion);
-    BuildIteratorCloseForCompletion(block->statements(), iter, proxy);
+    BuildIteratorCloseForCompletion(use_scope, block->statements(), iter,
+                                    proxy);
     DCHECK(block->statements()->length() == 2);
 
     maybe_close = factory()->NewBlock(nullptr, 1, true, nopos);
@@ -6599,10 +5197,10 @@
   // }
   Statement* try_catch;
   {
-    Scope* catch_scope = NewScopeWithParent(scope(), CATCH_SCOPE);
+    Scope* catch_scope = NewScopeWithParent(use_scope, CATCH_SCOPE);
     Variable* catch_variable =
         catch_scope->DeclareLocal(ast_value_factory()->dot_catch_string(), VAR,
-                                  kCreatedInitialized, Variable::NORMAL);
+                                  kCreatedInitialized, NORMAL_VARIABLE);
     catch_scope->set_is_hidden();
 
     Statement* rethrow;
@@ -6639,7 +5237,8 @@
   target->statements()->Add(try_finally, zone());
 }
 
-void Parser::BuildIteratorCloseForCompletion(ZoneList<Statement*>* statements,
+void Parser::BuildIteratorCloseForCompletion(Scope* scope,
+                                             ZoneList<Statement*>* statements,
                                              Variable* iterator,
                                              Expression* completion) {
   //
@@ -6705,10 +5304,10 @@
 
     Block* catch_block = factory()->NewBlock(nullptr, 0, false, nopos);
 
-    Scope* catch_scope = NewScope(CATCH_SCOPE);
+    Scope* catch_scope = NewScopeWithParent(scope, CATCH_SCOPE);
     Variable* catch_variable =
         catch_scope->DeclareLocal(ast_value_factory()->dot_catch_string(), VAR,
-                                  kCreatedInitialized, Variable::NORMAL);
+                                  kCreatedInitialized, NORMAL_VARIABLE);
     catch_scope->set_is_hidden();
 
     try_call_return = factory()->NewTryCatchStatement(
@@ -6842,20 +5441,18 @@
     Block* try_block = factory()->NewBlock(nullptr, 1, false, nopos);
     try_block->statements()->Add(loop, zone());
 
-    FinalizeIteratorUse(var_completion, closing_condition, loop->iterator(),
-                        try_block, final_loop);
+    // The scope in which the parser creates this loop.
+    Scope* loop_scope = scope()->outer_scope();
+    DCHECK_EQ(loop_scope->scope_type(), BLOCK_SCOPE);
+    DCHECK_EQ(scope()->scope_type(), BLOCK_SCOPE);
+
+    FinalizeIteratorUse(loop_scope, var_completion, closing_condition,
+                        loop->iterator(), try_block, final_loop);
   }
 
   return final_loop;
 }
 
-#ifdef DEBUG
-void Parser::Print(AstNode* node) {
-  ast_value_factory()->Internalize(Isolate::Current());
-  node->Print(Isolate::Current());
-}
-#endif  // DEBUG
-
 #undef CHECK_OK
 #undef CHECK_OK_VOID
 #undef CHECK_FAILED
diff --git a/src/parsing/parser.h b/src/parsing/parser.h
index b069f9a..418bedf 100644
--- a/src/parsing/parser.h
+++ b/src/parsing/parser.h
@@ -21,7 +21,8 @@
 
 class ParseInfo;
 class ScriptData;
-class Target;
+class ParserTarget;
+class ParserTargetScope;
 
 class FunctionEntry BASE_EMBEDDED {
  public:
@@ -138,239 +139,33 @@
 };
 
 template <>
-class ParserBaseTraits<Parser> {
- public:
-  typedef ParserBaseTraits<Parser> ParserTraits;
+struct ParserTypes<Parser> {
+  typedef ParserBase<Parser> Base;
+  typedef Parser Impl;
 
-  struct Type {
-    typedef Variable GeneratorVariable;
+  typedef v8::internal::Variable Variable;
 
-    typedef v8::internal::AstProperties AstProperties;
+  // Return types for traversing functions.
+  typedef const AstRawString* Identifier;
+  typedef v8::internal::Expression* Expression;
+  typedef v8::internal::FunctionLiteral* FunctionLiteral;
+  typedef ObjectLiteral::Property* ObjectLiteralProperty;
+  typedef ClassLiteral::Property* ClassLiteralProperty;
+  typedef ZoneList<v8::internal::Expression*>* ExpressionList;
+  typedef ZoneList<ObjectLiteral::Property*>* ObjectPropertyList;
+  typedef ZoneList<ClassLiteral::Property*>* ClassPropertyList;
+  typedef ParserFormalParameters FormalParameters;
+  typedef v8::internal::Statement* Statement;
+  typedef ZoneList<v8::internal::Statement*>* StatementList;
+  typedef v8::internal::Block* Block;
+  typedef v8::internal::BreakableStatement* BreakableStatement;
+  typedef v8::internal::IterationStatement* IterationStatement;
 
-    typedef v8::internal::ExpressionClassifier<ParserTraits>
-        ExpressionClassifier;
+  // For constructing objects returned by the traversing functions.
+  typedef AstNodeFactory Factory;
 
-    // Return types for traversing functions.
-    typedef const AstRawString* Identifier;
-    typedef v8::internal::Expression* Expression;
-    typedef Yield* YieldExpression;
-    typedef v8::internal::FunctionLiteral* FunctionLiteral;
-    typedef v8::internal::ClassLiteral* ClassLiteral;
-    typedef v8::internal::Literal* Literal;
-    typedef ObjectLiteral::Property* ObjectLiteralProperty;
-    typedef ZoneList<v8::internal::Expression*>* ExpressionList;
-    typedef ZoneList<ObjectLiteral::Property*>* PropertyList;
-    typedef ParserFormalParameters::Parameter FormalParameter;
-    typedef ParserFormalParameters FormalParameters;
-    typedef ZoneList<v8::internal::Statement*>* StatementList;
-
-    // For constructing objects returned by the traversing functions.
-    typedef AstNodeFactory Factory;
-  };
-
-  // TODO(nikolaos): The traits methods should not need to call methods
-  // of the implementation object.
-  Parser* delegate() { return reinterpret_cast<Parser*>(this); }
-  const Parser* delegate() const {
-    return reinterpret_cast<const Parser*>(this);
-  }
-
-  // Helper functions for recursive descent.
-  bool IsEval(const AstRawString* identifier) const;
-  bool IsArguments(const AstRawString* identifier) const;
-  bool IsEvalOrArguments(const AstRawString* identifier) const;
-  bool IsUndefined(const AstRawString* identifier) const;
-  V8_INLINE bool IsFutureStrictReserved(const AstRawString* identifier) const;
-
-  // Returns true if the expression is of type "this.foo".
-  static bool IsThisProperty(Expression* expression);
-
-  static bool IsIdentifier(Expression* expression);
-
-  static const AstRawString* AsIdentifier(Expression* expression) {
-    DCHECK(IsIdentifier(expression));
-    return expression->AsVariableProxy()->raw_name();
-  }
-
-  bool IsPrototype(const AstRawString* identifier) const;
-
-  bool IsConstructor(const AstRawString* identifier) const;
-
-  bool IsDirectEvalCall(Expression* expression) const {
-    if (!expression->IsCall()) return false;
-    expression = expression->AsCall()->expression();
-    return IsIdentifier(expression) && IsEval(AsIdentifier(expression));
-  }
-
-  static bool IsBoilerplateProperty(ObjectLiteral::Property* property) {
-    return ObjectLiteral::IsBoilerplateProperty(property);
-  }
-
-  static bool IsArrayIndex(const AstRawString* string, uint32_t* index) {
-    return string->AsArrayIndex(index);
-  }
-
-  static Expression* GetPropertyValue(ObjectLiteral::Property* property) {
-    return property->value();
-  }
-
-  // Functions for encapsulating the differences between parsing and preparsing;
-  // operations interleaved with the recursive descent.
-  static void PushLiteralName(FuncNameInferrer* fni, const AstRawString* id) {
-    fni->PushLiteralName(id);
-  }
-
-  void PushPropertyName(FuncNameInferrer* fni, Expression* expression);
-
-  static void InferFunctionName(FuncNameInferrer* fni,
-                                FunctionLiteral* func_to_infer) {
-    fni->AddFunction(func_to_infer);
-  }
-
-  // If we assign a function literal to a property we pretenure the
-  // literal so it can be added as a constant function property.
-  static void CheckAssigningFunctionLiteralToProperty(Expression* left,
-                                                      Expression* right);
-
-  // Determine if the expression is a variable proxy and mark it as being used
-  // in an assignment or with a increment/decrement operator.
-  static Expression* MarkExpressionAsAssigned(Expression* expression);
-
-  // Returns true if we have a binary expression between two numeric
-  // literals. In that case, *x will be changed to an expression which is the
-  // computed value.
-  bool ShortcutNumericLiteralBinaryExpression(Expression** x, Expression* y,
-                                              Token::Value op, int pos,
-                                              AstNodeFactory* factory);
-
-  // Rewrites the following types of unary expressions:
-  // not <literal> -> true / false
-  // + <numeric literal> -> <numeric literal>
-  // - <numeric literal> -> <numeric literal with value negated>
-  // ! <literal> -> true / false
-  // The following rewriting rules enable the collection of type feedback
-  // without any special stub and the multiplication is removed later in
-  // Crankshaft's canonicalization pass.
-  // + foo -> foo * 1
-  // - foo -> foo * (-1)
-  // ~ foo -> foo ^(~0)
-  Expression* BuildUnaryExpression(Expression* expression, Token::Value op,
-                                   int pos, AstNodeFactory* factory);
-
-  Expression* BuildIteratorResult(Expression* value, bool done);
-
-  // Generate AST node that throws a ReferenceError with the given type.
-  Expression* NewThrowReferenceError(MessageTemplate::Template message,
-                                     int pos);
-
-  // Generate AST node that throws a SyntaxError with the given
-  // type. The first argument may be null (in the handle sense) in
-  // which case no arguments are passed to the constructor.
-  Expression* NewThrowSyntaxError(MessageTemplate::Template message,
-                                  const AstRawString* arg, int pos);
-
-  // Generate AST node that throws a TypeError with the given
-  // type. Both arguments must be non-null (in the handle sense).
-  Expression* NewThrowTypeError(MessageTemplate::Template message,
-                                const AstRawString* arg, int pos);
-
-  // Reporting errors.
-  void ReportMessageAt(Scanner::Location source_location,
-                       MessageTemplate::Template message,
-                       const char* arg = NULL,
-                       ParseErrorType error_type = kSyntaxError);
-  void ReportMessageAt(Scanner::Location source_location,
-                       MessageTemplate::Template message,
-                       const AstRawString* arg,
-                       ParseErrorType error_type = kSyntaxError);
-
-  // "null" return type creators.
-  static const AstRawString* EmptyIdentifier() { return nullptr; }
-  static Expression* EmptyExpression() { return nullptr; }
-  static Literal* EmptyLiteral() { return nullptr; }
-  static ObjectLiteralProperty* EmptyObjectLiteralProperty() { return nullptr; }
-  static FunctionLiteral* EmptyFunctionLiteral() { return nullptr; }
-
-  // Used in error return values.
-  static ZoneList<Expression*>* NullExpressionList() { return nullptr; }
-
-  // Non-NULL empty string.
-  V8_INLINE const AstRawString* EmptyIdentifierString() const;
-
-  // Odd-ball literal creators.
-  Literal* GetLiteralTheHole(int position, AstNodeFactory* factory) const;
-
-  // Producing data during the recursive descent.
-  const AstRawString* GetSymbol(Scanner* scanner) const;
-  const AstRawString* GetNextSymbol(Scanner* scanner) const;
-  const AstRawString* GetNumberAsSymbol(Scanner* scanner) const;
-
-  Expression* ThisExpression(int pos = kNoSourcePosition);
-  Expression* NewSuperPropertyReference(AstNodeFactory* factory, int pos);
-  Expression* NewSuperCallReference(AstNodeFactory* factory, int pos);
-  Expression* NewTargetExpression(int pos);
-  Expression* FunctionSentExpression(AstNodeFactory* factory, int pos) const;
-  Literal* ExpressionFromLiteral(Token::Value token, int pos, Scanner* scanner,
-                                 AstNodeFactory* factory) const;
-  Expression* ExpressionFromIdentifier(const AstRawString* name,
-                                       int start_position, int end_position,
-                                       InferName = InferName::kYes);
-  Expression* ExpressionFromString(int pos, Scanner* scanner,
-                                   AstNodeFactory* factory) const;
-  Expression* GetIterator(Expression* iterable, AstNodeFactory* factory,
-                          int pos);
-  ZoneList<v8::internal::Expression*>* NewExpressionList(int size,
-                                                         Zone* zone) const {
-    return new(zone) ZoneList<v8::internal::Expression*>(size, zone);
-  }
-  ZoneList<ObjectLiteral::Property*>* NewPropertyList(int size,
-                                                      Zone* zone) const {
-    return new(zone) ZoneList<ObjectLiteral::Property*>(size, zone);
-  }
-  ZoneList<v8::internal::Statement*>* NewStatementList(int size,
-                                                       Zone* zone) const {
-    return new(zone) ZoneList<v8::internal::Statement*>(size, zone);
-  }
-
-  V8_INLINE void AddParameterInitializationBlock(
-      const ParserFormalParameters& parameters,
-      ZoneList<v8::internal::Statement*>* body, bool is_async, bool* ok);
-
-  V8_INLINE void AddFormalParameter(ParserFormalParameters* parameters,
-                                    Expression* pattern,
-                                    Expression* initializer,
-                                    int initializer_end_position, bool is_rest);
-  V8_INLINE void DeclareFormalParameter(
-      DeclarationScope* scope,
-      const ParserFormalParameters::Parameter& parameter,
-      Type::ExpressionClassifier* classifier);
-  void ParseArrowFunctionFormalParameterList(
-      ParserFormalParameters* parameters, Expression* params,
-      const Scanner::Location& params_loc, Scanner::Location* duplicate_loc,
-      const Scope::Snapshot& scope_snapshot, bool* ok);
-
-  void ReindexLiterals(const ParserFormalParameters& parameters);
-
-  V8_INLINE Expression* NoTemplateTag() { return NULL; }
-  V8_INLINE static bool IsTaggedTemplate(const Expression* tag) {
-    return tag != NULL;
-  }
-
-  V8_INLINE void MaterializeUnspreadArgumentsLiterals(int count) {}
-
-  Expression* ExpressionListToExpression(ZoneList<Expression*>* args);
-
-  void SetFunctionNameFromPropertyName(ObjectLiteralProperty* property,
-                                       const AstRawString* name);
-
-  void SetFunctionNameFromIdentifierRef(Expression* value,
-                                        Expression* identifier);
-
-  V8_INLINE ZoneList<typename Type::ExpressionClassifier::Error>*
-      GetReportedErrorList() const;
-  V8_INLINE Zone* zone() const;
-
-  V8_INLINE ZoneList<Expression*>* GetNonPatternList() const;
+  typedef ParserTarget Target;
+  typedef ParserTargetScope TargetScope;
 };
 
 class Parser : public ParserBase<Parser> {
@@ -390,8 +185,16 @@
   bool Parse(ParseInfo* info);
   void ParseOnBackground(ParseInfo* info);
 
-  void DeserializeScopeChain(ParseInfo* info, Handle<Context> context,
-                             Scope::DeserializationMode deserialization_mode);
+  // Deserialize the scope chain prior to parsing in which the script is going
+  // to be executed. If the script is a top-level script, or the scope chain
+  // consists of only a native context, maybe_outer_scope_info should be an
+  // empty handle.
+  //
+  // This only deserializes the scope chain, but doesn't connect the scopes to
+  // their corresponding scope infos. Therefore, looking up variables in the
+  // deserialized scopes is not possible.
+  void DeserializeScopeChain(ParseInfo* info,
+                             MaybeHandle<ScopeInfo> maybe_outer_scope_info);
 
   // Handle errors detected during parsing, move statistics to Isolate,
   // internalize strings (move them to the heap).
@@ -400,9 +203,7 @@
 
  private:
   friend class ParserBase<Parser>;
-  // TODO(nikolaos): This should not be necessary. It will be removed
-  // when the traits object stops delegating to the implementation object.
-  friend class ParserBaseTraits<Parser>;
+  friend class v8::internal::ExpressionClassifier<ParserTypes<Parser>>;
 
   // Runtime encoding of different completion modes.
   enum CompletionKind {
@@ -411,18 +212,12 @@
     kAbruptCompletion
   };
 
-  enum class FunctionBodyType { kNormal, kSingleExpression };
-
-  DeclarationScope* GetDeclarationScope() const {
-    return scope()->GetDeclarationScope();
-  }
-  DeclarationScope* GetClosureScope() const {
-    return scope()->GetClosureScope();
-  }
   Variable* NewTemporary(const AstRawString* name) {
     return scope()->NewTemporary(name);
   }
 
+  void PrepareGeneratorVariables(FunctionState* function_state);
+
   // Limit the allowed number of local variables in a function. The hard limit
   // is that offsets computed by FullCodeGenerator::StackOperand and similar
   // functions are ints, and they should not overflow. In addition, accessing
@@ -455,12 +250,6 @@
     return compile_options_ == ScriptCompiler::kProduceParserCache;
   }
 
-  // All ParseXXX functions take as the last argument an *ok parameter
-  // which is set to false if parsing failed; it is unchanged otherwise.
-  // By making the 'exception handling' explicit, we are forced to check
-  // for failure at the call sites.
-  void ParseStatementList(ZoneList<Statement*>* body, int end_token, bool* ok);
-  Statement* ParseStatementListItem(bool* ok);
   void ParseModuleItemList(ZoneList<Statement*>* body, bool* ok);
   Statement* ParseModuleItem(bool* ok);
   const AstRawString* ParseModuleSpecifier(bool* ok);
@@ -482,75 +271,52 @@
           location(location) {}
   };
   ZoneList<const NamedImport*>* ParseNamedImports(int pos, bool* ok);
-  Statement* ParseStatement(ZoneList<const AstRawString*>* labels,
-                            AllowLabelledFunctionStatement allow_function,
-                            bool* ok);
-  Statement* ParseSubStatement(ZoneList<const AstRawString*>* labels,
-                               AllowLabelledFunctionStatement allow_function,
-                               bool* ok);
-  Statement* ParseStatementAsUnlabelled(ZoneList<const AstRawString*>* labels,
-                                   bool* ok);
-  Statement* ParseFunctionDeclaration(bool* ok);
-  Statement* ParseHoistableDeclaration(ZoneList<const AstRawString*>* names,
-                                       bool default_export, bool* ok);
-  Statement* ParseHoistableDeclaration(int pos, ParseFunctionFlags flags,
-                                       ZoneList<const AstRawString*>* names,
-                                       bool default_export, bool* ok);
-  Statement* ParseAsyncFunctionDeclaration(ZoneList<const AstRawString*>* names,
-                                           bool default_export, bool* ok);
-  Expression* ParseAsyncFunctionExpression(bool* ok);
-  Statement* ParseClassDeclaration(ZoneList<const AstRawString*>* names,
-                                   bool default_export, bool* ok);
-  Statement* ParseNativeDeclaration(bool* ok);
-  Block* ParseBlock(ZoneList<const AstRawString*>* labels, bool* ok);
-  Block* ParseVariableStatement(VariableDeclarationContext var_context,
-                                ZoneList<const AstRawString*>* names,
-                                bool* ok);
-  DoExpression* ParseDoExpression(bool* ok);
-  Expression* ParseYieldStarExpression(bool* ok);
+  Block* BuildInitializationBlock(DeclarationParsingResult* parsing_result,
+                                  ZoneList<const AstRawString*>* names,
+                                  bool* ok);
+  void DeclareAndInitializeVariables(
+      Block* block, const DeclarationDescriptor* declaration_descriptor,
+      const DeclarationParsingResult::Declaration* declaration,
+      ZoneList<const AstRawString*>* names, bool* ok);
+  ZoneList<const AstRawString*>* DeclareLabel(
+      ZoneList<const AstRawString*>* labels, VariableProxy* expr, bool* ok);
+  bool ContainsLabel(ZoneList<const AstRawString*>* labels,
+                     const AstRawString* label);
+  Expression* RewriteReturn(Expression* return_value, int pos);
+  Statement* RewriteSwitchStatement(Expression* tag,
+                                    SwitchStatement* switch_statement,
+                                    ZoneList<CaseClause*>* cases, Scope* scope);
+  void RewriteCatchPattern(CatchInfo* catch_info, bool* ok);
+  void ValidateCatchBlock(const CatchInfo& catch_info, bool* ok);
+  Statement* RewriteTryStatement(Block* try_block, Block* catch_block,
+                                 Block* finally_block,
+                                 const CatchInfo& catch_info, int pos);
 
-  struct DeclarationDescriptor {
-    enum Kind { NORMAL, PARAMETER };
-    Parser* parser;
-    Scope* scope;
-    Scope* hoist_scope;
-    VariableMode mode;
-    int declaration_pos;
-    int initialization_pos;
-    Kind declaration_kind;
-  };
-
-  struct DeclarationParsingResult {
-    struct Declaration {
-      Declaration(Expression* pattern, int initializer_position,
-                  Expression* initializer)
-          : pattern(pattern),
-            initializer_position(initializer_position),
-            initializer(initializer) {}
-
-      Expression* pattern;
-      int initializer_position;
-      Expression* initializer;
-    };
-
-    DeclarationParsingResult()
-        : declarations(4),
-          first_initializer_loc(Scanner::Location::invalid()),
-          bindings_loc(Scanner::Location::invalid()) {}
-
-    Block* BuildInitializationBlock(ZoneList<const AstRawString*>* names,
-                                    bool* ok);
-
-    DeclarationDescriptor descriptor;
-    List<Declaration> declarations;
-    Scanner::Location first_initializer_loc;
-    Scanner::Location bindings_loc;
-  };
+  Statement* DeclareFunction(const AstRawString* variable_name,
+                             FunctionLiteral* function, int pos,
+                             bool is_generator, bool is_async,
+                             ZoneList<const AstRawString*>* names, bool* ok);
+  V8_INLINE Statement* DeclareClass(const AstRawString* variable_name,
+                                    Expression* value,
+                                    ZoneList<const AstRawString*>* names,
+                                    int class_token_pos, int end_pos, bool* ok);
+  V8_INLINE void DeclareClassVariable(const AstRawString* name,
+                                      Scope* block_scope, ClassInfo* class_info,
+                                      int class_token_pos, bool* ok);
+  V8_INLINE void DeclareClassProperty(const AstRawString* class_name,
+                                      ClassLiteralProperty* property,
+                                      ClassInfo* class_info, bool* ok);
+  V8_INLINE Expression* RewriteClassLiteral(const AstRawString* name,
+                                            ClassInfo* class_info, int pos,
+                                            bool* ok);
+  V8_INLINE Statement* DeclareNative(const AstRawString* name, int pos,
+                                     bool* ok);
 
   class PatternRewriter final : public AstVisitor<PatternRewriter> {
    public:
     static void DeclareAndInitializeVariables(
-        Block* block, const DeclarationDescriptor* declaration_descriptor,
+        Parser* parser, Block* block,
+        const DeclarationDescriptor* declaration_descriptor,
         const DeclarationParsingResult::Declaration* declaration,
         ZoneList<const AstRawString*>* names, bool* ok);
 
@@ -627,47 +393,12 @@
     DEFINE_AST_VISITOR_MEMBERS_WITHOUT_STACKOVERFLOW()
   };
 
-  Block* ParseVariableDeclarations(VariableDeclarationContext var_context,
-                                   DeclarationParsingResult* parsing_result,
-                                   ZoneList<const AstRawString*>* names,
-                                   bool* ok);
-  Statement* ParseExpressionOrLabelledStatement(
-      ZoneList<const AstRawString*>* labels,
-      AllowLabelledFunctionStatement allow_function, bool* ok);
-  IfStatement* ParseIfStatement(ZoneList<const AstRawString*>* labels,
-                                bool* ok);
-  Statement* ParseContinueStatement(bool* ok);
-  Statement* ParseBreakStatement(ZoneList<const AstRawString*>* labels,
-                                 bool* ok);
-  Statement* ParseReturnStatement(bool* ok);
-  Statement* ParseWithStatement(ZoneList<const AstRawString*>* labels,
-                                bool* ok);
-  CaseClause* ParseCaseClause(bool* default_seen_ptr, bool* ok);
-  Statement* ParseSwitchStatement(ZoneList<const AstRawString*>* labels,
-                                  bool* ok);
-  DoWhileStatement* ParseDoWhileStatement(ZoneList<const AstRawString*>* labels,
-                                          bool* ok);
-  WhileStatement* ParseWhileStatement(ZoneList<const AstRawString*>* labels,
-                                      bool* ok);
-  Statement* ParseForStatement(ZoneList<const AstRawString*>* labels, bool* ok);
-  Statement* ParseThrowStatement(bool* ok);
-  Expression* MakeCatchContext(Handle<String> id, VariableProxy* value);
-  TryStatement* ParseTryStatement(bool* ok);
-  DebuggerStatement* ParseDebuggerStatement(bool* ok);
-  // Parse a SubStatement in strict mode, or with an extra block scope in
-  // sloppy mode to handle
-  // ES#sec-functiondeclarations-in-ifstatement-statement-clauses
-  // The legacy parameter indicates whether function declarations are
-  // banned by the ES2015 specification in this location, and they are being
-  // permitted here to match previous V8 behavior.
-  Statement* ParseScopedStatement(ZoneList<const AstRawString*>* labels,
-                                  bool legacy, bool* ok);
-
   // !%_IsJSReceiver(result = iterator.next()) &&
   //     %ThrowIteratorResultNotAnObject(result)
   Expression* BuildIteratorNextResult(Expression* iterator, Variable* result,
                                       int pos);
 
+  Expression* GetIterator(Expression* iterable, int pos);
 
   // Initialize the components of a for-in / for-of statement.
   Statement* InitializeForEachStatement(ForEachStatement* stmt,
@@ -677,18 +408,17 @@
                                       Expression* iterable, Statement* body,
                                       bool finalize,
                                       int next_result_pos = kNoSourcePosition);
+  Block* RewriteForVarInLegacy(const ForInfo& for_info);
+  void DesugarBindingInForEachStatement(ForInfo* for_info, Block** body_block,
+                                        Expression** each_variable, bool* ok);
+  Block* CreateForEachStatementTDZ(Block* init_block, const ForInfo& for_info,
+                                   bool* ok);
+
   Statement* DesugarLexicalBindingsInForStatement(
-      Scope* inner_scope, VariableMode mode,
-      ZoneList<const AstRawString*>* names, ForStatement* loop, Statement* init,
-      Expression* cond, Statement* next, Statement* body, bool* ok);
+      ForStatement* loop, Statement* init, Expression* cond, Statement* next,
+      Statement* body, Scope* inner_scope, const ForInfo& for_info, bool* ok);
 
-  void DesugarAsyncFunctionBody(const AstRawString* function_name, Scope* scope,
-                                ZoneList<Statement*>* body,
-                                Type::ExpressionClassifier* classifier,
-                                FunctionKind kind, FunctionBodyType type,
-                                bool accept_IN, int pos, bool* ok);
-
-  void RewriteDoExpression(Expression* expr, bool* ok);
+  Expression* RewriteDoExpression(Block* body, int pos, bool* ok);
 
   FunctionLiteral* ParseFunctionLiteral(
       const AstRawString* name, Scanner::Location function_name_location,
@@ -696,14 +426,10 @@
       int function_token_position, FunctionLiteral::FunctionType type,
       LanguageMode language_mode, bool* ok);
 
-  Expression* ParseClassLiteral(ExpressionClassifier* classifier,
-                                const AstRawString* name,
-                                Scanner::Location class_name_location,
-                                bool name_is_strict_reserved, int pos,
-                                bool* ok);
-
-  // Magical syntax support.
-  Expression* ParseV8Intrinsic(bool* ok);
+  Expression* InstallHomeObject(Expression* function_literal,
+                                Expression* home_object);
+  FunctionLiteral* SynthesizeClassFieldInitializer(int count);
+  FunctionLiteral* InsertClassFieldInitializer(FunctionLiteral* constructor);
 
   // Get odd-ball literals.
   Literal* GetLiteralUndefined(int position);
@@ -724,14 +450,11 @@
   void InsertShadowingVarBindingInitializers(Block* block);
 
   // Implement sloppy block-scoped functions, ES2015 Annex B 3.3
-  void InsertSloppyBlockFunctionVarBindings(DeclarationScope* scope,
-                                            Scope* complex_params_scope,
-                                            bool* ok);
+  void InsertSloppyBlockFunctionVarBindings(DeclarationScope* scope);
 
-  static InitializationFlag DefaultInitializationFlag(VariableMode mode);
   VariableProxy* NewUnresolved(const AstRawString* name, int begin_pos,
                                int end_pos = kNoSourcePosition,
-                               Variable::Kind kind = Variable::NORMAL);
+                               VariableKind kind = NORMAL_VARIABLE);
   VariableProxy* NewUnresolved(const AstRawString* name);
   Variable* Declare(Declaration* declaration,
                     DeclarationDescriptor::Kind declaration_kind,
@@ -750,25 +473,24 @@
 
   // Factory methods.
   FunctionLiteral* DefaultConstructor(const AstRawString* name, bool call_super,
-                                      int pos, int end_pos,
-                                      LanguageMode language_mode);
+                                      bool requires_class_field_init, int pos,
+                                      int end_pos, LanguageMode language_mode);
 
   // Skip over a lazy function, either using cached data if we have it, or
   // by parsing the function with PreParser. Consumes the ending }.
-  //
-  // If bookmark is set, the (pre-)parser may decide to abort skipping
+  // If may_abort == true, the (pre-)parser may decide to abort skipping
   // in order to force the function to be eagerly parsed, after all.
-  // In this case, it'll reset the scanner using the bookmark.
-  void SkipLazyFunctionBody(int* materialized_literal_count,
-                            int* expected_property_count, bool* ok,
-                            Scanner::BookmarkScope* bookmark = nullptr);
+  LazyParsingResult SkipLazyFunctionBody(int* materialized_literal_count,
+                                         int* expected_property_count,
+                                         bool is_inner_function, bool may_abort,
+                                         bool* ok);
 
   PreParser::PreParseResult ParseLazyFunctionBodyWithPreParser(
-      SingletonLogger* logger, Scanner::BookmarkScope* bookmark = nullptr);
+      SingletonLogger* logger, bool is_inner_function, bool may_abort);
 
   Block* BuildParameterInitializationBlock(
       const ParserFormalParameters& parameters, bool* ok);
-  Block* BuildRejectPromiseOnException(Block* block);
+  Block* BuildRejectPromiseOnException(Block* block, bool* ok);
 
   // Consumes the ending }.
   ZoneList<Statement*>* ParseEagerFunctionBody(
@@ -817,25 +539,16 @@
                                    Expression* tag);
   uint32_t ComputeTemplateLiteralHash(const TemplateLiteral* lit);
 
-  void ParseAsyncArrowSingleExpressionBody(ZoneList<Statement*>* body,
-                                           bool accept_IN,
-                                           ExpressionClassifier* classifier,
-                                           int pos, bool* ok) {
-    DesugarAsyncFunctionBody(ast_value_factory()->empty_string(), scope(), body,
-                             classifier, kAsyncArrowFunction,
-                             FunctionBodyType::kSingleExpression, accept_IN,
-                             pos, ok);
-  }
-
-  ZoneList<v8::internal::Expression*>* PrepareSpreadArguments(
-      ZoneList<v8::internal::Expression*>* list);
-  Expression* SpreadCall(Expression* function,
-                         ZoneList<v8::internal::Expression*>* args, int pos);
-  Expression* SpreadCallNew(Expression* function,
-                            ZoneList<v8::internal::Expression*>* args, int pos);
+  ZoneList<Expression*>* PrepareSpreadArguments(ZoneList<Expression*>* list);
+  Expression* SpreadCall(Expression* function, ZoneList<Expression*>* args,
+                         int pos);
+  Expression* SpreadCallNew(Expression* function, ZoneList<Expression*>* args,
+                            int pos);
+  Expression* CallClassFieldInitializer(Scope* scope, Expression* this_expr);
+  Expression* RewriteSuperCall(Expression* call_expression);
 
   void SetLanguageMode(Scope* scope, LanguageMode mode);
-  void RaiseLanguageMode(LanguageMode mode);
+  void SetAsmModule();
 
   V8_INLINE void MarkCollectedTailCallExpressions();
   V8_INLINE void MarkTailPosition(Expression* expression);
@@ -852,7 +565,7 @@
   V8_INLINE Expression* RewriteSpreads(ArrayLiteral* lit);
 
   // Rewrite expressions that are not used as patterns
-  V8_INLINE void RewriteNonPattern(ExpressionClassifier* classifier, bool* ok);
+  V8_INLINE void RewriteNonPattern(bool* ok);
 
   V8_INLINE void QueueDestructuringAssignmentForRewriting(
       Expression* assignment);
@@ -861,41 +574,513 @@
   friend class InitializerRewriter;
   void RewriteParameterInitializer(Expression* expr, Scope* scope);
 
+  Expression* BuildInitialYield(int pos, FunctionKind kind);
   Expression* BuildCreateJSGeneratorObject(int pos, FunctionKind kind);
-  Expression* BuildPromiseResolve(Expression* value, int pos);
-  Expression* BuildPromiseReject(Expression* value, int pos);
+  Expression* BuildResolvePromise(Expression* value, int pos);
+  Expression* BuildRejectPromise(Expression* value, int pos);
+  Variable* PromiseVariable();
 
   // Generic AST generator for throwing errors from compiled code.
   Expression* NewThrowError(Runtime::FunctionId function_id,
                             MessageTemplate::Template message,
                             const AstRawString* arg, int pos);
 
-  void FinalizeIteratorUse(Variable* completion, Expression* condition,
-                           Variable* iter, Block* iterator_use, Block* result);
+  void FinalizeIteratorUse(Scope* use_scope, Variable* completion,
+                           Expression* condition, Variable* iter,
+                           Block* iterator_use, Block* result);
 
   Statement* FinalizeForOfStatement(ForOfStatement* loop, Variable* completion,
                                     int pos);
   void BuildIteratorClose(ZoneList<Statement*>* statements, Variable* iterator,
                           Variable* input, Variable* output);
-  void BuildIteratorCloseForCompletion(ZoneList<Statement*>* statements,
+  void BuildIteratorCloseForCompletion(Scope* scope,
+                                       ZoneList<Statement*>* statements,
                                        Variable* iterator,
                                        Expression* completion);
   Statement* CheckCallable(Variable* var, Expression* error, int pos);
 
   V8_INLINE Expression* RewriteAwaitExpression(Expression* value, int pos);
+  V8_INLINE void PrepareAsyncFunctionBody(ZoneList<Statement*>* body,
+                                          FunctionKind kind, int pos);
+  V8_INLINE void RewriteAsyncFunctionBody(ZoneList<Statement*>* body,
+                                          Block* block,
+                                          Expression* return_value, bool* ok);
 
   Expression* RewriteYieldStar(Expression* generator, Expression* expression,
                                int pos);
 
-  void ParseArrowFunctionFormalParameters(ParserFormalParameters* parameters,
-                                          Expression* params, int end_pos,
-                                          bool* ok);
+  void AddArrowFunctionFormalParameters(ParserFormalParameters* parameters,
+                                        Expression* params, int end_pos,
+                                        bool* ok);
   void SetFunctionName(Expression* value, const AstRawString* name);
 
+  // Helper functions for recursive descent.
+  V8_INLINE bool IsEval(const AstRawString* identifier) const {
+    return identifier == ast_value_factory()->eval_string();
+  }
+
+  V8_INLINE bool IsArguments(const AstRawString* identifier) const {
+    return identifier == ast_value_factory()->arguments_string();
+  }
+
+  V8_INLINE bool IsEvalOrArguments(const AstRawString* identifier) const {
+    return IsEval(identifier) || IsArguments(identifier);
+  }
+
+  V8_INLINE bool IsUndefined(const AstRawString* identifier) const {
+    return identifier == ast_value_factory()->undefined_string();
+  }
+
+  V8_INLINE bool IsFutureStrictReserved(const AstRawString* identifier) const {
+    return scanner()->IdentifierIsFutureStrictReserved(identifier);
+  }
+
+  // Returns true if the expression is of type "this.foo".
+  V8_INLINE static bool IsThisProperty(Expression* expression) {
+    DCHECK(expression != NULL);
+    Property* property = expression->AsProperty();
+    return property != NULL && property->obj()->IsVariableProxy() &&
+           property->obj()->AsVariableProxy()->is_this();
+  }
+
+  // This returns true if the expression is an identifier (wrapped
+  // inside a variable proxy).  We exclude the case of 'this', which
+  // has been converted to a variable proxy.
+  V8_INLINE static bool IsIdentifier(Expression* expression) {
+    DCHECK_NOT_NULL(expression);
+    VariableProxy* operand = expression->AsVariableProxy();
+    return operand != nullptr && !operand->is_this();
+  }
+
+  V8_INLINE static const AstRawString* AsIdentifier(Expression* expression) {
+    DCHECK(IsIdentifier(expression));
+    return expression->AsVariableProxy()->raw_name();
+  }
+
+  V8_INLINE VariableProxy* AsIdentifierExpression(Expression* expression) {
+    return expression->AsVariableProxy();
+  }
+
+  V8_INLINE bool IsPrototype(const AstRawString* identifier) const {
+    return identifier == ast_value_factory()->prototype_string();
+  }
+
+  V8_INLINE bool IsConstructor(const AstRawString* identifier) const {
+    return identifier == ast_value_factory()->constructor_string();
+  }
+
+  V8_INLINE bool IsDirectEvalCall(Expression* expression) const {
+    if (!expression->IsCall()) return false;
+    expression = expression->AsCall()->expression();
+    return IsIdentifier(expression) && IsEval(AsIdentifier(expression));
+  }
+
+  V8_INLINE static bool IsBoilerplateProperty(
+      ObjectLiteral::Property* property) {
+    return ObjectLiteral::IsBoilerplateProperty(property);
+  }
+
+  V8_INLINE bool IsNative(Expression* expr) const {
+    DCHECK_NOT_NULL(expr);
+    return expr->IsVariableProxy() &&
+           expr->AsVariableProxy()->raw_name() ==
+               ast_value_factory()->native_string();
+  }
+
+  V8_INLINE static bool IsArrayIndex(const AstRawString* string,
+                                     uint32_t* index) {
+    return string->AsArrayIndex(index);
+  }
+
+  V8_INLINE bool IsUseStrictDirective(Statement* statement) const {
+    return IsStringLiteral(statement, ast_value_factory()->use_strict_string());
+  }
+
+  V8_INLINE bool IsUseAsmDirective(Statement* statement) const {
+    return IsStringLiteral(statement, ast_value_factory()->use_asm_string());
+  }
+
+  // Returns true if the statement is an expression statement containing
+  // a single string literal.  If a second argument is given, the literal
+  // is also compared with it and the result is true only if they are equal.
+  V8_INLINE bool IsStringLiteral(Statement* statement,
+                                 const AstRawString* arg = nullptr) const {
+    ExpressionStatement* e_stat = statement->AsExpressionStatement();
+    if (e_stat == nullptr) return false;
+    Literal* literal = e_stat->expression()->AsLiteral();
+    if (literal == nullptr || !literal->raw_value()->IsString()) return false;
+    return arg == nullptr || literal->raw_value()->AsString() == arg;
+  }
+
+  V8_INLINE static Expression* GetPropertyValue(LiteralProperty* property) {
+    return property->value();
+  }
+
+  V8_INLINE void GetDefaultStrings(
+      const AstRawString** default_string,
+      const AstRawString** star_default_star_string) {
+    *default_string = ast_value_factory()->default_string();
+    *star_default_star_string = ast_value_factory()->star_default_star_string();
+  }
+
+  // Functions for encapsulating the differences between parsing and preparsing;
+  // operations interleaved with the recursive descent.
+  V8_INLINE void PushLiteralName(const AstRawString* id) {
+    DCHECK_NOT_NULL(fni_);
+    fni_->PushLiteralName(id);
+  }
+
+  V8_INLINE void PushVariableName(const AstRawString* id) {
+    DCHECK_NOT_NULL(fni_);
+    fni_->PushVariableName(id);
+  }
+
+  V8_INLINE void PushPropertyName(Expression* expression) {
+    DCHECK_NOT_NULL(fni_);
+    if (expression->IsPropertyName()) {
+      fni_->PushLiteralName(expression->AsLiteral()->AsRawPropertyName());
+    } else {
+      fni_->PushLiteralName(ast_value_factory()->anonymous_function_string());
+    }
+  }
+
+  V8_INLINE void PushEnclosingName(const AstRawString* name) {
+    DCHECK_NOT_NULL(fni_);
+    fni_->PushEnclosingName(name);
+  }
+
+  V8_INLINE void AddFunctionForNameInference(FunctionLiteral* func_to_infer) {
+    DCHECK_NOT_NULL(fni_);
+    fni_->AddFunction(func_to_infer);
+  }
+
+  V8_INLINE void InferFunctionName() {
+    DCHECK_NOT_NULL(fni_);
+    fni_->Infer();
+  }
+
+  // If we assign a function literal to a property we pretenure the
+  // literal so it can be added as a constant function property.
+  V8_INLINE static void CheckAssigningFunctionLiteralToProperty(
+      Expression* left, Expression* right) {
+    DCHECK(left != NULL);
+    if (left->IsProperty() && right->IsFunctionLiteral()) {
+      right->AsFunctionLiteral()->set_pretenure();
+    }
+  }
+
+  // Determine if the expression is a variable proxy and mark it as being used
+  // in an assignment or with an increment/decrement operator.
+  V8_INLINE static Expression* MarkExpressionAsAssigned(
+      Expression* expression) {
+    VariableProxy* proxy =
+        expression != NULL ? expression->AsVariableProxy() : NULL;
+    if (proxy != NULL) proxy->set_is_assigned();
+    return expression;
+  }
+
+  // Returns true if we have a binary expression between two numeric
+  // literals. In that case, *x will be changed to an expression which is the
+  // computed value.
+  bool ShortcutNumericLiteralBinaryExpression(Expression** x, Expression* y,
+                                              Token::Value op, int pos);
+
+  // Rewrites the following types of unary expressions:
+  // not <literal> -> true / false
+  // + <numeric literal> -> <numeric literal>
+  // - <numeric literal> -> <numeric literal with value negated>
+  // ! <literal> -> true / false
+  // The following rewriting rules enable the collection of type feedback
+  // without any special stub and the multiplication is removed later in
+  // Crankshaft's canonicalization pass.
+  // + foo -> foo * 1
+  // - foo -> foo * (-1)
+  // ~ foo -> foo ^(~0)
+  Expression* BuildUnaryExpression(Expression* expression, Token::Value op,
+                                   int pos);
+
+  Expression* BuildIteratorResult(Expression* value, bool done);
+
+  // Generate AST node that throws a ReferenceError with the given type.
+  V8_INLINE Expression* NewThrowReferenceError(
+      MessageTemplate::Template message, int pos) {
+    return NewThrowError(Runtime::kNewReferenceError, message,
+                         ast_value_factory()->empty_string(), pos);
+  }
+
+  // Generate AST node that throws a SyntaxError with the given
+  // type. The first argument may be null (in the handle sense) in
+  // which case no arguments are passed to the constructor.
+  V8_INLINE Expression* NewThrowSyntaxError(MessageTemplate::Template message,
+                                            const AstRawString* arg, int pos) {
+    return NewThrowError(Runtime::kNewSyntaxError, message, arg, pos);
+  }
+
+  // Generate AST node that throws a TypeError with the given
+  // type. Both arguments must be non-null (in the handle sense).
+  V8_INLINE Expression* NewThrowTypeError(MessageTemplate::Template message,
+                                          const AstRawString* arg, int pos) {
+    return NewThrowError(Runtime::kNewTypeError, message, arg, pos);
+  }
+
+  // Reporting errors.
+  V8_INLINE void ReportMessageAt(Scanner::Location source_location,
+                                 MessageTemplate::Template message,
+                                 const char* arg = NULL,
+                                 ParseErrorType error_type = kSyntaxError) {
+    if (stack_overflow()) {
+      // Suppress the error message (syntax error or such) in the presence of a
+      // stack overflow. The isolate allows only one pending exception at a
+      // time
+      // and we want to report the stack overflow later.
+      return;
+    }
+    pending_error_handler_.ReportMessageAt(source_location.beg_pos,
+                                           source_location.end_pos, message,
+                                           arg, error_type);
+  }
+
+  V8_INLINE void ReportMessageAt(Scanner::Location source_location,
+                                 MessageTemplate::Template message,
+                                 const AstRawString* arg,
+                                 ParseErrorType error_type = kSyntaxError) {
+    if (stack_overflow()) {
+      // Suppress the error message (syntax error or such) in the presence of a
+      // stack overflow. The isolate allows only one pending exception at a
+      // time
+      // and we want to report the stack overflow later.
+      return;
+    }
+    pending_error_handler_.ReportMessageAt(source_location.beg_pos,
+                                           source_location.end_pos, message,
+                                           arg, error_type);
+  }
+
+  // "null" return type creators.
+  V8_INLINE static const AstRawString* EmptyIdentifier() { return nullptr; }
+  V8_INLINE static bool IsEmptyIdentifier(const AstRawString* name) {
+    return name == nullptr;
+  }
+  V8_INLINE static Expression* EmptyExpression() { return nullptr; }
+  V8_INLINE static Literal* EmptyLiteral() { return nullptr; }
+  V8_INLINE static ObjectLiteralProperty* EmptyObjectLiteralProperty() {
+    return nullptr;
+  }
+  V8_INLINE static ClassLiteralProperty* EmptyClassLiteralProperty() {
+    return nullptr;
+  }
+  V8_INLINE static FunctionLiteral* EmptyFunctionLiteral() { return nullptr; }
+  V8_INLINE static Block* NullBlock() { return nullptr; }
+
+  V8_INLINE static bool IsEmptyExpression(Expression* expr) {
+    return expr == nullptr;
+  }
+
+  // Used in error return values.
+  V8_INLINE static ZoneList<Expression*>* NullExpressionList() {
+    return nullptr;
+  }
+  V8_INLINE static bool IsNullExpressionList(ZoneList<Expression*>* exprs) {
+    return exprs == nullptr;
+  }
+  V8_INLINE static ZoneList<Statement*>* NullStatementList() { return nullptr; }
+  V8_INLINE static bool IsNullStatementList(ZoneList<Statement*>* stmts) {
+    return stmts == nullptr;
+  }
+  V8_INLINE static Statement* NullStatement() { return nullptr; }
+  V8_INLINE bool IsNullStatement(Statement* stmt) { return stmt == nullptr; }
+  V8_INLINE bool IsEmptyStatement(Statement* stmt) {
+    DCHECK_NOT_NULL(stmt);
+    return stmt->IsEmpty();
+  }
+
+  // Non-NULL empty string.
+  V8_INLINE const AstRawString* EmptyIdentifierString() const {
+    return ast_value_factory()->empty_string();
+  }
+
+  // Odd-ball literal creators.
+  V8_INLINE Literal* GetLiteralTheHole(int position) {
+    return factory()->NewTheHoleLiteral(kNoSourcePosition);
+  }
+
+  // Producing data during the recursive descent.
+  V8_INLINE const AstRawString* GetSymbol() const {
+    const AstRawString* result = scanner()->CurrentSymbol(ast_value_factory());
+    DCHECK(result != NULL);
+    return result;
+  }
+
+  V8_INLINE const AstRawString* GetNextSymbol() const {
+    return scanner()->NextSymbol(ast_value_factory());
+  }
+
+  V8_INLINE const AstRawString* GetNumberAsSymbol() const {
+    double double_value = scanner()->DoubleValue();
+    char array[100];
+    const char* string = DoubleToCString(double_value, ArrayVector(array));
+    return ast_value_factory()->GetOneByteString(string);
+  }
+
+  V8_INLINE Expression* ThisExpression(int pos = kNoSourcePosition) {
+    return NewUnresolved(ast_value_factory()->this_string(), pos, pos + 4,
+                         THIS_VARIABLE);
+  }
+
+  Expression* NewSuperPropertyReference(int pos);
+  Expression* NewSuperCallReference(int pos);
+  Expression* NewTargetExpression(int pos);
+  Expression* FunctionSentExpression(int pos);
+
+  Literal* ExpressionFromLiteral(Token::Value token, int pos);
+
+  V8_INLINE Expression* ExpressionFromIdentifier(
+      const AstRawString* name, int start_position, int end_position,
+      InferName infer = InferName::kYes) {
+    if (infer == InferName::kYes) {
+      fni_->PushVariableName(name);
+    }
+    return NewUnresolved(name, start_position, end_position);
+  }
+
+  V8_INLINE Expression* ExpressionFromString(int pos) {
+    const AstRawString* symbol = GetSymbol();
+    fni_->PushLiteralName(symbol);
+    return factory()->NewStringLiteral(symbol, pos);
+  }
+
+  V8_INLINE ZoneList<Expression*>* NewExpressionList(int size) const {
+    return new (zone()) ZoneList<Expression*>(size, zone());
+  }
+  V8_INLINE ZoneList<ObjectLiteral::Property*>* NewObjectPropertyList(
+      int size) const {
+    return new (zone()) ZoneList<ObjectLiteral::Property*>(size, zone());
+  }
+  V8_INLINE ZoneList<ClassLiteral::Property*>* NewClassPropertyList(
+      int size) const {
+    return new (zone()) ZoneList<ClassLiteral::Property*>(size, zone());
+  }
+  V8_INLINE ZoneList<Statement*>* NewStatementList(int size) const {
+    return new (zone()) ZoneList<Statement*>(size, zone());
+  }
+  V8_INLINE ZoneList<CaseClause*>* NewCaseClauseList(int size) const {
+    return new (zone()) ZoneList<CaseClause*>(size, zone());
+  }
+
+  V8_INLINE Expression* NewV8Intrinsic(const AstRawString* name,
+                                       ZoneList<Expression*>* args, int pos,
+                                       bool* ok);
+
+  V8_INLINE Statement* NewThrowStatement(Expression* exception, int pos) {
+    return factory()->NewExpressionStatement(
+        factory()->NewThrow(exception, pos), pos);
+  }
+
+  V8_INLINE void AddParameterInitializationBlock(
+      const ParserFormalParameters& parameters, ZoneList<Statement*>* body,
+      bool is_async, bool* ok) {
+    if (parameters.is_simple) return;
+    auto* init_block = BuildParameterInitializationBlock(parameters, ok);
+    if (!*ok) return;
+    if (is_async) {
+      init_block = BuildRejectPromiseOnException(init_block, ok);
+      if (!*ok) return;
+    }
+    if (init_block != nullptr) body->Add(init_block, zone());
+  }
+
+  V8_INLINE void AddFormalParameter(ParserFormalParameters* parameters,
+                                    Expression* pattern,
+                                    Expression* initializer,
+                                    int initializer_end_position,
+                                    bool is_rest) {
+    bool is_simple = pattern->IsVariableProxy() && initializer == nullptr;
+    const AstRawString* name = is_simple
+                                   ? pattern->AsVariableProxy()->raw_name()
+                                   : ast_value_factory()->empty_string();
+    parameters->params.Add(
+        ParserFormalParameters::Parameter(name, pattern, initializer,
+                                          initializer_end_position, is_rest),
+        parameters->scope->zone());
+  }
+
+  V8_INLINE void DeclareFormalParameter(
+      DeclarationScope* scope,
+      const ParserFormalParameters::Parameter& parameter) {
+    bool is_duplicate = false;
+    bool is_simple = classifier()->is_simple_parameter_list();
+    auto name = is_simple || parameter.is_rest
+                    ? parameter.name
+                    : ast_value_factory()->empty_string();
+    auto mode = is_simple || parameter.is_rest ? VAR : TEMPORARY;
+    if (!is_simple) scope->SetHasNonSimpleParameters();
+    bool is_optional = parameter.initializer != nullptr;
+    Variable* var =
+        scope->DeclareParameter(name, mode, is_optional, parameter.is_rest,
+                                &is_duplicate, ast_value_factory());
+    if (is_duplicate) {
+      classifier()->RecordDuplicateFormalParameterError(scanner()->location());
+    }
+    if (is_sloppy(scope->language_mode())) {
+      // TODO(sigurds) Mark every parameter as maybe assigned. This is a
+      // conservative approximation necessary to account for parameters
+      // that are assigned via the arguments array.
+      var->set_maybe_assigned();
+    }
+  }
+
+  void DeclareArrowFunctionFormalParameters(ParserFormalParameters* parameters,
+                                            Expression* params,
+                                            const Scanner::Location& params_loc,
+                                            Scanner::Location* duplicate_loc,
+                                            bool* ok);
+
+  void ReindexLiterals(const ParserFormalParameters& parameters);
+
+  V8_INLINE Expression* NoTemplateTag() { return NULL; }
+  V8_INLINE static bool IsTaggedTemplate(const Expression* tag) {
+    return tag != NULL;
+  }
+
+  V8_INLINE void MaterializeUnspreadArgumentsLiterals(int count) {}
+
+  Expression* ExpressionListToExpression(ZoneList<Expression*>* args);
+
+  void AddAccessorPrefixToFunctionName(bool is_get, FunctionLiteral* function,
+                                       const AstRawString* name);
+
+  void SetFunctionNameFromPropertyName(ObjectLiteralProperty* property,
+                                       const AstRawString* name);
+
+  void SetFunctionNameFromIdentifierRef(Expression* value,
+                                        Expression* identifier);
+
+  V8_INLINE ZoneList<typename ExpressionClassifier::Error>*
+  GetReportedErrorList() const {
+    return function_state_->GetReportedErrorList();
+  }
+
+  V8_INLINE ZoneList<Expression*>* GetNonPatternList() const {
+    return function_state_->non_patterns_to_rewrite();
+  }
+
+  V8_INLINE void CountUsage(v8::Isolate::UseCounterFeature feature) {
+    ++use_counts_[feature];
+  }
+
+  // Parser's private field members.
+  friend class DiscardableZoneScope;  // Uses reusable_preparser_.
+  // FIXME(marja): Make reusable_preparser_ always use its own temp Zone (call
+  // DeleteAll after each function), so this won't be needed.
+
   Scanner scanner_;
   PreParser* reusable_preparser_;
   Scope* original_scope_;  // for ES5 function declarations in sloppy eval
-  Target* target_stack_;  // for break, continue statements
+
+  friend class ParserTarget;
+  friend class ParserTargetScope;
+  ParserTarget* target_stack_;  // for break, continue statements
+
   ScriptCompiler::CompileOptions compile_options_;
   ParseData* cached_parse_data_;
 
@@ -908,107 +1093,8 @@
   HistogramTimer* pre_parse_timer_;
 
   bool parsing_on_main_thread_;
-
-#ifdef DEBUG
-  void Print(AstNode* node);
-#endif  // DEBUG
 };
 
-bool ParserBaseTraits<Parser>::IsFutureStrictReserved(
-    const AstRawString* identifier) const {
-  return delegate()->scanner()->IdentifierIsFutureStrictReserved(identifier);
-}
-
-const AstRawString* ParserBaseTraits<Parser>::EmptyIdentifierString() const {
-  return delegate()->ast_value_factory()->empty_string();
-}
-
-
-// Support for handling complex values (array and object literals) that
-// can be fully handled at compile time.
-class CompileTimeValue: public AllStatic {
- public:
-  enum LiteralType {
-    OBJECT_LITERAL_FAST_ELEMENTS,
-    OBJECT_LITERAL_SLOW_ELEMENTS,
-    ARRAY_LITERAL
-  };
-
-  static bool IsCompileTimeValue(Expression* expression);
-
-  // Get the value as a compile time value.
-  static Handle<FixedArray> GetValue(Isolate* isolate, Expression* expression);
-
-  // Get the type of a compile time value returned by GetValue().
-  static LiteralType GetLiteralType(Handle<FixedArray> value);
-
-  // Get the elements array of a compile time value returned by GetValue().
-  static Handle<FixedArray> GetElements(Handle<FixedArray> value);
-
- private:
-  static const int kLiteralTypeSlot = 0;
-  static const int kElementsSlot = 1;
-
-  DISALLOW_IMPLICIT_CONSTRUCTORS(CompileTimeValue);
-};
-
-void ParserBaseTraits<Parser>::AddFormalParameter(
-    ParserFormalParameters* parameters, Expression* pattern,
-    Expression* initializer, int initializer_end_position, bool is_rest) {
-  bool is_simple = pattern->IsVariableProxy() && initializer == nullptr;
-  const AstRawString* name =
-      is_simple ? pattern->AsVariableProxy()->raw_name()
-                : delegate()->ast_value_factory()->empty_string();
-  parameters->params.Add(
-      ParserFormalParameters::Parameter(name, pattern, initializer,
-                                        initializer_end_position, is_rest),
-      parameters->scope->zone());
-}
-
-void ParserBaseTraits<Parser>::DeclareFormalParameter(
-    DeclarationScope* scope, const ParserFormalParameters::Parameter& parameter,
-    Type::ExpressionClassifier* classifier) {
-  bool is_duplicate = false;
-  bool is_simple = classifier->is_simple_parameter_list();
-  auto name = is_simple || parameter.is_rest
-                  ? parameter.name
-                  : delegate()->ast_value_factory()->empty_string();
-  auto mode = is_simple || parameter.is_rest ? VAR : TEMPORARY;
-  if (!is_simple) scope->SetHasNonSimpleParameters();
-  bool is_optional = parameter.initializer != nullptr;
-  Variable* var =
-      scope->DeclareParameter(name, mode, is_optional, parameter.is_rest,
-                              &is_duplicate, delegate()->ast_value_factory());
-  if (is_duplicate) {
-    classifier->RecordDuplicateFormalParameterError(
-        delegate()->scanner()->location());
-  }
-  if (is_sloppy(scope->language_mode())) {
-    // TODO(sigurds) Mark every parameter as maybe assigned. This is a
-    // conservative approximation necessary to account for parameters
-    // that are assigned via the arguments array.
-    var->set_maybe_assigned();
-  }
-}
-
-void ParserBaseTraits<Parser>::AddParameterInitializationBlock(
-    const ParserFormalParameters& parameters,
-    ZoneList<v8::internal::Statement*>* body, bool is_async, bool* ok) {
-  if (!parameters.is_simple) {
-    auto* init_block =
-        delegate()->BuildParameterInitializationBlock(parameters, ok);
-    if (!*ok) return;
-
-    if (is_async) {
-      init_block = delegate()->BuildRejectPromiseOnException(init_block);
-    }
-
-    if (init_block != nullptr) {
-      body->Add(init_block, delegate()->zone());
-    }
-  }
-}
-
 }  // namespace internal
 }  // namespace v8
 
diff --git a/src/parsing/pattern-rewriter.cc b/src/parsing/pattern-rewriter.cc
index 1831a29..7898f87 100644
--- a/src/parsing/pattern-rewriter.cc
+++ b/src/parsing/pattern-rewriter.cc
@@ -12,7 +12,8 @@
 namespace internal {
 
 void Parser::PatternRewriter::DeclareAndInitializeVariables(
-    Block* block, const DeclarationDescriptor* declaration_descriptor,
+    Parser* parser, Block* block,
+    const DeclarationDescriptor* declaration_descriptor,
     const DeclarationParsingResult::Declaration* declaration,
     ZoneList<const AstRawString*>* names, bool* ok) {
   PatternRewriter rewriter;
@@ -20,7 +21,7 @@
   DCHECK(block->ignore_completion_value());
 
   rewriter.scope_ = declaration_descriptor->scope;
-  rewriter.parser_ = declaration_descriptor->parser;
+  rewriter.parser_ = parser;
   rewriter.context_ = BINDING;
   rewriter.pattern_ = declaration->pattern;
   rewriter.initializer_position_ = declaration->initializer_position;
@@ -36,11 +37,12 @@
 
 void Parser::PatternRewriter::RewriteDestructuringAssignment(
     Parser* parser, RewritableExpression* to_rewrite, Scope* scope) {
-  PatternRewriter rewriter;
-
+  DCHECK(!scope->HasBeenRemoved());
   DCHECK(!to_rewrite->is_rewritten());
 
   bool ok = true;
+
+  PatternRewriter rewriter;
   rewriter.scope_ = scope;
   rewriter.parser_ = parser;
   rewriter.context_ = ASSIGNMENT;
@@ -139,23 +141,16 @@
   // which the variable or constant is declared. Only function variables have
   // an initial value in the declaration (because they are initialized upon
   // entering the function).
-  //
-  // If we have a legacy const declaration, in an inner scope, the proxy
-  // is always bound to the declared variable (independent of possibly
-  // surrounding 'with' statements).
-  // For let/const declarations in harmony mode, we can also immediately
-  // pre-resolve the proxy because it resides in the same scope as the
-  // declaration.
   const AstRawString* name = pattern->raw_name();
-  VariableProxy* proxy = descriptor_->scope->NewUnresolved(
-      factory(), name, parser_->scanner()->location().beg_pos,
+  VariableProxy* proxy = factory()->NewVariableProxy(
+      name, NORMAL_VARIABLE, parser_->scanner()->location().beg_pos,
       parser_->scanner()->location().end_pos);
   Declaration* declaration = factory()->NewVariableDeclaration(
       proxy, descriptor_->scope, descriptor_->declaration_pos);
-  Variable* var = parser_->Declare(declaration, descriptor_->declaration_kind,
-                                   descriptor_->mode,
-                                   DefaultInitializationFlag(descriptor_->mode),
-                                   ok_, descriptor_->hoist_scope);
+  Variable* var = parser_->Declare(
+      declaration, descriptor_->declaration_kind, descriptor_->mode,
+      Variable::DefaultInitializationFlag(descriptor_->mode), ok_,
+      descriptor_->hoist_scope);
   if (!*ok_) return;
   DCHECK_NOT_NULL(var);
   DCHECK(proxy->is_resolved());
@@ -267,12 +262,14 @@
 void Parser::PatternRewriter::VisitRewritableExpression(
     RewritableExpression* node) {
   // If this is not a destructuring assignment...
-  if (!IsAssignmentContext() || !node->expression()->IsAssignment()) {
+  if (!IsAssignmentContext()) {
     // Mark the node as rewritten to prevent redundant rewriting, and
     // perform BindingPattern rewriting
     DCHECK(!node->is_rewritten());
     node->Rewrite(node->expression());
     return Visit(node->expression());
+  } else if (!node->expression()->IsAssignment()) {
+    return Visit(node->expression());
   }
 
   if (node->is_rewritten()) return;
@@ -374,7 +371,7 @@
 
   auto temp = *temp_var = CreateTempVar(current_value_);
   auto iterator = CreateTempVar(parser_->GetIterator(
-      factory()->NewVariableProxy(temp), factory(), kNoSourcePosition));
+      factory()->NewVariableProxy(temp), kNoSourcePosition));
   auto done =
       CreateTempVar(factory()->NewBooleanLiteral(false, kNoSourcePosition));
   auto result = CreateTempVar();
@@ -601,8 +598,9 @@
 
   Expression* closing_condition = factory()->NewUnaryOperation(
       Token::NOT, factory()->NewVariableProxy(done), nopos);
-  parser_->FinalizeIteratorUse(completion, closing_condition, iterator, block_,
-                               target);
+
+  parser_->FinalizeIteratorUse(scope(), completion, closing_condition, iterator,
+                               block_, target);
   block_ = target;
 }
 
diff --git a/src/parsing/preparser.cc b/src/parsing/preparser.cc
index b1bbbf6..88470f7 100644
--- a/src/parsing/preparser.cc
+++ b/src/parsing/preparser.cc
@@ -10,6 +10,7 @@
 #include "src/conversions.h"
 #include "src/globals.h"
 #include "src/list.h"
+#include "src/parsing/duplicate-finder.h"
 #include "src/parsing/parser-base.h"
 #include "src/parsing/preparse-data-format.h"
 #include "src/parsing/preparse-data.h"
@@ -28,34 +29,18 @@
 // thus it must never be used where only a single statement
 // is correct (e.g. an if statement branch w/o braces)!
 
-#define CHECK_OK  ok);                   \
-  if (!*ok) return Statement::Default(); \
+#define CHECK_OK_VALUE(x) ok); \
+  if (!*ok) return x;          \
   ((void)0
 #define DUMMY )  // to make indentation work
 #undef DUMMY
 
-// Used in functions where the return type is not ExpressionT.
-#define CHECK_OK_CUSTOM(x) ok); \
-  if (!*ok) return this->x();   \
-  ((void)0
-#define DUMMY )  // to make indentation work
-#undef DUMMY
+#define CHECK_OK CHECK_OK_VALUE(Expression::Default())
+#define CHECK_OK_VOID CHECK_OK_VALUE(this->Void())
 
-void ParserBaseTraits<PreParser>::ReportMessageAt(
-    Scanner::Location source_location, MessageTemplate::Template message,
-    const char* arg, ParseErrorType error_type) {
-  delegate()->log_->LogMessage(source_location.beg_pos, source_location.end_pos,
-                               message, arg, error_type);
-}
+namespace {
 
-void ParserBaseTraits<PreParser>::ReportMessageAt(
-    Scanner::Location source_location, MessageTemplate::Template message,
-    const AstRawString* arg, ParseErrorType error_type) {
-  UNREACHABLE();
-}
-
-PreParserIdentifier ParserBaseTraits<PreParser>::GetSymbol(
-    Scanner* scanner) const {
+PreParserIdentifier GetSymbolHelper(Scanner* scanner) {
   switch (scanner->current_token()) {
     case Token::ENUM:
       return PreParserIdentifier::Enum();
@@ -86,49 +71,51 @@
   }
 }
 
-PreParserExpression ParserBaseTraits<PreParser>::ExpressionFromString(
-    int pos, Scanner* scanner, PreParserFactory* factory) const {
-  if (scanner->UnescapedLiteralMatches("use strict", 10)) {
-    return PreParserExpression::UseStrictStringLiteral();
+}  // unnamed namespace
+
+PreParserIdentifier PreParser::GetSymbol() const {
+  PreParserIdentifier symbol = GetSymbolHelper(scanner());
+  if (track_unresolved_variables_) {
+    const AstRawString* result = scanner()->CurrentSymbol(ast_value_factory());
+    DCHECK_NOT_NULL(result);
+    symbol.string_ = result;
   }
-  return PreParserExpression::StringLiteral();
+  return symbol;
 }
 
 PreParser::PreParseResult PreParser::PreParseLazyFunction(
-    LanguageMode language_mode, FunctionKind kind, bool has_simple_parameters,
-    bool parsing_module, ParserRecorder* log, Scanner::BookmarkScope* bookmark,
-    int* use_counts) {
+    DeclarationScope* function_scope, bool parsing_module, ParserRecorder* log,
+    bool is_inner_function, bool may_abort, int* use_counts) {
+  DCHECK_EQ(FUNCTION_SCOPE, function_scope->scope_type());
   parsing_module_ = parsing_module;
   log_ = log;
   use_counts_ = use_counts;
-  // Lazy functions always have trivial outer scopes (no with/catch scopes).
+  DCHECK(!track_unresolved_variables_);
+  track_unresolved_variables_ = is_inner_function;
+
+  // The caller passes the function_scope which is not yet inserted into the
+  // scope_state_. All scopes above the function_scope are ignored by the
+  // PreParser.
   DCHECK_NULL(scope_state_);
-  DeclarationScope* top_scope = NewScriptScope();
-  FunctionState top_state(&function_state_, &scope_state_, top_scope,
-                          kNormalFunction);
-  scope()->SetLanguageMode(language_mode);
-  DeclarationScope* function_scope = NewFunctionScope(kind);
-  if (!has_simple_parameters) function_scope->SetHasNonSimpleParameters();
-  FunctionState function_state(&function_state_, &scope_state_, function_scope,
-                               kind);
+  FunctionState function_state(&function_state_, &scope_state_, function_scope);
   DCHECK_EQ(Token::LBRACE, scanner()->current_token());
   bool ok = true;
   int start_position = peek_position();
-  ParseLazyFunctionLiteralBody(&ok, bookmark);
+  LazyParsingResult result = ParseLazyFunctionLiteralBody(may_abort, &ok);
   use_counts_ = nullptr;
-  if (bookmark && bookmark->HasBeenReset()) {
-    // Do nothing, as we've just aborted scanning this function.
+  track_unresolved_variables_ = false;
+  if (result == kLazyParsingAborted) {
+    return kPreParseAbort;
   } else if (stack_overflow()) {
     return kPreParseStackOverflow;
   } else if (!ok) {
     ReportUnexpectedToken(scanner()->current_token());
   } else {
     DCHECK_EQ(Token::RBRACE, scanner()->peek());
-    if (is_strict(scope()->language_mode())) {
+    if (is_strict(function_scope->language_mode())) {
       int end_pos = scanner()->location().end_pos;
       CheckStrictOctalLiteral(start_position, end_pos, &ok);
-      CheckDecimalLiteralWithLeadingZero(use_counts, start_position, end_pos);
-      if (!ok) return kPreParseSuccess;
+      CheckDecimalLiteralWithLeadingZero(start_position, end_pos);
     }
   }
   return kPreParseSuccess;
@@ -148,908 +135,6 @@
 // That means that contextual checks (like a label being declared where
 // it is used) are generally omitted.
 
-
-PreParser::Statement PreParser::ParseStatementListItem(bool* ok) {
-  // ECMA 262 6th Edition
-  // StatementListItem[Yield, Return] :
-  //   Statement[?Yield, ?Return]
-  //   Declaration[?Yield]
-  //
-  // Declaration[Yield] :
-  //   HoistableDeclaration[?Yield]
-  //   ClassDeclaration[?Yield]
-  //   LexicalDeclaration[In, ?Yield]
-  //
-  // HoistableDeclaration[Yield, Default] :
-  //   FunctionDeclaration[?Yield, ?Default]
-  //   GeneratorDeclaration[?Yield, ?Default]
-  //
-  // LexicalDeclaration[In, Yield] :
-  //   LetOrConst BindingList[?In, ?Yield] ;
-
-  switch (peek()) {
-    case Token::FUNCTION:
-      return ParseHoistableDeclaration(ok);
-    case Token::CLASS:
-      return ParseClassDeclaration(ok);
-    case Token::CONST:
-      return ParseVariableStatement(kStatementListItem, ok);
-    case Token::LET:
-      if (IsNextLetKeyword()) {
-        return ParseVariableStatement(kStatementListItem, ok);
-      }
-      break;
-    case Token::ASYNC:
-      if (allow_harmony_async_await() && PeekAhead() == Token::FUNCTION &&
-          !scanner()->HasAnyLineTerminatorAfterNext()) {
-        Consume(Token::ASYNC);
-        return ParseAsyncFunctionDeclaration(ok);
-      }
-    /* falls through */
-    default:
-      break;
-  }
-  return ParseStatement(kAllowLabelledFunctionStatement, ok);
-}
-
-
-void PreParser::ParseStatementList(int end_token, bool* ok,
-                                   Scanner::BookmarkScope* bookmark) {
-  // SourceElements ::
-  //   (Statement)* <end_token>
-
-  // Bookkeeping for trial parse if bookmark is set:
-  DCHECK_IMPLIES(bookmark, bookmark->HasBeenSet());
-  bool maybe_reset = bookmark != nullptr;
-  int count_statements = 0;
-
-  bool directive_prologue = true;
-  while (peek() != end_token) {
-    if (directive_prologue && peek() != Token::STRING) {
-      directive_prologue = false;
-    }
-    bool starts_with_identifier = peek() == Token::IDENTIFIER;
-    Scanner::Location token_loc = scanner()->peek_location();
-    Statement statement = ParseStatementListItem(CHECK_OK_CUSTOM(Void));
-
-    if (directive_prologue) {
-      bool use_strict_found = statement.IsUseStrictLiteral();
-
-      if (use_strict_found) {
-        scope()->SetLanguageMode(
-            static_cast<LanguageMode>(scope()->language_mode() | STRICT));
-      } else if (!statement.IsStringLiteral()) {
-        directive_prologue = false;
-      }
-
-      if (use_strict_found && !scope()->HasSimpleParameters()) {
-        // TC39 deemed "use strict" directives to be an error when occurring
-        // in the body of a function with non-simple parameter list, on
-        // 29/7/2015. https://goo.gl/ueA7Ln
-        ReportMessageAt(token_loc,
-                        MessageTemplate::kIllegalLanguageModeDirective,
-                        "use strict");
-        *ok = false;
-        return;
-      }
-    }
-
-    // If we're allowed to reset to a bookmark, we will do so when we see a long
-    // and trivial function.
-    // Our current definition of 'long and trivial' is:
-    // - over 200 statements
-    // - all starting with an identifier (i.e., no if, for, while, etc.)
-    if (maybe_reset && (!starts_with_identifier ||
-                        ++count_statements > kLazyParseTrialLimit)) {
-      if (count_statements > kLazyParseTrialLimit) {
-        bookmark->Reset();
-        return;
-      }
-      maybe_reset = false;
-    }
-  }
-}
-
-
-PreParser::Statement PreParser::ParseStatement(
-    AllowLabelledFunctionStatement allow_function, bool* ok) {
-  // Statement ::
-  //   EmptyStatement
-  //   ...
-
-  if (peek() == Token::SEMICOLON) {
-    Next();
-    return Statement::Default();
-  }
-  return ParseSubStatement(allow_function, ok);
-}
-
-PreParser::Statement PreParser::ParseScopedStatement(bool legacy, bool* ok) {
-  if (is_strict(language_mode()) || peek() != Token::FUNCTION ||
-      (legacy && allow_harmony_restrictive_declarations())) {
-    return ParseSubStatement(kDisallowLabelledFunctionStatement, ok);
-  } else {
-    BlockState block_state(&scope_state_);
-    return ParseFunctionDeclaration(ok);
-  }
-}
-
-PreParser::Statement PreParser::ParseSubStatement(
-    AllowLabelledFunctionStatement allow_function, bool* ok) {
-  // Statement ::
-  //   Block
-  //   VariableStatement
-  //   EmptyStatement
-  //   ExpressionStatement
-  //   IfStatement
-  //   IterationStatement
-  //   ContinueStatement
-  //   BreakStatement
-  //   ReturnStatement
-  //   WithStatement
-  //   LabelledStatement
-  //   SwitchStatement
-  //   ThrowStatement
-  //   TryStatement
-  //   DebuggerStatement
-
-  // Note: Since labels can only be used by 'break' and 'continue'
-  // statements, which themselves are only valid within blocks,
-  // iterations or 'switch' statements (i.e., BreakableStatements),
-  // labels can be simply ignored in all other cases; except for
-  // trivial labeled break statements 'label: break label' which is
-  // parsed into an empty statement.
-
-  // Keep the source position of the statement
-  switch (peek()) {
-    case Token::LBRACE:
-      return ParseBlock(ok);
-
-    case Token::SEMICOLON:
-      Next();
-      return Statement::Default();
-
-    case Token::IF:
-      return ParseIfStatement(ok);
-
-    case Token::DO:
-      return ParseDoWhileStatement(ok);
-
-    case Token::WHILE:
-      return ParseWhileStatement(ok);
-
-    case Token::FOR:
-      return ParseForStatement(ok);
-
-    case Token::CONTINUE:
-      return ParseContinueStatement(ok);
-
-    case Token::BREAK:
-      return ParseBreakStatement(ok);
-
-    case Token::RETURN:
-      return ParseReturnStatement(ok);
-
-    case Token::WITH:
-      return ParseWithStatement(ok);
-
-    case Token::SWITCH:
-      return ParseSwitchStatement(ok);
-
-    case Token::THROW:
-      return ParseThrowStatement(ok);
-
-    case Token::TRY:
-      return ParseTryStatement(ok);
-
-    case Token::FUNCTION:
-      // FunctionDeclaration only allowed as a StatementListItem, not in
-      // an arbitrary Statement position. Exceptions such as
-      // ES#sec-functiondeclarations-in-ifstatement-statement-clauses
-      // are handled by calling ParseScopedStatement rather than
-      // ParseSubStatement directly.
-      ReportMessageAt(scanner()->peek_location(),
-                      is_strict(language_mode())
-                          ? MessageTemplate::kStrictFunction
-                          : MessageTemplate::kSloppyFunction);
-      *ok = false;
-      return Statement::Default();
-
-    case Token::DEBUGGER:
-      return ParseDebuggerStatement(ok);
-
-    case Token::VAR:
-      return ParseVariableStatement(kStatement, ok);
-
-    default:
-      return ParseExpressionOrLabelledStatement(allow_function, ok);
-  }
-}
-
-PreParser::Statement PreParser::ParseHoistableDeclaration(
-    int pos, ParseFunctionFlags flags, bool* ok) {
-  const bool is_generator = flags & ParseFunctionFlags::kIsGenerator;
-  const bool is_async = flags & ParseFunctionFlags::kIsAsync;
-  DCHECK(!is_generator || !is_async);
-
-  bool is_strict_reserved = false;
-  Identifier name = ParseIdentifierOrStrictReservedWord(
-      &is_strict_reserved, CHECK_OK);
-
-  ParseFunctionLiteral(name, scanner()->location(),
-                       is_strict_reserved ? kFunctionNameIsStrictReserved
-                                          : kFunctionNameValidityUnknown,
-                       is_generator ? FunctionKind::kGeneratorFunction
-                                    : is_async ? FunctionKind::kAsyncFunction
-                                               : FunctionKind::kNormalFunction,
-                       pos, FunctionLiteral::kDeclaration, language_mode(),
-                       CHECK_OK);
-  return Statement::FunctionDeclaration();
-}
-
-PreParser::Statement PreParser::ParseAsyncFunctionDeclaration(bool* ok) {
-  // AsyncFunctionDeclaration ::
-  //   async [no LineTerminator here] function BindingIdentifier[Await]
-  //       ( FormalParameters[Await] ) { AsyncFunctionBody }
-  DCHECK_EQ(scanner()->current_token(), Token::ASYNC);
-  int pos = position();
-  Expect(Token::FUNCTION, CHECK_OK);
-  ParseFunctionFlags flags = ParseFunctionFlags::kIsAsync;
-  return ParseHoistableDeclaration(pos, flags, ok);
-}
-
-PreParser::Statement PreParser::ParseHoistableDeclaration(bool* ok) {
-  // FunctionDeclaration ::
-  //   'function' Identifier '(' FormalParameterListopt ')' '{' FunctionBody '}'
-  // GeneratorDeclaration ::
-  //   'function' '*' Identifier '(' FormalParameterListopt ')'
-  //      '{' FunctionBody '}'
-
-  Expect(Token::FUNCTION, CHECK_OK);
-  int pos = position();
-  ParseFunctionFlags flags = ParseFunctionFlags::kIsNormal;
-  if (Check(Token::MUL)) {
-    flags |= ParseFunctionFlags::kIsGenerator;
-  }
-  return ParseHoistableDeclaration(pos, flags, ok);
-}
-
-
-PreParser::Statement PreParser::ParseClassDeclaration(bool* ok) {
-  Expect(Token::CLASS, CHECK_OK);
-
-  int pos = position();
-  bool is_strict_reserved = false;
-  Identifier name =
-      ParseIdentifierOrStrictReservedWord(&is_strict_reserved, CHECK_OK);
-  ParseClassLiteral(nullptr, name, scanner()->location(), is_strict_reserved,
-                    pos, CHECK_OK);
-  return Statement::Default();
-}
-
-
-PreParser::Statement PreParser::ParseBlock(bool* ok) {
-  // Block ::
-  //   '{' StatementList '}'
-
-  Expect(Token::LBRACE, CHECK_OK);
-  Statement final = Statement::Default();
-  {
-    BlockState block_state(&scope_state_);
-    while (peek() != Token::RBRACE) {
-      final = ParseStatementListItem(CHECK_OK);
-    }
-  }
-  Expect(Token::RBRACE, ok);
-  return final;
-}
-
-
-PreParser::Statement PreParser::ParseVariableStatement(
-    VariableDeclarationContext var_context,
-    bool* ok) {
-  // VariableStatement ::
-  //   VariableDeclarations ';'
-
-  Statement result = ParseVariableDeclarations(
-      var_context, nullptr, nullptr, nullptr, nullptr, nullptr, CHECK_OK);
-  ExpectSemicolon(CHECK_OK);
-  return result;
-}
-
-
-// If the variable declaration declares exactly one non-const
-// variable, then *var is set to that variable. In all other cases,
-// *var is untouched; in particular, it is the caller's responsibility
-// to initialize it properly. This mechanism is also used for the parsing
-// of 'for-in' loops.
-PreParser::Statement PreParser::ParseVariableDeclarations(
-    VariableDeclarationContext var_context, int* num_decl, bool* is_lexical,
-    bool* is_binding_pattern, Scanner::Location* first_initializer_loc,
-    Scanner::Location* bindings_loc, bool* ok) {
-  // VariableDeclarations ::
-  //   ('var' | 'const') (Identifier ('=' AssignmentExpression)?)+[',']
-  //
-  // The ES6 Draft Rev3 specifies the following grammar for const declarations
-  //
-  // ConstDeclaration ::
-  //   const ConstBinding (',' ConstBinding)* ';'
-  // ConstBinding ::
-  //   Identifier '=' AssignmentExpression
-  //
-  // TODO(ES6):
-  // ConstBinding ::
-  //   BindingPattern '=' AssignmentExpression
-  bool require_initializer = false;
-  bool lexical = false;
-  bool is_pattern = false;
-  if (peek() == Token::VAR) {
-    Consume(Token::VAR);
-  } else if (peek() == Token::CONST) {
-    // TODO(ES6): The ES6 Draft Rev4 section 12.2.2 reads:
-    //
-    // ConstDeclaration : const ConstBinding (',' ConstBinding)* ';'
-    //
-    // * It is a Syntax Error if the code that matches this production is not
-    //   contained in extended code.
-    //
-    // However disallowing const in sloppy mode will break compatibility with
-    // existing pages. Therefore we keep allowing const with the old
-    // non-harmony semantics in sloppy mode.
-    Consume(Token::CONST);
-    DCHECK(var_context != kStatement);
-    require_initializer = true;
-    lexical = true;
-  } else if (peek() == Token::LET) {
-    Consume(Token::LET);
-    DCHECK(var_context != kStatement);
-    lexical = true;
-  } else {
-    *ok = false;
-    return Statement::Default();
-  }
-
-  // The scope of a var/const declared variable anywhere inside a function
-  // is the entire function (ECMA-262, 3rd, 10.1.3, and 12.2). The scope
-  // of a let declared variable is the scope of the immediately enclosing
-  // block.
-  int nvars = 0;  // the number of variables declared
-  int bindings_start = peek_position();
-  do {
-    // Parse binding pattern.
-    if (nvars > 0) Consume(Token::COMMA);
-    int decl_pos = peek_position();
-    PreParserExpression pattern = PreParserExpression::Default();
-    {
-      ExpressionClassifier pattern_classifier(this);
-      pattern = ParsePrimaryExpression(&pattern_classifier, CHECK_OK);
-
-      ValidateBindingPattern(&pattern_classifier, CHECK_OK);
-      if (lexical) {
-        ValidateLetPattern(&pattern_classifier, CHECK_OK);
-      }
-    }
-
-    is_pattern = pattern.IsObjectLiteral() || pattern.IsArrayLiteral();
-
-    Scanner::Location variable_loc = scanner()->location();
-    nvars++;
-    if (Check(Token::ASSIGN)) {
-      ExpressionClassifier classifier(this);
-      ParseAssignmentExpression(var_context != kForStatement, &classifier,
-                                CHECK_OK);
-      ValidateExpression(&classifier, CHECK_OK);
-
-      variable_loc.end_pos = scanner()->location().end_pos;
-      if (first_initializer_loc && !first_initializer_loc->IsValid()) {
-        *first_initializer_loc = variable_loc;
-      }
-    } else if ((require_initializer || is_pattern) &&
-               (var_context != kForStatement || !PeekInOrOf())) {
-      ReportMessageAt(
-          Scanner::Location(decl_pos, scanner()->location().end_pos),
-          MessageTemplate::kDeclarationMissingInitializer,
-          is_pattern ? "destructuring" : "const");
-      *ok = false;
-      return Statement::Default();
-    }
-  } while (peek() == Token::COMMA);
-
-  if (bindings_loc) {
-    *bindings_loc =
-        Scanner::Location(bindings_start, scanner()->location().end_pos);
-  }
-
-  if (num_decl != nullptr) *num_decl = nvars;
-  if (is_lexical != nullptr) *is_lexical = lexical;
-  if (is_binding_pattern != nullptr) *is_binding_pattern = is_pattern;
-  return Statement::Default();
-}
-
-PreParser::Statement PreParser::ParseFunctionDeclaration(bool* ok) {
-  Consume(Token::FUNCTION);
-  int pos = position();
-  ParseFunctionFlags flags = ParseFunctionFlags::kIsNormal;
-  if (Check(Token::MUL)) {
-    flags |= ParseFunctionFlags::kIsGenerator;
-    if (allow_harmony_restrictive_declarations()) {
-      ReportMessageAt(scanner()->location(),
-                      MessageTemplate::kGeneratorInLegacyContext);
-      *ok = false;
-      return Statement::Default();
-    }
-  }
-  return ParseHoistableDeclaration(pos, flags, ok);
-}
-
-PreParser::Statement PreParser::ParseExpressionOrLabelledStatement(
-    AllowLabelledFunctionStatement allow_function, bool* ok) {
-  // ExpressionStatement | LabelledStatement ::
-  //   Expression ';'
-  //   Identifier ':' Statement
-
-  switch (peek()) {
-    case Token::FUNCTION:
-    case Token::LBRACE:
-      UNREACHABLE();  // Always handled by the callers.
-    case Token::CLASS:
-      ReportUnexpectedToken(Next());
-      *ok = false;
-      return Statement::Default();
-
-    default:
-      break;
-  }
-
-  bool starts_with_identifier = peek_any_identifier();
-  ExpressionClassifier classifier(this);
-  Expression expr = ParseExpression(true, &classifier, CHECK_OK);
-  ValidateExpression(&classifier, CHECK_OK);
-
-  // Even if the expression starts with an identifier, it is not necessarily an
-  // identifier. For example, "foo + bar" starts with an identifier but is not
-  // an identifier.
-  if (starts_with_identifier && expr.IsIdentifier() && peek() == Token::COLON) {
-    // Expression is a single identifier, and not, e.g., a parenthesized
-    // identifier.
-    DCHECK(!expr.AsIdentifier().IsEnum());
-    DCHECK(!parsing_module_ || !expr.AsIdentifier().IsAwait());
-    DCHECK(is_sloppy(language_mode()) ||
-           !IsFutureStrictReserved(expr.AsIdentifier()));
-    Consume(Token::COLON);
-    // ES#sec-labelled-function-declarations Labelled Function Declarations
-    if (peek() == Token::FUNCTION && is_sloppy(language_mode())) {
-      if (allow_function == kAllowLabelledFunctionStatement) {
-        return ParseFunctionDeclaration(ok);
-      } else {
-        return ParseScopedStatement(true, ok);
-      }
-    }
-    Statement statement =
-        ParseStatement(kDisallowLabelledFunctionStatement, ok);
-    return statement.IsJumpStatement() ? Statement::Default() : statement;
-    // Preparsing is disabled for extensions (because the extension details
-    // aren't passed to lazily compiled functions), so we don't
-    // accept "native function" in the preparser.
-  }
-  // Parsed expression statement.
-  ExpectSemicolon(CHECK_OK);
-  return Statement::ExpressionStatement(expr);
-}
-
-
-PreParser::Statement PreParser::ParseIfStatement(bool* ok) {
-  // IfStatement ::
-  //   'if' '(' Expression ')' Statement ('else' Statement)?
-
-  Expect(Token::IF, CHECK_OK);
-  Expect(Token::LPAREN, CHECK_OK);
-  ParseExpression(true, CHECK_OK);
-  Expect(Token::RPAREN, CHECK_OK);
-  Statement stat = ParseScopedStatement(false, CHECK_OK);
-  if (peek() == Token::ELSE) {
-    Next();
-    Statement else_stat = ParseScopedStatement(false, CHECK_OK);
-    stat = (stat.IsJumpStatement() && else_stat.IsJumpStatement()) ?
-        Statement::Jump() : Statement::Default();
-  } else {
-    stat = Statement::Default();
-  }
-  return stat;
-}
-
-
-PreParser::Statement PreParser::ParseContinueStatement(bool* ok) {
-  // ContinueStatement ::
-  //   'continue' [no line terminator] Identifier? ';'
-
-  Expect(Token::CONTINUE, CHECK_OK);
-  Token::Value tok = peek();
-  if (!scanner()->HasAnyLineTerminatorBeforeNext() &&
-      tok != Token::SEMICOLON &&
-      tok != Token::RBRACE &&
-      tok != Token::EOS) {
-    // ECMA allows "eval" or "arguments" as labels even in strict mode.
-    ParseIdentifier(kAllowRestrictedIdentifiers, CHECK_OK);
-  }
-  ExpectSemicolon(CHECK_OK);
-  return Statement::Jump();
-}
-
-
-PreParser::Statement PreParser::ParseBreakStatement(bool* ok) {
-  // BreakStatement ::
-  //   'break' [no line terminator] Identifier? ';'
-
-  Expect(Token::BREAK, CHECK_OK);
-  Token::Value tok = peek();
-  if (!scanner()->HasAnyLineTerminatorBeforeNext() &&
-      tok != Token::SEMICOLON &&
-      tok != Token::RBRACE &&
-      tok != Token::EOS) {
-    // ECMA allows "eval" or "arguments" as labels even in strict mode.
-    ParseIdentifier(kAllowRestrictedIdentifiers, CHECK_OK);
-  }
-  ExpectSemicolon(CHECK_OK);
-  return Statement::Jump();
-}
-
-
-PreParser::Statement PreParser::ParseReturnStatement(bool* ok) {
-  // ReturnStatement ::
-  //   'return' [no line terminator] Expression? ';'
-
-  // Consume the return token. It is necessary to do before
-  // reporting any errors on it, because of the way errors are
-  // reported (underlining).
-  Expect(Token::RETURN, CHECK_OK);
-
-  // An ECMAScript program is considered syntactically incorrect if it
-  // contains a return statement that is not within the body of a
-  // function. See ECMA-262, section 12.9, page 67.
-  // This is not handled during preparsing.
-
-  Token::Value tok = peek();
-  if (!scanner()->HasAnyLineTerminatorBeforeNext() &&
-      tok != Token::SEMICOLON &&
-      tok != Token::RBRACE &&
-      tok != Token::EOS) {
-    // Because of the return code rewriting that happens in case of a subclass
-    // constructor we don't want to accept tail calls, therefore we don't set
-    // ReturnExprScope to kInsideValidReturnStatement here.
-    ReturnExprContext return_expr_context =
-        IsSubclassConstructor(function_state_->kind())
-            ? function_state_->return_expr_context()
-            : ReturnExprContext::kInsideValidReturnStatement;
-
-    ReturnExprScope maybe_allow_tail_calls(function_state_,
-                                           return_expr_context);
-    ParseExpression(true, CHECK_OK);
-  }
-  ExpectSemicolon(CHECK_OK);
-  return Statement::Jump();
-}
-
-
-PreParser::Statement PreParser::ParseWithStatement(bool* ok) {
-  // WithStatement ::
-  //   'with' '(' Expression ')' Statement
-  Expect(Token::WITH, CHECK_OK);
-  if (is_strict(language_mode())) {
-    ReportMessageAt(scanner()->location(), MessageTemplate::kStrictWith);
-    *ok = false;
-    return Statement::Default();
-  }
-  Expect(Token::LPAREN, CHECK_OK);
-  ParseExpression(true, CHECK_OK);
-  Expect(Token::RPAREN, CHECK_OK);
-
-  Scope* with_scope = NewScope(WITH_SCOPE);
-  BlockState block_state(&scope_state_, with_scope);
-  ParseScopedStatement(true, CHECK_OK);
-  return Statement::Default();
-}
-
-
-PreParser::Statement PreParser::ParseSwitchStatement(bool* ok) {
-  // SwitchStatement ::
-  //   'switch' '(' Expression ')' '{' CaseClause* '}'
-
-  Expect(Token::SWITCH, CHECK_OK);
-  Expect(Token::LPAREN, CHECK_OK);
-  ParseExpression(true, CHECK_OK);
-  Expect(Token::RPAREN, CHECK_OK);
-
-  {
-    BlockState cases_block_state(&scope_state_);
-    Expect(Token::LBRACE, CHECK_OK);
-    Token::Value token = peek();
-    while (token != Token::RBRACE) {
-      if (token == Token::CASE) {
-        Expect(Token::CASE, CHECK_OK);
-        ParseExpression(true, CHECK_OK);
-      } else {
-        Expect(Token::DEFAULT, CHECK_OK);
-      }
-      Expect(Token::COLON, CHECK_OK);
-      token = peek();
-      Statement statement = Statement::Jump();
-      while (token != Token::CASE &&
-             token != Token::DEFAULT &&
-             token != Token::RBRACE) {
-        statement = ParseStatementListItem(CHECK_OK);
-        token = peek();
-      }
-    }
-  }
-  Expect(Token::RBRACE, ok);
-  return Statement::Default();
-}
-
-
-PreParser::Statement PreParser::ParseDoWhileStatement(bool* ok) {
-  // DoStatement ::
-  //   'do' Statement 'while' '(' Expression ')' ';'
-
-  Expect(Token::DO, CHECK_OK);
-  ParseScopedStatement(true, CHECK_OK);
-  Expect(Token::WHILE, CHECK_OK);
-  Expect(Token::LPAREN, CHECK_OK);
-  ParseExpression(true, CHECK_OK);
-  Expect(Token::RPAREN, ok);
-  if (peek() == Token::SEMICOLON) Consume(Token::SEMICOLON);
-  return Statement::Default();
-}
-
-
-PreParser::Statement PreParser::ParseWhileStatement(bool* ok) {
-  // WhileStatement ::
-  //   'while' '(' Expression ')' Statement
-
-  Expect(Token::WHILE, CHECK_OK);
-  Expect(Token::LPAREN, CHECK_OK);
-  ParseExpression(true, CHECK_OK);
-  Expect(Token::RPAREN, CHECK_OK);
-  ParseScopedStatement(true, ok);
-  return Statement::Default();
-}
-
-
-PreParser::Statement PreParser::ParseForStatement(bool* ok) {
-  // ForStatement ::
-  //   'for' '(' Expression? ';' Expression? ';' Expression? ')' Statement
-
-  // Create an in-between scope for let-bound iteration variables.
-  bool has_lexical = false;
-
-  BlockState block_state(&scope_state_);
-  Expect(Token::FOR, CHECK_OK);
-  Expect(Token::LPAREN, CHECK_OK);
-  if (peek() != Token::SEMICOLON) {
-    ForEachStatement::VisitMode mode;
-    if (peek() == Token::VAR || peek() == Token::CONST ||
-        (peek() == Token::LET && IsNextLetKeyword())) {
-      int decl_count;
-      bool is_lexical;
-      bool is_binding_pattern;
-      Scanner::Location first_initializer_loc = Scanner::Location::invalid();
-      Scanner::Location bindings_loc = Scanner::Location::invalid();
-      ParseVariableDeclarations(kForStatement, &decl_count, &is_lexical,
-                                &is_binding_pattern, &first_initializer_loc,
-                                &bindings_loc, CHECK_OK);
-      if (is_lexical) has_lexical = true;
-      if (CheckInOrOf(&mode, ok)) {
-        if (!*ok) return Statement::Default();
-        if (decl_count != 1) {
-          ReportMessageAt(bindings_loc,
-                          MessageTemplate::kForInOfLoopMultiBindings,
-                          ForEachStatement::VisitModeString(mode));
-          *ok = false;
-          return Statement::Default();
-        }
-        if (first_initializer_loc.IsValid() &&
-            (is_strict(language_mode()) || mode == ForEachStatement::ITERATE ||
-             is_lexical || is_binding_pattern || allow_harmony_for_in())) {
-          // Only increment the use count if we would have let this through
-          // without the flag.
-          if (use_counts_ != nullptr && allow_harmony_for_in()) {
-            ++use_counts_[v8::Isolate::kForInInitializer];
-          }
-          ReportMessageAt(first_initializer_loc,
-                          MessageTemplate::kForInOfLoopInitializer,
-                          ForEachStatement::VisitModeString(mode));
-          *ok = false;
-          return Statement::Default();
-        }
-
-        if (mode == ForEachStatement::ITERATE) {
-          ExpressionClassifier classifier(this);
-          ParseAssignmentExpression(true, &classifier, CHECK_OK);
-          RewriteNonPattern(&classifier, CHECK_OK);
-        } else {
-          ParseExpression(true, CHECK_OK);
-        }
-
-        Expect(Token::RPAREN, CHECK_OK);
-        {
-          ReturnExprScope no_tail_calls(function_state_,
-                                        ReturnExprContext::kInsideForInOfBody);
-          ParseScopedStatement(true, CHECK_OK);
-        }
-        return Statement::Default();
-      }
-    } else {
-      int lhs_beg_pos = peek_position();
-      ExpressionClassifier classifier(this);
-      Expression lhs = ParseExpression(false, &classifier, CHECK_OK);
-      int lhs_end_pos = scanner()->location().end_pos;
-      bool is_for_each = CheckInOrOf(&mode, CHECK_OK);
-      bool is_destructuring = is_for_each &&
-                              (lhs->IsArrayLiteral() || lhs->IsObjectLiteral());
-
-      if (is_destructuring) {
-        ValidateAssignmentPattern(&classifier, CHECK_OK);
-      } else {
-        ValidateExpression(&classifier, CHECK_OK);
-      }
-
-      if (is_for_each) {
-        if (!is_destructuring) {
-          lhs = CheckAndRewriteReferenceExpression(
-              lhs, lhs_beg_pos, lhs_end_pos, MessageTemplate::kInvalidLhsInFor,
-              kSyntaxError, CHECK_OK);
-        }
-
-        if (mode == ForEachStatement::ITERATE) {
-          ExpressionClassifier classifier(this);
-          ParseAssignmentExpression(true, &classifier, CHECK_OK);
-          RewriteNonPattern(&classifier, CHECK_OK);
-        } else {
-          ParseExpression(true, CHECK_OK);
-        }
-
-        Expect(Token::RPAREN, CHECK_OK);
-        {
-          BlockState block_state(&scope_state_);
-          ParseScopedStatement(true, CHECK_OK);
-        }
-        return Statement::Default();
-      }
-    }
-  }
-
-  // Parsed initializer at this point.
-  Expect(Token::SEMICOLON, CHECK_OK);
-
-  // If there are let bindings, then condition and the next statement of the
-  // for loop must be parsed in a new scope.
-  Scope* inner_scope = scope();
-  // TODO(verwaest): Allocate this through a ScopeState as well.
-  if (has_lexical) inner_scope = NewScopeWithParent(inner_scope, BLOCK_SCOPE);
-
-  {
-    BlockState block_state(&scope_state_, inner_scope);
-
-    if (peek() != Token::SEMICOLON) {
-      ParseExpression(true, CHECK_OK);
-    }
-    Expect(Token::SEMICOLON, CHECK_OK);
-
-    if (peek() != Token::RPAREN) {
-      ParseExpression(true, CHECK_OK);
-    }
-    Expect(Token::RPAREN, CHECK_OK);
-
-    ParseScopedStatement(true, ok);
-  }
-  return Statement::Default();
-}
-
-
-PreParser::Statement PreParser::ParseThrowStatement(bool* ok) {
-  // ThrowStatement ::
-  //   'throw' [no line terminator] Expression ';'
-
-  Expect(Token::THROW, CHECK_OK);
-  if (scanner()->HasAnyLineTerminatorBeforeNext()) {
-    ReportMessageAt(scanner()->location(), MessageTemplate::kNewlineAfterThrow);
-    *ok = false;
-    return Statement::Default();
-  }
-  ParseExpression(true, CHECK_OK);
-  ExpectSemicolon(ok);
-  return Statement::Jump();
-}
-
-
-PreParser::Statement PreParser::ParseTryStatement(bool* ok) {
-  // TryStatement ::
-  //   'try' Block Catch
-  //   'try' Block Finally
-  //   'try' Block Catch Finally
-  //
-  // Catch ::
-  //   'catch' '(' Identifier ')' Block
-  //
-  // Finally ::
-  //   'finally' Block
-
-  Expect(Token::TRY, CHECK_OK);
-
-  {
-    ReturnExprScope no_tail_calls(function_state_,
-                                  ReturnExprContext::kInsideTryBlock);
-    ParseBlock(CHECK_OK);
-  }
-
-  Token::Value tok = peek();
-  if (tok != Token::CATCH && tok != Token::FINALLY) {
-    ReportMessageAt(scanner()->location(), MessageTemplate::kNoCatchOrFinally);
-    *ok = false;
-    return Statement::Default();
-  }
-  TailCallExpressionList tail_call_expressions_in_catch_block(zone());
-  bool catch_block_exists = false;
-  if (tok == Token::CATCH) {
-    Consume(Token::CATCH);
-    Expect(Token::LPAREN, CHECK_OK);
-    Scope* catch_scope = NewScope(CATCH_SCOPE);
-    ExpressionClassifier pattern_classifier(this);
-    ParsePrimaryExpression(&pattern_classifier, CHECK_OK);
-    ValidateBindingPattern(&pattern_classifier, CHECK_OK);
-    Expect(Token::RPAREN, CHECK_OK);
-    {
-      CollectExpressionsInTailPositionToListScope
-          collect_tail_call_expressions_scope(
-              function_state_, &tail_call_expressions_in_catch_block);
-      BlockState block_state(&scope_state_, catch_scope);
-      {
-        BlockState block_state(&scope_state_);
-        ParseBlock(CHECK_OK);
-      }
-    }
-    catch_block_exists = true;
-    tok = peek();
-  }
-  if (tok == Token::FINALLY) {
-    Consume(Token::FINALLY);
-    ParseBlock(CHECK_OK);
-    if (FLAG_harmony_explicit_tailcalls && catch_block_exists &&
-        tail_call_expressions_in_catch_block.has_explicit_tail_calls()) {
-      // TODO(ishell): update chapter number.
-      // ES8 XX.YY.ZZ
-      ReportMessageAt(tail_call_expressions_in_catch_block.location(),
-                      MessageTemplate::kUnexpectedTailCallInCatchBlock);
-      *ok = false;
-      return Statement::Default();
-    }
-  }
-  return Statement::Default();
-}
-
-
-PreParser::Statement PreParser::ParseDebuggerStatement(bool* ok) {
-  // In ECMA-262 'debugger' is defined as a reserved keyword. In some browser
-  // contexts this is used as a statement which invokes the debugger as if a
-  // break point is present.
-  // DebuggerStatement ::
-  //   'debugger' ';'
-
-  Expect(Token::DEBUGGER, CHECK_OK);
-  ExpectSemicolon(ok);
-  return Statement::Default();
-}
-
-
-// Redefinition of CHECK_OK for parsing expressions.
-#undef CHECK_OK
-#define CHECK_OK  ok);                     \
-  if (!*ok) return Expression::Default();  \
-  ((void)0
-#define DUMMY )  // to make indentation work
-#undef DUMMY
-
-
 PreParser::Expression PreParser::ParseFunctionLiteral(
     Identifier function_name, Scanner::Location function_name_location,
     FunctionNameValidity function_name_validity, FunctionKind kind,
@@ -1059,11 +144,11 @@
   //   '(' FormalParameterList? ')' '{' FunctionBody '}'
 
   // Parse function body.
+  PreParserStatementList body;
   bool outer_is_script_scope = scope()->is_script_scope();
   DeclarationScope* function_scope = NewFunctionScope(kind);
   function_scope->SetLanguageMode(language_mode);
-  FunctionState function_state(&function_state_, &scope_state_, function_scope,
-                               kind);
+  FunctionState function_state(&function_state_, &scope_state_, function_scope);
   DuplicateFinder duplicate_finder(scanner()->unicode_cache());
   ExpressionClassifier formals_classifier(this, &duplicate_finder);
 
@@ -1071,7 +156,7 @@
   int start_position = scanner()->location().beg_pos;
   function_scope->set_start_position(start_position);
   PreParserFormalParameters formals(function_scope);
-  ParseFormalParameterList(&formals, &formals_classifier, CHECK_OK);
+  ParseFormalParameterList(&formals, CHECK_OK);
   Expect(Token::RPAREN, CHECK_OK);
   int formals_end_position = scanner()->location().end_pos;
 
@@ -1085,9 +170,9 @@
 
   Expect(Token::LBRACE, CHECK_OK);
   if (is_lazily_parsed) {
-    ParseLazyFunctionLiteralBody(CHECK_OK);
+    ParseLazyFunctionLiteralBody(false, CHECK_OK);
   } else {
-    ParseStatementList(Token::RBRACE, CHECK_OK);
+    ParseStatementList(body, Token::RBRACE, CHECK_OK);
   }
   Expect(Token::RBRACE, CHECK_OK);
 
@@ -1100,52 +185,24 @@
                     function_name_location, CHECK_OK);
   const bool allow_duplicate_parameters =
       is_sloppy(language_mode) && formals.is_simple && !IsConciseMethod(kind);
-  ValidateFormalParameters(&formals_classifier, language_mode,
-                           allow_duplicate_parameters, CHECK_OK);
+  ValidateFormalParameters(language_mode, allow_duplicate_parameters, CHECK_OK);
 
   if (is_strict(language_mode)) {
     int end_position = scanner()->location().end_pos;
     CheckStrictOctalLiteral(start_position, end_position, CHECK_OK);
-    CheckDecimalLiteralWithLeadingZero(use_counts_, start_position,
-                                       end_position);
+    CheckDecimalLiteralWithLeadingZero(start_position, end_position);
   }
 
   return Expression::Default();
 }
 
-PreParser::Expression PreParser::ParseAsyncFunctionExpression(bool* ok) {
-  // AsyncFunctionDeclaration ::
-  //   async [no LineTerminator here] function ( FormalParameters[Await] )
-  //       { AsyncFunctionBody }
-  //
-  //   async [no LineTerminator here] function BindingIdentifier[Await]
-  //       ( FormalParameters[Await] ) { AsyncFunctionBody }
-  int pos = position();
-  Expect(Token::FUNCTION, CHECK_OK);
-  bool is_strict_reserved = false;
-  Identifier name;
-  FunctionLiteral::FunctionType type = FunctionLiteral::kAnonymousExpression;
-
-  if (peek_any_identifier()) {
-    type = FunctionLiteral::kNamedExpression;
-    name = ParseIdentifierOrStrictReservedWord(FunctionKind::kAsyncFunction,
-                                               &is_strict_reserved, CHECK_OK);
-  }
-
-  ParseFunctionLiteral(name, scanner()->location(),
-                       is_strict_reserved ? kFunctionNameIsStrictReserved
-                                          : kFunctionNameValidityUnknown,
-                       FunctionKind::kAsyncFunction, pos, type, language_mode(),
-                       CHECK_OK);
-  return Expression::Default();
-}
-
-void PreParser::ParseLazyFunctionLiteralBody(bool* ok,
-                                             Scanner::BookmarkScope* bookmark) {
+PreParser::LazyParsingResult PreParser::ParseLazyFunctionLiteralBody(
+    bool may_abort, bool* ok) {
   int body_start = position();
-  ParseStatementList(Token::RBRACE, ok, bookmark);
-  if (!*ok) return;
-  if (bookmark && bookmark->HasBeenReset()) return;
+  PreParserStatementList body;
+  LazyParsingResult result = ParseStatementList(
+      body, Token::RBRACE, may_abort, CHECK_OK_VALUE(kLazyParsingComplete));
+  if (result == kLazyParsingAborted) return result;
 
   // Position right after terminal '}'.
   DCHECK_EQ(Token::RBRACE, scanner()->peek());
@@ -1156,113 +213,45 @@
                     function_state_->materialized_literal_count(),
                     function_state_->expected_property_count(), language_mode(),
                     scope->uses_super_property(), scope->calls_eval());
+  return kLazyParsingComplete;
 }
 
-PreParserExpression PreParser::ParseClassLiteral(
-    ExpressionClassifier* classifier, PreParserIdentifier name,
-    Scanner::Location class_name_location, bool name_is_strict_reserved,
-    int pos, bool* ok) {
-  // All parts of a ClassDeclaration and ClassExpression are strict code.
-  if (name_is_strict_reserved) {
-    ReportMessageAt(class_name_location,
-                    MessageTemplate::kUnexpectedStrictReserved);
-    *ok = false;
-    return EmptyExpression();
+PreParserExpression PreParser::ExpressionFromIdentifier(
+    PreParserIdentifier name, int start_position, int end_position,
+    InferName infer) {
+  if (track_unresolved_variables_) {
+    AstNodeFactory factory(ast_value_factory());
+    // Setting the Zone is necessary because zone_ might be the temp Zone, and
+    // AstValueFactory doesn't know about it.
+    factory.set_zone(zone());
+    DCHECK_NOT_NULL(name.string_);
+    scope()->NewUnresolved(&factory, name.string_, start_position, end_position,
+                           NORMAL_VARIABLE);
   }
-  if (IsEvalOrArguments(name)) {
-    ReportMessageAt(class_name_location, MessageTemplate::kStrictEvalArguments);
-    *ok = false;
-    return EmptyExpression();
-  }
+  return PreParserExpression::FromIdentifier(name);
+}
 
-  LanguageMode class_language_mode = language_mode();
-  BlockState block_state(&scope_state_);
-  scope()->SetLanguageMode(
-      static_cast<LanguageMode>(class_language_mode | STRICT));
-  // TODO(marja): Make PreParser use scope names too.
-  // this->scope()->SetScopeName(name);
+void PreParser::DeclareAndInitializeVariables(
+    PreParserStatement block,
+    const DeclarationDescriptor* declaration_descriptor,
+    const DeclarationParsingResult::Declaration* declaration,
+    ZoneList<const AstRawString*>* names, bool* ok) {
+  if (declaration->pattern.string_) {
+    /* Mimic what Parser does when declaring variables (see
+       Parser::PatternRewriter::VisitVariableProxy).
 
-  bool has_extends = Check(Token::EXTENDS);
-  if (has_extends) {
-    ExpressionClassifier extends_classifier(this);
-    ParseLeftHandSideExpression(&extends_classifier, CHECK_OK);
-    CheckNoTailCallExpressions(&extends_classifier, CHECK_OK);
-    ValidateExpression(&extends_classifier, CHECK_OK);
-    if (classifier != nullptr) {
-      classifier->Accumulate(&extends_classifier,
-                             ExpressionClassifier::ExpressionProductions);
+       var + no initializer -> RemoveUnresolved
+       let + no initializer -> RemoveUnresolved
+       var + initializer -> RemoveUnresolved followed by NewUnresolved
+       let + initializer -> RemoveUnresolved
+    */
+
+    if (declaration->initializer.IsEmpty() ||
+        declaration_descriptor->mode == VariableMode::LET) {
+      declaration_descriptor->scope->RemoveUnresolved(
+          declaration->pattern.string_);
     }
   }
-
-  ClassLiteralChecker checker(this);
-  bool has_seen_constructor = false;
-
-  Expect(Token::LBRACE, CHECK_OK);
-  while (peek() != Token::RBRACE) {
-    if (Check(Token::SEMICOLON)) continue;
-    const bool in_class = true;
-    bool is_computed_name = false;  // Classes do not care about computed
-                                    // property names here.
-    Identifier name;
-    ExpressionClassifier property_classifier(this);
-    ParsePropertyDefinition(
-        &checker, in_class, has_extends, MethodKind::kNormal, &is_computed_name,
-        &has_seen_constructor, &property_classifier, &name, CHECK_OK);
-    ValidateExpression(&property_classifier, CHECK_OK);
-    if (classifier != nullptr) {
-      classifier->Accumulate(&property_classifier,
-                             ExpressionClassifier::ExpressionProductions);
-    }
-  }
-
-  Expect(Token::RBRACE, CHECK_OK);
-
-  return Expression::Default();
-}
-
-
-PreParser::Expression PreParser::ParseV8Intrinsic(bool* ok) {
-  // CallRuntime ::
-  //   '%' Identifier Arguments
-  Expect(Token::MOD, CHECK_OK);
-  if (!allow_natives()) {
-    *ok = false;
-    return Expression::Default();
-  }
-  // Allow "eval" or "arguments" for backward compatibility.
-  ParseIdentifier(kAllowRestrictedIdentifiers, CHECK_OK);
-  Scanner::Location spread_pos;
-  ExpressionClassifier classifier(this);
-  ParseArguments(&spread_pos, &classifier, ok);
-  ValidateExpression(&classifier, CHECK_OK);
-
-  DCHECK(!spread_pos.IsValid());
-
-  return Expression::Default();
-}
-
-
-PreParserExpression PreParser::ParseDoExpression(bool* ok) {
-  // AssignmentExpression ::
-  //     do '{' StatementList '}'
-  Expect(Token::DO, CHECK_OK);
-  Expect(Token::LBRACE, CHECK_OK);
-  while (peek() != Token::RBRACE) {
-    ParseStatementListItem(CHECK_OK);
-  }
-  Expect(Token::RBRACE, CHECK_OK);
-  return PreParserExpression::Default();
-}
-
-void PreParser::ParseAsyncArrowSingleExpressionBody(
-    PreParserStatementList body, bool accept_IN,
-    ExpressionClassifier* classifier, int pos, bool* ok) {
-  scope()->ForceContextAllocation();
-
-  PreParserExpression return_value =
-      ParseAssignmentExpression(accept_IN, classifier, CHECK_OK_CUSTOM(Void));
-
-  body->Add(PreParserStatement::ExpressionStatement(return_value), zone());
 }
 
 #undef CHECK_OK
diff --git a/src/parsing/preparser.h b/src/parsing/preparser.h
index 3f268ee..4b54748 100644
--- a/src/parsing/preparser.h
+++ b/src/parsing/preparser.h
@@ -6,18 +6,18 @@
 #define V8_PARSING_PREPARSER_H
 
 #include "src/ast/scopes.h"
-#include "src/bailout-reason.h"
-#include "src/base/hashmap.h"
-#include "src/messages.h"
-#include "src/parsing/expression-classifier.h"
-#include "src/parsing/func-name-inferrer.h"
 #include "src/parsing/parser-base.h"
-#include "src/parsing/scanner.h"
-#include "src/parsing/token.h"
 
 namespace v8 {
 namespace internal {
 
+// Whereas the Parser generates AST during the recursive descent,
+// the PreParser doesn't create a tree. Instead, it passes around minimal
+// data objects (PreParserExpression, PreParserIdentifier etc.) which contain
+// just enough data for the upper layer functions. PreParserFactory is
+// responsible for creating these dummy objects. It provides a similar kind of
+// interface as AstNodeFactory, so ParserBase doesn't need to care which one is
+// used.
 
 class PreParserIdentifier {
  public:
@@ -25,6 +25,9 @@
   static PreParserIdentifier Default() {
     return PreParserIdentifier(kUnknownIdentifier);
   }
+  static PreParserIdentifier Empty() {
+    return PreParserIdentifier(kEmptyIdentifier);
+  }
   static PreParserIdentifier Eval() {
     return PreParserIdentifier(kEvalIdentifier);
   }
@@ -64,6 +67,7 @@
   static PreParserIdentifier Async() {
     return PreParserIdentifier(kAsyncIdentifier);
   }
+  bool IsEmpty() const { return type_ == kEmptyIdentifier; }
   bool IsEval() const { return type_ == kEvalIdentifier; }
   bool IsArguments() const { return type_ == kArgumentsIdentifier; }
   bool IsEvalOrArguments() const { return IsEval() || IsArguments(); }
@@ -91,6 +95,7 @@
 
  private:
   enum Type {
+    kEmptyIdentifier,
     kUnknownIdentifier,
     kFutureReservedIdentifier,
     kFutureStrictReservedIdentifier,
@@ -107,19 +112,23 @@
     kAsyncIdentifier
   };
 
-  explicit PreParserIdentifier(Type type) : type_(type) {}
+  explicit PreParserIdentifier(Type type) : type_(type), string_(nullptr) {}
   Type type_;
-
+  // Only non-nullptr when PreParser.track_unresolved_variables_ is true.
+  const AstRawString* string_;
   friend class PreParserExpression;
+  friend class PreParser;
 };
 
 
 class PreParserExpression {
  public:
-  PreParserExpression() : code_(TypeField::encode(kExpression)) {}
+  PreParserExpression() : code_(TypeField::encode(kEmpty)) {}
+
+  static PreParserExpression Empty() { return PreParserExpression(); }
 
   static PreParserExpression Default() {
-    return PreParserExpression();
+    return PreParserExpression(TypeField::encode(kExpression));
   }
 
   static PreParserExpression Spread(PreParserExpression expression) {
@@ -128,7 +137,8 @@
 
   static PreParserExpression FromIdentifier(PreParserIdentifier id) {
     return PreParserExpression(TypeField::encode(kIdentifierExpression) |
-                               IdentifierTypeField::encode(id.type_));
+                                   IdentifierTypeField::encode(id.type_),
+                               id.string_);
   }
 
   static PreParserExpression BinaryOperation(PreParserExpression left,
@@ -159,6 +169,11 @@
                                IsUseStrictField::encode(true));
   }
 
+  static PreParserExpression UseAsmStringLiteral() {
+    return PreParserExpression(TypeField::encode(kStringLiteralExpression) |
+                               IsUseAsmField::encode(true));
+  }
+
   static PreParserExpression This() {
     return PreParserExpression(TypeField::encode(kExpression) |
                                ExpressionTypeField::encode(kThisExpression));
@@ -199,6 +214,8 @@
         ExpressionTypeField::encode(kNoTemplateTagExpression));
   }
 
+  bool IsEmpty() const { return TypeField::decode(code_) == kEmpty; }
+
   bool IsIdentifier() const {
     return TypeField::decode(code_) == kIdentifierExpression;
   }
@@ -230,6 +247,11 @@
            IsUseStrictField::decode(code_);
   }
 
+  bool IsUseAsmLiteral() const {
+    return TypeField::decode(code_) == kStringLiteralExpression &&
+           IsUseAsmField::decode(code_);
+  }
+
   bool IsThis() const {
     return TypeField::decode(code_) == kExpression &&
            ExpressionTypeField::decode(code_) == kThisExpression;
@@ -275,7 +297,7 @@
            ExpressionTypeField::decode(code_) == kNoTemplateTagExpression;
   }
 
-  bool IsSpreadExpression() const {
+  bool IsSpread() const {
     return TypeField::decode(code_) == kSpreadExpression;
   }
 
@@ -292,12 +314,16 @@
   // More dummy implementations of things PreParser doesn't need to track:
   void set_index(int index) {}  // For YieldExpressions
   void set_should_eager_compile() {}
+  void set_should_be_used_once_hint() {}
 
   int position() const { return kNoSourcePosition; }
   void set_function_token_position(int position) {}
 
+  void set_is_class_field_initializer(bool is_class_field_initializer) {}
+
  private:
   enum Type {
+    kEmpty,
     kExpression,
     kIdentifierExpression,
     kStringLiteralExpression,
@@ -318,8 +344,9 @@
     kAssignment
   };
 
-  explicit PreParserExpression(uint32_t expression_code)
-      : code_(expression_code) {}
+  explicit PreParserExpression(uint32_t expression_code,
+                               const AstRawString* string = nullptr)
+      : code_(expression_code), string_(string) {}
 
   // The first three bits are for the Type.
   typedef BitField<Type, 0, 3> TypeField;
@@ -335,11 +362,16 @@
   // of the Type field, so they can share the storage.
   typedef BitField<ExpressionType, TypeField::kNext, 3> ExpressionTypeField;
   typedef BitField<bool, TypeField::kNext, 1> IsUseStrictField;
+  typedef BitField<bool, IsUseStrictField::kNext, 1> IsUseAsmField;
   typedef BitField<PreParserIdentifier::Type, TypeField::kNext, 10>
       IdentifierTypeField;
   typedef BitField<bool, TypeField::kNext, 1> HasCoverInitializedNameField;
 
   uint32_t code_;
+  // Non-nullptr if the expression is one identifier.
+  const AstRawString* string_;
+
+  friend class PreParser;
 };
 
 
@@ -353,13 +385,18 @@
   PreParserList* operator->() { return this; }
   void Add(T, void*) { ++length_; }
   int length() const { return length_; }
+  static PreParserList Null() { return PreParserList(-1); }
+  bool IsNull() const { return length_ == -1; }
+
  private:
+  explicit PreParserList(int n) : length_(n) {}
   int length_;
 };
 
-
 typedef PreParserList<PreParserExpression> PreParserExpressionList;
 
+class PreParserStatement;
+typedef PreParserList<PreParserStatement> PreParserStatementList;
 
 class PreParserStatement {
  public:
@@ -367,12 +404,16 @@
     return PreParserStatement(kUnknownStatement);
   }
 
-  static PreParserStatement Jump() {
-    return PreParserStatement(kJumpStatement);
+  static PreParserStatement Null() {
+    return PreParserStatement(kNullStatement);
   }
 
-  static PreParserStatement FunctionDeclaration() {
-    return PreParserStatement(kFunctionDeclaration);
+  static PreParserStatement Empty() {
+    return PreParserStatement(kEmptyStatement);
+  }
+
+  static PreParserStatement Jump() {
+    return PreParserStatement(kJumpStatement);
   }
 
   // Creates expression statement from expression.
@@ -383,6 +424,9 @@
     if (expression.IsUseStrictLiteral()) {
       return PreParserStatement(kUseStrictExpressionStatement);
     }
+    if (expression.IsUseAsmLiteral()) {
+      return PreParserStatement(kUseAsmExpressionStatement);
+    }
     if (expression.IsStringLiteral()) {
       return PreParserStatement(kStringLiteralExpressionStatement);
     }
@@ -390,28 +434,43 @@
   }
 
   bool IsStringLiteral() {
-    return code_ == kStringLiteralExpressionStatement || IsUseStrictLiteral();
+    return code_ == kStringLiteralExpressionStatement || IsUseStrictLiteral() ||
+           IsUseAsmLiteral();
   }
 
   bool IsUseStrictLiteral() {
     return code_ == kUseStrictExpressionStatement;
   }
 
-  bool IsFunctionDeclaration() {
-    return code_ == kFunctionDeclaration;
-  }
+  bool IsUseAsmLiteral() { return code_ == kUseAsmExpressionStatement; }
 
   bool IsJumpStatement() {
     return code_ == kJumpStatement;
   }
 
+  bool IsNullStatement() { return code_ == kNullStatement; }
+
+  bool IsEmptyStatement() { return code_ == kEmptyStatement; }
+
+  // Dummy implementation for making statement->somefunc() work in both Parser
+  // and PreParser.
+  PreParserStatement* operator->() { return this; }
+
+  PreParserStatementList statements() { return PreParserStatementList(); }
+  void set_scope(Scope* scope) {}
+  void Initialize(PreParserExpression cond, PreParserStatement body) {}
+  void Initialize(PreParserStatement init, PreParserExpression cond,
+                  PreParserStatement next, PreParserStatement body) {}
+
  private:
   enum Type {
+    kNullStatement,
+    kEmptyStatement,
     kUnknownStatement,
     kJumpStatement,
     kStringLiteralExpressionStatement,
     kUseStrictExpressionStatement,
-    kFunctionDeclaration
+    kUseAsmExpressionStatement,
   };
 
   explicit PreParserStatement(Type code) : code_(code) {}
@@ -419,9 +478,6 @@
 };
 
 
-typedef PreParserList<PreParserStatement> PreParserStatementList;
-
-
 class PreParserFactory {
  public:
   explicit PreParserFactory(void* unused_value_factory) {}
@@ -433,31 +489,34 @@
                                        int pos) {
     return PreParserExpression::Default();
   }
+  PreParserExpression NewUndefinedLiteral(int pos) {
+    return PreParserExpression::Default();
+  }
   PreParserExpression NewRegExpLiteral(PreParserIdentifier js_pattern,
                                        int js_flags, int literal_index,
                                        int pos) {
     return PreParserExpression::Default();
   }
   PreParserExpression NewArrayLiteral(PreParserExpressionList values,
-                                      int literal_index,
-                                      int pos) {
-    return PreParserExpression::ArrayLiteral();
-  }
-  PreParserExpression NewArrayLiteral(PreParserExpressionList values,
                                       int first_spread_index, int literal_index,
                                       int pos) {
     return PreParserExpression::ArrayLiteral();
   }
+  PreParserExpression NewClassLiteralProperty(PreParserExpression key,
+                                              PreParserExpression value,
+                                              ClassLiteralProperty::Kind kind,
+                                              bool is_static,
+                                              bool is_computed_name) {
+    return PreParserExpression::Default();
+  }
   PreParserExpression NewObjectLiteralProperty(PreParserExpression key,
                                                PreParserExpression value,
                                                ObjectLiteralProperty::Kind kind,
-                                               bool is_static,
                                                bool is_computed_name) {
     return PreParserExpression::Default();
   }
   PreParserExpression NewObjectLiteralProperty(PreParserExpression key,
                                                PreParserExpression value,
-                                               bool is_static,
                                                bool is_computed_name) {
     return PreParserExpression::Default();
   }
@@ -533,15 +592,9 @@
                                  int pos) {
     return PreParserExpression::Default();
   }
-  PreParserExpression NewCallRuntime(const AstRawString* name,
-                                     const Runtime::Function* function,
-                                     PreParserExpressionList arguments,
-                                     int pos) {
-    return PreParserExpression::Default();
-  }
   PreParserStatement NewReturnStatement(PreParserExpression expression,
                                         int pos) {
-    return PreParserStatement::Default();
+    return PreParserStatement::Jump();
   }
   PreParserExpression NewFunctionLiteral(
       PreParserIdentifier name, Scope* scope, PreParserStatementList body,
@@ -549,8 +602,7 @@
       int parameter_count,
       FunctionLiteral::ParameterFlag has_duplicate_parameters,
       FunctionLiteral::FunctionType function_type,
-      FunctionLiteral::EagerCompileHint eager_compile_hint, FunctionKind kind,
-      int position) {
+      FunctionLiteral::EagerCompileHint eager_compile_hint, int position) {
     return PreParserExpression::Default();
   }
 
@@ -563,6 +615,77 @@
     return PreParserExpression::Default();
   }
 
+  PreParserStatement NewEmptyStatement(int pos) {
+    return PreParserStatement::Default();
+  }
+
+  PreParserStatement NewBlock(ZoneList<const AstRawString*>* labels,
+                              int capacity, bool ignore_completion_value,
+                              int pos) {
+    return PreParserStatement::Default();
+  }
+
+  PreParserStatement NewDebuggerStatement(int pos) {
+    return PreParserStatement::Default();
+  }
+
+  PreParserStatement NewExpressionStatement(PreParserExpression expr, int pos) {
+    return PreParserStatement::ExpressionStatement(expr);
+  }
+
+  PreParserStatement NewIfStatement(PreParserExpression condition,
+                                    PreParserStatement then_statement,
+                                    PreParserStatement else_statement,
+                                    int pos) {
+    // This must return a jump statement iff both clauses are jump statements.
+    return else_statement.IsJumpStatement() ? then_statement : else_statement;
+  }
+
+  PreParserStatement NewBreakStatement(PreParserStatement target, int pos) {
+    return PreParserStatement::Jump();
+  }
+
+  PreParserStatement NewContinueStatement(PreParserStatement target, int pos) {
+    return PreParserStatement::Jump();
+  }
+
+  PreParserStatement NewWithStatement(Scope* scope,
+                                      PreParserExpression expression,
+                                      PreParserStatement statement, int pos) {
+    return PreParserStatement::Default();
+  }
+
+  PreParserStatement NewDoWhileStatement(ZoneList<const AstRawString*>* labels,
+                                         int pos) {
+    return PreParserStatement::Default();
+  }
+
+  PreParserStatement NewWhileStatement(ZoneList<const AstRawString*>* labels,
+                                       int pos) {
+    return PreParserStatement::Default();
+  }
+
+  PreParserStatement NewSwitchStatement(ZoneList<const AstRawString*>* labels,
+                                        int pos) {
+    return PreParserStatement::Default();
+  }
+
+  PreParserStatement NewCaseClause(PreParserExpression label,
+                                   PreParserStatementList statements, int pos) {
+    return PreParserStatement::Default();
+  }
+
+  PreParserStatement NewForStatement(ZoneList<const AstRawString*>* labels,
+                                     int pos) {
+    return PreParserStatement::Default();
+  }
+
+  PreParserStatement NewForEachStatement(ForEachStatement::VisitMode visit_mode,
+                                         ZoneList<const AstRawString*>* labels,
+                                         int pos) {
+    return PreParserStatement::Default();
+  }
+
   // Return the object itself as AstVisitor and implement the needed
   // dummy method right in this class.
   PreParserFactory* visitor() { return this; }
@@ -585,328 +708,46 @@
 
 class PreParser;
 
-template <>
-class ParserBaseTraits<PreParser> {
+class PreParserTarget {
  public:
-  typedef ParserBaseTraits<PreParser> PreParserTraits;
+  PreParserTarget(ParserBase<PreParser>* preparser,
+                  PreParserStatement statement) {}
+};
 
-  struct Type {
-    // PreParser doesn't need to store generator variables.
-    typedef void GeneratorVariable;
+class PreParserTargetScope {
+ public:
+  explicit PreParserTargetScope(ParserBase<PreParser>* preparser) {}
+};
 
-    typedef int AstProperties;
+template <>
+struct ParserTypes<PreParser> {
+  typedef ParserBase<PreParser> Base;
+  typedef PreParser Impl;
 
-    typedef v8::internal::ExpressionClassifier<PreParserTraits>
-        ExpressionClassifier;
+  // PreParser doesn't need to store generator variables.
+  typedef void Variable;
 
-    // Return types for traversing functions.
-    typedef PreParserIdentifier Identifier;
-    typedef PreParserExpression Expression;
-    typedef PreParserExpression YieldExpression;
-    typedef PreParserExpression FunctionLiteral;
-    typedef PreParserExpression ClassLiteral;
-    typedef PreParserExpression Literal;
-    typedef PreParserExpression ObjectLiteralProperty;
-    typedef PreParserExpressionList ExpressionList;
-    typedef PreParserExpressionList PropertyList;
-    typedef PreParserIdentifier FormalParameter;
-    typedef PreParserFormalParameters FormalParameters;
-    typedef PreParserStatementList StatementList;
+  // Return types for traversing functions.
+  typedef PreParserIdentifier Identifier;
+  typedef PreParserExpression Expression;
+  typedef PreParserExpression FunctionLiteral;
+  typedef PreParserExpression ObjectLiteralProperty;
+  typedef PreParserExpression ClassLiteralProperty;
+  typedef PreParserExpressionList ExpressionList;
+  typedef PreParserExpressionList ObjectPropertyList;
+  typedef PreParserExpressionList ClassPropertyList;
+  typedef PreParserFormalParameters FormalParameters;
+  typedef PreParserStatement Statement;
+  typedef PreParserStatementList StatementList;
+  typedef PreParserStatement Block;
+  typedef PreParserStatement BreakableStatement;
+  typedef PreParserStatement IterationStatement;
 
-    // For constructing objects returned by the traversing functions.
-    typedef PreParserFactory Factory;
-  };
+  // For constructing objects returned by the traversing functions.
+  typedef PreParserFactory Factory;
 
-  // TODO(nikolaos): The traits methods should not need to call methods
-  // of the implementation object.
-  PreParser* delegate() { return reinterpret_cast<PreParser*>(this); }
-  const PreParser* delegate() const {
-    return reinterpret_cast<const PreParser*>(this);
-  }
-
-  // Helper functions for recursive descent.
-  bool IsEval(PreParserIdentifier identifier) const {
-    return identifier.IsEval();
-  }
-
-  bool IsArguments(PreParserIdentifier identifier) const {
-    return identifier.IsArguments();
-  }
-
-  bool IsEvalOrArguments(PreParserIdentifier identifier) const {
-    return identifier.IsEvalOrArguments();
-  }
-
-  bool IsUndefined(PreParserIdentifier identifier) const {
-    return identifier.IsUndefined();
-  }
-
-  bool IsAwait(PreParserIdentifier identifier) const {
-    return identifier.IsAwait();
-  }
-
-  bool IsFutureStrictReserved(PreParserIdentifier identifier) const {
-    return identifier.IsFutureStrictReserved();
-  }
-
-  // Returns true if the expression is of type "this.foo".
-  static bool IsThisProperty(PreParserExpression expression) {
-    return expression.IsThisProperty();
-  }
-
-  static bool IsIdentifier(PreParserExpression expression) {
-    return expression.IsIdentifier();
-  }
-
-  static PreParserIdentifier AsIdentifier(PreParserExpression expression) {
-    return expression.AsIdentifier();
-  }
-
-  bool IsPrototype(PreParserIdentifier identifier) const {
-    return identifier.IsPrototype();
-  }
-
-  bool IsConstructor(PreParserIdentifier identifier) const {
-    return identifier.IsConstructor();
-  }
-
-  bool IsDirectEvalCall(PreParserExpression expression) const {
-    return expression.IsDirectEvalCall();
-  }
-
-  static bool IsBoilerplateProperty(PreParserExpression property) {
-    // PreParser doesn't count boilerplate properties.
-    return false;
-  }
-
-  static bool IsArrayIndex(PreParserIdentifier string, uint32_t* index) {
-    return false;
-  }
-
-  static PreParserExpression GetPropertyValue(PreParserExpression property) {
-    return PreParserExpression::Default();
-  }
-
-  // Functions for encapsulating the differences between parsing and preparsing;
-  // operations interleaved with the recursive descent.
-  static void PushLiteralName(FuncNameInferrer* fni, PreParserIdentifier id) {
-    // PreParser should not use FuncNameInferrer.
-    UNREACHABLE();
-  }
-
-  void PushPropertyName(FuncNameInferrer* fni, PreParserExpression expression) {
-    // PreParser should not use FuncNameInferrer.
-    UNREACHABLE();
-  }
-
-  static void InferFunctionName(FuncNameInferrer* fni,
-                                PreParserExpression expression) {
-    // PreParser should not use FuncNameInferrer.
-    UNREACHABLE();
-  }
-
-  static void CheckAssigningFunctionLiteralToProperty(
-      PreParserExpression left, PreParserExpression right) {}
-
-  static PreParserExpression MarkExpressionAsAssigned(
-      PreParserExpression expression) {
-    // TODO(marja): To be able to produce the same errors, the preparser needs
-    // to start tracking which expressions are variables and which are assigned.
-    return expression;
-  }
-
-  bool ShortcutNumericLiteralBinaryExpression(PreParserExpression* x,
-                                              PreParserExpression y,
-                                              Token::Value op, int pos,
-                                              PreParserFactory* factory) {
-    return false;
-  }
-
-  PreParserExpression BuildUnaryExpression(PreParserExpression expression,
-                                           Token::Value op, int pos,
-                                           PreParserFactory* factory) {
-    return PreParserExpression::Default();
-  }
-
-  PreParserExpression BuildIteratorResult(PreParserExpression value,
-                                          bool done) {
-    return PreParserExpression::Default();
-  }
-
-  PreParserExpression NewThrowReferenceError(MessageTemplate::Template message,
-                                             int pos) {
-    return PreParserExpression::Default();
-  }
-
-  PreParserExpression NewThrowSyntaxError(MessageTemplate::Template message,
-                                          PreParserIdentifier arg, int pos) {
-    return PreParserExpression::Default();
-  }
-
-  PreParserExpression NewThrowTypeError(MessageTemplate::Template message,
-                                        PreParserIdentifier arg, int pos) {
-    return PreParserExpression::Default();
-  }
-
-  // Reporting errors.
-  void ReportMessageAt(Scanner::Location source_location,
-                       MessageTemplate::Template message,
-                       const char* arg = NULL,
-                       ParseErrorType error_type = kSyntaxError);
-  void ReportMessageAt(Scanner::Location source_location,
-                       MessageTemplate::Template message,
-                       const AstRawString* arg,
-                       ParseErrorType error_type = kSyntaxError);
-
-  // A dummy function, just useful as an argument to CHECK_OK_CUSTOM.
-  static void Void() {}
-
-  // "null" return type creators.
-  static PreParserIdentifier EmptyIdentifier() {
-    return PreParserIdentifier::Default();
-  }
-  static PreParserExpression EmptyExpression() {
-    return PreParserExpression::Default();
-  }
-  static PreParserExpression EmptyLiteral() {
-    return PreParserExpression::Default();
-  }
-  static PreParserExpression EmptyObjectLiteralProperty() {
-    return PreParserExpression::Default();
-  }
-  static PreParserExpression EmptyFunctionLiteral() {
-    return PreParserExpression::Default();
-  }
-
-  static PreParserExpressionList NullExpressionList() {
-    return PreParserExpressionList();
-  }
-  PreParserIdentifier EmptyIdentifierString() const {
-    return PreParserIdentifier::Default();
-  }
-
-  // Odd-ball literal creators.
-  PreParserExpression GetLiteralTheHole(int position,
-                                        PreParserFactory* factory) const {
-    return PreParserExpression::Default();
-  }
-
-  // Producing data during the recursive descent.
-  PreParserIdentifier GetSymbol(Scanner* scanner) const;
-
-  PreParserIdentifier GetNextSymbol(Scanner* scanner) const {
-    return PreParserIdentifier::Default();
-  }
-
-  PreParserIdentifier GetNumberAsSymbol(Scanner* scanner) const {
-    return PreParserIdentifier::Default();
-  }
-
-  PreParserExpression ThisExpression(int pos = kNoSourcePosition) {
-    return PreParserExpression::This();
-  }
-
-  PreParserExpression NewSuperPropertyReference(PreParserFactory* factory,
-                                                int pos) {
-    return PreParserExpression::Default();
-  }
-
-  PreParserExpression NewSuperCallReference(PreParserFactory* factory,
-                                            int pos) {
-    return PreParserExpression::SuperCallReference();
-  }
-
-  PreParserExpression NewTargetExpression(int pos) {
-    return PreParserExpression::Default();
-  }
-
-  PreParserExpression FunctionSentExpression(PreParserFactory* factory,
-                                             int pos) const {
-    return PreParserExpression::Default();
-  }
-
-  PreParserExpression ExpressionFromLiteral(Token::Value token, int pos,
-                                            Scanner* scanner,
-                                            PreParserFactory* factory) const {
-    return PreParserExpression::Default();
-  }
-
-  PreParserExpression ExpressionFromIdentifier(PreParserIdentifier name,
-                                               int start_position,
-                                               int end_position,
-                                               InferName = InferName::kYes) {
-    return PreParserExpression::FromIdentifier(name);
-  }
-
-  PreParserExpression ExpressionFromString(int pos, Scanner* scanner,
-                                           PreParserFactory* factory) const;
-
-  PreParserExpression GetIterator(PreParserExpression iterable,
-                                  PreParserFactory* factory, int pos) {
-    return PreParserExpression::Default();
-  }
-
-  PreParserExpressionList NewExpressionList(int size, Zone* zone) const {
-    return PreParserExpressionList();
-  }
-
-  PreParserExpressionList NewPropertyList(int size, Zone* zone) const {
-    return PreParserExpressionList();
-  }
-
-  PreParserStatementList NewStatementList(int size, Zone* zone) const {
-    return PreParserStatementList();
-  }
-
-  void AddParameterInitializationBlock(
-      const PreParserFormalParameters& parameters, PreParserStatementList body,
-      bool is_async, bool* ok) {}
-
-  void AddFormalParameter(PreParserFormalParameters* parameters,
-                          PreParserExpression pattern,
-                          PreParserExpression initializer,
-                          int initializer_end_position, bool is_rest) {
-    ++parameters->arity;
-  }
-
-  void DeclareFormalParameter(DeclarationScope* scope,
-                              PreParserIdentifier parameter,
-                              Type::ExpressionClassifier* classifier) {
-    if (!classifier->is_simple_parameter_list()) {
-      scope->SetHasNonSimpleParameters();
-    }
-  }
-
-  V8_INLINE void ParseArrowFunctionFormalParameterList(
-      PreParserFormalParameters* parameters, PreParserExpression params,
-      const Scanner::Location& params_loc, Scanner::Location* duplicate_loc,
-      const Scope::Snapshot& scope_snapshot, bool* ok);
-
-  void ReindexLiterals(const PreParserFormalParameters& parameters) {}
-
-  V8_INLINE PreParserExpression NoTemplateTag() {
-    return PreParserExpression::NoTemplateTag();
-  }
-  V8_INLINE static bool IsTaggedTemplate(const PreParserExpression tag) {
-    return !tag.IsNoTemplateTag();
-  }
-
-  inline void MaterializeUnspreadArgumentsLiterals(int count);
-
-  inline PreParserExpression ExpressionListToExpression(
-      PreParserExpressionList args) {
-    return PreParserExpression::Default();
-  }
-
-  void SetFunctionNameFromPropertyName(PreParserExpression property,
-                                       PreParserIdentifier name) {}
-  void SetFunctionNameFromIdentifierRef(PreParserExpression value,
-                                        PreParserExpression identifier) {}
-
-  V8_INLINE ZoneList<typename Type::ExpressionClassifier::Error>*
-      GetReportedErrorList() const;
-  V8_INLINE Zone* zone() const;
-  V8_INLINE ZoneList<PreParserExpression>* GetNonPatternList() const;
+  typedef PreParserTarget Target;
+  typedef PreParserTargetScope TargetScope;
 };
 
 
@@ -924,9 +765,7 @@
 // it is used) are generally omitted.
 class PreParser : public ParserBase<PreParser> {
   friend class ParserBase<PreParser>;
-  // TODO(nikolaos): This should not be necessary. It will be removed
-  // when the traits object stops delegating to the implementation object.
-  friend class ParserBaseTraits<PreParser>;
+  friend class v8::internal::ExpressionClassifier<ParserTypes<PreParser>>;
 
  public:
   typedef PreParserIdentifier Identifier;
@@ -935,6 +774,7 @@
 
   enum PreParseResult {
     kPreParseStackOverflow,
+    kPreParseAbort,
     kPreParseSuccess
   };
 
@@ -942,7 +782,8 @@
             ParserRecorder* log, uintptr_t stack_limit)
       : ParserBase<PreParser>(zone, scanner, stack_limit, NULL,
                               ast_value_factory, log),
-        use_counts_(nullptr) {}
+        use_counts_(nullptr),
+        track_unresolved_variables_(false) {}
 
   // Pre-parse the program from the character stream; returns true on
   // success (even if parsing failed, the pre-parse data successfully
@@ -958,19 +799,19 @@
     // the global scope.
     if (is_module) scope = NewModuleScope(scope);
 
-    FunctionState top_scope(&function_state_, &scope_state_, scope,
-                            kNormalFunction);
+    FunctionState top_scope(&function_state_, &scope_state_, scope);
     bool ok = true;
     int start_position = scanner()->peek_location().beg_pos;
     parsing_module_ = is_module;
-    ParseStatementList(Token::EOS, &ok);
+    PreParserStatementList body;
+    ParseStatementList(body, Token::EOS, &ok);
     if (stack_overflow()) return kPreParseStackOverflow;
     if (!ok) {
       ReportUnexpectedToken(scanner()->current_token());
     } else if (is_strict(this->scope()->language_mode())) {
       CheckStrictOctalLiteral(start_position, scanner()->location().end_pos,
                               &ok);
-      CheckDecimalLiteralWithLeadingZero(use_counts_, start_position,
+      CheckDecimalLiteralWithLeadingZero(start_position,
                                          scanner()->location().end_pos);
     }
     if (materialized_literals) {
@@ -987,16 +828,12 @@
   // keyword and parameters, and have consumed the initial '{'.
   // At return, unless an error occurred, the scanner is positioned before the
   // the final '}'.
-  PreParseResult PreParseLazyFunction(LanguageMode language_mode,
-                                      FunctionKind kind,
-                                      bool has_simple_parameters,
+  PreParseResult PreParseLazyFunction(DeclarationScope* function_scope,
                                       bool parsing_module, ParserRecorder* log,
-                                      Scanner::BookmarkScope* bookmark,
-                                      int* use_counts);
+                                      bool track_unresolved_variables,
+                                      bool may_abort, int* use_counts);
 
  private:
-  static const int kLazyParseTrialLimit = 200;
-
   // These types form an algebra over syntactic categories that is just
   // rich enough to let us recognize and propagate the constructs that
   // are either being counted in the preparser data, or is important
@@ -1006,72 +843,24 @@
   // which is set to false if parsing failed; it is unchanged otherwise.
   // By making the 'exception handling' explicit, we are forced to check
   // for failure at the call sites.
-  Statement ParseStatementListItem(bool* ok);
-  void ParseStatementList(int end_token, bool* ok,
-                          Scanner::BookmarkScope* bookmark = nullptr);
-  Statement ParseStatement(AllowLabelledFunctionStatement allow_function,
-                           bool* ok);
-  Statement ParseSubStatement(AllowLabelledFunctionStatement allow_function,
-                              bool* ok);
-  Statement ParseScopedStatement(bool legacy, bool* ok);
-  Statement ParseHoistableDeclaration(bool* ok);
-  Statement ParseHoistableDeclaration(int pos, ParseFunctionFlags flags,
-                                      bool* ok);
-  Statement ParseFunctionDeclaration(bool* ok);
-  Statement ParseAsyncFunctionDeclaration(bool* ok);
-  Expression ParseAsyncFunctionExpression(bool* ok);
-  Statement ParseClassDeclaration(bool* ok);
-  Statement ParseBlock(bool* ok);
-  Statement ParseVariableStatement(VariableDeclarationContext var_context,
-                                   bool* ok);
-  Statement ParseVariableDeclarations(VariableDeclarationContext var_context,
-                                      int* num_decl, bool* is_lexical,
-                                      bool* is_binding_pattern,
-                                      Scanner::Location* first_initializer_loc,
-                                      Scanner::Location* bindings_loc,
-                                      bool* ok);
-  Statement ParseExpressionOrLabelledStatement(
-      AllowLabelledFunctionStatement allow_function, bool* ok);
-  Statement ParseIfStatement(bool* ok);
-  Statement ParseContinueStatement(bool* ok);
-  Statement ParseBreakStatement(bool* ok);
-  Statement ParseReturnStatement(bool* ok);
-  Statement ParseWithStatement(bool* ok);
-  Statement ParseSwitchStatement(bool* ok);
-  Statement ParseDoWhileStatement(bool* ok);
-  Statement ParseWhileStatement(bool* ok);
-  Statement ParseForStatement(bool* ok);
-  Statement ParseThrowStatement(bool* ok);
-  Statement ParseTryStatement(bool* ok);
-  Statement ParseDebuggerStatement(bool* ok);
-  Expression ParseConditionalExpression(bool accept_IN, bool* ok);
-  Expression ParseObjectLiteral(bool* ok);
-  Expression ParseV8Intrinsic(bool* ok);
-  Expression ParseDoExpression(bool* ok);
 
   V8_INLINE PreParserStatementList ParseEagerFunctionBody(
       PreParserIdentifier function_name, int pos,
       const PreParserFormalParameters& parameters, FunctionKind kind,
       FunctionLiteral::FunctionType function_type, bool* ok);
 
-  V8_INLINE void SkipLazyFunctionBody(
-      int* materialized_literal_count, int* expected_property_count, bool* ok,
-      Scanner::BookmarkScope* bookmark = nullptr) {
+  V8_INLINE LazyParsingResult SkipLazyFunctionBody(
+      int* materialized_literal_count, int* expected_property_count,
+      bool track_unresolved_variables, bool may_abort, bool* ok) {
     UNREACHABLE();
+    return kLazyParsingComplete;
   }
   Expression ParseFunctionLiteral(
       Identifier name, Scanner::Location function_name_location,
       FunctionNameValidity function_name_validity, FunctionKind kind,
       int function_token_pos, FunctionLiteral::FunctionType function_type,
       LanguageMode language_mode, bool* ok);
-  void ParseLazyFunctionLiteralBody(bool* ok,
-                                    Scanner::BookmarkScope* bookmark = nullptr);
-
-  PreParserExpression ParseClassLiteral(ExpressionClassifier* classifier,
-                                        PreParserIdentifier name,
-                                        Scanner::Location class_name_location,
-                                        bool name_is_strict_reserved, int pos,
-                                        bool* ok);
+  LazyParsingResult ParseLazyFunctionLiteralBody(bool may_abort, bool* ok);
 
   struct TemplateLiteralState {};
 
@@ -1085,14 +874,14 @@
       TemplateLiteralState* state, int start, PreParserExpression tag);
   V8_INLINE void CheckConflictingVarDeclarations(Scope* scope, bool* ok) {}
 
+  V8_INLINE void SetLanguageMode(Scope* scope, LanguageMode mode) {
+    scope->SetLanguageMode(mode);
+  }
+  V8_INLINE void SetAsmModule() {}
+
   V8_INLINE void MarkCollectedTailCallExpressions() {}
   V8_INLINE void MarkTailPosition(PreParserExpression expression) {}
 
-  void ParseAsyncArrowSingleExpressionBody(PreParserStatementList body,
-                                           bool accept_IN,
-                                           ExpressionClassifier* classifier,
-                                           int pos, bool* ok);
-
   V8_INLINE PreParserExpressionList
   PrepareSpreadArguments(PreParserExpressionList list) {
     return list;
@@ -1105,6 +894,11 @@
                                               PreParserExpressionList args,
                                               int pos);
 
+  V8_INLINE PreParserExpression
+  RewriteSuperCall(PreParserExpression call_expression) {
+    return call_expression;
+  }
+
   V8_INLINE void RewriteDestructuringAssignments() {}
 
   V8_INLINE PreParserExpression RewriteExponentiation(PreParserExpression left,
@@ -1121,14 +915,102 @@
   RewriteAwaitExpression(PreParserExpression value, int pos) {
     return value;
   }
+  V8_INLINE void PrepareAsyncFunctionBody(PreParserStatementList body,
+                                          FunctionKind kind, int pos) {}
+  V8_INLINE void RewriteAsyncFunctionBody(PreParserStatementList body,
+                                          PreParserStatement block,
+                                          PreParserExpression return_value,
+                                          bool* ok) {}
   V8_INLINE PreParserExpression RewriteYieldStar(PreParserExpression generator,
                                                  PreParserExpression expression,
                                                  int pos) {
     return PreParserExpression::Default();
   }
-  V8_INLINE void RewriteNonPattern(Type::ExpressionClassifier* classifier,
-                                   bool* ok) {
-    ValidateExpression(classifier, ok);
+  V8_INLINE void RewriteNonPattern(bool* ok) { ValidateExpression(ok); }
+
+  void DeclareAndInitializeVariables(
+      PreParserStatement block,
+      const DeclarationDescriptor* declaration_descriptor,
+      const DeclarationParsingResult::Declaration* declaration,
+      ZoneList<const AstRawString*>* names, bool* ok);
+
+  V8_INLINE ZoneList<const AstRawString*>* DeclareLabel(
+      ZoneList<const AstRawString*>* labels, PreParserExpression expr,
+      bool* ok) {
+    DCHECK(!expr.AsIdentifier().IsEnum());
+    DCHECK(!parsing_module_ || !expr.AsIdentifier().IsAwait());
+    DCHECK(is_sloppy(language_mode()) ||
+           !IsFutureStrictReserved(expr.AsIdentifier()));
+    return labels;
+  }
+
+  // TODO(nikolaos): The preparser currently does not keep track of labels.
+  V8_INLINE bool ContainsLabel(ZoneList<const AstRawString*>* labels,
+                               PreParserIdentifier label) {
+    return false;
+  }
+
+  V8_INLINE PreParserExpression RewriteReturn(PreParserExpression return_value,
+                                              int pos) {
+    return return_value;
+  }
+  V8_INLINE PreParserStatement RewriteSwitchStatement(
+      PreParserExpression tag, PreParserStatement switch_statement,
+      PreParserStatementList cases, Scope* scope) {
+    return PreParserStatement::Default();
+  }
+  V8_INLINE void RewriteCatchPattern(CatchInfo* catch_info, bool* ok) {}
+  V8_INLINE void ValidateCatchBlock(const CatchInfo& catch_info, bool* ok) {}
+  V8_INLINE PreParserStatement RewriteTryStatement(
+      PreParserStatement try_block, PreParserStatement catch_block,
+      PreParserStatement finally_block, const CatchInfo& catch_info, int pos) {
+    return PreParserStatement::Default();
+  }
+
+  V8_INLINE PreParserExpression RewriteDoExpression(PreParserStatement body,
+                                                    int pos, bool* ok) {
+    return PreParserExpression::Default();
+  }
+
+  // TODO(nikolaos): The preparser currently does not keep track of labels
+  // and targets.
+  V8_INLINE PreParserStatement LookupBreakTarget(PreParserIdentifier label,
+                                                 bool* ok) {
+    return PreParserStatement::Default();
+  }
+  V8_INLINE PreParserStatement LookupContinueTarget(PreParserIdentifier label,
+                                                    bool* ok) {
+    return PreParserStatement::Default();
+  }
+
+  V8_INLINE PreParserStatement DeclareFunction(
+      PreParserIdentifier variable_name, PreParserExpression function, int pos,
+      bool is_generator, bool is_async, ZoneList<const AstRawString*>* names,
+      bool* ok) {
+    return Statement::Default();
+  }
+
+  V8_INLINE PreParserStatement
+  DeclareClass(PreParserIdentifier variable_name, PreParserExpression value,
+               ZoneList<const AstRawString*>* names, int class_token_pos,
+               int end_pos, bool* ok) {
+    return PreParserStatement::Default();
+  }
+  V8_INLINE void DeclareClassVariable(PreParserIdentifier name,
+                                      Scope* block_scope, ClassInfo* class_info,
+                                      int class_token_pos, bool* ok) {}
+  V8_INLINE void DeclareClassProperty(PreParserIdentifier class_name,
+                                      PreParserExpression property,
+                                      ClassInfo* class_info, bool* ok) {}
+  V8_INLINE PreParserExpression RewriteClassLiteral(PreParserIdentifier name,
+                                                    ClassInfo* class_info,
+                                                    int pos, bool* ok) {
+    return PreParserExpression::Default();
+  }
+
+  V8_INLINE PreParserStatement DeclareNative(PreParserIdentifier name, int pos,
+                                             bool* ok) {
+    return PreParserStatement::Default();
   }
 
   V8_INLINE void QueueDestructuringAssignmentForRewriting(
@@ -1136,15 +1018,425 @@
   V8_INLINE void QueueNonPatternForRewriting(PreParserExpression expr,
                                              bool* ok) {}
 
-  int* use_counts_;
-};
-
-void ParserBaseTraits<PreParser>::MaterializeUnspreadArgumentsLiterals(
-    int count) {
-  for (int i = 0; i < count; ++i) {
-    delegate()->function_state_->NextMaterializedLiteralIndex();
+  // Helper functions for recursive descent.
+  V8_INLINE bool IsEval(PreParserIdentifier identifier) const {
+    return identifier.IsEval();
   }
-}
+
+  V8_INLINE bool IsArguments(PreParserIdentifier identifier) const {
+    return identifier.IsArguments();
+  }
+
+  V8_INLINE bool IsEvalOrArguments(PreParserIdentifier identifier) const {
+    return identifier.IsEvalOrArguments();
+  }
+
+  V8_INLINE bool IsUndefined(PreParserIdentifier identifier) const {
+    return identifier.IsUndefined();
+  }
+
+  V8_INLINE bool IsAwait(PreParserIdentifier identifier) const {
+    return identifier.IsAwait();
+  }
+
+  V8_INLINE bool IsFutureStrictReserved(PreParserIdentifier identifier) const {
+    return identifier.IsFutureStrictReserved();
+  }
+
+  // Returns true if the expression is of type "this.foo".
+  V8_INLINE static bool IsThisProperty(PreParserExpression expression) {
+    return expression.IsThisProperty();
+  }
+
+  V8_INLINE static bool IsIdentifier(PreParserExpression expression) {
+    return expression.IsIdentifier();
+  }
+
+  V8_INLINE static PreParserIdentifier AsIdentifier(
+      PreParserExpression expression) {
+    return expression.AsIdentifier();
+  }
+
+  V8_INLINE static PreParserExpression AsIdentifierExpression(
+      PreParserExpression expression) {
+    return expression;
+  }
+
+  V8_INLINE bool IsPrototype(PreParserIdentifier identifier) const {
+    return identifier.IsPrototype();
+  }
+
+  V8_INLINE bool IsConstructor(PreParserIdentifier identifier) const {
+    return identifier.IsConstructor();
+  }
+
+  V8_INLINE bool IsDirectEvalCall(PreParserExpression expression) const {
+    return expression.IsDirectEvalCall();
+  }
+
+  V8_INLINE static bool IsBoilerplateProperty(PreParserExpression property) {
+    // PreParser doesn't count boilerplate properties.
+    return false;
+  }
+
+  V8_INLINE bool IsNative(PreParserExpression expr) const {
+    // Preparsing is disabled for extensions (because the extension
+    // details aren't passed to lazily compiled functions), so we
+    // don't accept "native function" in the preparser and there is
+    // no need to keep track of "native".
+    return false;
+  }
+
+  V8_INLINE static bool IsArrayIndex(PreParserIdentifier string,
+                                     uint32_t* index) {
+    return false;
+  }
+
+  V8_INLINE bool IsUseStrictDirective(PreParserStatement statement) const {
+    return statement.IsUseStrictLiteral();
+  }
+
+  V8_INLINE bool IsUseAsmDirective(PreParserStatement statement) const {
+    return statement.IsUseAsmLiteral();
+  }
+
+  V8_INLINE bool IsStringLiteral(PreParserStatement statement) const {
+    return statement.IsStringLiteral();
+  }
+
+  V8_INLINE static PreParserExpression GetPropertyValue(
+      PreParserExpression property) {
+    return PreParserExpression::Default();
+  }
+
+  V8_INLINE static void GetDefaultStrings(
+      PreParserIdentifier* default_string,
+      PreParserIdentifier* star_default_star_string) {}
+
+  // Functions for encapsulating the differences between parsing and preparsing;
+  // operations interleaved with the recursive descent.
+  V8_INLINE static void PushLiteralName(PreParserIdentifier id) {}
+  V8_INLINE static void PushVariableName(PreParserIdentifier id) {}
+  V8_INLINE void PushPropertyName(PreParserExpression expression) {}
+  V8_INLINE void PushEnclosingName(PreParserIdentifier name) {}
+  V8_INLINE static void AddFunctionForNameInference(
+      PreParserExpression expression) {}
+  V8_INLINE static void InferFunctionName() {}
+
+  V8_INLINE static void CheckAssigningFunctionLiteralToProperty(
+      PreParserExpression left, PreParserExpression right) {}
+
+  V8_INLINE static PreParserExpression MarkExpressionAsAssigned(
+      PreParserExpression expression) {
+    // TODO(marja): To be able to produce the same errors, the preparser needs
+    // to start tracking which expressions are variables and which are assigned.
+    return expression;
+  }
+
+  V8_INLINE bool ShortcutNumericLiteralBinaryExpression(PreParserExpression* x,
+                                                        PreParserExpression y,
+                                                        Token::Value op,
+                                                        int pos) {
+    return false;
+  }
+
+  V8_INLINE PreParserExpression BuildUnaryExpression(
+      PreParserExpression expression, Token::Value op, int pos) {
+    return PreParserExpression::Default();
+  }
+
+  V8_INLINE PreParserExpression BuildIteratorResult(PreParserExpression value,
+                                                    bool done) {
+    return PreParserExpression::Default();
+  }
+
+  V8_INLINE PreParserStatement
+  BuildInitializationBlock(DeclarationParsingResult* parsing_result,
+                           ZoneList<const AstRawString*>* names, bool* ok) {
+    return PreParserStatement::Default();
+  }
+
+  V8_INLINE PreParserStatement
+  InitializeForEachStatement(PreParserStatement stmt, PreParserExpression each,
+                             PreParserExpression subject,
+                             PreParserStatement body, int each_keyword_pos) {
+    return stmt;
+  }
+
+  V8_INLINE PreParserStatement RewriteForVarInLegacy(const ForInfo& for_info) {
+    return PreParserStatement::Null();
+  }
+  V8_INLINE void DesugarBindingInForEachStatement(
+      ForInfo* for_info, PreParserStatement* body_block,
+      PreParserExpression* each_variable, bool* ok) {}
+  V8_INLINE PreParserStatement CreateForEachStatementTDZ(
+      PreParserStatement init_block, const ForInfo& for_info, bool* ok) {
+    return init_block;
+  }
+
+  V8_INLINE StatementT DesugarLexicalBindingsInForStatement(
+      PreParserStatement loop, PreParserStatement init,
+      PreParserExpression cond, PreParserStatement next,
+      PreParserStatement body, Scope* inner_scope, const ForInfo& for_info,
+      bool* ok) {
+    return loop;
+  }
+
+  V8_INLINE PreParserExpression
+  NewThrowReferenceError(MessageTemplate::Template message, int pos) {
+    return PreParserExpression::Default();
+  }
+
+  V8_INLINE PreParserExpression NewThrowSyntaxError(
+      MessageTemplate::Template message, PreParserIdentifier arg, int pos) {
+    return PreParserExpression::Default();
+  }
+
+  V8_INLINE PreParserExpression NewThrowTypeError(
+      MessageTemplate::Template message, PreParserIdentifier arg, int pos) {
+    return PreParserExpression::Default();
+  }
+
+  // Reporting errors.
+  V8_INLINE void ReportMessageAt(Scanner::Location source_location,
+                                 MessageTemplate::Template message,
+                                 const char* arg = NULL,
+                                 ParseErrorType error_type = kSyntaxError) {
+    log_->LogMessage(source_location.beg_pos, source_location.end_pos, message,
+                     arg, error_type);
+  }
+
+  V8_INLINE void ReportMessageAt(Scanner::Location source_location,
+                                 MessageTemplate::Template message,
+                                 PreParserIdentifier arg,
+                                 ParseErrorType error_type = kSyntaxError) {
+    UNREACHABLE();
+  }
+
+  // "null" return type creators.
+  V8_INLINE static PreParserIdentifier EmptyIdentifier() {
+    return PreParserIdentifier::Empty();
+  }
+  V8_INLINE static bool IsEmptyIdentifier(PreParserIdentifier name) {
+    return name.IsEmpty();
+  }
+  V8_INLINE static PreParserExpression EmptyExpression() {
+    return PreParserExpression::Empty();
+  }
+  V8_INLINE static PreParserExpression EmptyLiteral() {
+    return PreParserExpression::Default();
+  }
+  V8_INLINE static PreParserExpression EmptyObjectLiteralProperty() {
+    return PreParserExpression::Default();
+  }
+  V8_INLINE static PreParserExpression EmptyClassLiteralProperty() {
+    return PreParserExpression::Default();
+  }
+  V8_INLINE static PreParserExpression EmptyFunctionLiteral() {
+    return PreParserExpression::Default();
+  }
+
+  V8_INLINE static bool IsEmptyExpression(PreParserExpression expr) {
+    return expr.IsEmpty();
+  }
+
+  V8_INLINE static PreParserExpressionList NullExpressionList() {
+    return PreParserExpressionList::Null();
+  }
+
+  V8_INLINE static bool IsNullExpressionList(PreParserExpressionList exprs) {
+    return exprs.IsNull();
+  }
+
+  V8_INLINE static PreParserStatementList NullStatementList() {
+    return PreParserStatementList::Null();
+  }
+
+  V8_INLINE static bool IsNullStatementList(PreParserStatementList stmts) {
+    return stmts.IsNull();
+  }
+
+  V8_INLINE static PreParserStatement NullStatement() {
+    return PreParserStatement::Null();
+  }
+
+  V8_INLINE bool IsNullStatement(PreParserStatement stmt) {
+    return stmt.IsNullStatement();
+  }
+
+  V8_INLINE bool IsEmptyStatement(PreParserStatement stmt) {
+    return stmt.IsEmptyStatement();
+  }
+
+  V8_INLINE static PreParserStatement NullBlock() {
+    return PreParserStatement::Null();
+  }
+
+  V8_INLINE PreParserIdentifier EmptyIdentifierString() const {
+    return PreParserIdentifier::Default();
+  }
+
+  // Odd-ball literal creators.
+  V8_INLINE PreParserExpression GetLiteralTheHole(int position) {
+    return PreParserExpression::Default();
+  }
+
+  V8_INLINE PreParserExpression GetLiteralUndefined(int position) {
+    return PreParserExpression::Default();
+  }
+
+  // Producing data during the recursive descent.
+  PreParserIdentifier GetSymbol() const;
+
+  V8_INLINE PreParserIdentifier GetNextSymbol() const {
+    return PreParserIdentifier::Default();
+  }
+
+  V8_INLINE PreParserIdentifier GetNumberAsSymbol() const {
+    return PreParserIdentifier::Default();
+  }
+
+  V8_INLINE PreParserExpression ThisExpression(int pos = kNoSourcePosition) {
+    return PreParserExpression::This();
+  }
+
+  V8_INLINE PreParserExpression NewSuperPropertyReference(int pos) {
+    return PreParserExpression::Default();
+  }
+
+  V8_INLINE PreParserExpression NewSuperCallReference(int pos) {
+    return PreParserExpression::SuperCallReference();
+  }
+
+  V8_INLINE PreParserExpression NewTargetExpression(int pos) {
+    return PreParserExpression::Default();
+  }
+
+  V8_INLINE PreParserExpression FunctionSentExpression(int pos) {
+    return PreParserExpression::Default();
+  }
+
+  V8_INLINE PreParserExpression ExpressionFromLiteral(Token::Value token,
+                                                      int pos) {
+    return PreParserExpression::Default();
+  }
+
+  PreParserExpression ExpressionFromIdentifier(
+      PreParserIdentifier name, int start_position, int end_position,
+      InferName infer = InferName::kYes);
+
+  V8_INLINE PreParserExpression ExpressionFromString(int pos) {
+    if (scanner()->UnescapedLiteralMatches("use strict", 10)) {
+      return PreParserExpression::UseStrictStringLiteral();
+    }
+    return PreParserExpression::StringLiteral();
+  }
+
+  V8_INLINE PreParserExpressionList NewExpressionList(int size) const {
+    return PreParserExpressionList();
+  }
+
+  V8_INLINE PreParserExpressionList NewObjectPropertyList(int size) const {
+    return PreParserExpressionList();
+  }
+
+  V8_INLINE PreParserExpressionList NewClassPropertyList(int size) const {
+    return PreParserExpressionList();
+  }
+
+  V8_INLINE PreParserStatementList NewStatementList(int size) const {
+    return PreParserStatementList();
+  }
+
+  PreParserStatementList NewCaseClauseList(int size) {
+    return PreParserStatementList();
+  }
+
+  V8_INLINE PreParserExpression
+  NewV8Intrinsic(PreParserIdentifier name, PreParserExpressionList arguments,
+                 int pos, bool* ok) {
+    return PreParserExpression::Default();
+  }
+
+  V8_INLINE PreParserStatement NewThrowStatement(PreParserExpression exception,
+                                                 int pos) {
+    return PreParserStatement::Jump();
+  }
+
+  V8_INLINE void AddParameterInitializationBlock(
+      const PreParserFormalParameters& parameters, PreParserStatementList body,
+      bool is_async, bool* ok) {}
+
+  V8_INLINE void AddFormalParameter(PreParserFormalParameters* parameters,
+                                    PreParserExpression pattern,
+                                    PreParserExpression initializer,
+                                    int initializer_end_position,
+                                    bool is_rest) {
+    ++parameters->arity;
+  }
+
+  V8_INLINE void DeclareFormalParameter(DeclarationScope* scope,
+                                        PreParserIdentifier parameter) {
+    if (!classifier()->is_simple_parameter_list()) {
+      scope->SetHasNonSimpleParameters();
+    }
+  }
+
+  V8_INLINE void DeclareArrowFunctionFormalParameters(
+      PreParserFormalParameters* parameters, PreParserExpression params,
+      const Scanner::Location& params_loc, Scanner::Location* duplicate_loc,
+      bool* ok) {
+    // TODO(wingo): Detect duplicated identifiers in paramlists.  Detect
+    // parameter lists that are too long.
+  }
+
+  V8_INLINE void ReindexLiterals(const PreParserFormalParameters& parameters) {}
+
+  V8_INLINE PreParserExpression NoTemplateTag() {
+    return PreParserExpression::NoTemplateTag();
+  }
+
+  V8_INLINE static bool IsTaggedTemplate(const PreParserExpression tag) {
+    return !tag.IsNoTemplateTag();
+  }
+
+  V8_INLINE void MaterializeUnspreadArgumentsLiterals(int count) {
+    for (int i = 0; i < count; ++i) {
+      function_state_->NextMaterializedLiteralIndex();
+    }
+  }
+
+  V8_INLINE PreParserExpression
+  ExpressionListToExpression(PreParserExpressionList args) {
+    return PreParserExpression::Default();
+  }
+
+  V8_INLINE void AddAccessorPrefixToFunctionName(bool is_get,
+                                                 PreParserExpression function,
+                                                 PreParserIdentifier name) {}
+  V8_INLINE void SetFunctionNameFromPropertyName(PreParserExpression property,
+                                                 PreParserIdentifier name) {}
+  V8_INLINE void SetFunctionNameFromIdentifierRef(
+      PreParserExpression value, PreParserExpression identifier) {}
+
+  V8_INLINE ZoneList<typename ExpressionClassifier::Error>*
+  GetReportedErrorList() const {
+    return function_state_->GetReportedErrorList();
+  }
+
+  V8_INLINE ZoneList<PreParserExpression>* GetNonPatternList() const {
+    return function_state_->non_patterns_to_rewrite();
+  }
+
+  V8_INLINE void CountUsage(v8::Isolate::UseCounterFeature feature) {
+    if (use_counts_ != nullptr) ++use_counts_[feature];
+  }
+
+  // Preparser's private field members.
+
+  int* use_counts_;
+  bool track_unresolved_variables_;
+};
 
 PreParserExpression PreParser::SpreadCall(PreParserExpression function,
                                           PreParserExpressionList args,
@@ -1158,46 +1450,24 @@
   return factory()->NewCallNew(function, args, pos);
 }
 
-void ParserBaseTraits<PreParser>::ParseArrowFunctionFormalParameterList(
-    PreParserFormalParameters* parameters, PreParserExpression params,
-    const Scanner::Location& params_loc, Scanner::Location* duplicate_loc,
-    const Scope::Snapshot& scope_snapshot, bool* ok) {
-  // TODO(wingo): Detect duplicated identifiers in paramlists.  Detect parameter
-  // lists that are too long.
-}
-
-ZoneList<PreParserExpression>* ParserBaseTraits<PreParser>::GetNonPatternList()
-    const {
-  return delegate()->function_state_->non_patterns_to_rewrite();
-}
-
-ZoneList<
-    typename ParserBaseTraits<PreParser>::Type::ExpressionClassifier::Error>*
-ParserBaseTraits<PreParser>::GetReportedErrorList() const {
-  return delegate()->function_state_->GetReportedErrorList();
-}
-
-Zone* ParserBaseTraits<PreParser>::zone() const {
-  return delegate()->function_state_->scope()->zone();
-}
-
 PreParserStatementList PreParser::ParseEagerFunctionBody(
     PreParserIdentifier function_name, int pos,
     const PreParserFormalParameters& parameters, FunctionKind kind,
     FunctionLiteral::FunctionType function_type, bool* ok) {
   ParsingModeScope parsing_mode(this, PARSE_EAGERLY);
+  PreParserStatementList result;
 
   Scope* inner_scope = scope();
   if (!parameters.is_simple) inner_scope = NewScope(BLOCK_SCOPE);
 
   {
     BlockState block_state(&scope_state_, inner_scope);
-    ParseStatementList(Token::RBRACE, ok);
+    ParseStatementList(result, Token::RBRACE, ok);
     if (!*ok) return PreParserStatementList();
   }
 
   Expect(Token::RBRACE, ok);
-  return PreParserStatementList();
+  return result;
 }
 
 PreParserExpression PreParser::CloseTemplateLiteral(TemplateLiteralState* state,
diff --git a/src/parsing/rewriter.cc b/src/parsing/rewriter.cc
index 51ff547..57009bd 100644
--- a/src/parsing/rewriter.cc
+++ b/src/parsing/rewriter.cc
@@ -347,10 +347,13 @@
     Variable* result = closure_scope->NewTemporary(
         info->ast_value_factory()->dot_result_string());
     // The name string must be internalized at this point.
+    info->ast_value_factory()->Internalize(info->isolate());
     DCHECK(!result->name().is_null());
     Processor processor(info->isolate(), closure_scope, result,
                         info->ast_value_factory());
     processor.Process(body);
+    // Internalize any values created during rewriting.
+    info->ast_value_factory()->Internalize(info->isolate());
     if (processor.HasStackOverflow()) return false;
 
     if (processor.result_assigned()) {
diff --git a/src/parsing/scanner-character-streams.cc b/src/parsing/scanner-character-streams.cc
index 7cdef87..3f10cfa 100644
--- a/src/parsing/scanner-character-streams.cc
+++ b/src/parsing/scanner-character-streams.cc
@@ -7,506 +7,677 @@
 #include "include/v8.h"
 #include "src/globals.h"
 #include "src/handles.h"
-#include "src/list-inl.h"  // TODO(mstarzinger): Temporary cycle breaker!
 #include "src/objects-inl.h"
+#include "src/parsing/scanner.h"
 #include "src/unicode-inl.h"
 
 namespace v8 {
 namespace internal {
 
-namespace {
-
-size_t CopyUtf8CharsToUtf16Chars(uint16_t* dest, size_t length, const byte* src,
-                                 size_t* src_pos, size_t src_length) {
-  static const unibrow::uchar kMaxUtf16Character =
-      unibrow::Utf16::kMaxNonSurrogateCharCode;
-  size_t i = 0;
-  // Because of the UTF-16 lead and trail surrogates, we stop filling the buffer
-  // one character early (in the normal case), because we need to have at least
-  // two free spaces in the buffer to be sure that the next character will fit.
-  while (i < length - 1) {
-    if (*src_pos == src_length) break;
-    unibrow::uchar c = src[*src_pos];
-    if (c <= unibrow::Utf8::kMaxOneByteChar) {
-      *src_pos = *src_pos + 1;
-    } else {
-      c = unibrow::Utf8::CalculateValue(src + *src_pos, src_length - *src_pos,
-                                        src_pos);
-    }
-    if (c > kMaxUtf16Character) {
-      dest[i++] = unibrow::Utf16::LeadSurrogate(c);
-      dest[i++] = unibrow::Utf16::TrailSurrogate(c);
-    } else {
-      dest[i++] = static_cast<uc16>(c);
-    }
-  }
-  return i;
-}
-
-size_t CopyCharsHelper(uint16_t* dest, size_t length, const uint8_t* src,
-                       size_t* src_pos, size_t src_length,
-                       ScriptCompiler::StreamedSource::Encoding encoding) {
-  // It's possible that this will be called with length 0, but don't assume that
-  // the functions this calls handle it gracefully.
-  if (length == 0) return 0;
-
-  if (encoding == ScriptCompiler::StreamedSource::UTF8) {
-    return CopyUtf8CharsToUtf16Chars(dest, length, src, src_pos, src_length);
-  }
-
-  size_t to_fill = length;
-  if (to_fill > src_length - *src_pos) to_fill = src_length - *src_pos;
-
-  if (encoding == ScriptCompiler::StreamedSource::ONE_BYTE) {
-    v8::internal::CopyChars<uint8_t, uint16_t>(dest, src + *src_pos, to_fill);
-  } else {
-    DCHECK(encoding == ScriptCompiler::StreamedSource::TWO_BYTE);
-    v8::internal::CopyChars<uint16_t, uint16_t>(
-        dest, reinterpret_cast<const uint16_t*>(src + *src_pos), to_fill);
-  }
-  *src_pos += to_fill;
-  return to_fill;
-}
-
-}  // namespace
-
-
 // ----------------------------------------------------------------------------
 // BufferedUtf16CharacterStreams
+//
+// A buffered character stream based on a random access character
+// source (ReadBlock can be called with pos() pointing to any position,
+// even positions before the current).
+class BufferedUtf16CharacterStream : public Utf16CharacterStream {
+ public:
+  BufferedUtf16CharacterStream();
+
+ protected:
+  static const size_t kBufferSize = 512;
+
+  bool ReadBlock() override;
+
+  // FillBuffer should read up to kBufferSize characters at position and store
+  // them into buffer_[0..]. It returns the number of characters stored.
+  virtual size_t FillBuffer(size_t position) = 0;
+
+  // Fixed sized buffer that this class reads from.
+  // The base class' buffer_start_ should always point to buffer_.
+  uc16 buffer_[kBufferSize];
+};
 
 BufferedUtf16CharacterStream::BufferedUtf16CharacterStream()
-    : Utf16CharacterStream(),
-      pushback_limit_(NULL) {
-  // Initialize buffer as being empty. First read will fill the buffer.
-  buffer_cursor_ = buffer_;
-  buffer_end_ = buffer_;
-}
-
-
-BufferedUtf16CharacterStream::~BufferedUtf16CharacterStream() { }
-
-void BufferedUtf16CharacterStream::PushBack(uc32 character) {
-  if (character == kEndOfInput) {
-    pos_--;
-    return;
-  }
-  if (pushback_limit_ == NULL && buffer_cursor_ > buffer_) {
-    // buffer_ is writable, buffer_cursor_ is const pointer.
-    buffer_[--buffer_cursor_ - buffer_] = static_cast<uc16>(character);
-    pos_--;
-    return;
-  }
-  SlowPushBack(static_cast<uc16>(character));
-}
-
-
-void BufferedUtf16CharacterStream::SlowPushBack(uc16 character) {
-  // In pushback mode, the end of the buffer contains pushback,
-  // and the start of the buffer (from buffer start to pushback_limit_)
-  // contains valid data that comes just after the pushback.
-  // We NULL the pushback_limit_ if pushing all the way back to the
-  // start of the buffer.
-
-  if (pushback_limit_ == NULL) {
-    // Enter pushback mode.
-    pushback_limit_ = buffer_end_;
-    buffer_end_ = buffer_ + kBufferSize;
-    buffer_cursor_ = buffer_end_;
-  }
-  // Ensure that there is room for at least one pushback.
-  DCHECK(buffer_cursor_ > buffer_);
-  DCHECK(pos_ > 0);
-  buffer_[--buffer_cursor_ - buffer_] = character;
-  if (buffer_cursor_ == buffer_) {
-    pushback_limit_ = NULL;
-  } else if (buffer_cursor_ < pushback_limit_) {
-    pushback_limit_ = buffer_cursor_;
-  }
-  pos_--;
-}
-
+    : Utf16CharacterStream(buffer_, buffer_, buffer_, 0) {}
 
 bool BufferedUtf16CharacterStream::ReadBlock() {
+  DCHECK_EQ(buffer_start_, buffer_);
+
+  size_t position = pos();
+  buffer_pos_ = position;
   buffer_cursor_ = buffer_;
-  if (pushback_limit_ != NULL) {
-    // Leave pushback mode.
-    buffer_end_ = pushback_limit_;
-    pushback_limit_ = NULL;
-    // If there were any valid characters left at the
-    // start of the buffer, use those.
-    if (buffer_cursor_ < buffer_end_) return true;
-    // Otherwise read a new block.
-  }
-  size_t length = FillBuffer(pos_);
-  buffer_end_ = buffer_ + length;
-  return length > 0;
+  buffer_end_ = buffer_ + FillBuffer(position);
+  DCHECK_EQ(pos(), position);
+  DCHECK_LE(buffer_end_, buffer_start_ + kBufferSize);
+  return buffer_cursor_ < buffer_end_;
 }
 
-
-size_t BufferedUtf16CharacterStream::SlowSeekForward(size_t delta) {
-  // Leave pushback mode (i.e., ignore that there might be valid data
-  // in the buffer before the pushback_limit_ point).
-  pushback_limit_ = NULL;
-  return BufferSeekForward(delta);
-}
-
-
 // ----------------------------------------------------------------------------
-// GenericStringUtf16CharacterStream
+// GenericStringUtf16CharacterStream.
+//
+// A stream w/ a data source being a (flattened) Handle<String>.
 
+class GenericStringUtf16CharacterStream : public BufferedUtf16CharacterStream {
+ public:
+  GenericStringUtf16CharacterStream(Handle<String> data, size_t start_position,
+                                    size_t end_position);
+
+ protected:
+  size_t FillBuffer(size_t position) override;
+
+  Handle<String> string_;
+  size_t length_;
+};
 
 GenericStringUtf16CharacterStream::GenericStringUtf16CharacterStream(
     Handle<String> data, size_t start_position, size_t end_position)
-    : string_(data), length_(end_position), bookmark_(kNoBookmark) {
-  DCHECK(end_position >= start_position);
-  pos_ = start_position;
+    : string_(data), length_(end_position) {
+  DCHECK_GE(end_position, start_position);
+  DCHECK_GE(static_cast<size_t>(string_->length()),
+            end_position - start_position);
+  buffer_pos_ = start_position;
 }
 
-
-GenericStringUtf16CharacterStream::~GenericStringUtf16CharacterStream() { }
-
-
-bool GenericStringUtf16CharacterStream::SetBookmark() {
-  bookmark_ = pos_;
-  return true;
-}
-
-
-void GenericStringUtf16CharacterStream::ResetToBookmark() {
-  DCHECK(bookmark_ != kNoBookmark);
-  pos_ = bookmark_;
-  buffer_cursor_ = buffer_;
-  buffer_end_ = buffer_ + FillBuffer(pos_);
-}
-
-
-size_t GenericStringUtf16CharacterStream::BufferSeekForward(size_t delta) {
-  size_t old_pos = pos_;
-  pos_ = Min(pos_ + delta, length_);
-  ReadBlock();
-  return pos_ - old_pos;
-}
-
-
 size_t GenericStringUtf16CharacterStream::FillBuffer(size_t from_pos) {
   if (from_pos >= length_) return 0;
-  size_t length = kBufferSize;
-  if (from_pos + length > length_) {
-    length = length_ - from_pos;
-  }
+
+  size_t length = i::Min(kBufferSize, length_ - from_pos);
   String::WriteToFlat<uc16>(*string_, buffer_, static_cast<int>(from_pos),
                             static_cast<int>(from_pos + length));
   return length;
 }
 
-
 // ----------------------------------------------------------------------------
-// ExternalStreamingStream
+// ExternalTwoByteStringUtf16CharacterStream.
+//
+// A stream whose data source is a Handle<ExternalTwoByteString>. It avoids
+// all data copying.
 
-size_t ExternalStreamingStream::FillBuffer(size_t position) {
-  // Ignore "position" which is the position in the decoded data. Instead,
-  // ExternalStreamingStream keeps track of the position in the raw data.
-  size_t data_in_buffer = 0;
-  // Note that the UTF-8 decoder might not be able to fill the buffer
-  // completely; it will typically leave the last character empty (see
-  // Utf8ToUtf16CharacterStream::CopyChars).
-  while (data_in_buffer < kBufferSize - 1) {
-    if (current_data_ == NULL) {
-      // GetSomeData will wait until the embedder has enough data. Here's an
-      // interface between the API which uses size_t (which is the correct type
-      // here) and the internal parts which use size_t.
-      current_data_length_ = source_stream_->GetMoreData(&current_data_);
-      current_data_offset_ = 0;
-      bool data_ends = current_data_length_ == 0;
-      bookmark_data_is_from_current_data_ = false;
+class ExternalTwoByteStringUtf16CharacterStream : public Utf16CharacterStream {
+ public:
+  ExternalTwoByteStringUtf16CharacterStream(Handle<ExternalTwoByteString> data,
+                                            size_t start_position,
+                                            size_t end_position);
 
-      // A caveat: a data chunk might end with bytes from an incomplete UTF-8
-      // character (the rest of the bytes will be in the next chunk).
-      if (encoding_ == ScriptCompiler::StreamedSource::UTF8) {
-        HandleUtf8SplitCharacters(&data_in_buffer);
-        if (!data_ends && current_data_offset_ == current_data_length_) {
-          // The data stream didn't end, but we used all the data in the
-          // chunk. This will only happen when the chunk was really small. We
-          // don't handle the case where a UTF-8 character is split over several
-          // chunks; in that case V8 won't crash, but it will be a parse error.
-          FlushCurrent();
-          continue;  // Request a new chunk.
-        }
-      }
+ private:
+  bool ReadBlock() override;
 
-      // Did the data stream end?
-      if (data_ends) {
-        DCHECK(utf8_split_char_buffer_length_ == 0);
-        return data_in_buffer;
-      }
-    }
-
-    // Fill the buffer from current_data_.
-    size_t new_offset = 0;
-    size_t new_chars_in_buffer =
-        CopyCharsHelper(buffer_ + data_in_buffer, kBufferSize - data_in_buffer,
-                        current_data_ + current_data_offset_, &new_offset,
-                        current_data_length_ - current_data_offset_, encoding_);
-    data_in_buffer += new_chars_in_buffer;
-    current_data_offset_ += new_offset;
-    DCHECK(data_in_buffer <= kBufferSize);
-
-    // Did we use all the data in the data chunk?
-    if (current_data_offset_ == current_data_length_) {
-      FlushCurrent();
-    }
-  }
-  return data_in_buffer;
-}
-
-
-bool ExternalStreamingStream::SetBookmark() {
-  // Bookmarking for this stream is a bit more complex than expected, since
-  // the stream state is distributed over several places:
-  // - pos_ (inherited from Utf16CharacterStream)
-  // - buffer_cursor_ and buffer_end_ (also from Utf16CharacterStream)
-  // - buffer_ (from BufferedUtf16CharacterStream)
-  // - current_data_ (+ .._offset_ and .._length) (this class)
-  // - utf8_split_char_buffer_* (a partial utf8 symbol at the block boundary)
-  //
-  // The underlying source_stream_ instance likely could re-construct this
-  // local data for us, but with the given interfaces we have no way of
-  // accomplishing this. Thus, we'll have to save all data locally.
-  //
-  // What gets saved where:
-  // - pos_  =>  bookmark_
-  // - buffer_[buffer_cursor_ .. buffer_end_]  =>  bookmark_buffer_
-  // - current_data_[.._offset_ .. .._length_]  =>  bookmark_data_
-  // - utf8_split_char_buffer_* => bookmark_utf8_split...
-  //
-  // To make sure we don't unnecessarily copy data, we also maintain
-  // whether bookmark_data_ contains a copy of the current current_data_
-  // block. This is done with:
-  // - bookmark_data_is_from_current_data_
-  // - bookmark_data_offset_: offset into bookmark_data_
-  //
-  // Note that bookmark_data_is_from_current_data_ must be maintained
-  // whenever current_data_ is updated.
-
-  bookmark_ = pos_;
-
-  size_t buffer_length = buffer_end_ - buffer_cursor_;
-  bookmark_buffer_.Dispose();
-  bookmark_buffer_ = Vector<uint16_t>::New(static_cast<int>(buffer_length));
-  CopyCharsUnsigned(bookmark_buffer_.start(), buffer_cursor_, buffer_length);
-
-  size_t data_length = current_data_length_ - current_data_offset_;
-  size_t bookmark_data_length = static_cast<size_t>(bookmark_data_.length());
-  if (bookmark_data_is_from_current_data_ &&
-      data_length < bookmark_data_length) {
-    // Fast case: bookmark_data_ was previously copied from the current
-    //            data block, and we have enough data for this bookmark.
-    bookmark_data_offset_ = bookmark_data_length - data_length;
-  } else {
-    // Slow case: We need to copy current_data_.
-    bookmark_data_.Dispose();
-    bookmark_data_ = Vector<uint8_t>::New(static_cast<int>(data_length));
-    CopyBytes(bookmark_data_.start(), current_data_ + current_data_offset_,
-              data_length);
-    bookmark_data_is_from_current_data_ = true;
-    bookmark_data_offset_ = 0;
-  }
-
-  bookmark_utf8_split_char_buffer_length_ = utf8_split_char_buffer_length_;
-  for (size_t i = 0; i < utf8_split_char_buffer_length_; i++) {
-    bookmark_utf8_split_char_buffer_[i] = utf8_split_char_buffer_[i];
-  }
-
-  return source_stream_->SetBookmark();
-}
-
-
-void ExternalStreamingStream::ResetToBookmark() {
-  source_stream_->ResetToBookmark();
-  FlushCurrent();
-
-  pos_ = bookmark_;
-
-  // bookmark_data_* => current_data_*
-  // (current_data_ assumes ownership of its memory.)
-  current_data_offset_ = 0;
-  current_data_length_ = bookmark_data_.length() - bookmark_data_offset_;
-  uint8_t* data = new uint8_t[current_data_length_];
-  CopyCharsUnsigned(data, bookmark_data_.begin() + bookmark_data_offset_,
-                    current_data_length_);
-  delete[] current_data_;
-  current_data_ = data;
-  bookmark_data_is_from_current_data_ = true;
-
-  // bookmark_buffer_ needs to be copied to buffer_.
-  CopyCharsUnsigned(buffer_, bookmark_buffer_.begin(),
-                    bookmark_buffer_.length());
-  buffer_cursor_ = buffer_;
-  buffer_end_ = buffer_ + bookmark_buffer_.length();
-
-  // utf8 split char buffer
-  utf8_split_char_buffer_length_ = bookmark_utf8_split_char_buffer_length_;
-  for (size_t i = 0; i < bookmark_utf8_split_char_buffer_length_; i++) {
-    utf8_split_char_buffer_[i] = bookmark_utf8_split_char_buffer_[i];
-  }
-}
-
-
-void ExternalStreamingStream::FlushCurrent() {
-  delete[] current_data_;
-  current_data_ = NULL;
-  current_data_length_ = 0;
-  current_data_offset_ = 0;
-  bookmark_data_is_from_current_data_ = false;
-}
-
-
-void ExternalStreamingStream::HandleUtf8SplitCharacters(
-    size_t* data_in_buffer) {
-  // Note the following property of UTF-8 which makes this function possible:
-  // Given any byte, we can always read its local environment (in both
-  // directions) to find out the (possibly multi-byte) character it belongs
-  // to. Single byte characters are of the form 0b0XXXXXXX. The first byte of a
-  // multi-byte character is of the form 0b110XXXXX, 0b1110XXXX or
-  // 0b11110XXX. The continuation bytes are of the form 0b10XXXXXX.
-
-  // First check if we have leftover data from the last chunk.
-  unibrow::uchar c;
-  if (utf8_split_char_buffer_length_ > 0) {
-    // Move the bytes which are part of the split character (which started in
-    // the previous chunk) into utf8_split_char_buffer_. Note that the
-    // continuation bytes are of the form 0b10XXXXXX, thus c >> 6 == 2.
-    while (current_data_offset_ < current_data_length_ &&
-           utf8_split_char_buffer_length_ < 4 &&
-           (c = current_data_[current_data_offset_]) >> 6 == 2) {
-      utf8_split_char_buffer_[utf8_split_char_buffer_length_] = c;
-      ++utf8_split_char_buffer_length_;
-      ++current_data_offset_;
-    }
-
-    // Convert the data in utf8_split_char_buffer_.
-    size_t new_offset = 0;
-    size_t new_chars_in_buffer =
-        CopyCharsHelper(buffer_ + *data_in_buffer,
-                        kBufferSize - *data_in_buffer, utf8_split_char_buffer_,
-                        &new_offset, utf8_split_char_buffer_length_, encoding_);
-    *data_in_buffer += new_chars_in_buffer;
-    // Make sure we used all the data.
-    DCHECK(new_offset == utf8_split_char_buffer_length_);
-    DCHECK(*data_in_buffer <= kBufferSize);
-
-    utf8_split_char_buffer_length_ = 0;
-  }
-
-  // Move bytes which are part of an incomplete character from the end of the
-  // current chunk to utf8_split_char_buffer_. They will be converted when the
-  // next data chunk arrives. Note that all valid UTF-8 characters are at most 4
-  // bytes long, but if the data is invalid, we can have character values bigger
-  // than unibrow::Utf8::kMaxOneByteChar for more than 4 consecutive bytes.
-  while (current_data_length_ > current_data_offset_ &&
-         (c = current_data_[current_data_length_ - 1]) >
-             unibrow::Utf8::kMaxOneByteChar &&
-         utf8_split_char_buffer_length_ < 4) {
-    --current_data_length_;
-    ++utf8_split_char_buffer_length_;
-    if (c >= (3 << 6)) {
-      // 3 << 6 = 0b11000000; this is the first byte of the multi-byte
-      // character. No need to copy the previous characters into the conversion
-      // buffer (even if they're multi-byte).
-      break;
-    }
-  }
-  CHECK(utf8_split_char_buffer_length_ <= 4);
-  for (size_t i = 0; i < utf8_split_char_buffer_length_; ++i) {
-    utf8_split_char_buffer_[i] = current_data_[current_data_length_ + i];
-  }
-}
-
-
-// ----------------------------------------------------------------------------
-// ExternalTwoByteStringUtf16CharacterStream
-
-ExternalTwoByteStringUtf16CharacterStream::
-    ~ExternalTwoByteStringUtf16CharacterStream() { }
+  const uc16* raw_data_;  // Pointer to the actual array of characters.
+  size_t start_pos_;
+  size_t end_pos_;
+};
 
 ExternalTwoByteStringUtf16CharacterStream::
     ExternalTwoByteStringUtf16CharacterStream(
-        Handle<ExternalTwoByteString> data, int start_position,
-        int end_position)
-    : raw_data_(data->GetTwoByteData(start_position)), bookmark_(kNoBookmark) {
-  buffer_cursor_ = raw_data_,
-  buffer_end_ = raw_data_ + (end_position - start_position);
-  pos_ = start_position;
+        Handle<ExternalTwoByteString> data, size_t start_position,
+        size_t end_position)
+    : raw_data_(data->GetTwoByteData(static_cast<int>(start_position))),
+      start_pos_(start_position),
+      end_pos_(end_position) {
+  buffer_start_ = raw_data_;
+  buffer_cursor_ = raw_data_;
+  buffer_end_ = raw_data_ + (end_pos_ - start_pos_);
+  buffer_pos_ = start_pos_;
 }
 
-
-bool ExternalTwoByteStringUtf16CharacterStream::SetBookmark() {
-  bookmark_ = pos_;
-  return true;
-}
-
-
-void ExternalTwoByteStringUtf16CharacterStream::ResetToBookmark() {
-  DCHECK(bookmark_ != kNoBookmark);
-  pos_ = bookmark_;
-  buffer_cursor_ = raw_data_ + bookmark_;
+bool ExternalTwoByteStringUtf16CharacterStream::ReadBlock() {
+  size_t position = pos();
+  bool have_data = start_pos_ <= position && position < end_pos_;
+  if (have_data) {
+    buffer_pos_ = start_pos_;
+    buffer_cursor_ = raw_data_ + (position - start_pos_),
+    buffer_end_ = raw_data_ + (end_pos_ - start_pos_);
+  } else {
+    buffer_pos_ = position;
+    buffer_cursor_ = raw_data_;
+    buffer_end_ = raw_data_;
+  }
+  return have_data;
 }
 
 // ----------------------------------------------------------------------------
 // ExternalOneByteStringUtf16CharacterStream
+//
+// A stream whose data source is a Handle<ExternalOneByteString>.
 
-ExternalOneByteStringUtf16CharacterStream::
-    ~ExternalOneByteStringUtf16CharacterStream() {}
+class ExternalOneByteStringUtf16CharacterStream
+    : public BufferedUtf16CharacterStream {
+ public:
+  ExternalOneByteStringUtf16CharacterStream(Handle<ExternalOneByteString> data,
+                                            size_t start_position,
+                                            size_t end_position);
+
+  // For testing:
+  ExternalOneByteStringUtf16CharacterStream(const char* data, size_t length);
+
+ protected:
+  size_t FillBuffer(size_t position) override;
+
+  const uint8_t* raw_data_;  // Pointer to the actual array of characters.
+  size_t length_;
+};
 
 ExternalOneByteStringUtf16CharacterStream::
     ExternalOneByteStringUtf16CharacterStream(
-        Handle<ExternalOneByteString> data, int start_position,
-        int end_position)
-    : raw_data_(data->GetChars()),
-      length_(end_position),
-      bookmark_(kNoBookmark) {
+        Handle<ExternalOneByteString> data, size_t start_position,
+        size_t end_position)
+    : raw_data_(data->GetChars()), length_(end_position) {
   DCHECK(end_position >= start_position);
-  pos_ = start_position;
+  buffer_pos_ = start_position;
 }
 
 ExternalOneByteStringUtf16CharacterStream::
     ExternalOneByteStringUtf16CharacterStream(const char* data, size_t length)
-    : raw_data_(reinterpret_cast<const uint8_t*>(data)),
-      length_(length),
-      bookmark_(kNoBookmark) {}
-
-ExternalOneByteStringUtf16CharacterStream::
-    ExternalOneByteStringUtf16CharacterStream(const char* data)
-    : ExternalOneByteStringUtf16CharacterStream(data, strlen(data)) {}
-
-bool ExternalOneByteStringUtf16CharacterStream::SetBookmark() {
-  bookmark_ = pos_;
-  return true;
-}
-
-void ExternalOneByteStringUtf16CharacterStream::ResetToBookmark() {
-  DCHECK(bookmark_ != kNoBookmark);
-  pos_ = bookmark_;
-  buffer_cursor_ = buffer_;
-  buffer_end_ = buffer_ + FillBuffer(pos_);
-}
-
-size_t ExternalOneByteStringUtf16CharacterStream::BufferSeekForward(
-    size_t delta) {
-  size_t old_pos = pos_;
-  pos_ = Min(pos_ + delta, length_);
-  ReadBlock();
-  return pos_ - old_pos;
-}
+    : raw_data_(reinterpret_cast<const uint8_t*>(data)), length_(length) {}
 
 size_t ExternalOneByteStringUtf16CharacterStream::FillBuffer(size_t from_pos) {
   if (from_pos >= length_) return 0;
+
   size_t length = Min(kBufferSize, length_ - from_pos);
-  for (size_t i = 0; i < length; ++i) {
-    buffer_[i] = static_cast<uc16>(raw_data_[from_pos + i]);
-  }
+  i::CopyCharsUnsigned(buffer_, raw_data_ + from_pos, length);
   return length;
 }
 
+// ----------------------------------------------------------------------------
+// Utf8ExternalStreamingStream - chunked streaming of UTF-8 data.
+//
+// This implementation is fairly complex, since data arrives in chunks which
+// may 'cut' arbitrarily into utf-8 characters. Also, seeking to a given
+// character position is tricky because the byte position cannot be derived
+// from the character position.
+
+class Utf8ExternalStreamingStream : public BufferedUtf16CharacterStream {
+ public:
+  Utf8ExternalStreamingStream(
+      ScriptCompiler::ExternalSourceStream* source_stream)
+      : current_({0, {0, 0, unibrow::Utf8::Utf8IncrementalBuffer(0)}}),
+        source_stream_(source_stream) {}
+  ~Utf8ExternalStreamingStream() override {
+    for (size_t i = 0; i < chunks_.size(); i++) delete[] chunks_[i].data;
+  }
+
+ protected:
+  size_t FillBuffer(size_t position) override;
+
+ private:
+  // A position within the data stream. It stores:
+  // - The 'physical' position (# of bytes in the stream),
+  // - the 'logical' position (# of ucs-2 characters, also within the stream),
+  // - a possibly incomplete utf-8 char at the current 'physical' position.
+  struct StreamPosition {
+    size_t bytes;
+    size_t chars;
+    unibrow::Utf8::Utf8IncrementalBuffer incomplete_char;
+  };
+
+  // Position contains a StreamPosition and the index of the chunk the position
+  // points into. (The chunk_no could be derived from pos, but that'd be
+  // an expensive search through all chunks.)
+  struct Position {
+    size_t chunk_no;
+    StreamPosition pos;
+  };
+
+  // A chunk in the list of chunks, containing:
+  // - The chunk data (data pointer and length), and
+  // - the position at the first byte of the chunk.
+  struct Chunk {
+    const uint8_t* data;
+    size_t length;
+    StreamPosition start;
+  };
+
+  // Within the current chunk, skip forward from current_ towards position.
+  bool SkipToPosition(size_t position);
+  // Within the current chunk, fill the buffer_ (while it has capacity).
+  void FillBufferFromCurrentChunk();
+  // Fetch a new chunk (assuming current_ is at the end of the current data).
+  bool FetchChunk();
+  // Search through the chunks and set current_ to point to the given position.
+  // (This call is potentially expensive.)
+  void SearchPosition(size_t position);
+
+  std::vector<Chunk> chunks_;
+  Position current_;
+  ScriptCompiler::ExternalSourceStream* source_stream_;
+};
+
+bool Utf8ExternalStreamingStream::SkipToPosition(size_t position) {
+  DCHECK_LE(current_.pos.chars, position);  // We can only skip forward.
+
+  // Already there? Then return immediately.
+  if (current_.pos.chars == position) return true;
+
+  const Chunk& chunk = chunks_[current_.chunk_no];
+  DCHECK(current_.pos.bytes >= chunk.start.bytes);
+
+  unibrow::Utf8::Utf8IncrementalBuffer incomplete_char =
+      chunk.start.incomplete_char;
+  size_t it = current_.pos.bytes - chunk.start.bytes;
+  size_t chars = chunk.start.chars;
+  while (it < chunk.length && chars < position) {
+    unibrow::uchar t =
+        unibrow::Utf8::ValueOfIncremental(chunk.data[it], &incomplete_char);
+    if (t != unibrow::Utf8::kIncomplete) {
+      chars++;
+      if (t > unibrow::Utf16::kMaxNonSurrogateCharCode) chars++;
+    }
+    it++;
+  }
+
+  current_.pos.bytes += it;
+  current_.pos.chars = chars;
+  current_.pos.incomplete_char = incomplete_char;
+  current_.chunk_no += (it == chunk.length);
+
+  return current_.pos.chars == position;
+}
+
+void Utf8ExternalStreamingStream::FillBufferFromCurrentChunk() {
+  DCHECK_LT(current_.chunk_no, chunks_.size());
+  DCHECK_EQ(buffer_start_, buffer_cursor_);
+  DCHECK_LT(buffer_end_ + 1, buffer_start_ + kBufferSize);
+
+  const Chunk& chunk = chunks_[current_.chunk_no];
+
+  // The buffer_ is writable, but buffer_*_ members are const. So we get a
+  // non-const pointer into buffer that points to the same char as buffer_end_.
+  uint16_t* cursor = buffer_ + (buffer_end_ - buffer_start_);
+  DCHECK_EQ(cursor, buffer_end_);
+
+  // If the current chunk is the last (empty) chunk we'll have to process
+  // any left-over, partial characters.
+  if (chunk.length == 0) {
+    unibrow::uchar t =
+        unibrow::Utf8::ValueOfIncrementalFinish(&current_.pos.incomplete_char);
+    if (t != unibrow::Utf8::kBufferEmpty) {
+      DCHECK(t < unibrow::Utf16::kMaxNonSurrogateCharCode);
+      *cursor = static_cast<uc16>(t);
+      buffer_end_++;
+      current_.pos.chars++;
+    }
+    return;
+  }
+
+  static const unibrow::uchar kUtf8Bom = 0xfeff;
+
+  unibrow::Utf8::Utf8IncrementalBuffer incomplete_char =
+      current_.pos.incomplete_char;
+  size_t it;
+  for (it = current_.pos.bytes - chunk.start.bytes;
+       it < chunk.length && cursor + 1 < buffer_start_ + kBufferSize; it++) {
+    unibrow::uchar t =
+        unibrow::Utf8::ValueOfIncremental(chunk.data[it], &incomplete_char);
+    if (t == unibrow::Utf8::kIncomplete) continue;
+    if (V8_LIKELY(t < kUtf8Bom)) {
+      *(cursor++) = static_cast<uc16>(t);  // The most frequent case.
+    } else if (t == kUtf8Bom && current_.pos.bytes + it == 2) {
+      // BOM detected at beginning of the stream. Don't copy it.
+    } else if (t <= unibrow::Utf16::kMaxNonSurrogateCharCode) {
+      *(cursor++) = static_cast<uc16>(t);
+    } else {
+      *(cursor++) = unibrow::Utf16::LeadSurrogate(t);
+      *(cursor++) = unibrow::Utf16::TrailSurrogate(t);
+    }
+  }
+
+  current_.pos.bytes = chunk.start.bytes + it;
+  current_.pos.chars += (cursor - buffer_end_);
+  current_.pos.incomplete_char = incomplete_char;
+  current_.chunk_no += (it == chunk.length);
+
+  buffer_end_ = cursor;
+}
+
+bool Utf8ExternalStreamingStream::FetchChunk() {
+  DCHECK_EQ(current_.chunk_no, chunks_.size());
+  DCHECK(chunks_.empty() || chunks_.back().length != 0);
+
+  const uint8_t* chunk = nullptr;
+  size_t length = source_stream_->GetMoreData(&chunk);
+  chunks_.push_back({chunk, length, current_.pos});
+  return length > 0;
+}
+
+void Utf8ExternalStreamingStream::SearchPosition(size_t position) {
+  // If current_ already points to the right position, we're done.
+  //
+  // This is expected to be the common case, since we typically call
+  // FillBuffer right after the current buffer.
+  if (current_.pos.chars == position) return;
+
+  // No chunks. Fetch at least one, so we can assume !chunks_.empty() below.
+  if (chunks_.empty()) {
+    DCHECK_EQ(current_.chunk_no, 0);
+    DCHECK_EQ(current_.pos.bytes, 0);
+    DCHECK_EQ(current_.pos.chars, 0);
+    FetchChunk();
+  }
+
+  // Search for the last chunk whose start position is less or equal to
+  // position.
+  size_t chunk_no = chunks_.size() - 1;
+  while (chunk_no > 0 && chunks_[chunk_no].start.chars > position) {
+    chunk_no--;
+  }
+
+  // Did we find the terminating (zero-length) chunk? Then we're seeking
+  // behind the end of the data, and position does not exist.
+  // Set current_ to point to the terminating chunk.
+  if (chunks_[chunk_no].length == 0) {
+    current_ = {chunk_no, chunks_[chunk_no].start};
+    return;
+  }
+
+  // Did we find the non-last chunk? Then our position must be within chunk_no.
+  if (chunk_no + 1 < chunks_.size()) {
+    // Fancy-pants optimization for ASCII chunks within a utf-8 stream.
+    // (Many web sites declare utf-8 encoding, but use only (or almost only) the
+    //  ASCII subset for their JavaScript sources. We can exploit this, by
+    //  checking whether the # bytes in a chunk are equal to the # chars, and if
+    //  so avoid the expensive SkipToPosition.)
+    bool ascii_only_chunk =
+        (chunks_[chunk_no + 1].start.bytes - chunks_[chunk_no].start.bytes) ==
+        (chunks_[chunk_no + 1].start.chars - chunks_[chunk_no].start.chars);
+    if (ascii_only_chunk) {
+      size_t skip = position - chunks_[chunk_no].start.chars;
+      current_ = {chunk_no,
+                  {chunks_[chunk_no].start.bytes + skip,
+                   chunks_[chunk_no].start.chars + skip,
+                   unibrow::Utf8::Utf8IncrementalBuffer(0)}};
+    } else {
+      current_ = {chunk_no, chunks_[chunk_no].start};
+      SkipToPosition(position);
+    }
+
+    // Since position was within the chunk, SkipToPosition should have found
+    // something.
+    DCHECK_EQ(position, current_.pos.chars);
+    return;
+  }
+
+  // What's left: We're in the last, non-terminating chunk. Our position
+  // may be in the chunk, but it may also be in 'future' chunks, which we'll
+  // have to obtain.
+  DCHECK_EQ(chunk_no, chunks_.size() - 1);
+  current_ = {chunk_no, chunks_[chunk_no].start};
+  bool have_more_data = true;
+  bool found = SkipToPosition(position);
+  while (have_more_data && !found) {
+    DCHECK_EQ(current_.chunk_no, chunks_.size());
+    have_more_data = FetchChunk();
+    found = have_more_data && SkipToPosition(position);
+  }
+
+  // We'll return with a position != the desired position only if we're out
+  // of data. In that case, we'll point to the terminating chunk.
+  DCHECK_EQ(found, current_.pos.chars == position);
+  DCHECK_EQ(have_more_data, chunks_.back().length != 0);
+  DCHECK_IMPLIES(!found, !have_more_data);
+  DCHECK_IMPLIES(!found, current_.chunk_no == chunks_.size() - 1);
+}
+
+size_t Utf8ExternalStreamingStream::FillBuffer(size_t position) {
+  buffer_cursor_ = buffer_;
+  buffer_end_ = buffer_;
+
+  SearchPosition(position);
+  bool out_of_data = current_.chunk_no != chunks_.size() &&
+                     chunks_[current_.chunk_no].length == 0;
+  if (out_of_data) return 0;
+
+  // Fill the buffer, until we have at least one char (or are out of data).
+  // (The embedder might give us 1-byte blocks within a utf-8 char, so we
+  //  can't guarantee progress with one chunk. Thus we iterate.)
+  while (!out_of_data && buffer_cursor_ == buffer_end_) {
+    // At end of current data, but there might be more? Then fetch it.
+    if (current_.chunk_no == chunks_.size()) {
+      out_of_data = !FetchChunk();
+    }
+    FillBufferFromCurrentChunk();
+  }
+
+  DCHECK_EQ(current_.pos.chars - position, buffer_end_ - buffer_cursor_);
+  return buffer_end_ - buffer_cursor_;
+}
+
+// ----------------------------------------------------------------------------
+// Chunks - helper for One- + TwoByteExternalStreamingStream
+namespace {
+
+struct Chunk {
+  const uint8_t* data;
+  size_t byte_length;
+  size_t byte_pos;
+};
+
+typedef std::vector<struct Chunk> Chunks;
+
+void DeleteChunks(Chunks& chunks) {
+  for (size_t i = 0; i < chunks.size(); i++) delete[] chunks[i].data;
+}
+
+// Return the chunk index for the chunk containing position.
+// If position is behind the end of the stream, the index of the last,
+// zero-length chunk is returned.
+size_t FindChunk(Chunks& chunks, ScriptCompiler::ExternalSourceStream* source_,
+                 size_t position) {
+  size_t end_pos =
+      chunks.empty() ? 0 : (chunks.back().byte_pos + chunks.back().byte_length);
+
+  // Get more data if needed. We usually won't enter the loop body.
+  bool out_of_data = !chunks.empty() && chunks.back().byte_length == 0;
+  while (!out_of_data && end_pos <= position + 1) {
+    const uint8_t* chunk = nullptr;
+    size_t len = source_->GetMoreData(&chunk);
+
+    chunks.push_back({chunk, len, end_pos});
+    end_pos += len;
+    out_of_data = (len == 0);
+  }
+
+  // Here, we should always have at least one chunk, and we either have the
+  // chunk we were looking for, or we're out of data. Also, out_of_data and
+  // end_pos are current (and designate whether we have exhausted the stream,
+  // and the length of data received so far, respectively).
+  DCHECK(!chunks.empty());
+  DCHECK_EQ(end_pos, chunks.back().byte_pos + chunks.back().byte_length);
+  DCHECK_EQ(out_of_data, chunks.back().byte_length == 0);
+  DCHECK(position < end_pos || out_of_data);
+
+  // Edge case: position is behind the end of stream: Return the last (length 0)
+  // chunk to indicate the end of the stream.
+  if (position >= end_pos) {
+    DCHECK(out_of_data);
+    return chunks.size() - 1;
+  }
+
+  // We almost always 'stream', meaning we want data from the last chunk, so
+  // let's look at chunks back-to-front.
+  size_t chunk_no = chunks.size() - 1;
+  while (chunks[chunk_no].byte_pos > position) {
+    DCHECK_NE(chunk_no, 0);
+    chunk_no--;
+  }
+  DCHECK_LE(chunks[chunk_no].byte_pos, position);
+  DCHECK_LT(position, chunks[chunk_no].byte_pos + chunks[chunk_no].byte_length);
+  return chunk_no;
+}
+
+}  // anonymous namespace
+
+// ----------------------------------------------------------------------------
+// OneByteExternalStreamingStream
+//
+// A stream of latin-1 encoded, chunked data.
+
+class OneByteExternalStreamingStream : public BufferedUtf16CharacterStream {
+ public:
+  explicit OneByteExternalStreamingStream(
+      ScriptCompiler::ExternalSourceStream* source)
+      : source_(source) {}
+  ~OneByteExternalStreamingStream() override { DeleteChunks(chunks_); }
+
+ protected:
+  size_t FillBuffer(size_t position) override;
+
+ private:
+  Chunks chunks_;
+  ScriptCompiler::ExternalSourceStream* source_;
+};
+
+size_t OneByteExternalStreamingStream::FillBuffer(size_t position) {
+  const Chunk& chunk = chunks_[FindChunk(chunks_, source_, position)];
+  if (chunk.byte_length == 0) return 0;
+
+  size_t start_pos = position - chunk.byte_pos;
+  size_t len = i::Min(kBufferSize, chunk.byte_length - start_pos);
+  i::CopyCharsUnsigned(buffer_, chunk.data + start_pos, len);
+  return len;
+}
+
+// ----------------------------------------------------------------------------
+// TwoByteExternalStreamingStream
+//
+// A stream of ucs-2 data, delivered in chunks. Chunks may be 'cut' into the
+// middle of characters (or even contain only one byte), which adds a bit
+// of complexity. This stream avoids all data copying, except for characters
+// that cross chunk boundaries.
+
+class TwoByteExternalStreamingStream : public Utf16CharacterStream {
+ public:
+  explicit TwoByteExternalStreamingStream(
+      ScriptCompiler::ExternalSourceStream* source);
+  ~TwoByteExternalStreamingStream() override;
+
+ protected:
+  bool ReadBlock() override;
+
+  Chunks chunks_;
+  ScriptCompiler::ExternalSourceStream* source_;
+  uc16 one_char_buffer_;
+};
+
+TwoByteExternalStreamingStream::TwoByteExternalStreamingStream(
+    ScriptCompiler::ExternalSourceStream* source)
+    : Utf16CharacterStream(&one_char_buffer_, &one_char_buffer_,
+                           &one_char_buffer_, 0),
+      source_(source),
+      one_char_buffer_(0) {}
+
+TwoByteExternalStreamingStream::~TwoByteExternalStreamingStream() {
+  DeleteChunks(chunks_);
+}
+
+bool TwoByteExternalStreamingStream::ReadBlock() {
+  size_t position = pos();
+
+  // We'll search for the 2nd byte of our character, to make sure we
+  // have enough data for at least one character.
+  size_t chunk_no = FindChunk(chunks_, source_, 2 * position + 1);
+
+  // Out of data? Return 0.
+  if (chunks_[chunk_no].byte_length == 0) {
+    buffer_cursor_ = buffer_start_;
+    buffer_end_ = buffer_start_;
+    return false;
+  }
+
+  Chunk& current = chunks_[chunk_no];
+
+  // Annoying edge case: Chunks may not be 2-byte aligned, meaning that a
+  // character may be split between the previous and the current chunk.
+  // If we find such a lonely byte at the beginning of the chunk, we'll use
+  // one_char_buffer_ to hold the full character.
+  bool lonely_byte = (chunks_[chunk_no].byte_pos == (2 * position + 1));
+  if (lonely_byte) {
+    DCHECK_NE(chunk_no, 0);
+    Chunk& previous_chunk = chunks_[chunk_no - 1];
+#ifdef V8_TARGET_BIG_ENDIAN
+    uc16 character = current.data[0] |
+                     previous_chunk.data[previous_chunk.byte_length - 1] << 8;
+#else
+    uc16 character = previous_chunk.data[previous_chunk.byte_length - 1] |
+                     current.data[0] << 8;
+#endif
+
+    one_char_buffer_ = character;
+    buffer_pos_ = position;
+    buffer_start_ = &one_char_buffer_;
+    buffer_cursor_ = &one_char_buffer_;
+    buffer_end_ = &one_char_buffer_ + 1;
+    return true;
+  }
+
+  // Common case: character is in current chunk.
+  DCHECK_LE(current.byte_pos, 2 * position);
+  DCHECK_LT(2 * position + 1, current.byte_pos + current.byte_length);
+
+  // Determine # of full ucs-2 chars in stream, and whether we started on an odd
+  // byte boundary.
+  bool odd_start = (current.byte_pos % 2) == 1;
+  size_t number_chars = (current.byte_length - odd_start) / 2;
+
+  // Point the buffer_*_ members into the current chunk and set buffer_cursor_
+  // to point to position. Be careful when converting the byte positions (in
+  // Chunk) to the ucs-2 character positions (in buffer_*_ members).
+  buffer_start_ = reinterpret_cast<const uint16_t*>(current.data + odd_start);
+  buffer_end_ = buffer_start_ + number_chars;
+  buffer_pos_ = (current.byte_pos + odd_start) / 2;
+  buffer_cursor_ = buffer_start_ + (position - buffer_pos_);
+  DCHECK_EQ(position, pos());
+  return true;
+}
+
+// ----------------------------------------------------------------------------
+// ScannerStream: Create stream instances.
+
+Utf16CharacterStream* ScannerStream::For(Handle<String> data) {
+  return ScannerStream::For(data, 0, data->length());
+}
+
+Utf16CharacterStream* ScannerStream::For(Handle<String> data, int start_pos,
+                                         int end_pos) {
+  DCHECK(start_pos >= 0);
+  DCHECK(end_pos <= data->length());
+  if (data->IsExternalOneByteString()) {
+    return new ExternalOneByteStringUtf16CharacterStream(
+        Handle<ExternalOneByteString>::cast(data), start_pos, end_pos);
+  } else if (data->IsExternalTwoByteString()) {
+    return new ExternalTwoByteStringUtf16CharacterStream(
+        Handle<ExternalTwoByteString>::cast(data), start_pos, end_pos);
+  } else {
+    // TODO(vogelheim): Maybe call data.Flatten() first?
+    return new GenericStringUtf16CharacterStream(data, start_pos, end_pos);
+  }
+}
+
+std::unique_ptr<Utf16CharacterStream> ScannerStream::ForTesting(
+    const char* data) {
+  return ScannerStream::ForTesting(data, strlen(data));
+}
+
+std::unique_ptr<Utf16CharacterStream> ScannerStream::ForTesting(
+    const char* data, size_t length) {
+  return std::unique_ptr<Utf16CharacterStream>(
+      new ExternalOneByteStringUtf16CharacterStream(data, length));
+}
+
+Utf16CharacterStream* ScannerStream::For(
+    ScriptCompiler::ExternalSourceStream* source_stream,
+    v8::ScriptCompiler::StreamedSource::Encoding encoding) {
+  switch (encoding) {
+    case v8::ScriptCompiler::StreamedSource::TWO_BYTE:
+      return new TwoByteExternalStreamingStream(source_stream);
+    case v8::ScriptCompiler::StreamedSource::ONE_BYTE:
+      return new OneByteExternalStreamingStream(source_stream);
+    case v8::ScriptCompiler::StreamedSource::UTF8:
+      return new Utf8ExternalStreamingStream(source_stream);
+  }
+  UNREACHABLE();
+  return nullptr;
+}
+
 }  // namespace internal
 }  // namespace v8
diff --git a/src/parsing/scanner-character-streams.h b/src/parsing/scanner-character-streams.h
index 94d8284..ac81613 100644
--- a/src/parsing/scanner-character-streams.h
+++ b/src/parsing/scanner-character-streams.h
@@ -5,187 +5,27 @@
 #ifndef V8_PARSING_SCANNER_CHARACTER_STREAMS_H_
 #define V8_PARSING_SCANNER_CHARACTER_STREAMS_H_
 
+#include "include/v8.h"  // for v8::ScriptCompiler
 #include "src/handles.h"
-#include "src/parsing/scanner.h"
-#include "src/vector.h"
 
 namespace v8 {
 namespace internal {
 
-// Forward declarations.
-class ExternalTwoByteString;
-class ExternalOneByteString;
+class Utf16CharacterStream;
 
-// A buffered character stream based on a random access character
-// source (ReadBlock can be called with pos_ pointing to any position,
-// even positions before the current).
-class BufferedUtf16CharacterStream: public Utf16CharacterStream {
+class ScannerStream {
  public:
-  BufferedUtf16CharacterStream();
-  ~BufferedUtf16CharacterStream() override;
-
-  void PushBack(uc32 character) override;
-
- protected:
-  static const size_t kBufferSize = 512;
-  static const size_t kPushBackStepSize = 16;
-
-  size_t SlowSeekForward(size_t delta) override;
-  bool ReadBlock() override;
-  virtual void SlowPushBack(uc16 character);
-
-  virtual size_t BufferSeekForward(size_t delta) = 0;
-  virtual size_t FillBuffer(size_t position) = 0;
-
-  const uc16* pushback_limit_;
-  uc16 buffer_[kBufferSize];
-};
-
-
-// Generic string stream.
-class GenericStringUtf16CharacterStream: public BufferedUtf16CharacterStream {
- public:
-  GenericStringUtf16CharacterStream(Handle<String> data, size_t start_position,
-                                    size_t end_position);
-  ~GenericStringUtf16CharacterStream() override;
-
-  bool SetBookmark() override;
-  void ResetToBookmark() override;
-
- protected:
-  static const size_t kNoBookmark = -1;
-
-  size_t BufferSeekForward(size_t delta) override;
-  size_t FillBuffer(size_t position) override;
-
-  Handle<String> string_;
-  size_t length_;
-  size_t bookmark_;
-};
-
-
-// ExternalStreamingStream is a wrapper around an ExternalSourceStream (see
-// include/v8.h) subclass implemented by the embedder.
-class ExternalStreamingStream : public BufferedUtf16CharacterStream {
- public:
-  ExternalStreamingStream(ScriptCompiler::ExternalSourceStream* source_stream,
-                          v8::ScriptCompiler::StreamedSource::Encoding encoding)
-      : source_stream_(source_stream),
-        encoding_(encoding),
-        current_data_(NULL),
-        current_data_offset_(0),
-        current_data_length_(0),
-        utf8_split_char_buffer_length_(0),
-        bookmark_(0),
-        bookmark_data_is_from_current_data_(false),
-        bookmark_data_offset_(0),
-        bookmark_utf8_split_char_buffer_length_(0) {}
-
-  ~ExternalStreamingStream() override {
-    delete[] current_data_;
-    bookmark_buffer_.Dispose();
-    bookmark_data_.Dispose();
-  }
-
-  size_t BufferSeekForward(size_t delta) override {
-    // We never need to seek forward when streaming scripts. We only seek
-    // forward when we want to parse a function whose location we already know,
-    // and when streaming, we don't know the locations of anything we haven't
-    // seen yet.
-    UNREACHABLE();
-    return 0;
-  }
-
-  size_t FillBuffer(size_t position) override;
-
-  bool SetBookmark() override;
-  void ResetToBookmark() override;
-
- private:
-  void HandleUtf8SplitCharacters(size_t* data_in_buffer);
-  void FlushCurrent();
-
-  ScriptCompiler::ExternalSourceStream* source_stream_;
-  v8::ScriptCompiler::StreamedSource::Encoding encoding_;
-  const uint8_t* current_data_;
-  size_t current_data_offset_;
-  size_t current_data_length_;
-  // For converting UTF-8 characters which are split across two data chunks.
-  uint8_t utf8_split_char_buffer_[4];
-  size_t utf8_split_char_buffer_length_;
-
-  // Bookmark support. See comments in ExternalStreamingStream::SetBookmark
-  // for additional details.
-  size_t bookmark_;
-  Vector<uint16_t> bookmark_buffer_;
-  Vector<uint8_t> bookmark_data_;
-  bool bookmark_data_is_from_current_data_;
-  size_t bookmark_data_offset_;
-  uint8_t bookmark_utf8_split_char_buffer_[4];
-  size_t bookmark_utf8_split_char_buffer_length_;
-};
-
-
-// UTF16 buffer to read characters from an external string.
-class ExternalTwoByteStringUtf16CharacterStream: public Utf16CharacterStream {
- public:
-  ExternalTwoByteStringUtf16CharacterStream(Handle<ExternalTwoByteString> data,
-                                            int start_position,
-                                            int end_position);
-  ~ExternalTwoByteStringUtf16CharacterStream() override;
-
-  void PushBack(uc32 character) override {
-    DCHECK(buffer_cursor_ > raw_data_);
-    pos_--;
-    if (character != kEndOfInput) {
-      buffer_cursor_--;
-    }
-  }
-
-  bool SetBookmark() override;
-  void ResetToBookmark() override;
-
- private:
-  size_t SlowSeekForward(size_t delta) override {
-    // Fast case always handles seeking.
-    return 0;
-  }
-  bool ReadBlock() override {
-    // Entire string is read at start.
-    return false;
-  }
-  const uc16* raw_data_;  // Pointer to the actual array of characters.
-
-  static const size_t kNoBookmark = -1;
-
-  size_t bookmark_;
-};
-
-// UTF16 buffer to read characters from an external latin1 string.
-class ExternalOneByteStringUtf16CharacterStream
-    : public BufferedUtf16CharacterStream {
- public:
-  ExternalOneByteStringUtf16CharacterStream(Handle<ExternalOneByteString> data,
-                                            int start_position,
-                                            int end_position);
-  ~ExternalOneByteStringUtf16CharacterStream() override;
+  static Utf16CharacterStream* For(Handle<String> data);
+  static Utf16CharacterStream* For(Handle<String> data, int start_pos,
+                                   int end_pos);
+  static Utf16CharacterStream* For(
+      ScriptCompiler::ExternalSourceStream* source_stream,
+      ScriptCompiler::StreamedSource::Encoding encoding);
 
   // For testing:
-  explicit ExternalOneByteStringUtf16CharacterStream(const char* data);
-  ExternalOneByteStringUtf16CharacterStream(const char* data, size_t length);
-
-  bool SetBookmark() override;
-  void ResetToBookmark() override;
-
- private:
-  static const size_t kNoBookmark = -1;
-
-  size_t BufferSeekForward(size_t delta) override;
-  size_t FillBuffer(size_t position) override;
-
-  const uint8_t* raw_data_;  // Pointer to the actual array of characters.
-  size_t length_;
-  size_t bookmark_;
+  static std::unique_ptr<Utf16CharacterStream> ForTesting(const char* data);
+  static std::unique_ptr<Utf16CharacterStream> ForTesting(const char* data,
+                                                          size_t length);
 };
 
 }  // namespace internal
diff --git a/src/parsing/scanner.cc b/src/parsing/scanner.cc
index 06ead2e..e41b56f 100644
--- a/src/parsing/scanner.cc
+++ b/src/parsing/scanner.cc
@@ -14,7 +14,7 @@
 #include "src/char-predicates-inl.h"
 #include "src/conversions-inl.h"
 #include "src/list-inl.h"
-#include "src/parsing/parser.h"
+#include "src/parsing/duplicate-finder.h"  // For Scanner::FindSymbol
 
 namespace v8 {
 namespace internal {
@@ -26,25 +26,60 @@
   return isolate->factory()->InternalizeTwoByteString(two_byte_literal());
 }
 
+// ----------------------------------------------------------------------------
+// Scanner::BookmarkScope
 
-// Default implementation for streams that do not support bookmarks.
-bool Utf16CharacterStream::SetBookmark() { return false; }
-void Utf16CharacterStream::ResetToBookmark() { UNREACHABLE(); }
+const size_t Scanner::BookmarkScope::kBookmarkAtFirstPos =
+    std::numeric_limits<size_t>::max() - 2;
+const size_t Scanner::BookmarkScope::kNoBookmark =
+    std::numeric_limits<size_t>::max() - 1;
+const size_t Scanner::BookmarkScope::kBookmarkWasApplied =
+    std::numeric_limits<size_t>::max();
 
+void Scanner::BookmarkScope::Set() {
+  DCHECK_EQ(bookmark_, kNoBookmark);
+  DCHECK_EQ(scanner_->next_next_.token, Token::UNINITIALIZED);
+
+  // The first token is a bit special, since current_ will still be
+  // uninitialized. In this case, store kBookmarkAtFirstPos and special-case it
+  // when
+  // applying the bookmark.
+  DCHECK_IMPLIES(
+      scanner_->current_.token == Token::UNINITIALIZED,
+      scanner_->current_.location.beg_pos == scanner_->next_.location.beg_pos);
+  bookmark_ = (scanner_->current_.token == Token::UNINITIALIZED)
+                  ? kBookmarkAtFirstPos
+                  : scanner_->location().beg_pos;
+}
+
+void Scanner::BookmarkScope::Apply() {
+  DCHECK(HasBeenSet());  // Caller hasn't called SetBookmark.
+  if (bookmark_ == kBookmarkAtFirstPos) {
+    scanner_->SeekNext(0);
+  } else {
+    scanner_->SeekNext(bookmark_);
+    scanner_->Next();
+    DCHECK_EQ(scanner_->location().beg_pos, bookmark_);
+  }
+  bookmark_ = kBookmarkWasApplied;
+}
+
+bool Scanner::BookmarkScope::HasBeenSet() {
+  return bookmark_ != kNoBookmark && bookmark_ != kBookmarkWasApplied;
+}
+
+bool Scanner::BookmarkScope::HasBeenApplied() {
+  return bookmark_ == kBookmarkWasApplied;
+}
 
 // ----------------------------------------------------------------------------
 // Scanner
 
 Scanner::Scanner(UnicodeCache* unicode_cache)
     : unicode_cache_(unicode_cache),
-      bookmark_c0_(kNoBookmark),
       octal_pos_(Location::invalid()),
       decimal_with_leading_zero_pos_(Location::invalid()),
       found_html_comment_(false) {
-  bookmark_current_.literal_chars = &bookmark_current_literal_;
-  bookmark_current_.raw_literal_chars = &bookmark_current_raw_literal_;
-  bookmark_next_.literal_chars = &bookmark_next_literal_;
-  bookmark_next_.raw_literal_chars = &bookmark_next_raw_literal_;
 }
 
 
@@ -305,14 +340,14 @@
   return c == 0xFFFE;
 }
 
-
 bool Scanner::SkipWhiteSpace() {
   int start_position = source_pos();
 
   while (true) {
     while (true) {
-      // The unicode cache accepts unsigned inputs.
-      if (c0_ < 0) break;
+      // Don't skip behind the end of input.
+      if (c0_ == kEndOfInput) break;
+
       // Advance as long as character is a WhiteSpace or LineTerminator.
       // Remember if the latter is the case.
       if (unicode_cache_->IsLineTerminator(c0_)) {
@@ -328,25 +363,27 @@
     // line (with only whitespace in front of it), we treat the rest
     // of the line as a comment. This is in line with the way
     // SpiderMonkey handles it.
-    if (c0_ == '-' && has_line_terminator_before_next_) {
-      Advance();
-      if (c0_ == '-') {
-        Advance();
-        if (c0_ == '>') {
-          // Treat the rest of the line as a comment.
-          SkipSingleLineComment();
-          // Continue skipping white space after the comment.
-          continue;
-        }
-        PushBack('-');  // undo Advance()
-      }
-      PushBack('-');  // undo Advance()
-    }
-    // Return whether or not we skipped any characters.
-    return source_pos() != start_position;
-  }
-}
+    if (c0_ != '-' || !has_line_terminator_before_next_) break;
 
+    Advance();
+    if (c0_ != '-') {
+      PushBack('-');  // undo Advance()
+      break;
+    }
+
+    Advance();
+    if (c0_ != '>') {
+      PushBack2('-', '-');  // undo 2x Advance();
+      break;
+    }
+
+    // Treat the rest of the line as a comment.
+    SkipSingleLineComment();
+  }
+
+  // Return whether or not we skipped any characters.
+  return source_pos() != start_position;
+}
 
 Token::Value Scanner::SkipSingleLineComment() {
   Advance();
@@ -356,7 +393,7 @@
   // separately by the lexical grammar and becomes part of the
   // stream of input elements for the syntactic grammar (see
   // ECMA-262, section 7.4).
-  while (c0_ >= 0 && !unicode_cache_->IsLineTerminator(c0_)) {
+  while (c0_ != kEndOfInput && !unicode_cache_->IsLineTerminator(c0_)) {
     Advance();
   }
 
@@ -366,7 +403,7 @@
 
 Token::Value Scanner::SkipSourceURLComment() {
   TryToParseSourceURLComment();
-  while (c0_ >= 0 && !unicode_cache_->IsLineTerminator(c0_)) {
+  while (c0_ != kEndOfInput && !unicode_cache_->IsLineTerminator(c0_)) {
     Advance();
   }
 
@@ -377,11 +414,11 @@
 void Scanner::TryToParseSourceURLComment() {
   // Magic comments are of the form: //[#@]\s<name>=\s*<value>\s*.* and this
   // function will just return if it cannot parse a magic comment.
-  if (c0_ < 0 || !unicode_cache_->IsWhiteSpace(c0_)) return;
+  if (c0_ == kEndOfInput || !unicode_cache_->IsWhiteSpace(c0_)) return;
   Advance();
   LiteralBuffer name;
-  while (c0_ >= 0 && !unicode_cache_->IsWhiteSpaceOrLineTerminator(c0_) &&
-         c0_ != '=') {
+  while (c0_ != kEndOfInput &&
+         !unicode_cache_->IsWhiteSpaceOrLineTerminator(c0_) && c0_ != '=') {
     name.AddChar(c0_);
     Advance();
   }
@@ -399,10 +436,10 @@
     return;
   Advance();
   value->Reset();
-  while (c0_ >= 0 && unicode_cache_->IsWhiteSpace(c0_)) {
+  while (c0_ != kEndOfInput && unicode_cache_->IsWhiteSpace(c0_)) {
     Advance();
   }
-  while (c0_ >= 0 && !unicode_cache_->IsLineTerminator(c0_)) {
+  while (c0_ != kEndOfInput && !unicode_cache_->IsLineTerminator(c0_)) {
     // Disallowed characters.
     if (c0_ == '"' || c0_ == '\'') {
       value->Reset();
@@ -415,7 +452,7 @@
     Advance();
   }
   // Allow whitespace at the end.
-  while (c0_ >= 0 && !unicode_cache_->IsLineTerminator(c0_)) {
+  while (c0_ != kEndOfInput && !unicode_cache_->IsLineTerminator(c0_)) {
     if (!unicode_cache_->IsWhiteSpace(c0_)) {
       value->Reset();
       break;
@@ -429,10 +466,10 @@
   DCHECK(c0_ == '*');
   Advance();
 
-  while (c0_ >= 0) {
+  while (c0_ != kEndOfInput) {
     uc32 ch = c0_;
     Advance();
-    if (c0_ >= 0 && unicode_cache_->IsLineTerminator(ch)) {
+    if (c0_ != kEndOfInput && unicode_cache_->IsLineTerminator(ch)) {
       // Following ECMA-262, section 7.4, a comment containing
       // a newline will make the comment count as a line-terminator.
       has_multiline_comment_before_next_ = true;
@@ -450,24 +487,24 @@
   return Token::ILLEGAL;
 }
 
-
 Token::Value Scanner::ScanHtmlComment() {
   // Check for <!-- comments.
   DCHECK(c0_ == '!');
   Advance();
-  if (c0_ == '-') {
-    Advance();
-    if (c0_ == '-') {
-      found_html_comment_ = true;
-      return SkipSingleLineComment();
-    }
-    PushBack('-');  // undo Advance()
+  if (c0_ != '-') {
+    PushBack('!');  // undo Advance()
+    return Token::LT;
   }
-  PushBack('!');  // undo Advance()
-  DCHECK(c0_ == '!');
-  return Token::LT;
-}
 
+  Advance();
+  if (c0_ != '-') {
+    PushBack2('-', '!');  // undo 2x Advance()
+    return Token::LT;
+  }
+
+  found_html_comment_ = true;
+  return SkipSingleLineComment();
+}
 
 void Scanner::Scan() {
   next_.literal_chars = NULL;
@@ -716,7 +753,7 @@
         break;
 
       default:
-        if (c0_ < 0) {
+        if (c0_ == kEndOfInput) {
           token = Token::EOS;
         } else if (unicode_cache_->IsIdentifierStart(c0_)) {
           token = ScanIdentifierOrKeyword();
@@ -790,7 +827,7 @@
   // Positions inside the lookahead token aren't supported.
   DCHECK(pos >= current_pos);
   if (pos != current_pos) {
-    source_->SeekForward(pos - source_->pos());
+    source_->Seek(pos);
     Advance();
     // This function is only called to seek to the location
     // of the end of a function (at the "}" token). It doesn't matter
@@ -808,7 +845,8 @@
   Advance<capture_raw>();
 
   // Skip escaped newlines.
-  if (!in_template_literal && c0_ >= 0 && unicode_cache_->IsLineTerminator(c)) {
+  if (!in_template_literal && c0_ != kEndOfInput &&
+      unicode_cache_->IsLineTerminator(c)) {
     // Allow CR+LF newlines in multiline string literals.
     if (IsCarriageReturn(c) && IsLineFeed(c0_)) Advance<capture_raw>();
     // Allow LF+CR newlines in multiline string literals.
@@ -894,7 +932,7 @@
       HandleLeadSurrogate();
       break;
     }
-    if (c0_ < 0 || c0_ == '\n' || c0_ == '\r') return Token::ILLEGAL;
+    if (c0_ == kEndOfInput || c0_ == '\n' || c0_ == '\r') return Token::ILLEGAL;
     if (c0_ == quote) {
       literal.Complete();
       Advance<false, false>();
@@ -906,12 +944,12 @@
     AddLiteralChar(c);
   }
 
-  while (c0_ != quote && c0_ >= 0
-         && !unicode_cache_->IsLineTerminator(c0_)) {
+  while (c0_ != quote && c0_ != kEndOfInput &&
+         !unicode_cache_->IsLineTerminator(c0_)) {
     uc32 c = c0_;
     Advance();
     if (c == '\\') {
-      if (c0_ < 0 || !ScanEscape<false, false>()) {
+      if (c0_ == kEndOfInput || !ScanEscape<false, false>()) {
         return Token::ILLEGAL;
       }
     } else {
@@ -957,7 +995,7 @@
       ReduceRawLiteralLength(2);
       break;
     } else if (c == '\\') {
-      if (c0_ > 0 && unicode_cache_->IsLineTerminator(c0_)) {
+      if (c0_ != kEndOfInput && unicode_cache_->IsLineTerminator(c0_)) {
         // The TV of LineContinuation :: \ LineTerminatorSequence is the empty
         // code unit sequence.
         uc32 lastChar = c0_;
@@ -1155,7 +1193,7 @@
   // section 7.8.3, page 17 (note that we read only one decimal digit
   // if the value is 0).
   if (IsDecimalDigit(c0_) ||
-      (c0_ >= 0 && unicode_cache_->IsIdentifierStart(c0_)))
+      (c0_ != kEndOfInput && unicode_cache_->IsIdentifierStart(c0_)))
     return Token::ILLEGAL;
 
   literal.Complete();
@@ -1382,7 +1420,7 @@
   }
 
   // Scan the rest of the identifier characters.
-  while (c0_ >= 0 && unicode_cache_->IsIdentifierPart(c0_)) {
+  while (c0_ != kEndOfInput && unicode_cache_->IsIdentifierPart(c0_)) {
     if (c0_ != '\\') {
       uc32 next_char = c0_;
       Advance();
@@ -1408,7 +1446,7 @@
 Token::Value Scanner::ScanIdentifierSuffix(LiteralScope* literal,
                                            bool escaped) {
   // Scan the rest of the identifier characters.
-  while (c0_ >= 0 && unicode_cache_->IsIdentifierPart(c0_)) {
+  while (c0_ != kEndOfInput && unicode_cache_->IsIdentifierPart(c0_)) {
     if (c0_ == '\\') {
       uc32 c = ScanIdentifierUnicodeEscape();
       escaped = true;
@@ -1465,10 +1503,12 @@
   }
 
   while (c0_ != '/' || in_character_class) {
-    if (c0_ < 0 || unicode_cache_->IsLineTerminator(c0_)) return false;
+    if (c0_ == kEndOfInput || unicode_cache_->IsLineTerminator(c0_))
+      return false;
     if (c0_ == '\\') {  // Escape sequence.
       AddLiteralCharAdvance();
-      if (c0_ < 0 || unicode_cache_->IsLineTerminator(c0_)) return false;
+      if (c0_ == kEndOfInput || unicode_cache_->IsLineTerminator(c0_))
+        return false;
       AddLiteralCharAdvance();
       // If the escape allows more characters, i.e., \x??, \u????, or \c?,
       // only "safe" characters are allowed (letters, digits, underscore),
@@ -1499,7 +1539,7 @@
 
   // Scan regular expression flags.
   int flags = 0;
-  while (c0_ >= 0 && unicode_cache_->IsIdentifierPart(c0_)) {
+  while (c0_ != kEndOfInput && unicode_cache_->IsIdentifierPart(c0_)) {
     RegExp::Flags flag = RegExp::kNone;
     switch (c0_) {
       case 'g':
@@ -1574,202 +1614,31 @@
 
 
 int Scanner::FindSymbol(DuplicateFinder* finder, int value) {
+  // TODO(vogelheim): Move this logic into the calling class; this can be fully
+  //                  implemented using the public interface.
   if (is_literal_one_byte()) {
     return finder->AddOneByteSymbol(literal_one_byte_string(), value);
   }
   return finder->AddTwoByteSymbol(literal_two_byte_string(), value);
 }
 
+void Scanner::SeekNext(size_t position) {
+  // Use with care: This cleanly resets most, but not all scanner state.
+  // TODO(vogelheim): Fix this, or at least DCHECK the relevant conditions.
 
-bool Scanner::SetBookmark() {
-  if (c0_ != kNoBookmark && bookmark_c0_ == kNoBookmark &&
-      next_next_.token == Token::UNINITIALIZED && source_->SetBookmark()) {
-    bookmark_c0_ = c0_;
-    CopyTokenDesc(&bookmark_current_, &current_);
-    CopyTokenDesc(&bookmark_next_, &next_);
-    return true;
-  }
-  return false;
-}
-
-
-void Scanner::ResetToBookmark() {
-  DCHECK(BookmarkHasBeenSet());  // Caller hasn't called SetBookmark.
-
-  source_->ResetToBookmark();
-  c0_ = bookmark_c0_;
-  CopyToNextTokenDesc(&bookmark_current_);
-  current_ = next_;
-  CopyToNextTokenDesc(&bookmark_next_);
-  bookmark_c0_ = kBookmarkWasApplied;
-}
-
-
-bool Scanner::BookmarkHasBeenSet() { return bookmark_c0_ >= 0; }
-
-
-bool Scanner::BookmarkHasBeenReset() {
-  return bookmark_c0_ == kBookmarkWasApplied;
-}
-
-
-void Scanner::DropBookmark() { bookmark_c0_ = kNoBookmark; }
-
-void Scanner::CopyToNextTokenDesc(TokenDesc* from) {
-  StartLiteral();
-  StartRawLiteral();
-  CopyTokenDesc(&next_, from);
-  if (next_.literal_chars->length() == 0) next_.literal_chars = nullptr;
-  if (next_.raw_literal_chars->length() == 0) next_.raw_literal_chars = nullptr;
-}
-
-void Scanner::CopyTokenDesc(TokenDesc* to, TokenDesc* from) {
-  DCHECK_NOT_NULL(to);
-  DCHECK_NOT_NULL(from);
-  to->token = from->token;
-  to->location = from->location;
-  to->literal_chars->CopyFrom(from->literal_chars);
-  to->raw_literal_chars->CopyFrom(from->raw_literal_chars);
-}
-
-
-int DuplicateFinder::AddOneByteSymbol(Vector<const uint8_t> key, int value) {
-  return AddSymbol(key, true, value);
-}
-
-
-int DuplicateFinder::AddTwoByteSymbol(Vector<const uint16_t> key, int value) {
-  return AddSymbol(Vector<const uint8_t>::cast(key), false, value);
-}
-
-
-int DuplicateFinder::AddSymbol(Vector<const uint8_t> key,
-                               bool is_one_byte,
-                               int value) {
-  uint32_t hash = Hash(key, is_one_byte);
-  byte* encoding = BackupKey(key, is_one_byte);
-  base::HashMap::Entry* entry = map_.LookupOrInsert(encoding, hash);
-  int old_value = static_cast<int>(reinterpret_cast<intptr_t>(entry->value));
-  entry->value =
-    reinterpret_cast<void*>(static_cast<intptr_t>(value | old_value));
-  return old_value;
-}
-
-
-int DuplicateFinder::AddNumber(Vector<const uint8_t> key, int value) {
-  DCHECK(key.length() > 0);
-  // Quick check for already being in canonical form.
-  if (IsNumberCanonical(key)) {
-    return AddOneByteSymbol(key, value);
-  }
-
-  int flags = ALLOW_HEX | ALLOW_OCTAL | ALLOW_IMPLICIT_OCTAL | ALLOW_BINARY;
-  double double_value = StringToDouble(
-      unicode_constants_, key, flags, 0.0);
-  int length;
-  const char* string;
-  if (!std::isfinite(double_value)) {
-    string = "Infinity";
-    length = 8;  // strlen("Infinity");
-  } else {
-    string = DoubleToCString(double_value,
-                             Vector<char>(number_buffer_, kBufferSize));
-    length = StrLength(string);
-  }
-  return AddSymbol(Vector<const byte>(reinterpret_cast<const byte*>(string),
-                                      length), true, value);
-}
-
-
-bool DuplicateFinder::IsNumberCanonical(Vector<const uint8_t> number) {
-  // Test for a safe approximation of number literals that are already
-  // in canonical form: max 15 digits, no leading zeroes, except an
-  // integer part that is a single zero, and no trailing zeros below
-  // the decimal point.
-  int pos = 0;
-  int length = number.length();
-  if (number.length() > 15) return false;
-  if (number[pos] == '0') {
-    pos++;
-  } else {
-    while (pos < length &&
-           static_cast<unsigned>(number[pos] - '0') <= ('9' - '0')) pos++;
-  }
-  if (length == pos) return true;
-  if (number[pos] != '.') return false;
-  pos++;
-  bool invalid_last_digit = true;
-  while (pos < length) {
-    uint8_t digit = number[pos] - '0';
-    if (digit > '9' - '0') return false;
-    invalid_last_digit = (digit == 0);
-    pos++;
-  }
-  return !invalid_last_digit;
-}
-
-
-uint32_t DuplicateFinder::Hash(Vector<const uint8_t> key, bool is_one_byte) {
-  // Primitive hash function, almost identical to the one used
-  // for strings (except that it's seeded by the length and representation).
-  int length = key.length();
-  uint32_t hash = (length << 1) | (is_one_byte ? 1 : 0);
-  for (int i = 0; i < length; i++) {
-    uint32_t c = key[i];
-    hash = (hash + c) * 1025;
-    hash ^= (hash >> 6);
-  }
-  return hash;
-}
-
-
-bool DuplicateFinder::Match(void* first, void* second) {
-  // Decode lengths.
-  // Length + representation is encoded as base 128, most significant heptet
-  // first, with a 8th bit being non-zero while there are more heptets.
-  // The value encodes the number of bytes following, and whether the original
-  // was Latin1.
-  byte* s1 = reinterpret_cast<byte*>(first);
-  byte* s2 = reinterpret_cast<byte*>(second);
-  uint32_t length_one_byte_field = 0;
-  byte c1;
-  do {
-    c1 = *s1;
-    if (c1 != *s2) return false;
-    length_one_byte_field = (length_one_byte_field << 7) | (c1 & 0x7f);
-    s1++;
-    s2++;
-  } while ((c1 & 0x80) != 0);
-  int length = static_cast<int>(length_one_byte_field >> 1);
-  return memcmp(s1, s2, length) == 0;
-}
-
-
-byte* DuplicateFinder::BackupKey(Vector<const uint8_t> bytes,
-                                 bool is_one_byte) {
-  uint32_t one_byte_length = (bytes.length() << 1) | (is_one_byte ? 1 : 0);
-  backing_store_.StartSequence();
-  // Emit one_byte_length as base-128 encoded number, with the 7th bit set
-  // on the byte of every heptet except the last, least significant, one.
-  if (one_byte_length >= (1 << 7)) {
-    if (one_byte_length >= (1 << 14)) {
-      if (one_byte_length >= (1 << 21)) {
-        if (one_byte_length >= (1 << 28)) {
-          backing_store_.Add(
-              static_cast<uint8_t>((one_byte_length >> 28) | 0x80));
-        }
-        backing_store_.Add(
-            static_cast<uint8_t>((one_byte_length >> 21) | 0x80u));
-      }
-      backing_store_.Add(
-          static_cast<uint8_t>((one_byte_length >> 14) | 0x80u));
-    }
-    backing_store_.Add(static_cast<uint8_t>((one_byte_length >> 7) | 0x80u));
-  }
-  backing_store_.Add(static_cast<uint8_t>(one_byte_length & 0x7f));
-
-  backing_store_.AddBlock(bytes);
-  return backing_store_.EndSequence().start();
+  // To re-scan from a given character position, we need to:
+  // 1, Reset the current_, next_ and next_next_ tokens
+  //    (next_ + next_next_ will be overwrittem by Next(),
+  //     current_ will remain unchanged, so overwrite it fully.)
+  current_ = {{0, 0}, nullptr, nullptr, 0, Token::UNINITIALIZED};
+  next_.token = Token::UNINITIALIZED;
+  next_next_.token = Token::UNINITIALIZED;
+  // 2, reset the source to the desired position,
+  source_->Seek(position);
+  // 3, re-scan, by scanning the look-ahead char + 1 token (next_).
+  c0_ = source_->Advance();
+  Next();
+  DCHECK_EQ(next_.location.beg_pos, position);
 }
 
 }  // namespace internal
diff --git a/src/parsing/scanner.h b/src/parsing/scanner.h
index 66c6ce8..b2b1a8a 100644
--- a/src/parsing/scanner.h
+++ b/src/parsing/scanner.h
@@ -8,12 +8,9 @@
 #define V8_PARSING_SCANNER_H_
 
 #include "src/allocation.h"
-#include "src/base/hashmap.h"
 #include "src/base/logging.h"
 #include "src/char-predicates.h"
-#include "src/collector.h"
 #include "src/globals.h"
-#include "src/list.h"
 #include "src/messages.h"
 #include "src/parsing/token.h"
 #include "src/unicode-decoder.h"
@@ -25,127 +22,127 @@
 
 class AstRawString;
 class AstValueFactory;
+class DuplicateFinder;
+class ExternalOneByteString;
+class ExternalTwoByteString;
 class ParserRecorder;
 class UnicodeCache;
 
-
 // ---------------------------------------------------------------------
 // Buffered stream of UTF-16 code units, using an internal UTF-16 buffer.
 // A code unit is a 16 bit value representing either a 16 bit code point
 // or one part of a surrogate pair that make a single 21 bit code point.
-
 class Utf16CharacterStream {
  public:
-  Utf16CharacterStream() : pos_(0) { }
+  static const uc32 kEndOfInput = -1;
+
   virtual ~Utf16CharacterStream() { }
 
   // Returns and advances past the next UTF-16 code unit in the input
-  // stream. If there are no more code units, it returns a negative
-  // value.
+  // stream. If there are no more code units it returns kEndOfInput.
   inline uc32 Advance() {
-    if (buffer_cursor_ < buffer_end_ || ReadBlock()) {
-      pos_++;
+    if (V8_LIKELY(buffer_cursor_ < buffer_end_)) {
       return static_cast<uc32>(*(buffer_cursor_++));
+    } else if (ReadBlock()) {
+      return static_cast<uc32>(*(buffer_cursor_++));
+    } else {
+      // Note: currently the following increment is necessary to avoid a
+      // parser problem! The scanner treats the final kEndOfInput as
+      // a code unit with a position, and does math relative to that
+      // position.
+      buffer_cursor_++;
+      return kEndOfInput;
     }
-    // Note: currently the following increment is necessary to avoid a
-    // parser problem! The scanner treats the final kEndOfInput as
-    // a code unit with a position, and does math relative to that
-    // position.
-    pos_++;
-
-    return kEndOfInput;
   }
 
-  // Return the current position in the code unit stream.
-  // Starts at zero.
-  inline size_t pos() const { return pos_; }
-
-  // Skips forward past the next code_unit_count UTF-16 code units
-  // in the input, or until the end of input if that comes sooner.
-  // Returns the number of code units actually skipped. If less
-  // than code_unit_count,
-  inline size_t SeekForward(size_t code_unit_count) {
-    size_t buffered_chars = buffer_end_ - buffer_cursor_;
-    if (code_unit_count <= buffered_chars) {
-      buffer_cursor_ += code_unit_count;
-      pos_ += code_unit_count;
-      return code_unit_count;
+  // Go back one by one character in the input stream.
+  // This undoes the most recent Advance().
+  inline void Back() {
+    // The common case - if the previous character is within
+    // buffer_start_ .. buffer_end_ will be handles locally.
+    // Otherwise, a new block is requested.
+    if (V8_LIKELY(buffer_cursor_ > buffer_start_)) {
+      buffer_cursor_--;
+    } else {
+      ReadBlockAt(pos() - 1);
     }
-    return SlowSeekForward(code_unit_count);
   }
 
-  // Pushes back the most recently read UTF-16 code unit (or negative
-  // value if at end of input), i.e., the value returned by the most recent
-  // call to Advance.
-  // Must not be used right after calling SeekForward.
-  virtual void PushBack(int32_t code_unit) = 0;
+  // Go back one by two characters in the input stream. (This is the same as
+  // calling Back() twice. But Back() may - in some instances - do substantial
+  // work. Back2() guarantees this work will be done only once.)
+  inline void Back2() {
+    if (V8_LIKELY(buffer_cursor_ - 2 >= buffer_start_)) {
+      buffer_cursor_ -= 2;
+    } else {
+      ReadBlockAt(pos() - 2);
+    }
+  }
 
-  virtual bool SetBookmark();
-  virtual void ResetToBookmark();
+  inline size_t pos() const {
+    return buffer_pos_ + (buffer_cursor_ - buffer_start_);
+  }
+
+  inline void Seek(size_t pos) {
+    if (V8_LIKELY(pos >= buffer_pos_ &&
+                  pos < (buffer_pos_ + (buffer_end_ - buffer_start_)))) {
+      buffer_cursor_ = buffer_start_ + (pos - buffer_pos_);
+    } else {
+      ReadBlockAt(pos);
+    }
+  }
 
  protected:
-  static const uc32 kEndOfInput = -1;
+  Utf16CharacterStream(const uint16_t* buffer_start,
+                       const uint16_t* buffer_cursor,
+                       const uint16_t* buffer_end, size_t buffer_pos)
+      : buffer_start_(buffer_start),
+        buffer_cursor_(buffer_cursor),
+        buffer_end_(buffer_end),
+        buffer_pos_(buffer_pos) {}
+  Utf16CharacterStream() : Utf16CharacterStream(nullptr, nullptr, nullptr, 0) {}
 
-  // Ensures that the buffer_cursor_ points to the code_unit at
-  // position pos_ of the input, if possible. If the position
-  // is at or after the end of the input, return false. If there
-  // are more code_units available, return true.
+  void ReadBlockAt(size_t new_pos) {
+    // The callers of this method (Back/Back2/Seek) should handle the easy
+    // case (seeking within the current buffer), and we should only get here
+    // if we actually require new data.
+    // (This is really an efficiency check, not a correctness invariant.)
+    DCHECK(new_pos < buffer_pos_ ||
+           new_pos >= buffer_pos_ + (buffer_end_ - buffer_start_));
+
+    // Change pos() to point to new_pos.
+    buffer_pos_ = new_pos;
+    buffer_cursor_ = buffer_start_;
+    bool success = ReadBlock();
+    USE(success);
+
+    // Post-conditions: 1, on success, we should be at the right position.
+    //                  2, success == we should have more characters available.
+    DCHECK_IMPLIES(success, pos() == new_pos);
+    DCHECK_EQ(success, buffer_cursor_ < buffer_end_);
+    DCHECK_EQ(success, buffer_start_ < buffer_end_);
+  }
+
+  // Read more data, and update buffer_*_ to point to it.
+  // Returns true if more data was available.
+  //
+  // ReadBlock() may modify any of the buffer_*_ members, but must sure that
+  // the result of pos() remains unaffected.
+  //
+  // Examples:
+  // - a stream could either fill a separate buffer. Then buffer_start_ and
+  //   buffer_cursor_ would point to the beginning of the buffer, and
+  //   buffer_pos would be the old pos().
+  // - a stream with existing buffer chunks would set buffer_start_ and
+  //   buffer_end_ to cover the full chunk, and then buffer_cursor_ would
+  //   point into the middle of the buffer, while buffer_pos_ would describe
+  //   the start of the buffer.
   virtual bool ReadBlock() = 0;
-  virtual size_t SlowSeekForward(size_t code_unit_count) = 0;
 
+  const uint16_t* buffer_start_;
   const uint16_t* buffer_cursor_;
   const uint16_t* buffer_end_;
-  size_t pos_;
-};
-
-
-// ---------------------------------------------------------------------
-// DuplicateFinder discovers duplicate symbols.
-
-class DuplicateFinder {
- public:
-  explicit DuplicateFinder(UnicodeCache* constants)
-      : unicode_constants_(constants),
-        backing_store_(16),
-        map_(&Match) { }
-
-  int AddOneByteSymbol(Vector<const uint8_t> key, int value);
-  int AddTwoByteSymbol(Vector<const uint16_t> key, int value);
-  // Add a a number literal by converting it (if necessary)
-  // to the string that ToString(ToNumber(literal)) would generate.
-  // and then adding that string with AddOneByteSymbol.
-  // This string is the actual value used as key in an object literal,
-  // and the one that must be different from the other keys.
-  int AddNumber(Vector<const uint8_t> key, int value);
-
- private:
-  int AddSymbol(Vector<const uint8_t> key, bool is_one_byte, int value);
-  // Backs up the key and its length in the backing store.
-  // The backup is stored with a base 127 encoding of the
-  // length (plus a bit saying whether the string is one byte),
-  // followed by the bytes of the key.
-  uint8_t* BackupKey(Vector<const uint8_t> key, bool is_one_byte);
-
-  // Compare two encoded keys (both pointing into the backing store)
-  // for having the same base-127 encoded lengths and representation.
-  // and then having the same 'length' bytes following.
-  static bool Match(void* first, void* second);
-  // Creates a hash from a sequence of bytes.
-  static uint32_t Hash(Vector<const uint8_t> key, bool is_one_byte);
-  // Checks whether a string containing a JS number is its canonical
-  // form.
-  static bool IsNumberCanonical(Vector<const uint8_t> key);
-
-  // Size of buffer. Sufficient for using it to call DoubleToCString in
-  // from conversions.h.
-  static const int kBufferSize = 100;
-
-  UnicodeCache* unicode_constants_;
-  // Backing store used to store strings used as hashmap keys.
-  SequenceCollector<unsigned char> backing_store_;
-  base::HashMap map_;
-  // Buffer used for string->number->canonical string conversions.
-  char number_buffer_[kBufferSize];
+  size_t buffer_pos_;
 };
 
 
@@ -157,18 +154,24 @@
   // Scoped helper for a re-settable bookmark.
   class BookmarkScope {
    public:
-    explicit BookmarkScope(Scanner* scanner) : scanner_(scanner) {
+    explicit BookmarkScope(Scanner* scanner)
+        : scanner_(scanner), bookmark_(kNoBookmark) {
       DCHECK_NOT_NULL(scanner_);
     }
-    ~BookmarkScope() { scanner_->DropBookmark(); }
+    ~BookmarkScope() {}
 
-    bool Set() { return scanner_->SetBookmark(); }
-    void Reset() { scanner_->ResetToBookmark(); }
-    bool HasBeenSet() { return scanner_->BookmarkHasBeenSet(); }
-    bool HasBeenReset() { return scanner_->BookmarkHasBeenReset(); }
+    void Set();
+    void Apply();
+    bool HasBeenSet();
+    bool HasBeenApplied();
 
    private:
+    static const size_t kNoBookmark;
+    static const size_t kBookmarkWasApplied;
+    static const size_t kBookmarkAtFirstPos;
+
     Scanner* scanner_;
+    size_t bookmark_;
 
     DISALLOW_COPY_AND_ASSIGN(BookmarkScope);
   };
@@ -190,6 +193,7 @@
 
   // -1 is outside of the range of any real source code.
   static const int kNoOctalLocation = -1;
+  static const uc32 kEndOfInput = Utf16CharacterStream::kEndOfInput;
 
   explicit Scanner(UnicodeCache* scanner_contants);
 
@@ -251,7 +255,7 @@
     return LiteralMatches(data, length, false);
   }
 
-  void IsGetOrSet(bool* is_get, bool* is_set) {
+  bool IsGetOrSet(bool* is_get, bool* is_set) {
     if (is_literal_one_byte() &&
         literal_length() == 3 &&
         !literal_contains_escapes()) {
@@ -259,7 +263,9 @@
           reinterpret_cast<const char*>(literal_one_byte_string().start());
       *is_get = strncmp(token, "get", 3) == 0;
       *is_set = !*is_get && strncmp(token, "set", 3) == 0;
+      return *is_get || *is_set;
     }
+    return false;
   }
 
   int FindSymbol(DuplicateFinder* finder, int value);
@@ -418,23 +424,6 @@
 
     Handle<String> Internalize(Isolate* isolate) const;
 
-    void CopyFrom(const LiteralBuffer* other) {
-      if (other == nullptr) {
-        Reset();
-      } else {
-        is_one_byte_ = other->is_one_byte_;
-        position_ = other->position_;
-        if (position_ < backing_store_.length()) {
-          std::copy(other->backing_store_.begin(),
-                    other->backing_store_.begin() + position_,
-                    backing_store_.begin());
-        } else {
-          backing_store_.Dispose();
-          backing_store_ = other->backing_store_.Clone();
-        }
-      }
-    }
-
    private:
     static const int kInitialCapacity = 16;
     static const int kGrowthFactory = 4;
@@ -528,15 +517,6 @@
     scanner_error_ = MessageTemplate::kNone;
   }
 
-  // Support BookmarkScope functionality.
-  bool SetBookmark();
-  void ResetToBookmark();
-  bool BookmarkHasBeenSet();
-  bool BookmarkHasBeenReset();
-  void DropBookmark();
-  void CopyToNextTokenDesc(TokenDesc* from);
-  static void CopyTokenDesc(TokenDesc* to, TokenDesc* from);
-
   void ReportScannerError(const Location& location,
                           MessageTemplate::Template error) {
     if (has_error()) return;
@@ -550,6 +530,9 @@
     scanner_error_location_ = Location(pos, pos + 1);
   }
 
+  // Seek to the next_ token at the given position.
+  void SeekNext(size_t position);
+
   // Literal buffer support
   inline void StartLiteral() {
     LiteralBuffer* free_buffer =
@@ -618,7 +601,7 @@
     if (unibrow::Utf16::IsLeadSurrogate(c0_)) {
       uc32 c1 = source_->Advance();
       if (!unibrow::Utf16::IsTrailSurrogate(c1)) {
-        source_->PushBack(c1);
+        source_->Back();
       } else {
         c0_ = unibrow::Utf16::CombineSurrogatePair(c0_, c1);
       }
@@ -627,14 +610,22 @@
 
   void PushBack(uc32 ch) {
     if (c0_ > static_cast<uc32>(unibrow::Utf16::kMaxNonSurrogateCharCode)) {
-      source_->PushBack(unibrow::Utf16::TrailSurrogate(c0_));
-      source_->PushBack(unibrow::Utf16::LeadSurrogate(c0_));
+      source_->Back2();
     } else {
-      source_->PushBack(c0_);
+      source_->Back();
     }
     c0_ = ch;
   }
 
+  // Same as PushBack(ch1); PushBack(ch2).
+  // - Potentially more efficient as it uses Back2() on the stream.
+  // - Uses char as parameters, since we're only calling it with ASCII chars in
+  //   practice. This way, we can avoid a few edge cases.
+  void PushBack2(char ch1, char ch2) {
+    source_->Back2();
+    c0_ = ch2;
+  }
+
   inline Token::Value Select(Token::Value tok) {
     Advance();
     return tok;
@@ -790,37 +781,6 @@
   TokenDesc next_;       // desc for next token (one token look-ahead)
   TokenDesc next_next_;  // desc for the token after next (after PeakAhead())
 
-  // Variables for Scanner::BookmarkScope and the *Bookmark implementation.
-  // These variables contain the scanner state when a bookmark is set.
-  //
-  // We will use bookmark_c0_ as a 'control' variable, where:
-  // - bookmark_c0_ >= 0: A bookmark has been set and this contains c0_.
-  // - bookmark_c0_ == -1: No bookmark has been set.
-  // - bookmark_c0_ == -2: The bookmark has been applied (ResetToBookmark).
-  //
-  // Which state is being bookmarked? The parser state is distributed over
-  // several variables, roughly like this:
-  //   ...    1234        +       5678 ..... [character stream]
-  //       [current_] [next_] c0_ |      [scanner state]
-  // So when the scanner is logically at the beginning of an expression
-  // like "1234 + 4567", then:
-  // - current_ contains "1234"
-  // - next_ contains "+"
-  // - c0_ contains ' ' (the space between "+" and "5678",
-  // - the source_ character stream points to the beginning of "5678".
-  // To be able to restore this state, we will keep copies of current_, next_,
-  // and c0_; we'll ask the stream to bookmark itself, and we'll copy the
-  // contents of current_'s and next_'s literal buffers to bookmark_*_literal_.
-  static const uc32 kNoBookmark = -1;
-  static const uc32 kBookmarkWasApplied = -2;
-  uc32 bookmark_c0_;
-  TokenDesc bookmark_current_;
-  TokenDesc bookmark_next_;
-  LiteralBuffer bookmark_current_literal_;
-  LiteralBuffer bookmark_current_raw_literal_;
-  LiteralBuffer bookmark_next_literal_;
-  LiteralBuffer bookmark_next_raw_literal_;
-
   // Input stream. Must be initialized to an Utf16CharacterStream.
   Utf16CharacterStream* source_;
 
diff --git a/src/pending-compilation-error-handler.cc b/src/pending-compilation-error-handler.cc
index f1f9a20..3e88efc 100644
--- a/src/pending-compilation-error-handler.cc
+++ b/src/pending-compilation-error-handler.cc
@@ -4,6 +4,7 @@
 
 #include "src/pending-compilation-error-handler.h"
 
+#include "src/ast/ast-value-factory.h"
 #include "src/debug/debug.h"
 #include "src/handles.h"
 #include "src/isolate.h"
diff --git a/src/ppc/code-stubs-ppc.cc b/src/ppc/code-stubs-ppc.cc
index 6dd897b..ce423ea 100644
--- a/src/ppc/code-stubs-ppc.cc
+++ b/src/ppc/code-stubs-ppc.cc
@@ -1719,7 +1719,6 @@
   // r5 : feedback vector
   // r6 : slot in feedback vector (Smi)
   Label initialize, done, miss, megamorphic, not_array_function;
-  Label done_initialize_count, done_increment_count;
 
   DCHECK_EQ(*TypeFeedbackVector::MegamorphicSentinel(masm->isolate()),
             masm->isolate()->heap()->megamorphic_symbol());
@@ -1742,7 +1741,7 @@
   Register weak_value = r10;
   __ LoadP(weak_value, FieldMemOperand(r8, WeakCell::kValueOffset));
   __ cmp(r4, weak_value);
-  __ beq(&done_increment_count);
+  __ beq(&done);
   __ CompareRoot(r8, Heap::kmegamorphic_symbolRootIndex);
   __ beq(&done);
   __ LoadP(feedback_map, FieldMemOperand(r8, HeapObject::kMapOffset));
@@ -1765,7 +1764,7 @@
   __ LoadNativeContextSlot(Context::ARRAY_FUNCTION_INDEX, r8);
   __ cmp(r4, r8);
   __ bne(&megamorphic);
-  __ b(&done_increment_count);
+  __ b(&done);
 
   __ bind(&miss);
 
@@ -1795,32 +1794,22 @@
   // slot.
   CreateAllocationSiteStub create_stub(masm->isolate());
   CallStubInRecordCallTarget(masm, &create_stub);
-  __ b(&done_initialize_count);
+  __ b(&done);
 
   __ bind(&not_array_function);
 
   CreateWeakCellStub weak_cell_stub(masm->isolate());
   CallStubInRecordCallTarget(masm, &weak_cell_stub);
 
-  __ bind(&done_initialize_count);
-  // Initialize the call counter.
-  __ LoadSmiLiteral(r8, Smi::FromInt(1));
-  __ SmiToPtrArrayOffset(r7, r6);
-  __ add(r7, r5, r7);
-  __ StoreP(r8, FieldMemOperand(r7, count_offset), r0);
-  __ b(&done);
+  __ bind(&done);
 
-  __ bind(&done_increment_count);
-
-  // Increment the call count for monomorphic function calls.
+  // Increment the call count for all function calls.
   __ SmiToPtrArrayOffset(r8, r6);
   __ add(r8, r5, r8);
 
   __ LoadP(r7, FieldMemOperand(r8, count_offset));
   __ AddSmiLiteral(r7, r7, Smi::FromInt(1), r0);
   __ StoreP(r7, FieldMemOperand(r8, count_offset), r0);
-
-  __ bind(&done);
 }
 
 
@@ -1872,6 +1861,16 @@
   __ Jump(isolate()->builtins()->Construct(), RelocInfo::CODE_TARGET);
 }
 
+// Note: feedback_vector and slot are clobbered after the call.
+static void IncrementCallCount(MacroAssembler* masm, Register feedback_vector,
+                               Register slot, Register temp) {
+  const int count_offset = FixedArray::kHeaderSize + kPointerSize;
+  __ SmiToPtrArrayOffset(temp, slot);
+  __ add(feedback_vector, feedback_vector, temp);
+  __ LoadP(slot, FieldMemOperand(feedback_vector, count_offset));
+  __ AddSmiLiteral(slot, slot, Smi::FromInt(1), temp);
+  __ StoreP(slot, FieldMemOperand(feedback_vector, count_offset), temp);
+}
 
 void CallICStub::HandleArrayCase(MacroAssembler* masm, Label* miss) {
   // r4 - function
@@ -1885,12 +1884,7 @@
   __ mov(r3, Operand(arg_count()));
 
   // Increment the call count for monomorphic function calls.
-  const int count_offset = FixedArray::kHeaderSize + kPointerSize;
-  __ SmiToPtrArrayOffset(r8, r6);
-  __ add(r5, r5, r8);
-  __ LoadP(r6, FieldMemOperand(r5, count_offset));
-  __ AddSmiLiteral(r6, r6, Smi::FromInt(1), r0);
-  __ StoreP(r6, FieldMemOperand(r5, count_offset), r0);
+  IncrementCallCount(masm, r5, r6, r0);
 
   __ mr(r5, r7);
   __ mr(r6, r4);
@@ -1903,7 +1897,7 @@
   // r4 - function
   // r6 - slot id (Smi)
   // r5 - vector
-  Label extra_checks_or_miss, call, call_function;
+  Label extra_checks_or_miss, call, call_function, call_count_incremented;
   int argc = arg_count();
   ParameterCount actual(argc);
 
@@ -1934,13 +1928,11 @@
   // convincing us that we have a monomorphic JSFunction.
   __ JumpIfSmi(r4, &extra_checks_or_miss);
 
-  // Increment the call count for monomorphic function calls.
-  const int count_offset = FixedArray::kHeaderSize + kPointerSize;
-  __ LoadP(r6, FieldMemOperand(r9, count_offset));
-  __ AddSmiLiteral(r6, r6, Smi::FromInt(1), r0);
-  __ StoreP(r6, FieldMemOperand(r9, count_offset), r0);
-
   __ bind(&call_function);
+
+  // Increment the call count for monomorphic function calls.
+  IncrementCallCount(masm, r5, r6, r0);
+
   __ mov(r3, Operand(argc));
   __ Jump(masm->isolate()->builtins()->CallFunction(convert_mode(),
                                                     tail_call_mode()),
@@ -1980,6 +1972,11 @@
   __ StoreP(ip, FieldMemOperand(r9, FixedArray::kHeaderSize), r0);
 
   __ bind(&call);
+
+  // Increment the call count for megamorphic function calls.
+  IncrementCallCount(masm, r5, r6, r0);
+
+  __ bind(&call_count_incremented);
   __ mov(r3, Operand(argc));
   __ Jump(masm->isolate()->builtins()->Call(convert_mode(), tail_call_mode()),
           RelocInfo::CODE_TARGET);
@@ -2006,10 +2003,6 @@
   __ cmp(r7, ip);
   __ bne(&miss);
 
-  // Initialize the call counter.
-  __ LoadSmiLiteral(r8, Smi::FromInt(1));
-  __ StoreP(r8, FieldMemOperand(r9, count_offset), r0);
-
   // Store the function. Use a stub since we need a frame for allocation.
   // r5 - vector
   // r6 - slot
@@ -2017,9 +2010,13 @@
   {
     FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
     CreateWeakCellStub create_stub(masm->isolate());
+    __ Push(r5);
+    __ Push(r6);
     __ Push(cp, r4);
     __ CallStub(&create_stub);
     __ Pop(cp, r4);
+    __ Pop(r6);
+    __ Pop(r5);
   }
 
   __ b(&call_function);
@@ -2029,7 +2026,7 @@
   __ bind(&miss);
   GenerateMiss(masm);
 
-  __ b(&call);
+  __ b(&call_count_incremented);
 }
 
 
@@ -2211,290 +2208,6 @@
 }
 
 
-void SubStringStub::Generate(MacroAssembler* masm) {
-  Label runtime;
-
-  // Stack frame on entry.
-  //  lr: return address
-  //  sp[0]: to
-  //  sp[4]: from
-  //  sp[8]: string
-
-  // This stub is called from the native-call %_SubString(...), so
-  // nothing can be assumed about the arguments. It is tested that:
-  //  "string" is a sequential string,
-  //  both "from" and "to" are smis, and
-  //  0 <= from <= to <= string.length.
-  // If any of these assumptions fail, we call the runtime system.
-
-  const int kToOffset = 0 * kPointerSize;
-  const int kFromOffset = 1 * kPointerSize;
-  const int kStringOffset = 2 * kPointerSize;
-
-  __ LoadP(r5, MemOperand(sp, kToOffset));
-  __ LoadP(r6, MemOperand(sp, kFromOffset));
-
-  // If either to or from had the smi tag bit set, then fail to generic runtime
-  __ JumpIfNotSmi(r5, &runtime);
-  __ JumpIfNotSmi(r6, &runtime);
-  __ SmiUntag(r5);
-  __ SmiUntag(r6, SetRC);
-  // Both r5 and r6 are untagged integers.
-
-  // We want to bailout to runtime here if From is negative.
-  __ blt(&runtime, cr0);  // From < 0.
-
-  __ cmpl(r6, r5);
-  __ bgt(&runtime);  // Fail if from > to.
-  __ sub(r5, r5, r6);
-
-  // Make sure first argument is a string.
-  __ LoadP(r3, MemOperand(sp, kStringOffset));
-  __ JumpIfSmi(r3, &runtime);
-  Condition is_string = masm->IsObjectStringType(r3, r4);
-  __ b(NegateCondition(is_string), &runtime, cr0);
-
-  Label single_char;
-  __ cmpi(r5, Operand(1));
-  __ b(eq, &single_char);
-
-  // Short-cut for the case of trivial substring.
-  Label return_r3;
-  // r3: original string
-  // r5: result string length
-  __ LoadP(r7, FieldMemOperand(r3, String::kLengthOffset));
-  __ SmiUntag(r0, r7);
-  __ cmpl(r5, r0);
-  // Return original string.
-  __ beq(&return_r3);
-  // Longer than original string's length or negative: unsafe arguments.
-  __ bgt(&runtime);
-  // Shorter than original string's length: an actual substring.
-
-  // Deal with different string types: update the index if necessary
-  // and put the underlying string into r8.
-  // r3: original string
-  // r4: instance type
-  // r5: length
-  // r6: from index (untagged)
-  Label underlying_unpacked, sliced_string, seq_or_external_string;
-  // If the string is not indirect, it can only be sequential or external.
-  STATIC_ASSERT(kIsIndirectStringMask == (kSlicedStringTag & kConsStringTag));
-  STATIC_ASSERT(kIsIndirectStringMask != 0);
-  __ andi(r0, r4, Operand(kIsIndirectStringMask));
-  __ beq(&seq_or_external_string, cr0);
-
-  __ andi(r0, r4, Operand(kSlicedNotConsMask));
-  __ bne(&sliced_string, cr0);
-  // Cons string.  Check whether it is flat, then fetch first part.
-  __ LoadP(r8, FieldMemOperand(r3, ConsString::kSecondOffset));
-  __ CompareRoot(r8, Heap::kempty_stringRootIndex);
-  __ bne(&runtime);
-  __ LoadP(r8, FieldMemOperand(r3, ConsString::kFirstOffset));
-  // Update instance type.
-  __ LoadP(r4, FieldMemOperand(r8, HeapObject::kMapOffset));
-  __ lbz(r4, FieldMemOperand(r4, Map::kInstanceTypeOffset));
-  __ b(&underlying_unpacked);
-
-  __ bind(&sliced_string);
-  // Sliced string.  Fetch parent and correct start index by offset.
-  __ LoadP(r8, FieldMemOperand(r3, SlicedString::kParentOffset));
-  __ LoadP(r7, FieldMemOperand(r3, SlicedString::kOffsetOffset));
-  __ SmiUntag(r4, r7);
-  __ add(r6, r6, r4);  // Add offset to index.
-  // Update instance type.
-  __ LoadP(r4, FieldMemOperand(r8, HeapObject::kMapOffset));
-  __ lbz(r4, FieldMemOperand(r4, Map::kInstanceTypeOffset));
-  __ b(&underlying_unpacked);
-
-  __ bind(&seq_or_external_string);
-  // Sequential or external string.  Just move string to the expected register.
-  __ mr(r8, r3);
-
-  __ bind(&underlying_unpacked);
-
-  if (FLAG_string_slices) {
-    Label copy_routine;
-    // r8: underlying subject string
-    // r4: instance type of underlying subject string
-    // r5: length
-    // r6: adjusted start index (untagged)
-    __ cmpi(r5, Operand(SlicedString::kMinLength));
-    // Short slice.  Copy instead of slicing.
-    __ blt(&copy_routine);
-    // Allocate new sliced string.  At this point we do not reload the instance
-    // type including the string encoding because we simply rely on the info
-    // provided by the original string.  It does not matter if the original
-    // string's encoding is wrong because we always have to recheck encoding of
-    // the newly created string's parent anyways due to externalized strings.
-    Label two_byte_slice, set_slice_header;
-    STATIC_ASSERT((kStringEncodingMask & kOneByteStringTag) != 0);
-    STATIC_ASSERT((kStringEncodingMask & kTwoByteStringTag) == 0);
-    __ andi(r0, r4, Operand(kStringEncodingMask));
-    __ beq(&two_byte_slice, cr0);
-    __ AllocateOneByteSlicedString(r3, r5, r9, r10, &runtime);
-    __ b(&set_slice_header);
-    __ bind(&two_byte_slice);
-    __ AllocateTwoByteSlicedString(r3, r5, r9, r10, &runtime);
-    __ bind(&set_slice_header);
-    __ SmiTag(r6);
-    __ StoreP(r8, FieldMemOperand(r3, SlicedString::kParentOffset), r0);
-    __ StoreP(r6, FieldMemOperand(r3, SlicedString::kOffsetOffset), r0);
-    __ b(&return_r3);
-
-    __ bind(&copy_routine);
-  }
-
-  // r8: underlying subject string
-  // r4: instance type of underlying subject string
-  // r5: length
-  // r6: adjusted start index (untagged)
-  Label two_byte_sequential, sequential_string, allocate_result;
-  STATIC_ASSERT(kExternalStringTag != 0);
-  STATIC_ASSERT(kSeqStringTag == 0);
-  __ andi(r0, r4, Operand(kExternalStringTag));
-  __ beq(&sequential_string, cr0);
-
-  // Handle external string.
-  // Rule out short external strings.
-  STATIC_ASSERT(kShortExternalStringTag != 0);
-  __ andi(r0, r4, Operand(kShortExternalStringTag));
-  __ bne(&runtime, cr0);
-  __ LoadP(r8, FieldMemOperand(r8, ExternalString::kResourceDataOffset));
-  // r8 already points to the first character of underlying string.
-  __ b(&allocate_result);
-
-  __ bind(&sequential_string);
-  // Locate first character of underlying subject string.
-  STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqOneByteString::kHeaderSize);
-  __ addi(r8, r8, Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
-
-  __ bind(&allocate_result);
-  // Sequential acii string.  Allocate the result.
-  STATIC_ASSERT((kOneByteStringTag & kStringEncodingMask) != 0);
-  __ andi(r0, r4, Operand(kStringEncodingMask));
-  __ beq(&two_byte_sequential, cr0);
-
-  // Allocate and copy the resulting one-byte string.
-  __ AllocateOneByteString(r3, r5, r7, r9, r10, &runtime);
-
-  // Locate first character of substring to copy.
-  __ add(r8, r8, r6);
-  // Locate first character of result.
-  __ addi(r4, r3, Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
-
-  // r3: result string
-  // r4: first character of result string
-  // r5: result string length
-  // r8: first character of substring to copy
-  STATIC_ASSERT((SeqOneByteString::kHeaderSize & kObjectAlignmentMask) == 0);
-  StringHelper::GenerateCopyCharacters(masm, r4, r8, r5, r6,
-                                       String::ONE_BYTE_ENCODING);
-  __ b(&return_r3);
-
-  // Allocate and copy the resulting two-byte string.
-  __ bind(&two_byte_sequential);
-  __ AllocateTwoByteString(r3, r5, r7, r9, r10, &runtime);
-
-  // Locate first character of substring to copy.
-  __ ShiftLeftImm(r4, r6, Operand(1));
-  __ add(r8, r8, r4);
-  // Locate first character of result.
-  __ addi(r4, r3, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
-
-  // r3: result string.
-  // r4: first character of result.
-  // r5: result length.
-  // r8: first character of substring to copy.
-  STATIC_ASSERT((SeqTwoByteString::kHeaderSize & kObjectAlignmentMask) == 0);
-  StringHelper::GenerateCopyCharacters(masm, r4, r8, r5, r6,
-                                       String::TWO_BYTE_ENCODING);
-
-  __ bind(&return_r3);
-  Counters* counters = isolate()->counters();
-  __ IncrementCounter(counters->sub_string_native(), 1, r6, r7);
-  __ Drop(3);
-  __ Ret();
-
-  // Just jump to runtime to create the sub string.
-  __ bind(&runtime);
-  __ TailCallRuntime(Runtime::kSubString);
-
-  __ bind(&single_char);
-  // r3: original string
-  // r4: instance type
-  // r5: length
-  // r6: from index (untagged)
-  __ SmiTag(r6, r6);
-  StringCharAtGenerator generator(r3, r6, r5, r3, &runtime, &runtime, &runtime,
-                                  RECEIVER_IS_STRING);
-  generator.GenerateFast(masm);
-  __ Drop(3);
-  __ Ret();
-  generator.SkipSlow(masm, &runtime);
-}
-
-void ToStringStub::Generate(MacroAssembler* masm) {
-  // The ToString stub takes one argument in r3.
-  Label is_number;
-  __ JumpIfSmi(r3, &is_number);
-
-  __ CompareObjectType(r3, r4, r4, FIRST_NONSTRING_TYPE);
-  // r3: receiver
-  // r4: receiver instance type
-  __ Ret(lt);
-
-  Label not_heap_number;
-  __ cmpi(r4, Operand(HEAP_NUMBER_TYPE));
-  __ bne(&not_heap_number);
-  __ bind(&is_number);
-  NumberToStringStub stub(isolate());
-  __ TailCallStub(&stub);
-  __ bind(&not_heap_number);
-
-  Label not_oddball;
-  __ cmpi(r4, Operand(ODDBALL_TYPE));
-  __ bne(&not_oddball);
-  __ LoadP(r3, FieldMemOperand(r3, Oddball::kToStringOffset));
-  __ Ret();
-  __ bind(&not_oddball);
-
-  __ push(r3);  // Push argument.
-  __ TailCallRuntime(Runtime::kToString);
-}
-
-
-void ToNameStub::Generate(MacroAssembler* masm) {
-  // The ToName stub takes one argument in r3.
-  Label is_number;
-  __ JumpIfSmi(r3, &is_number);
-
-  STATIC_ASSERT(FIRST_NAME_TYPE == FIRST_TYPE);
-  __ CompareObjectType(r3, r4, r4, LAST_NAME_TYPE);
-  // r3: receiver
-  // r4: receiver instance type
-  __ Ret(le);
-
-  Label not_heap_number;
-  __ cmpi(r4, Operand(HEAP_NUMBER_TYPE));
-  __ bne(&not_heap_number);
-  __ bind(&is_number);
-  NumberToStringStub stub(isolate());
-  __ TailCallStub(&stub);
-  __ bind(&not_heap_number);
-
-  Label not_oddball;
-  __ cmpi(r4, Operand(ODDBALL_TYPE));
-  __ bne(&not_oddball);
-  __ LoadP(r3, FieldMemOperand(r3, Oddball::kToStringOffset));
-  __ Ret();
-  __ bind(&not_oddball);
-
-  __ push(r3);  // Push argument.
-  __ TailCallRuntime(Runtime::kToName);
-}
-
-
 void StringHelper::GenerateFlatOneByteStringEquals(MacroAssembler* masm,
                                                    Register left,
                                                    Register right,
@@ -3407,19 +3120,6 @@
   Label need_incremental;
   Label need_incremental_pop_scratch;
 
-  DCHECK((~Page::kPageAlignmentMask & 0xffff) == 0);
-  __ lis(r0, Operand((~Page::kPageAlignmentMask >> 16)));
-  __ and_(regs_.scratch0(), regs_.object(), r0);
-  __ LoadP(
-      regs_.scratch1(),
-      MemOperand(regs_.scratch0(), MemoryChunk::kWriteBarrierCounterOffset));
-  __ subi(regs_.scratch1(), regs_.scratch1(), Operand(1));
-  __ StoreP(
-      regs_.scratch1(),
-      MemOperand(regs_.scratch0(), MemoryChunk::kWriteBarrierCounterOffset));
-  __ cmpi(regs_.scratch1(), Operand::Zero());  // PPC, we could do better here
-  __ blt(&need_incremental);
-
   // Let's look at the color of the object:  If it is not black we don't have
   // to inform the incremental marker.
   __ JumpIfBlack(regs_.object(), regs_.scratch0(), regs_.scratch1(), &on_black);
@@ -3854,7 +3554,7 @@
   __ LoadP(receiver_map, MemOperand(pointer_reg, kPointerSize * 2));
 
   // Load the map into the correct register.
-  DCHECK(feedback.is(VectorStoreTransitionDescriptor::MapRegister()));
+  DCHECK(feedback.is(StoreTransitionDescriptor::MapRegister()));
   __ mr(feedback, too_far);
 
   __ addi(ip, receiver_map, Operand(Code::kHeaderSize - kHeapObjectTag));
@@ -4581,7 +4281,7 @@
     // Fall back to %AllocateInNewSpace (if not too big).
     Label too_big_for_new_space;
     __ bind(&allocate);
-    __ Cmpi(r10, Operand(Page::kMaxRegularHeapObjectSize), r0);
+    __ Cmpi(r10, Operand(kMaxRegularHeapObjectSize), r0);
     __ bgt(&too_big_for_new_space);
     {
       FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
@@ -4972,7 +4672,7 @@
   // Fall back to %AllocateInNewSpace (if not too big).
   Label too_big_for_new_space;
   __ bind(&allocate);
-  __ Cmpi(r10, Operand(Page::kMaxRegularHeapObjectSize), r0);
+  __ Cmpi(r10, Operand(kMaxRegularHeapObjectSize), r0);
   __ bgt(&too_big_for_new_space);
   {
     FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
diff --git a/src/ppc/interface-descriptors-ppc.cc b/src/ppc/interface-descriptors-ppc.cc
index bc188f4..3ff0fde 100644
--- a/src/ppc/interface-descriptors-ppc.cc
+++ b/src/ppc/interface-descriptors-ppc.cc
@@ -40,13 +40,9 @@
 
 const Register StoreWithVectorDescriptor::VectorRegister() { return r6; }
 
-const Register VectorStoreTransitionDescriptor::SlotRegister() { return r7; }
-const Register VectorStoreTransitionDescriptor::VectorRegister() { return r6; }
-const Register VectorStoreTransitionDescriptor::MapRegister() { return r8; }
-
-
-const Register StoreTransitionDescriptor::MapRegister() { return r6; }
-
+const Register StoreTransitionDescriptor::SlotRegister() { return r7; }
+const Register StoreTransitionDescriptor::VectorRegister() { return r6; }
+const Register StoreTransitionDescriptor::MapRegister() { return r8; }
 
 const Register StoreGlobalViaContextDescriptor::SlotRegister() { return r5; }
 const Register StoreGlobalViaContextDescriptor::ValueRegister() { return r3; }
@@ -355,7 +351,7 @@
   data->InitializePlatformSpecific(arraysize(registers), registers);
 }
 
-void ApiCallbackDescriptorBase::InitializePlatformSpecific(
+void ApiCallbackDescriptor::InitializePlatformSpecific(
     CallInterfaceDescriptorData* data) {
   Register registers[] = {
       r3,  // callee
@@ -390,7 +386,19 @@
       r3,  // argument count (not including receiver)
       r6,  // new target
       r4,  // constructor to call
-      r5   // address of the first argument
+      r5,  // allocation site feedback if available, undefined otherwise
+      r7   // address of the first argument
+  };
+  data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
+void InterpreterPushArgsAndConstructArrayDescriptor::InitializePlatformSpecific(
+    CallInterfaceDescriptorData* data) {
+  Register registers[] = {
+      r3,  // argument count (not including receiver)
+      r4,  // target to call checked to be Array function
+      r5,  // allocation site feedback if available, undefined otherwise
+      r6   // address of the first argument
   };
   data->InitializePlatformSpecific(arraysize(registers), registers);
 }
diff --git a/src/ppc/macro-assembler-ppc.cc b/src/ppc/macro-assembler-ppc.cc
index 4e39d96..9b5f80e 100644
--- a/src/ppc/macro-assembler-ppc.cc
+++ b/src/ppc/macro-assembler-ppc.cc
@@ -282,9 +282,7 @@
 void MacroAssembler::InNewSpace(Register object, Register scratch,
                                 Condition cond, Label* branch) {
   DCHECK(cond == eq || cond == ne);
-  const int mask =
-      (1 << MemoryChunk::IN_FROM_SPACE) | (1 << MemoryChunk::IN_TO_SPACE);
-  CheckPageFlag(object, scratch, mask, cond, branch);
+  CheckPageFlag(object, scratch, MemoryChunk::kIsInNewSpaceMask, cond, branch);
 }
 
 
@@ -1814,7 +1812,7 @@
 void MacroAssembler::Allocate(int object_size, Register result,
                               Register scratch1, Register scratch2,
                               Label* gc_required, AllocationFlags flags) {
-  DCHECK(object_size <= Page::kMaxRegularHeapObjectSize);
+  DCHECK(object_size <= kMaxRegularHeapObjectSize);
   DCHECK((flags & ALLOCATION_FOLDED) == 0);
   if (!FLAG_inline_new) {
     if (emit_debug_code()) {
@@ -2070,7 +2068,7 @@
 void MacroAssembler::FastAllocate(int object_size, Register result,
                                   Register scratch1, Register scratch2,
                                   AllocationFlags flags) {
-  DCHECK(object_size <= Page::kMaxRegularHeapObjectSize);
+  DCHECK(object_size <= kMaxRegularHeapObjectSize);
   DCHECK(!AreAliased(result, scratch1, scratch2, ip));
 
   // Make object size into bytes.
diff --git a/src/ppc/macro-assembler-ppc.h b/src/ppc/macro-assembler-ppc.h
index cf9d4b5..ba4d277 100644
--- a/src/ppc/macro-assembler-ppc.h
+++ b/src/ppc/macro-assembler-ppc.h
@@ -140,6 +140,18 @@
   void Ret() { blr(); }
   void Ret(Condition cond, CRegister cr = cr7) { bclr(cond, cr); }
 
+  // Emit code that loads |parameter_index|'th parameter from the stack to
+  // the register according to the CallInterfaceDescriptor definition.
+  // |sp_to_caller_sp_offset_in_words| specifies the number of words pushed
+  // below the caller's sp.
+  template <class Descriptor>
+  void LoadParameterFromStack(
+      Register reg, typename Descriptor::ParameterIndices parameter_index,
+      int sp_to_ra_offset_in_words = 0) {
+    DCHECK(Descriptor::kPassLastArgsOnStack);
+    UNIMPLEMENTED();
+  }
+
   // Emit code to discard a non-negative number of pointer-sized elements
   // from the stack, clobbering only the sp register.
   void Drop(int count);
diff --git a/src/ppc/simulator-ppc.cc b/src/ppc/simulator-ppc.cc
index 2816a87..84fbb39 100644
--- a/src/ppc/simulator-ppc.cc
+++ b/src/ppc/simulator-ppc.cc
@@ -658,9 +658,8 @@
   last_debugger_input_ = input;
 }
 
-
-void Simulator::FlushICache(base::HashMap* i_cache, void* start_addr,
-                            size_t size) {
+void Simulator::FlushICache(base::CustomMatcherHashMap* i_cache,
+                            void* start_addr, size_t size) {
   intptr_t start = reinterpret_cast<intptr_t>(start_addr);
   int intra_line = (start & CachePage::kLineMask);
   start -= intra_line;
@@ -680,8 +679,8 @@
   }
 }
 
-
-CachePage* Simulator::GetCachePage(base::HashMap* i_cache, void* page) {
+CachePage* Simulator::GetCachePage(base::CustomMatcherHashMap* i_cache,
+                                   void* page) {
   base::HashMap::Entry* entry = i_cache->LookupOrInsert(page, ICacheHash(page));
   if (entry->value == NULL) {
     CachePage* new_page = new CachePage();
@@ -692,7 +691,8 @@
 
 
 // Flush from start up to and not including start + size.
-void Simulator::FlushOnePage(base::HashMap* i_cache, intptr_t start, int size) {
+void Simulator::FlushOnePage(base::CustomMatcherHashMap* i_cache,
+                             intptr_t start, int size) {
   DCHECK(size <= CachePage::kPageSize);
   DCHECK(AllOnOnePage(start, size - 1));
   DCHECK((start & CachePage::kLineMask) == 0);
@@ -704,7 +704,8 @@
   memset(valid_bytemap, CachePage::LINE_INVALID, size >> CachePage::kLineShift);
 }
 
-void Simulator::CheckICache(base::HashMap* i_cache, Instruction* instr) {
+void Simulator::CheckICache(base::CustomMatcherHashMap* i_cache,
+                            Instruction* instr) {
   intptr_t address = reinterpret_cast<intptr_t>(instr);
   void* page = reinterpret_cast<void*>(address & (~CachePage::kPageMask));
   void* line = reinterpret_cast<void*>(address & (~CachePage::kLineMask));
@@ -737,7 +738,7 @@
 Simulator::Simulator(Isolate* isolate) : isolate_(isolate) {
   i_cache_ = isolate_->simulator_i_cache();
   if (i_cache_ == NULL) {
-    i_cache_ = new base::HashMap(&ICacheMatch);
+    i_cache_ = new base::CustomMatcherHashMap(&ICacheMatch);
     isolate_->set_simulator_i_cache(i_cache_);
   }
   Initialize(isolate);
@@ -872,7 +873,8 @@
 
 
 // static
-void Simulator::TearDown(base::HashMap* i_cache, Redirection* first) {
+void Simulator::TearDown(base::CustomMatcherHashMap* i_cache,
+                         Redirection* first) {
   Redirection::DeleteChain(first);
   if (i_cache != nullptr) {
     for (base::HashMap::Entry* entry = i_cache->Start(); entry != nullptr;
diff --git a/src/ppc/simulator-ppc.h b/src/ppc/simulator-ppc.h
index d3163e8..d061545 100644
--- a/src/ppc/simulator-ppc.h
+++ b/src/ppc/simulator-ppc.h
@@ -217,7 +217,7 @@
   // Call on program start.
   static void Initialize(Isolate* isolate);
 
-  static void TearDown(base::HashMap* i_cache, Redirection* first);
+  static void TearDown(base::CustomMatcherHashMap* i_cache, Redirection* first);
 
   // V8 generally calls into generated JS code with 5 parameters and into
   // generated RegExp code with 7 parameters. This is a convenience function,
@@ -239,7 +239,8 @@
   char* last_debugger_input() { return last_debugger_input_; }
 
   // ICache checking.
-  static void FlushICache(base::HashMap* i_cache, void* start, size_t size);
+  static void FlushICache(base::CustomMatcherHashMap* i_cache, void* start,
+                          size_t size);
 
   // Returns true if pc register contains one of the 'special_values' defined
   // below (bad_lr, end_sim_pc).
@@ -329,9 +330,12 @@
   void ExecuteInstruction(Instruction* instr);
 
   // ICache.
-  static void CheckICache(base::HashMap* i_cache, Instruction* instr);
-  static void FlushOnePage(base::HashMap* i_cache, intptr_t start, int size);
-  static CachePage* GetCachePage(base::HashMap* i_cache, void* page);
+  static void CheckICache(base::CustomMatcherHashMap* i_cache,
+                          Instruction* instr);
+  static void FlushOnePage(base::CustomMatcherHashMap* i_cache, intptr_t start,
+                           int size);
+  static CachePage* GetCachePage(base::CustomMatcherHashMap* i_cache,
+                                 void* page);
 
   // Runtime call support.
   static void* RedirectExternalReference(
@@ -369,7 +373,7 @@
   char* last_debugger_input_;
 
   // Icache simulation
-  base::HashMap* i_cache_;
+  base::CustomMatcherHashMap* i_cache_;
 
   // Registered breakpoints.
   Instruction* break_pc_;
diff --git a/src/profiler/OWNERS b/src/profiler/OWNERS
new file mode 100644
index 0000000..87c9661
--- /dev/null
+++ b/src/profiler/OWNERS
@@ -0,0 +1 @@
+alph@chromium.org
diff --git a/src/profiler/allocation-tracker.cc b/src/profiler/allocation-tracker.cc
index d094d0e..99b0b70 100644
--- a/src/profiler/allocation-tracker.cc
+++ b/src/profiler/allocation-tracker.cc
@@ -193,7 +193,7 @@
 AllocationTracker::AllocationTracker(HeapObjectsMap* ids, StringsStorage* names)
     : ids_(ids),
       names_(names),
-      id_to_function_info_index_(base::HashMap::PointersMatch),
+      id_to_function_info_index_(),
       info_index_for_other_state_(0) {
   FunctionInfo* info = new FunctionInfo();
   info->name = "(root)";
diff --git a/src/profiler/cpu-profiler.h b/src/profiler/cpu-profiler.h
index e3df609..e9ccc57 100644
--- a/src/profiler/cpu-profiler.h
+++ b/src/profiler/cpu-profiler.h
@@ -11,7 +11,6 @@
 #include "src/base/atomic-utils.h"
 #include "src/base/atomicops.h"
 #include "src/base/platform/time.h"
-#include "src/compiler.h"
 #include "src/isolate.h"
 #include "src/libsampler/sampler.h"
 #include "src/locked-queue.h"
diff --git a/src/profiler/heap-snapshot-generator.cc b/src/profiler/heap-snapshot-generator.cc
index 9273168..d0fa2e4 100644
--- a/src/profiler/heap-snapshot-generator.cc
+++ b/src/profiler/heap-snapshot-generator.cc
@@ -355,16 +355,8 @@
     HeapObjectsMap::kGcRootsFirstSubrootId +
     VisitorSynchronization::kNumberOfSyncTags * HeapObjectsMap::kObjectIdStep;
 
-
-static bool AddressesMatch(void* key1, void* key2) {
-  return key1 == key2;
-}
-
-
 HeapObjectsMap::HeapObjectsMap(Heap* heap)
-    : next_id_(kFirstAvailableObjectId),
-      entries_map_(AddressesMatch),
-      heap_(heap) {
+    : next_id_(kFirstAvailableObjectId), heap_(heap) {
   // This dummy element solves a problem with entries_map_.
   // When we do lookup in HashMap we see no difference between two cases:
   // it has an entry with NULL as the value or it has created
@@ -476,7 +468,7 @@
            entries_map_.occupancy());
   }
   heap_->CollectAllGarbage(Heap::kMakeHeapIterableMask,
-                          "HeapObjectsMap::UpdateHeapObjectsMap");
+                           GarbageCollectionReason::kHeapProfiler);
   HeapIterator iterator(heap_);
   for (HeapObject* obj = iterator.next();
        obj != NULL;
@@ -704,7 +696,7 @@
          GetMemoryUsedByList(entries_) + GetMemoryUsedByList(time_intervals_);
 }
 
-HeapEntriesMap::HeapEntriesMap() : entries_(base::HashMap::PointersMatch) {}
+HeapEntriesMap::HeapEntriesMap() : entries_() {}
 
 int HeapEntriesMap::Map(HeapThing thing) {
   base::HashMap::Entry* cache_entry = entries_.Lookup(thing, Hash(thing));
@@ -720,7 +712,7 @@
   cache_entry->value = reinterpret_cast<void*>(static_cast<intptr_t>(entry));
 }
 
-HeapObjectsSet::HeapObjectsSet() : entries_(base::HashMap::PointersMatch) {}
+HeapObjectsSet::HeapObjectsSet() : entries_() {}
 
 void HeapObjectsSet::Clear() {
   entries_.Clear();
@@ -1216,8 +1208,7 @@
     }
     if (scope_info->HasFunctionName()) {
       String* name = scope_info->FunctionName();
-      VariableMode mode;
-      int idx = scope_info->FunctionContextSlotIndex(name, &mode);
+      int idx = scope_info->FunctionContextSlotIndex(name);
       if (idx >= 0) {
         SetContextReference(context, entry, name, context->get(idx),
                             Context::OffsetOfElementAt(idx));
@@ -1831,6 +1822,7 @@
          object != heap_->empty_byte_array() &&
          object != heap_->empty_fixed_array() &&
          object != heap_->empty_descriptor_array() &&
+         object != heap_->empty_type_feedback_vector() &&
          object != heap_->fixed_array_map() && object != heap_->cell_map() &&
          object != heap_->global_property_cell_map() &&
          object != heap_->shared_function_info_map() &&
@@ -2507,12 +2499,10 @@
   // full GC is reachable from the root when computing dominators.
   // This is not true for weakly reachable objects.
   // As a temporary solution we call GC twice.
-  heap_->CollectAllGarbage(
-      Heap::kMakeHeapIterableMask,
-      "HeapSnapshotGenerator::GenerateSnapshot");
-  heap_->CollectAllGarbage(
-      Heap::kMakeHeapIterableMask,
-      "HeapSnapshotGenerator::GenerateSnapshot");
+  heap_->CollectAllGarbage(Heap::kMakeHeapIterableMask,
+                           GarbageCollectionReason::kHeapProfiler);
+  heap_->CollectAllGarbage(Heap::kMakeHeapIterableMask,
+                           GarbageCollectionReason::kHeapProfiler);
 
 #ifdef VERIFY_HEAP
   Heap* debug_heap = heap_;
diff --git a/src/profiler/heap-snapshot-generator.h b/src/profiler/heap-snapshot-generator.h
index b870fbe..b235ff0 100644
--- a/src/profiler/heap-snapshot-generator.h
+++ b/src/profiler/heap-snapshot-generator.h
@@ -525,8 +525,8 @@
   bool embedder_queried_;
   HeapObjectsSet in_groups_;
   // RetainedObjectInfo* -> List<HeapObject*>*
-  base::HashMap objects_by_info_;
-  base::HashMap native_groups_;
+  base::CustomMatcherHashMap objects_by_info_;
+  base::CustomMatcherHashMap native_groups_;
   HeapEntriesAllocator* synthetic_entries_allocator_;
   HeapEntriesAllocator* native_entries_allocator_;
   // Used during references extraction.
@@ -613,7 +613,7 @@
   static const int kNodeFieldsCount;
 
   HeapSnapshot* snapshot_;
-  base::HashMap strings_;
+  base::CustomMatcherHashMap strings_;
   int next_node_id_;
   int next_string_id_;
   OutputStreamWriter* writer_;
diff --git a/src/profiler/profile-generator.h b/src/profiler/profile-generator.h
index b785eaa..179d411 100644
--- a/src/profiler/profile-generator.h
+++ b/src/profiler/profile-generator.h
@@ -8,8 +8,9 @@
 #include <map>
 #include "src/allocation.h"
 #include "src/base/hashmap.h"
-#include "src/compiler.h"
+#include "src/log.h"
 #include "src/profiler/strings-storage.h"
+#include "src/source-position.h"
 
 namespace v8 {
 namespace internal {
@@ -220,10 +221,10 @@
   CodeEntry* entry_;
   unsigned self_ticks_;
   // Mapping from CodeEntry* to ProfileNode*
-  base::HashMap children_;
+  base::CustomMatcherHashMap children_;
   List<ProfileNode*> children_list_;
   unsigned id_;
-  base::HashMap line_ticks_;
+  base::CustomMatcherHashMap line_ticks_;
 
   std::vector<CpuProfileDeoptInfo> deopt_infos_;
 
@@ -260,7 +261,7 @@
   Isolate* isolate_;
 
   unsigned next_function_id_;
-  base::HashMap function_ids_;
+  base::CustomMatcherHashMap function_ids_;
 
   DISALLOW_COPY_AND_ASSIGN(ProfileTree);
 };
diff --git a/src/profiler/profiler-listener.cc b/src/profiler/profiler-listener.cc
index 7ce874e..4bceac2 100644
--- a/src/profiler/profiler-listener.cc
+++ b/src/profiler/profiler-listener.cc
@@ -319,6 +319,7 @@
 }
 
 void ProfilerListener::AddObserver(CodeEventObserver* observer) {
+  base::LockGuard<base::Mutex> guard(&mutex_);
   if (std::find(observers_.begin(), observers_.end(), observer) !=
       observers_.end())
     return;
@@ -326,6 +327,7 @@
 }
 
 void ProfilerListener::RemoveObserver(CodeEventObserver* observer) {
+  base::LockGuard<base::Mutex> guard(&mutex_);
   auto it = std::find(observers_.begin(), observers_.end(), observer);
   if (it == observers_.end()) return;
   observers_.erase(it);
diff --git a/src/profiler/profiler-listener.h b/src/profiler/profiler-listener.h
index 7e24cea..500b7ae 100644
--- a/src/profiler/profiler-listener.h
+++ b/src/profiler/profiler-listener.h
@@ -79,6 +79,7 @@
   void RecordDeoptInlinedFrames(CodeEntry* entry, AbstractCode* abstract_code);
   Name* InferScriptName(Name* name, SharedFunctionInfo* info);
   V8_INLINE void DispatchCodeEvent(const CodeEventsContainer& evt_rec) {
+    base::LockGuard<base::Mutex> guard(&mutex_);
     for (auto observer : observers_) {
       observer->CodeEventHandler(evt_rec);
     }
@@ -87,6 +88,7 @@
   StringsStorage function_and_resource_names_;
   std::vector<CodeEntry*> code_entries_;
   std::vector<CodeEventObserver*> observers_;
+  base::Mutex mutex_;
 
   DISALLOW_COPY_AND_ASSIGN(ProfilerListener);
 };
diff --git a/src/profiler/sampling-heap-profiler.cc b/src/profiler/sampling-heap-profiler.cc
index b4361ee..3b2ca63 100644
--- a/src/profiler/sampling-heap-profiler.cc
+++ b/src/profiler/sampling-heap-profiler.cc
@@ -259,8 +259,8 @@
 
 v8::AllocationProfile* SamplingHeapProfiler::GetAllocationProfile() {
   if (flags_ & v8::HeapProfiler::kSamplingForceGC) {
-    isolate_->heap()->CollectAllGarbage(Heap::kNoGCFlags,
-                                        "SamplingHeapProfiler");
+    isolate_->heap()->CollectAllGarbage(
+        Heap::kNoGCFlags, GarbageCollectionReason::kSamplingProfiler);
   }
   // To resolve positions to line/column numbers, we will need to look up
   // scripts. Build a map to allow fast mapping from script id to script.
diff --git a/src/profiler/strings-storage.h b/src/profiler/strings-storage.h
index f98aa5e..f11afbd 100644
--- a/src/profiler/strings-storage.h
+++ b/src/profiler/strings-storage.h
@@ -36,10 +36,10 @@
 
   static bool StringsMatch(void* key1, void* key2);
   const char* AddOrDisposeString(char* str, int len);
-  base::HashMap::Entry* GetEntry(const char* str, int len);
+  base::CustomMatcherHashMap::Entry* GetEntry(const char* str, int len);
 
   uint32_t hash_seed_;
-  base::HashMap names_;
+  base::CustomMatcherHashMap names_;
 
   DISALLOW_COPY_AND_ASSIGN(StringsStorage);
 };
diff --git a/src/profiler/tracing-cpu-profiler.cc b/src/profiler/tracing-cpu-profiler.cc
new file mode 100644
index 0000000..b24ca2f
--- /dev/null
+++ b/src/profiler/tracing-cpu-profiler.cc
@@ -0,0 +1,25 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/profiler/tracing-cpu-profiler.h"
+
+#include "src/v8.h"
+
+namespace v8 {
+
+std::unique_ptr<TracingCpuProfiler> TracingCpuProfiler::Create(
+    v8::Isolate* isolate) {
+  return std::unique_ptr<TracingCpuProfiler>(
+      new internal::TracingCpuProfilerImpl(
+          reinterpret_cast<internal::Isolate*>(isolate)));
+}
+
+namespace internal {
+
+TracingCpuProfilerImpl::TracingCpuProfilerImpl(Isolate* isolate) {}
+
+TracingCpuProfilerImpl::~TracingCpuProfilerImpl() {}
+
+}  // namespace internal
+}  // namespace v8
diff --git a/src/profiler/tracing-cpu-profiler.h b/src/profiler/tracing-cpu-profiler.h
new file mode 100644
index 0000000..80f1bdc
--- /dev/null
+++ b/src/profiler/tracing-cpu-profiler.h
@@ -0,0 +1,26 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_PROFILER_TRACING_CPU_PROFILER_H
+#define V8_PROFILER_TRACING_CPU_PROFILER_H
+
+#include "include/v8-profiler.h"
+#include "src/base/macros.h"
+
+namespace v8 {
+namespace internal {
+
+class TracingCpuProfilerImpl final : public TracingCpuProfiler {
+ public:
+  explicit TracingCpuProfilerImpl(Isolate*);
+  ~TracingCpuProfilerImpl();
+
+ private:
+  DISALLOW_COPY_AND_ASSIGN(TracingCpuProfilerImpl);
+};
+
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_PROFILER_TRACING_CPU_PROFILER_H
diff --git a/src/property-details.h b/src/property-details.h
index 87df02d..d720b1c 100644
--- a/src/property-details.h
+++ b/src/property-details.h
@@ -62,7 +62,6 @@
               static_cast<PropertyFilter>(v8::PropertyFilter::SKIP_SYMBOLS));
 
 class Smi;
-class Type;
 class TypeInfo;
 
 // Type of properties.
diff --git a/src/property.h b/src/property.h
index add9e4d..ebe7d3b 100644
--- a/src/property.h
+++ b/src/property.h
@@ -36,6 +36,7 @@
 
   void Init(Handle<Name> key, Handle<Object> value, PropertyDetails details) {
     DCHECK(key->IsUniqueName());
+    DCHECK_IMPLIES(key->IsPrivate(), !details.IsEnumerable());
     key_ = key;
     value_ = value;
     details_ = details;
@@ -44,6 +45,7 @@
   Descriptor(Handle<Name> key, Handle<Object> value, PropertyDetails details)
       : key_(key), value_(value), details_(details) {
     DCHECK(key->IsUniqueName());
+    DCHECK_IMPLIES(key->IsPrivate(), !details_.IsEnumerable());
   }
 
   Descriptor(Handle<Name> key, Handle<Object> value,
@@ -53,6 +55,7 @@
         value_(value),
         details_(attributes, type, representation, field_index) {
     DCHECK(key->IsUniqueName());
+    DCHECK_IMPLIES(key->IsPrivate(), !details_.IsEnumerable());
   }
 
   friend class DescriptorArray;
diff --git a/src/regexp/jsregexp.cc b/src/regexp/jsregexp.cc
index 0fd1a76..96a778c 100644
--- a/src/regexp/jsregexp.cc
+++ b/src/regexp/jsregexp.cc
@@ -8,7 +8,6 @@
 
 #include "src/base/platform/platform.h"
 #include "src/compilation-cache.h"
-#include "src/compiler.h"
 #include "src/elements.h"
 #include "src/execution.h"
 #include "src/factory.h"
diff --git a/src/regexp/jsregexp.h b/src/regexp/jsregexp.h
index 31c427a..8118889 100644
--- a/src/regexp/jsregexp.h
+++ b/src/regexp/jsregexp.h
@@ -46,7 +46,7 @@
 
   // See ECMA-262 section 15.10.6.2.
   // This function calls the garbage collector if necessary.
-  MUST_USE_RESULT static MaybeHandle<Object> Exec(
+  V8_EXPORT_PRIVATE MUST_USE_RESULT static MaybeHandle<Object> Exec(
       Handle<JSRegExp> regexp, Handle<String> subject, int index,
       Handle<JSObject> lastMatchInfo);
 
@@ -200,7 +200,7 @@
   // is not tracked, however.  As a conservative approximation we track the
   // total regexp code compiled including code that has subsequently been freed
   // and the total executable memory at any point.
-  static const int kRegExpExecutableMemoryLimit = 16 * MB;
+  static const size_t kRegExpExecutableMemoryLimit = 16 * MB;
   static const int kRegExpCompiledLimit = 1 * MB;
   static const int kRegExpTooLargeToOptimize = 20 * KB;
 
diff --git a/src/regexp/regexp-ast.h b/src/regexp/regexp-ast.h
index 406bf84..07a8155 100644
--- a/src/regexp/regexp-ast.h
+++ b/src/regexp/regexp-ast.h
@@ -7,8 +7,8 @@
 
 #include "src/objects.h"
 #include "src/utils.h"
-#include "src/zone-containers.h"
-#include "src/zone.h"
+#include "src/zone/zone-containers.h"
+#include "src/zone/zone.h"
 
 namespace v8 {
 namespace internal {
diff --git a/src/regexp/regexp-parser.h b/src/regexp/regexp-parser.h
index a0b975d..2cf937f 100644
--- a/src/regexp/regexp-parser.h
+++ b/src/regexp/regexp-parser.h
@@ -7,7 +7,7 @@
 
 #include "src/objects.h"
 #include "src/regexp/regexp-ast.h"
-#include "src/zone.h"
+#include "src/zone/zone.h"
 
 namespace v8 {
 namespace internal {
diff --git a/src/runtime-profiler.cc b/src/runtime-profiler.cc
index fb05690..b1e640c 100644
--- a/src/runtime-profiler.cc
+++ b/src/runtime-profiler.cc
@@ -9,6 +9,7 @@
 #include "src/bootstrapper.h"
 #include "src/code-stubs.h"
 #include "src/compilation-cache.h"
+#include "src/compiler.h"
 #include "src/execution.h"
 #include "src/frames-inl.h"
 #include "src/full-codegen/full-codegen.h"
@@ -54,6 +55,33 @@
 static const int kMaxSizeEarlyOpt =
     5 * FullCodeGenerator::kCodeSizeMultiplier;
 
+#define OPTIMIZATION_REASON_LIST(V)                            \
+  V(DoNotOptimize, "do not optimize")                          \
+  V(HotAndStable, "hot and stable")                            \
+  V(HotEnoughForBaseline, "hot enough for baseline")           \
+  V(HotWithoutMuchTypeInfo, "not much type info but very hot") \
+  V(SmallFunction, "small function")
+
+enum class OptimizationReason : uint8_t {
+#define OPTIMIZATION_REASON_CONSTANTS(Constant, message) k##Constant,
+  OPTIMIZATION_REASON_LIST(OPTIMIZATION_REASON_CONSTANTS)
+#undef OPTIMIZATION_REASON_CONSTANTS
+};
+
+char const* OptimizationReasonToString(OptimizationReason reason) {
+  static char const* reasons[] = {
+#define OPTIMIZATION_REASON_TEXTS(Constant, message) message,
+      OPTIMIZATION_REASON_LIST(OPTIMIZATION_REASON_TEXTS)
+#undef OPTIMIZATION_REASON_TEXTS
+  };
+  size_t const index = static_cast<size_t>(reason);
+  DCHECK_LT(index, arraysize(reasons));
+  return reasons[index];
+}
+
+std::ostream& operator<<(std::ostream& os, OptimizationReason reason) {
+  return os << OptimizationReasonToString(reason);
+}
 
 RuntimeProfiler::RuntimeProfiler(Isolate* isolate)
     : isolate_(isolate),
@@ -79,8 +107,15 @@
 
   // Harvest vector-ics as well
   TypeFeedbackVector* vector = function->feedback_vector();
-  int with = 0, gen = 0;
-  vector->ComputeCounts(&with, &gen);
+  int with = 0, gen = 0, type_vector_ic_count = 0;
+  const bool is_interpreted =
+      function->shared()->code()->is_interpreter_trampoline_builtin();
+
+  vector->ComputeCounts(&with, &gen, &type_vector_ic_count, is_interpreted);
+  if (is_interpreted) {
+    DCHECK_EQ(*ic_total_count, 0);
+    *ic_total_count = type_vector_ic_count;
+  }
   *ic_with_type_info_count += with;
   *ic_generic_count += gen;
 
@@ -112,13 +147,17 @@
   }
 }
 
-void RuntimeProfiler::Optimize(JSFunction* function, const char* reason) {
-  TraceRecompile(function, reason, "optimized");
+void RuntimeProfiler::Optimize(JSFunction* function,
+                               OptimizationReason reason) {
+  DCHECK_NE(reason, OptimizationReason::kDoNotOptimize);
+  TraceRecompile(function, OptimizationReasonToString(reason), "optimized");
   function->AttemptConcurrentOptimization();
 }
 
-void RuntimeProfiler::Baseline(JSFunction* function, const char* reason) {
-  TraceRecompile(function, reason, "baseline");
+void RuntimeProfiler::Baseline(JSFunction* function,
+                               OptimizationReason reason) {
+  DCHECK_NE(reason, OptimizationReason::kDoNotOptimize);
+  TraceRecompile(function, OptimizationReasonToString(reason), "baseline");
 
   // TODO(4280): Fix this to check function is compiled for the interpreter
   // once we have a standard way to check that. For now function will only
@@ -237,9 +276,9 @@
         generic_percentage <= FLAG_generic_ic_threshold) {
       // If this particular function hasn't had any ICs patched for enough
       // ticks, optimize it now.
-      Optimize(function, "hot and stable");
+      Optimize(function, OptimizationReason::kHotAndStable);
     } else if (ticks >= kTicksWhenNotEnoughTypeInfo) {
-      Optimize(function, "not much type info but very hot");
+      Optimize(function, OptimizationReason::kHotWithoutMuchTypeInfo);
     } else {
       shared_code->set_profiler_ticks(ticks + 1);
       if (FLAG_trace_opt_verbose) {
@@ -258,7 +297,7 @@
                 &generic_percentage);
     if (type_percentage >= FLAG_type_info_threshold &&
         generic_percentage <= FLAG_generic_ic_threshold) {
-      Optimize(function, "small function");
+      Optimize(function, OptimizationReason::kSmallFunction);
     } else {
       shared_code->set_profiler_ticks(ticks + 1);
     }
@@ -271,31 +310,16 @@
                                             JavaScriptFrame* frame) {
   if (function->IsInOptimizationQueue()) return;
 
-  SharedFunctionInfo* shared = function->shared();
-  int ticks = shared->profiler_ticks();
-
-  // TODO(rmcilroy): Also ensure we only OSR top-level code if it is smaller
-  // than kMaxToplevelSourceSize.
-
   if (FLAG_always_osr) {
     AttemptOnStackReplacement(frame, AbstractCode::kMaxLoopNestingMarker);
     // Fall through and do a normal baseline compile as well.
-  } else if (!frame->is_optimized() &&
-             (function->IsMarkedForBaseline() ||
-              function->IsMarkedForOptimization() ||
-              function->IsMarkedForConcurrentOptimization() ||
-              function->IsOptimized())) {
-    // Attempt OSR if we are still running interpreted code even though the
-    // the function has long been marked or even already been optimized.
-    int64_t allowance =
-        kOSRCodeSizeAllowanceBaseIgnition +
-        static_cast<int64_t>(ticks) * kOSRCodeSizeAllowancePerTickIgnition;
-    if (shared->bytecode_array()->Size() <= allowance) {
-      AttemptOnStackReplacement(frame);
-    }
+  } else if (MaybeOSRIgnition(function, frame)) {
     return;
   }
 
+  SharedFunctionInfo* shared = function->shared();
+  int ticks = shared->profiler_ticks();
+
   if (shared->optimization_disabled() &&
       shared->disable_optimization_reason() == kOptimizationDisabledForTest) {
     // Don't baseline functions which have been marked by NeverOptimizeFunction
@@ -304,7 +328,7 @@
   }
 
   if (ticks >= kProfilerTicksBeforeBaseline) {
-    Baseline(function, "hot enough for baseline");
+    Baseline(function, OptimizationReason::kHotEnoughForBaseline);
   }
 }
 
@@ -312,31 +336,16 @@
                                             JavaScriptFrame* frame) {
   if (function->IsInOptimizationQueue()) return;
 
-  SharedFunctionInfo* shared = function->shared();
-  int ticks = shared->profiler_ticks();
-
-  // TODO(rmcilroy): Also ensure we only OSR top-level code if it is smaller
-  // than kMaxToplevelSourceSize.
-
   if (FLAG_always_osr) {
     AttemptOnStackReplacement(frame, AbstractCode::kMaxLoopNestingMarker);
     // Fall through and do a normal optimized compile as well.
-  } else if (!frame->is_optimized() &&
-             (function->IsMarkedForBaseline() ||
-              function->IsMarkedForOptimization() ||
-              function->IsMarkedForConcurrentOptimization() ||
-              function->IsOptimized())) {
-    // Attempt OSR if we are still running interpreted code even though the
-    // the function has long been marked or even already been optimized.
-    int64_t allowance =
-        kOSRCodeSizeAllowanceBaseIgnition +
-        static_cast<int64_t>(ticks) * kOSRCodeSizeAllowancePerTickIgnition;
-    if (shared->bytecode_array()->Size() <= allowance) {
-      AttemptOnStackReplacement(frame);
-    }
+  } else if (MaybeOSRIgnition(function, frame)) {
     return;
   }
 
+  SharedFunctionInfo* shared = function->shared();
+  int ticks = shared->profiler_ticks();
+
   if (shared->optimization_disabled()) {
     if (shared->deopt_count() >= FLAG_max_opt_count) {
       // If optimization was disabled due to many deoptimizations,
@@ -348,8 +357,51 @@
     }
     return;
   }
+
   if (function->IsOptimized()) return;
 
+  OptimizationReason reason = ShouldOptimizeIgnition(function, frame);
+
+  if (reason != OptimizationReason::kDoNotOptimize) {
+    Optimize(function, reason);
+  }
+}
+
+bool RuntimeProfiler::MaybeOSRIgnition(JSFunction* function,
+                                       JavaScriptFrame* frame) {
+  if (!FLAG_ignition_osr) return false;
+
+  SharedFunctionInfo* shared = function->shared();
+  int ticks = shared->profiler_ticks();
+
+  // TODO(rmcilroy): Also ensure we only OSR top-level code if it is smaller
+  // than kMaxToplevelSourceSize.
+
+  bool osr_before_baselined = function->IsMarkedForBaseline() &&
+                              ShouldOptimizeIgnition(function, frame) !=
+                                  OptimizationReason::kDoNotOptimize;
+  if (!frame->is_optimized() &&
+      (osr_before_baselined || function->IsMarkedForOptimization() ||
+       function->IsMarkedForConcurrentOptimization() ||
+       function->IsOptimized())) {
+    // Attempt OSR if we are still running interpreted code even though
+    // the function has long been marked or even already been optimized.
+    int64_t allowance =
+        kOSRCodeSizeAllowanceBaseIgnition +
+        static_cast<int64_t>(ticks) * kOSRCodeSizeAllowancePerTickIgnition;
+    if (shared->bytecode_array()->Size() <= allowance) {
+      AttemptOnStackReplacement(frame);
+    }
+    return true;
+  }
+  return false;
+}
+
+OptimizationReason RuntimeProfiler::ShouldOptimizeIgnition(
+    JSFunction* function, JavaScriptFrame* frame) {
+  SharedFunctionInfo* shared = function->shared();
+  int ticks = shared->profiler_ticks();
+
   if (ticks >= kProfilerTicksBeforeOptimization) {
     int typeinfo, generic, total, type_percentage, generic_percentage;
     GetICCounts(function, &typeinfo, &generic, &total, &type_percentage,
@@ -358,9 +410,9 @@
         generic_percentage <= FLAG_generic_ic_threshold) {
       // If this particular function hasn't had any ICs patched for enough
       // ticks, optimize it now.
-      Optimize(function, "hot and stable");
+      return OptimizationReason::kHotAndStable;
     } else if (ticks >= kTicksWhenNotEnoughTypeInfo) {
-      Optimize(function, "not much type info but very hot");
+      return OptimizationReason::kHotWithoutMuchTypeInfo;
     } else {
       if (FLAG_trace_opt_verbose) {
         PrintF("[not yet optimizing ");
@@ -368,10 +420,12 @@
         PrintF(", not enough type info: %d/%d (%d%%)]\n", typeinfo, total,
                type_percentage);
       }
+      return OptimizationReason::kDoNotOptimize;
     }
   }
   // TODO(rmcilroy): Consider whether we should optimize small functions when
   // they are first seen on the stack (e.g., kMaxSizeEarlyOpt).
+  return OptimizationReason::kDoNotOptimize;
 }
 
 void RuntimeProfiler::MarkCandidatesForOptimization() {
@@ -419,6 +473,5 @@
   any_ic_changed_ = false;
 }
 
-
 }  // namespace internal
 }  // namespace v8
diff --git a/src/runtime-profiler.h b/src/runtime-profiler.h
index 7f2c902..5c538c4 100644
--- a/src/runtime-profiler.h
+++ b/src/runtime-profiler.h
@@ -13,6 +13,7 @@
 class Isolate;
 class JavaScriptFrame;
 class JSFunction;
+enum class OptimizationReason : uint8_t;
 
 class RuntimeProfiler {
  public:
@@ -30,8 +31,13 @@
                                 int frame_count);
   void MaybeBaselineIgnition(JSFunction* function, JavaScriptFrame* frame);
   void MaybeOptimizeIgnition(JSFunction* function, JavaScriptFrame* frame);
-  void Optimize(JSFunction* function, const char* reason);
-  void Baseline(JSFunction* function, const char* reason);
+  // Potentially attempts OSR from ignition and returns whether no other
+  // optimization attempts should be made.
+  bool MaybeOSRIgnition(JSFunction* function, JavaScriptFrame* frame);
+  OptimizationReason ShouldOptimizeIgnition(JSFunction* function,
+                                            JavaScriptFrame* frame);
+  void Optimize(JSFunction* function, OptimizationReason reason);
+  void Baseline(JSFunction* function, OptimizationReason reason);
 
   Isolate* isolate_;
   bool any_ic_changed_;
diff --git a/src/runtime/runtime-array.cc b/src/runtime/runtime-array.cc
index 4b7cd39..cbde8f3 100644
--- a/src/runtime/runtime-array.cc
+++ b/src/runtime/runtime-array.cc
@@ -375,15 +375,9 @@
   uint32_t index = static_cast<uint32_t>(key);
 
   if (index >= capacity) {
-    if (object->map()->is_prototype_map() ||
-        object->WouldConvertToSlowElements(index)) {
-      // We don't want to allow operations that cause lazy deopt. Return a Smi
-      // as a signal that optimized code should eagerly deoptimize.
+    if (!object->GetElementsAccessor()->GrowCapacity(object, index)) {
       return Smi::FromInt(0);
     }
-
-    uint32_t new_capacity = JSObject::NewElementsCapacity(index + 1);
-    object->GetElementsAccessor()->GrowCapacityAndConvert(object, new_capacity);
   }
 
   // On success, return the fixed array elements.
diff --git a/src/runtime/runtime-classes.cc b/src/runtime/runtime-classes.cc
index 5448159..323604f 100644
--- a/src/runtime/runtime-classes.cc
+++ b/src/runtime/runtime-classes.cc
@@ -95,7 +95,8 @@
       prototype_parent = isolate->factory()->null_value();
     } else if (super_class->IsConstructor()) {
       DCHECK(!super_class->IsJSFunction() ||
-             !Handle<JSFunction>::cast(super_class)->shared()->is_resumable());
+             !IsResumableFunction(
+                 Handle<JSFunction>::cast(super_class)->shared()->kind()));
       ASSIGN_RETURN_ON_EXCEPTION(
           isolate, prototype_parent,
           Runtime::GetObjectProperty(isolate, super_class,
@@ -187,52 +188,65 @@
                            end_position));
 }
 
+namespace {
 
-static MaybeHandle<Object> LoadFromSuper(Isolate* isolate,
+enum class SuperMode { kLoad, kStore };
+
+MaybeHandle<JSReceiver> GetSuperHolder(
+    Isolate* isolate, Handle<Object> receiver, Handle<JSObject> home_object,
+    SuperMode mode, MaybeHandle<Name> maybe_name, uint32_t index) {
+  if (home_object->IsAccessCheckNeeded() &&
+      !isolate->MayAccess(handle(isolate->context()), home_object)) {
+    isolate->ReportFailedAccessCheck(home_object);
+    RETURN_EXCEPTION_IF_SCHEDULED_EXCEPTION(isolate, JSReceiver);
+  }
+
+  PrototypeIterator iter(isolate, home_object);
+  Handle<Object> proto = PrototypeIterator::GetCurrent(iter);
+  if (!proto->IsJSReceiver()) {
+    MessageTemplate::Template message =
+        mode == SuperMode::kLoad ? MessageTemplate::kNonObjectPropertyLoad
+                                 : MessageTemplate::kNonObjectPropertyStore;
+    Handle<Name> name;
+    if (!maybe_name.ToHandle(&name)) {
+      name = isolate->factory()->Uint32ToString(index);
+    }
+    THROW_NEW_ERROR(isolate, NewTypeError(message, name, proto), JSReceiver);
+  }
+  return Handle<JSReceiver>::cast(proto);
+}
+
+MaybeHandle<Object> LoadFromSuper(Isolate* isolate, Handle<Object> receiver,
+                                  Handle<JSObject> home_object,
+                                  Handle<Name> name) {
+  Handle<JSReceiver> holder;
+  ASSIGN_RETURN_ON_EXCEPTION(
+      isolate, holder,
+      GetSuperHolder(isolate, receiver, home_object, SuperMode::kLoad, name, 0),
+      Object);
+  LookupIterator it(receiver, name, holder);
+  Handle<Object> result;
+  ASSIGN_RETURN_ON_EXCEPTION(isolate, result, Object::GetProperty(&it), Object);
+  return result;
+}
+
+MaybeHandle<Object> LoadElementFromSuper(Isolate* isolate,
                                          Handle<Object> receiver,
                                          Handle<JSObject> home_object,
-                                         Handle<Name> name) {
-  if (home_object->IsAccessCheckNeeded() &&
-      !isolate->MayAccess(handle(isolate->context()), home_object)) {
-    isolate->ReportFailedAccessCheck(home_object);
-    RETURN_EXCEPTION_IF_SCHEDULED_EXCEPTION(isolate, Object);
-  }
-
-  PrototypeIterator iter(isolate, home_object);
-  Handle<Object> proto = PrototypeIterator::GetCurrent(iter);
-  if (!proto->IsJSReceiver()) {
-    return Object::ReadAbsentProperty(isolate, proto, name);
-  }
-
-  LookupIterator it(receiver, name, Handle<JSReceiver>::cast(proto));
+                                         uint32_t index) {
+  Handle<JSReceiver> holder;
+  ASSIGN_RETURN_ON_EXCEPTION(
+      isolate, holder,
+      GetSuperHolder(isolate, receiver, home_object, SuperMode::kLoad,
+                     MaybeHandle<Name>(), index),
+      Object);
+  LookupIterator it(isolate, receiver, index, holder);
   Handle<Object> result;
   ASSIGN_RETURN_ON_EXCEPTION(isolate, result, Object::GetProperty(&it), Object);
   return result;
 }
 
-static MaybeHandle<Object> LoadElementFromSuper(Isolate* isolate,
-                                                Handle<Object> receiver,
-                                                Handle<JSObject> home_object,
-                                                uint32_t index) {
-  if (home_object->IsAccessCheckNeeded() &&
-      !isolate->MayAccess(handle(isolate->context()), home_object)) {
-    isolate->ReportFailedAccessCheck(home_object);
-    RETURN_EXCEPTION_IF_SCHEDULED_EXCEPTION(isolate, Object);
-  }
-
-  PrototypeIterator iter(isolate, home_object);
-  Handle<Object> proto = PrototypeIterator::GetCurrent(iter);
-  if (!proto->IsJSReceiver()) {
-    Handle<Object> name = isolate->factory()->NewNumberFromUint(index);
-    return Object::ReadAbsentProperty(isolate, proto, name);
-  }
-
-  LookupIterator it(isolate, receiver, index, Handle<JSReceiver>::cast(proto));
-  Handle<Object> result;
-  ASSIGN_RETURN_ON_EXCEPTION(isolate, result, Object::GetProperty(&it), Object);
-  return result;
-}
-
+}  // anonymous namespace
 
 RUNTIME_FUNCTION(Runtime_LoadFromSuper) {
   HandleScope scope(isolate);
@@ -272,50 +286,43 @@
                            LoadFromSuper(isolate, receiver, home_object, name));
 }
 
+namespace {
 
-static Object* StoreToSuper(Isolate* isolate, Handle<JSObject> home_object,
-                            Handle<Object> receiver, Handle<Name> name,
-                            Handle<Object> value, LanguageMode language_mode) {
-  if (home_object->IsAccessCheckNeeded() &&
-      !isolate->MayAccess(handle(isolate->context()), home_object)) {
-    isolate->ReportFailedAccessCheck(home_object);
-    RETURN_FAILURE_IF_SCHEDULED_EXCEPTION(isolate);
-  }
-
-  PrototypeIterator iter(isolate, home_object);
-  Handle<Object> proto = PrototypeIterator::GetCurrent(iter);
-  if (!proto->IsJSReceiver()) return isolate->heap()->undefined_value();
-
-  LookupIterator it(receiver, name, Handle<JSReceiver>::cast(proto));
+MaybeHandle<Object> StoreToSuper(Isolate* isolate, Handle<JSObject> home_object,
+                                 Handle<Object> receiver, Handle<Name> name,
+                                 Handle<Object> value,
+                                 LanguageMode language_mode) {
+  Handle<JSReceiver> holder;
+  ASSIGN_RETURN_ON_EXCEPTION(isolate, holder,
+                             GetSuperHolder(isolate, receiver, home_object,
+                                            SuperMode::kStore, name, 0),
+                             Object);
+  LookupIterator it(receiver, name, holder);
   MAYBE_RETURN(Object::SetSuperProperty(&it, value, language_mode,
                                         Object::CERTAINLY_NOT_STORE_FROM_KEYED),
-               isolate->heap()->exception());
-  return *value;
+               MaybeHandle<Object>());
+  return value;
 }
 
-
-static Object* StoreElementToSuper(Isolate* isolate,
-                                   Handle<JSObject> home_object,
-                                   Handle<Object> receiver, uint32_t index,
-                                   Handle<Object> value,
-                                   LanguageMode language_mode) {
-  if (home_object->IsAccessCheckNeeded() &&
-      !isolate->MayAccess(handle(isolate->context()), home_object)) {
-    isolate->ReportFailedAccessCheck(home_object);
-    RETURN_FAILURE_IF_SCHEDULED_EXCEPTION(isolate);
-  }
-
-  PrototypeIterator iter(isolate, home_object);
-  Handle<Object> proto = PrototypeIterator::GetCurrent(iter);
-  if (!proto->IsJSReceiver()) return isolate->heap()->undefined_value();
-
-  LookupIterator it(isolate, receiver, index, Handle<JSReceiver>::cast(proto));
+MaybeHandle<Object> StoreElementToSuper(Isolate* isolate,
+                                        Handle<JSObject> home_object,
+                                        Handle<Object> receiver, uint32_t index,
+                                        Handle<Object> value,
+                                        LanguageMode language_mode) {
+  Handle<JSReceiver> holder;
+  ASSIGN_RETURN_ON_EXCEPTION(
+      isolate, holder,
+      GetSuperHolder(isolate, receiver, home_object, SuperMode::kStore,
+                     MaybeHandle<Name>(), index),
+      Object);
+  LookupIterator it(isolate, receiver, index, holder);
   MAYBE_RETURN(Object::SetSuperProperty(&it, value, language_mode,
                                         Object::MAY_BE_STORE_FROM_KEYED),
-               isolate->heap()->exception());
-  return *value;
+               MaybeHandle<Object>());
+  return value;
 }
 
+}  // anonymous namespace
 
 RUNTIME_FUNCTION(Runtime_StoreToSuper_Strict) {
   HandleScope scope(isolate);
@@ -325,7 +332,8 @@
   CONVERT_ARG_HANDLE_CHECKED(Name, name, 2);
   CONVERT_ARG_HANDLE_CHECKED(Object, value, 3);
 
-  return StoreToSuper(isolate, home_object, receiver, name, value, STRICT);
+  RETURN_RESULT_OR_FAILURE(isolate, StoreToSuper(isolate, home_object, receiver,
+                                                 name, value, STRICT));
 }
 
 
@@ -337,14 +345,13 @@
   CONVERT_ARG_HANDLE_CHECKED(Name, name, 2);
   CONVERT_ARG_HANDLE_CHECKED(Object, value, 3);
 
-  return StoreToSuper(isolate, home_object, receiver, name, value, SLOPPY);
+  RETURN_RESULT_OR_FAILURE(isolate, StoreToSuper(isolate, home_object, receiver,
+                                                 name, value, SLOPPY));
 }
 
-
-static Object* StoreKeyedToSuper(Isolate* isolate, Handle<JSObject> home_object,
-                                 Handle<Object> receiver, Handle<Object> key,
-                                 Handle<Object> value,
-                                 LanguageMode language_mode) {
+static MaybeHandle<Object> StoreKeyedToSuper(
+    Isolate* isolate, Handle<JSObject> home_object, Handle<Object> receiver,
+    Handle<Object> key, Handle<Object> value, LanguageMode language_mode) {
   uint32_t index = 0;
 
   if (key->ToArrayIndex(&index)) {
@@ -352,8 +359,8 @@
                                language_mode);
   }
   Handle<Name> name;
-  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, name,
-                                     Object::ToName(isolate, key));
+  ASSIGN_RETURN_ON_EXCEPTION(isolate, name, Object::ToName(isolate, key),
+                             Object);
   // TODO(verwaest): Unify using LookupIterator.
   if (name->AsArrayIndex(&index)) {
     return StoreElementToSuper(isolate, home_object, receiver, index, value,
@@ -372,7 +379,9 @@
   CONVERT_ARG_HANDLE_CHECKED(Object, key, 2);
   CONVERT_ARG_HANDLE_CHECKED(Object, value, 3);
 
-  return StoreKeyedToSuper(isolate, home_object, receiver, key, value, STRICT);
+  RETURN_RESULT_OR_FAILURE(
+      isolate,
+      StoreKeyedToSuper(isolate, home_object, receiver, key, value, STRICT));
 }
 
 
@@ -384,7 +393,9 @@
   CONVERT_ARG_HANDLE_CHECKED(Object, key, 2);
   CONVERT_ARG_HANDLE_CHECKED(Object, value, 3);
 
-  return StoreKeyedToSuper(isolate, home_object, receiver, key, value, SLOPPY);
+  RETURN_RESULT_OR_FAILURE(
+      isolate,
+      StoreKeyedToSuper(isolate, home_object, receiver, key, value, SLOPPY));
 }
 
 
diff --git a/src/runtime/runtime-compiler.cc b/src/runtime/runtime-compiler.cc
index b5910e4..01ec73d 100644
--- a/src/runtime/runtime-compiler.cc
+++ b/src/runtime/runtime-compiler.cc
@@ -11,6 +11,7 @@
 #include "src/deoptimizer.h"
 #include "src/frames-inl.h"
 #include "src/full-codegen/full-codegen.h"
+#include "src/interpreter/bytecode-array-iterator.h"
 #include "src/isolate-inl.h"
 #include "src/messages.h"
 #include "src/v8threads.h"
@@ -172,6 +173,17 @@
 
   DCHECK(optimized_code->kind() == Code::OPTIMIZED_FUNCTION);
   DCHECK(type == deoptimizer->bailout_type());
+  DCHECK_NULL(isolate->context());
+
+  // TODO(turbofan): For Crankshaft we restore the context before objects are
+  // being materialized, because it never de-materializes the context but it
+  // requires a context to materialize arguments objects. This is specific to
+  // Crankshaft and can be removed once only TurboFan goes through here.
+  if (!optimized_code->is_turbofanned()) {
+    JavaScriptFrameIterator top_it(isolate);
+    JavaScriptFrame* top_frame = top_it.frame();
+    isolate->set_context(Context::cast(top_frame->context()));
+  }
 
   // Make sure to materialize objects before causing any allocation.
   JavaScriptFrameIterator it(isolate);
@@ -179,9 +191,11 @@
   delete deoptimizer;
 
   // Ensure the context register is updated for materialized objects.
-  JavaScriptFrameIterator top_it(isolate);
-  JavaScriptFrame* top_frame = top_it.frame();
-  isolate->set_context(Context::cast(top_frame->context()));
+  if (optimized_code->is_turbofanned()) {
+    JavaScriptFrameIterator top_it(isolate);
+    JavaScriptFrame* top_frame = top_it.frame();
+    isolate->set_context(Context::cast(top_frame->context()));
+  }
 
   if (type == Deoptimizer::LAZY) {
     return isolate->heap()->undefined_value();
@@ -279,7 +293,20 @@
   // Reset the OSR loop nesting depth to disarm back edges.
   bytecode->set_osr_loop_nesting_level(0);
 
-  return BailoutId(iframe->GetBytecodeOffset());
+  // Translate the offset of the jump instruction to the jump target offset of
+  // that instruction so that the derived BailoutId points to the loop header.
+  // TODO(mstarzinger): This can be merged with {BytecodeBranchAnalysis} which
+  // already performs a pre-pass over the bytecode stream anyways.
+  int jump_offset = iframe->GetBytecodeOffset();
+  interpreter::BytecodeArrayIterator iterator(bytecode);
+  while (iterator.current_offset() + iterator.current_prefix_offset() <
+         jump_offset) {
+    iterator.Advance();
+  }
+  DCHECK(interpreter::Bytecodes::IsJump(iterator.current_bytecode()));
+  int jump_target_offset = iterator.GetJumpTargetOffset();
+
+  return BailoutId(jump_target_offset);
 }
 
 }  // namespace
@@ -335,10 +362,18 @@
       function->shared()->increment_deopt_count();
 
       if (result->is_turbofanned()) {
-        // TurboFanned OSR code cannot be installed into the function.
-        // But the function is obviously hot, so optimize it next time.
-        function->ReplaceCode(
-            isolate->builtins()->builtin(Builtins::kCompileOptimized));
+        // When we're waiting for concurrent optimization, set to compile on
+        // the next call - otherwise we'd run unoptimized once more
+        // and potentially compile for OSR another time as well.
+        if (function->IsMarkedForConcurrentOptimization()) {
+          if (FLAG_trace_osr) {
+            PrintF("[OSR - Re-marking ");
+            function->PrintName();
+            PrintF(" for non-concurrent optimization]\n");
+          }
+          function->ReplaceCode(
+              isolate->builtins()->builtin(Builtins::kCompileOptimized));
+        }
       } else {
         // Crankshafted OSR code can be installed into the function.
         function->ReplaceCode(*result);
diff --git a/src/runtime/runtime-debug.cc b/src/runtime/runtime-debug.cc
index a8c465a..2d217b8 100644
--- a/src/runtime/runtime-debug.cc
+++ b/src/runtime/runtime-debug.cc
@@ -9,6 +9,7 @@
 #include "src/debug/debug-frames.h"
 #include "src/debug/debug-scopes.h"
 #include "src/debug/debug.h"
+#include "src/debug/liveedit.h"
 #include "src/frames-inl.h"
 #include "src/globals.h"
 #include "src/interpreter/bytecodes.h"
@@ -1521,7 +1522,8 @@
 RUNTIME_FUNCTION(Runtime_CollectGarbage) {
   SealHandleScope shs(isolate);
   DCHECK(args.length() == 1);
-  isolate->heap()->CollectAllGarbage(Heap::kNoGCFlags, "%CollectGarbage");
+  isolate->heap()->CollectAllGarbage(Heap::kNoGCFlags,
+                                     GarbageCollectionReason::kRuntime);
   return isolate->heap()->undefined_value();
 }
 
diff --git a/src/runtime/runtime-forin.cc b/src/runtime/runtime-forin.cc
index 0d624e9..bd37cdc 100644
--- a/src/runtime/runtime-forin.cc
+++ b/src/runtime/runtime-forin.cc
@@ -140,17 +140,6 @@
   return MakeTriple(*cache_type, *cache_array, Smi::FromInt(cache_length));
 }
 
-
-RUNTIME_FUNCTION(Runtime_ForInDone) {
-  SealHandleScope scope(isolate);
-  DCHECK_EQ(2, args.length());
-  CONVERT_SMI_ARG_CHECKED(index, 0);
-  CONVERT_SMI_ARG_CHECKED(length, 1);
-  DCHECK_LE(0, index);
-  DCHECK_LE(index, length);
-  return isolate->heap()->ToBoolean(index == length);
-}
-
 RUNTIME_FUNCTION(Runtime_ForInHasProperty) {
   HandleScope scope(isolate);
   DCHECK_EQ(2, args.length());
@@ -188,15 +177,5 @@
                            HasEnumerableProperty(isolate, receiver, key));
 }
 
-
-RUNTIME_FUNCTION(Runtime_ForInStep) {
-  SealHandleScope scope(isolate);
-  DCHECK_EQ(1, args.length());
-  CONVERT_SMI_ARG_CHECKED(index, 0);
-  DCHECK_LE(0, index);
-  DCHECK_LT(index, Smi::kMaxValue);
-  return Smi::FromInt(index + 1);
-}
-
 }  // namespace internal
 }  // namespace v8
diff --git a/src/runtime/runtime-function.cc b/src/runtime/runtime-function.cc
index 298f1a1..fa50941 100644
--- a/src/runtime/runtime-function.cc
+++ b/src/runtime/runtime-function.cc
@@ -174,6 +174,7 @@
     target_shared->set_bytecode_array(source_shared->bytecode_array());
   }
   target_shared->set_scope_info(source_shared->scope_info());
+  target_shared->set_outer_scope_info(source_shared->outer_scope_info());
   target_shared->set_length(source_shared->length());
   target_shared->set_num_literals(source_shared->num_literals());
   target_shared->set_feedback_metadata(source_shared->feedback_metadata());
diff --git a/src/runtime/runtime-generator.cc b/src/runtime/runtime-generator.cc
index dcc48c5..bb63a3d 100644
--- a/src/runtime/runtime-generator.cc
+++ b/src/runtime/runtime-generator.cc
@@ -18,7 +18,7 @@
   DCHECK(args.length() == 2);
   CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 0);
   CONVERT_ARG_HANDLE_CHECKED(Object, receiver, 1);
-  CHECK(function->shared()->is_resumable());
+  CHECK(IsResumableFunction(function->shared()->kind()));
 
   Handle<FixedArray> operand_stack;
   if (function->shared()->HasBytecodeArray()) {
@@ -49,7 +49,7 @@
 
   JavaScriptFrameIterator stack_iterator(isolate);
   JavaScriptFrame* frame = stack_iterator.frame();
-  CHECK(frame->function()->shared()->is_resumable());
+  CHECK(IsResumableFunction(frame->function()->shared()->kind()));
   DCHECK_EQ(frame->function(), generator_object->function());
   DCHECK(frame->function()->shared()->is_compiled());
   DCHECK(!frame->function()->IsOptimized());
diff --git a/src/runtime/runtime-i18n.cc b/src/runtime/runtime-i18n.cc
index 8b9d92e..7fcb802 100644
--- a/src/runtime/runtime-i18n.cc
+++ b/src/runtime/runtime-i18n.cc
@@ -25,6 +25,8 @@
 #include "unicode/decimfmt.h"
 #include "unicode/dtfmtsym.h"
 #include "unicode/dtptngen.h"
+#include "unicode/fieldpos.h"
+#include "unicode/fpositer.h"
 #include "unicode/locid.h"
 #include "unicode/normalizer2.h"
 #include "unicode/numfmt.h"
@@ -322,7 +324,7 @@
   Handle<Symbol> marker = isolate->factory()->intl_impl_object_symbol();
 
   Handle<Object> impl = JSReceiver::GetDataProperty(obj, marker);
-  if (impl->IsTheHole(isolate)) {
+  if (!impl->IsJSObject()) {
     THROW_NEW_ERROR_RETURN_FAILURE(
         isolate, NewTypeError(MessageTemplate::kNotIntlObject, obj));
   }
@@ -393,6 +395,138 @@
                    result.length())));
 }
 
+namespace {
+// The list comes from third_party/icu/source/i18n/unicode/udat.h.
+// They're mapped to DateTimeFormat components listed at
+// https://tc39.github.io/ecma402/#sec-datetimeformat-abstracts .
+
+Handle<String> IcuDateFieldIdToDateType(int32_t field_id, Isolate* isolate) {
+  switch (field_id) {
+    case -1:
+      return isolate->factory()->literal_string();
+    case UDAT_YEAR_FIELD:
+    case UDAT_EXTENDED_YEAR_FIELD:
+    case UDAT_YEAR_NAME_FIELD:
+      return isolate->factory()->year_string();
+    case UDAT_MONTH_FIELD:
+    case UDAT_STANDALONE_MONTH_FIELD:
+      return isolate->factory()->month_string();
+    case UDAT_DATE_FIELD:
+      return isolate->factory()->day_string();
+    case UDAT_HOUR_OF_DAY1_FIELD:
+    case UDAT_HOUR_OF_DAY0_FIELD:
+    case UDAT_HOUR1_FIELD:
+    case UDAT_HOUR0_FIELD:
+      return isolate->factory()->hour_string();
+    case UDAT_MINUTE_FIELD:
+      return isolate->factory()->minute_string();
+    case UDAT_SECOND_FIELD:
+      return isolate->factory()->second_string();
+    case UDAT_DAY_OF_WEEK_FIELD:
+    case UDAT_DOW_LOCAL_FIELD:
+    case UDAT_STANDALONE_DAY_FIELD:
+      return isolate->factory()->weekday_string();
+    case UDAT_AM_PM_FIELD:
+      return isolate->factory()->dayperiod_string();
+    case UDAT_TIMEZONE_FIELD:
+    case UDAT_TIMEZONE_RFC_FIELD:
+    case UDAT_TIMEZONE_GENERIC_FIELD:
+    case UDAT_TIMEZONE_SPECIAL_FIELD:
+    case UDAT_TIMEZONE_LOCALIZED_GMT_OFFSET_FIELD:
+    case UDAT_TIMEZONE_ISO_FIELD:
+    case UDAT_TIMEZONE_ISO_LOCAL_FIELD:
+      return isolate->factory()->timeZoneName_string();
+    case UDAT_ERA_FIELD:
+      return isolate->factory()->era_string();
+    default:
+      // Other UDAT_*_FIELD's cannot show up because there is no way to specify
+      // them via options of Intl.DateTimeFormat.
+      UNREACHABLE();
+      // To prevent MSVC from issuing C4715 warning.
+      return Handle<String>();
+  }
+}
+
+bool AddElement(Handle<JSArray> array, int index, int32_t field_id,
+                const icu::UnicodeString& formatted, int32_t begin, int32_t end,
+                Isolate* isolate) {
+  HandleScope scope(isolate);
+  Factory* factory = isolate->factory();
+  Handle<JSObject> element = factory->NewJSObject(isolate->object_function());
+  Handle<String> value = IcuDateFieldIdToDateType(field_id, isolate);
+  JSObject::AddProperty(element, factory->type_string(), value, NONE);
+
+  icu::UnicodeString field(formatted.tempSubStringBetween(begin, end));
+  ASSIGN_RETURN_ON_EXCEPTION_VALUE(
+      isolate, value, factory->NewStringFromTwoByte(Vector<const uint16_t>(
+                          reinterpret_cast<const uint16_t*>(field.getBuffer()),
+                          field.length())),
+      false);
+
+  JSObject::AddProperty(element, factory->value_string(), value, NONE);
+  RETURN_ON_EXCEPTION_VALUE(
+      isolate, JSObject::AddDataElement(array, index, element, NONE), false);
+  return true;
+}
+
+}  // namespace
+
+RUNTIME_FUNCTION(Runtime_InternalDateFormatToParts) {
+  HandleScope scope(isolate);
+  Factory* factory = isolate->factory();
+
+  DCHECK(args.length() == 2);
+
+  CONVERT_ARG_HANDLE_CHECKED(JSObject, date_format_holder, 0);
+  CONVERT_ARG_HANDLE_CHECKED(JSDate, date, 1);
+
+  Handle<Object> value;
+  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, value, Object::ToNumber(date));
+
+  icu::SimpleDateFormat* date_format =
+      DateFormat::UnpackDateFormat(isolate, date_format_holder);
+  if (!date_format) return isolate->ThrowIllegalOperation();
+
+  icu::UnicodeString formatted;
+  icu::FieldPositionIterator fp_iter;
+  icu::FieldPosition fp;
+  UErrorCode status = U_ZERO_ERROR;
+  date_format->format(value->Number(), formatted, &fp_iter, status);
+  if (U_FAILURE(status)) return isolate->heap()->undefined_value();
+
+  Handle<JSArray> result = factory->NewJSArray(0);
+  int32_t length = formatted.length();
+  if (length == 0) return *result;
+
+  int index = 0;
+  int32_t previous_end_pos = 0;
+  while (fp_iter.next(fp)) {
+    int32_t begin_pos = fp.getBeginIndex();
+    int32_t end_pos = fp.getEndIndex();
+
+    if (previous_end_pos < begin_pos) {
+      if (!AddElement(result, index, -1, formatted, previous_end_pos, begin_pos,
+                      isolate)) {
+        return isolate->heap()->undefined_value();
+      }
+      ++index;
+    }
+    if (!AddElement(result, index, fp.getField(), formatted, begin_pos, end_pos,
+                    isolate)) {
+      return isolate->heap()->undefined_value();
+    }
+    previous_end_pos = end_pos;
+    ++index;
+  }
+  if (previous_end_pos < length) {
+    if (!AddElement(result, index, -1, formatted, previous_end_pos, length,
+                    isolate)) {
+      return isolate->heap()->undefined_value();
+    }
+  }
+  JSObject::ValidateElements(result);
+  return *result;
+}
 
 RUNTIME_FUNCTION(Runtime_InternalDateParse) {
   HandleScope scope(isolate);
diff --git a/src/runtime/runtime-internal.cc b/src/runtime/runtime-internal.cc
index 3de0f16..26882b5 100644
--- a/src/runtime/runtime-internal.cc
+++ b/src/runtime/runtime-internal.cc
@@ -120,18 +120,17 @@
       error, isolate->factory()->stack_trace_symbol());
   // Patch the stack trace (array of <receiver, function, code, position>).
   if (stack_trace_obj->IsJSArray()) {
-    Handle<FixedArray> stack_elements(
-        FixedArray::cast(JSArray::cast(*stack_trace_obj)->elements()));
-    DCHECK_EQ(1, stack_elements->length() % 4);
-    DCHECK(Code::cast(stack_elements->get(3))->kind() == Code::WASM_FUNCTION);
-    DCHECK(stack_elements->get(4)->IsSmi() &&
-           Smi::cast(stack_elements->get(4))->value() >= 0);
-    stack_elements->set(4, Smi::FromInt(-1 - byte_offset));
+    Handle<FrameArray> stack_elements(
+        FrameArray::cast(JSArray::cast(*stack_trace_obj)->elements()));
+    DCHECK(stack_elements->Code(0)->kind() == AbstractCode::WASM_FUNCTION);
+    DCHECK(stack_elements->Offset(0)->value() >= 0);
+    stack_elements->SetOffset(0, Smi::FromInt(-1 - byte_offset));
   }
-  Handle<Object> detailed_stack_trace_obj = JSReceiver::GetDataProperty(
-      error, isolate->factory()->detailed_stack_trace_symbol());
+
   // Patch the detailed stack trace (array of JSObjects with various
   // properties).
+  Handle<Object> detailed_stack_trace_obj = JSReceiver::GetDataProperty(
+      error, isolate->factory()->detailed_stack_trace_symbol());
   if (detailed_stack_trace_obj->IsJSArray()) {
     Handle<FixedArray> stack_elements(
         FixedArray::cast(JSArray::cast(*detailed_stack_trace_obj)->elements()));
@@ -235,8 +234,7 @@
 
 RUNTIME_FUNCTION(Runtime_ThrowInvalidStringLength) {
   HandleScope scope(isolate);
-  THROW_NEW_ERROR_RETURN_FAILURE(
-      isolate, NewRangeError(MessageTemplate::kInvalidStringLength));
+  THROW_NEW_ERROR_RETURN_FAILURE(isolate, NewInvalidStringLengthError());
 }
 
 RUNTIME_FUNCTION(Runtime_ThrowIteratorResultNotAnObject) {
@@ -272,6 +270,23 @@
       isolate, NewTypeError(MessageTemplate::kApplyNonFunction, object, type));
 }
 
+namespace {
+
+void PromiseRejectEvent(Isolate* isolate, Handle<JSObject> promise,
+                        Handle<Object> rejected_promise, Handle<Object> value,
+                        bool debug_event) {
+  if (isolate->debug()->is_active() && debug_event) {
+    isolate->debug()->OnPromiseReject(rejected_promise, value);
+  }
+  Handle<Symbol> key = isolate->factory()->promise_has_handler_symbol();
+  // Do not report if we actually have a handler.
+  if (JSReceiver::GetDataProperty(promise, key)->IsUndefined(isolate)) {
+    isolate->ReportPromiseReject(promise, value,
+                                 v8::kPromiseRejectWithNoHandler);
+  }
+}
+
+}  // namespace
 
 RUNTIME_FUNCTION(Runtime_PromiseRejectEvent) {
   DCHECK(args.length() == 3);
@@ -279,16 +294,27 @@
   CONVERT_ARG_HANDLE_CHECKED(JSObject, promise, 0);
   CONVERT_ARG_HANDLE_CHECKED(Object, value, 1);
   CONVERT_BOOLEAN_ARG_CHECKED(debug_event, 2);
-  if (debug_event) isolate->debug()->OnPromiseReject(promise, value);
-  Handle<Symbol> key = isolate->factory()->promise_has_handler_symbol();
-  // Do not report if we actually have a handler.
-  if (JSReceiver::GetDataProperty(promise, key)->IsUndefined(isolate)) {
-    isolate->ReportPromiseReject(promise, value,
-                                 v8::kPromiseRejectWithNoHandler);
-  }
+
+  PromiseRejectEvent(isolate, promise, promise, value, debug_event);
   return isolate->heap()->undefined_value();
 }
 
+RUNTIME_FUNCTION(Runtime_PromiseRejectEventFromStack) {
+  DCHECK(args.length() == 2);
+  HandleScope scope(isolate);
+  CONVERT_ARG_HANDLE_CHECKED(JSObject, promise, 0);
+  CONVERT_ARG_HANDLE_CHECKED(Object, value, 1);
+
+  Handle<Object> rejected_promise = promise;
+  if (isolate->debug()->is_active()) {
+    // If the Promise.reject call is caught, then this will return
+    // undefined, which will be interpreted by PromiseRejectEvent
+    // as being a caught exception event.
+    rejected_promise = isolate->GetPromiseOnStackOnThrow();
+  }
+  PromiseRejectEvent(isolate, promise, rejected_promise, value, true);
+  return isolate->heap()->undefined_value();
+}
 
 RUNTIME_FUNCTION(Runtime_PromiseRevokeReject) {
   DCHECK(args.length() == 1);
@@ -330,7 +356,7 @@
   CONVERT_SMI_ARG_CHECKED(size, 0);
   CHECK(IsAligned(size, kPointerSize));
   CHECK(size > 0);
-  CHECK(size <= Page::kMaxRegularHeapObjectSize);
+  CHECK(size <= kMaxRegularHeapObjectSize);
   return *isolate->factory()->NewFillerObject(size, false, NEW_SPACE);
 }
 
@@ -342,7 +368,7 @@
   CONVERT_SMI_ARG_CHECKED(flags, 1);
   CHECK(IsAligned(size, kPointerSize));
   CHECK(size > 0);
-  CHECK(size <= Page::kMaxRegularHeapObjectSize);
+  CHECK(size <= kMaxRegularHeapObjectSize);
   bool double_align = AllocateDoubleAlignFlag::decode(flags);
   AllocationSpace space = AllocateTargetSpace::decode(flags);
   return *isolate->factory()->NewFillerObject(size, double_align, space);
@@ -528,6 +554,21 @@
   }
 }
 
+RUNTIME_FUNCTION(Runtime_EnqueuePromiseResolveThenableJob) {
+  HandleScope scope(isolate);
+  DCHECK(args.length() == 6);
+  CONVERT_ARG_HANDLE_CHECKED(JSReceiver, resolution, 0);
+  CONVERT_ARG_HANDLE_CHECKED(JSReceiver, then, 1);
+  CONVERT_ARG_HANDLE_CHECKED(JSFunction, resolve, 2);
+  CONVERT_ARG_HANDLE_CHECKED(JSFunction, reject, 3);
+  CONVERT_ARG_HANDLE_CHECKED(Object, before_debug_event, 4);
+  CONVERT_ARG_HANDLE_CHECKED(Object, after_debug_event, 5);
+  Handle<PromiseContainer> container = isolate->factory()->NewPromiseContainer(
+      resolution, then, resolve, reject, before_debug_event, after_debug_event);
+  isolate->EnqueueMicrotask(container);
+  return isolate->heap()->undefined_value();
+}
+
 RUNTIME_FUNCTION(Runtime_EnqueueMicrotask) {
   HandleScope scope(isolate);
   DCHECK(args.length() == 1);
diff --git a/src/runtime/runtime-literals.cc b/src/runtime/runtime-literals.cc
index a0dd3e8..ebdf04c 100644
--- a/src/runtime/runtime-literals.cc
+++ b/src/runtime/runtime-literals.cc
@@ -6,8 +6,9 @@
 
 #include "src/allocation-site-scopes.h"
 #include "src/arguments.h"
+#include "src/ast/ast.h"
+#include "src/ast/compile-time-value.h"
 #include "src/isolate-inl.h"
-#include "src/parsing/parser.h"
 #include "src/runtime/runtime.h"
 
 namespace v8 {
diff --git a/src/runtime/runtime-object.cc b/src/runtime/runtime-object.cc
index 7908c62..70ed23b 100644
--- a/src/runtime/runtime-object.cc
+++ b/src/runtime/runtime-object.cc
@@ -677,6 +677,38 @@
   return *object;
 }
 
+RUNTIME_FUNCTION(Runtime_DefineDataProperty) {
+  HandleScope scope(isolate);
+  DCHECK(args.length() == 5);
+  CONVERT_ARG_HANDLE_CHECKED(JSReceiver, receiver, 0);
+  CONVERT_ARG_HANDLE_CHECKED(Name, name, 1);
+  CONVERT_ARG_HANDLE_CHECKED(Object, value, 2);
+  CONVERT_PROPERTY_ATTRIBUTES_CHECKED(attrs, 3);
+  CONVERT_SMI_ARG_CHECKED(set_function_name, 4);
+
+  if (set_function_name) {
+    DCHECK(value->IsJSFunction());
+    JSFunction::SetName(Handle<JSFunction>::cast(value), name,
+                        isolate->factory()->empty_string());
+  }
+
+  PropertyDescriptor desc;
+  desc.set_writable(!(attrs & ReadOnly));
+  desc.set_enumerable(!(attrs & DontEnum));
+  desc.set_configurable(!(attrs & DontDelete));
+  desc.set_value(value);
+
+  Maybe<bool> result = JSReceiver::DefineOwnProperty(isolate, receiver, name,
+                                                     &desc, Object::DONT_THROW);
+  RETURN_FAILURE_IF_SCHEDULED_EXCEPTION(isolate);
+  if (result.IsNothing()) {
+    DCHECK(isolate->has_pending_exception());
+    return isolate->heap()->exception();
+  }
+
+  return *receiver;
+}
+
 // Return property without being observable by accessors or interceptors.
 RUNTIME_FUNCTION(Runtime_GetDataProperty) {
   HandleScope scope(isolate);
@@ -928,5 +960,32 @@
   return *value;
 }
 
+RUNTIME_FUNCTION(Runtime_LoadModuleExport) {
+  HandleScope scope(isolate);
+  DCHECK(args.length() == 1);
+  CONVERT_ARG_HANDLE_CHECKED(String, name, 0);
+  Handle<Module> module(isolate->context()->module());
+  return *Module::LoadExport(module, name);
+}
+
+RUNTIME_FUNCTION(Runtime_LoadModuleImport) {
+  HandleScope scope(isolate);
+  DCHECK(args.length() == 2);
+  CONVERT_ARG_HANDLE_CHECKED(String, name, 0);
+  CONVERT_ARG_HANDLE_CHECKED(Smi, module_request, 1);
+  Handle<Module> module(isolate->context()->module());
+  return *Module::LoadImport(module, name, module_request->value());
+}
+
+RUNTIME_FUNCTION(Runtime_StoreModuleExport) {
+  HandleScope scope(isolate);
+  DCHECK(args.length() == 2);
+  CONVERT_ARG_HANDLE_CHECKED(String, name, 0);
+  CONVERT_ARG_HANDLE_CHECKED(Object, value, 1);
+  Handle<Module> module(isolate->context()->module());
+  Module::StoreExport(module, name, value);
+  return isolate->heap()->undefined_value();
+}
+
 }  // namespace internal
 }  // namespace v8
diff --git a/src/runtime/runtime-regexp.cc b/src/runtime/runtime-regexp.cc
index b36e5e6..977e6bc 100644
--- a/src/runtime/runtime-regexp.cc
+++ b/src/runtime/runtime-regexp.cc
@@ -794,7 +794,7 @@
   return regexp->source();
 }
 
-
+// TODO(jgruber): Remove this once all uses in regexp.js have been removed.
 RUNTIME_FUNCTION(Runtime_RegExpConstructResult) {
   HandleScope handle_scope(isolate);
   DCHECK(args.length() == 3);
diff --git a/src/runtime/runtime-scopes.cc b/src/runtime/runtime-scopes.cc
index 26bfb29..0c037db 100644
--- a/src/runtime/runtime-scopes.cc
+++ b/src/runtime/runtime-scopes.cc
@@ -44,7 +44,7 @@
 Object* DeclareGlobal(
     Isolate* isolate, Handle<JSGlobalObject> global, Handle<String> name,
     Handle<Object> value, PropertyAttributes attr, bool is_var,
-    bool is_function, RedeclarationType redeclaration_type,
+    bool is_function_declaration, RedeclarationType redeclaration_type,
     Handle<TypeFeedbackVector> feedback_vector = Handle<TypeFeedbackVector>(),
     FeedbackVectorSlot slot = FeedbackVectorSlot::Invalid()) {
   Handle<ScriptContextTable> script_contexts(
@@ -60,7 +60,14 @@
   }
 
   // Do the lookup own properties only, see ES5 erratum.
-  LookupIterator it(global, name, global, LookupIterator::OWN_SKIP_INTERCEPTOR);
+  LookupIterator::Configuration lookup_config(
+      LookupIterator::Configuration::OWN_SKIP_INTERCEPTOR);
+  if (is_function_declaration) {
+    // For function declarations, use the interceptor on the declaration. For
+    // non-functions, use it only on initialization.
+    lookup_config = LookupIterator::Configuration::OWN;
+  }
+  LookupIterator it(global, name, global, lookup_config);
   Maybe<PropertyAttributes> maybe = JSReceiver::GetPropertyAttributes(&it);
   if (!maybe.IsJust()) return isolate->heap()->exception();
 
@@ -71,7 +78,7 @@
     // Skip var re-declarations.
     if (is_var) return isolate->heap()->undefined_value();
 
-    DCHECK(is_function);
+    DCHECK(is_function_declaration);
     if ((old_attributes & DONT_DELETE) != 0) {
       // Only allow reconfiguring globals to functions in user code (no
       // natives, which are marked as read-only).
@@ -83,9 +90,9 @@
       if (old_details.IsReadOnly() || old_details.IsDontEnum() ||
           (it.state() == LookupIterator::ACCESSOR &&
            it.GetAccessors()->IsAccessorPair())) {
-        // ES#sec-globaldeclarationinstantiation 5.d:
+        // ECMA-262 section 15.1.11 GlobalDeclarationInstantiation 5.d:
         // If hasRestrictedGlobal is true, throw a SyntaxError exception.
-        // ES#sec-evaldeclarationinstantiation 8.a.iv.1.b:
+        // ECMA-262 section 18.2.1.3 EvalDeclarationInstantiation 8.a.iv.1.b:
         // If fnDefinable is false, throw a TypeError exception.
         return ThrowRedeclarationError(isolate, name, redeclaration_type);
       }
@@ -102,6 +109,10 @@
     if (it.state() == LookupIterator::ACCESSOR) it.Delete();
   }
 
+  if (is_function_declaration) {
+    it.Restart();
+  }
+
   // Define or redefine own property.
   RETURN_FAILURE_ON_EXCEPTION(
       isolate, JSObject::DefineOwnPropertyIgnoreAttributes(&it, value, attr));
@@ -294,9 +305,8 @@
       DCHECK(context->IsBlockContext());
       object = isolate->factory()->NewJSObject(
           isolate->context_extension_function());
-      Handle<HeapObject> extension =
-          isolate->factory()->NewSloppyBlockWithEvalContextExtension(
-              handle(context->scope_info()), object);
+      Handle<HeapObject> extension = isolate->factory()->NewContextExtension(
+          handle(context->scope_info()), object);
       context->set_extension(*extension);
     } else {
       object = handle(context->extension_object(), isolate);
@@ -665,8 +675,6 @@
   Handle<Context> result =
       isolate->factory()->NewScriptContext(closure, scope_info);
 
-  result->InitializeGlobalSlots();
-
   DCHECK(function->context() == isolate->context());
   DCHECK(*global_object == result->global_object());
 
@@ -691,26 +699,41 @@
 
 RUNTIME_FUNCTION(Runtime_PushWithContext) {
   HandleScope scope(isolate);
-  DCHECK_EQ(2, args.length());
+  DCHECK_EQ(3, args.length());
   CONVERT_ARG_HANDLE_CHECKED(JSReceiver, extension_object, 0);
-  CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 1);
+  CONVERT_ARG_HANDLE_CHECKED(ScopeInfo, scope_info, 1);
+  CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 2);
   Handle<Context> current(isolate->context());
-  Handle<Context> context =
-      isolate->factory()->NewWithContext(function, current, extension_object);
+  Handle<Context> context = isolate->factory()->NewWithContext(
+      function, current, scope_info, extension_object);
   isolate->set_context(*context);
   return *context;
 }
 
+RUNTIME_FUNCTION(Runtime_PushModuleContext) {
+  HandleScope scope(isolate);
+  DCHECK_EQ(3, args.length());
+  CONVERT_ARG_HANDLE_CHECKED(Module, module, 0);
+  CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 1);
+  CONVERT_ARG_HANDLE_CHECKED(ScopeInfo, scope_info, 2);
+  DCHECK(function->context() == isolate->context());
+
+  Handle<Context> context =
+      isolate->factory()->NewModuleContext(module, function, scope_info);
+  isolate->set_context(*context);
+  return *context;
+}
 
 RUNTIME_FUNCTION(Runtime_PushCatchContext) {
   HandleScope scope(isolate);
-  DCHECK_EQ(3, args.length());
+  DCHECK_EQ(4, args.length());
   CONVERT_ARG_HANDLE_CHECKED(String, name, 0);
   CONVERT_ARG_HANDLE_CHECKED(Object, thrown_object, 1);
-  CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 2);
+  CONVERT_ARG_HANDLE_CHECKED(ScopeInfo, scope_info, 2);
+  CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 3);
   Handle<Context> current(isolate->context());
   Handle<Context> context = isolate->factory()->NewCatchContext(
-      function, current, name, thrown_object);
+      function, current, scope_info, name, thrown_object);
   isolate->set_context(*context);
   return *context;
 }
diff --git a/src/runtime/runtime-strings.cc b/src/runtime/runtime-strings.cc
index 517513e..f5bda59 100644
--- a/src/runtime/runtime-strings.cc
+++ b/src/runtime/runtime-strings.cc
@@ -103,140 +103,12 @@
   return Smi::FromInt(position);
 }
 
-
-template <typename schar, typename pchar>
-static int StringMatchBackwards(Vector<const schar> subject,
-                                Vector<const pchar> pattern, int idx) {
-  int pattern_length = pattern.length();
-  DCHECK(pattern_length >= 1);
-  DCHECK(idx + pattern_length <= subject.length());
-
-  if (sizeof(schar) == 1 && sizeof(pchar) > 1) {
-    for (int i = 0; i < pattern_length; i++) {
-      uc16 c = pattern[i];
-      if (c > String::kMaxOneByteCharCode) {
-        return -1;
-      }
-    }
-  }
-
-  pchar pattern_first_char = pattern[0];
-  for (int i = idx; i >= 0; i--) {
-    if (subject[i] != pattern_first_char) continue;
-    int j = 1;
-    while (j < pattern_length) {
-      if (pattern[j] != subject[i + j]) {
-        break;
-      }
-      j++;
-    }
-    if (j == pattern_length) {
-      return i;
-    }
-  }
-  return -1;
-}
-
-
 RUNTIME_FUNCTION(Runtime_StringLastIndexOf) {
-  HandleScope scope(isolate);
-  DCHECK(args.length() == 3);
-
-  CONVERT_ARG_HANDLE_CHECKED(String, sub, 0);
-  CONVERT_ARG_HANDLE_CHECKED(String, pat, 1);
-  CONVERT_ARG_HANDLE_CHECKED(Object, index, 2);
-
-  uint32_t start_index = 0;
-  if (!index->ToArrayIndex(&start_index)) return Smi::FromInt(-1);
-
-  uint32_t pat_length = pat->length();
-  uint32_t sub_length = sub->length();
-
-  if (start_index + pat_length > sub_length) {
-    start_index = sub_length - pat_length;
-  }
-
-  if (pat_length == 0) {
-    return Smi::FromInt(start_index);
-  }
-
-  sub = String::Flatten(sub);
-  pat = String::Flatten(pat);
-
-  int position = -1;
-  DisallowHeapAllocation no_gc;  // ensure vectors stay valid
-
-  String::FlatContent sub_content = sub->GetFlatContent();
-  String::FlatContent pat_content = pat->GetFlatContent();
-
-  if (pat_content.IsOneByte()) {
-    Vector<const uint8_t> pat_vector = pat_content.ToOneByteVector();
-    if (sub_content.IsOneByte()) {
-      position = StringMatchBackwards(sub_content.ToOneByteVector(), pat_vector,
-                                      start_index);
-    } else {
-      position = StringMatchBackwards(sub_content.ToUC16Vector(), pat_vector,
-                                      start_index);
-    }
-  } else {
-    Vector<const uc16> pat_vector = pat_content.ToUC16Vector();
-    if (sub_content.IsOneByte()) {
-      position = StringMatchBackwards(sub_content.ToOneByteVector(), pat_vector,
-                                      start_index);
-    } else {
-      position = StringMatchBackwards(sub_content.ToUC16Vector(), pat_vector,
-                                      start_index);
-    }
-  }
-
-  return Smi::FromInt(position);
-}
-
-
-RUNTIME_FUNCTION(Runtime_StringLocaleCompare) {
   HandleScope handle_scope(isolate);
-  DCHECK(args.length() == 2);
-
-  CONVERT_ARG_HANDLE_CHECKED(String, str1, 0);
-  CONVERT_ARG_HANDLE_CHECKED(String, str2, 1);
-
-  if (str1.is_identical_to(str2)) return Smi::FromInt(0);  // Equal.
-  int str1_length = str1->length();
-  int str2_length = str2->length();
-
-  // Decide trivial cases without flattening.
-  if (str1_length == 0) {
-    if (str2_length == 0) return Smi::FromInt(0);  // Equal.
-    return Smi::FromInt(-str2_length);
-  } else {
-    if (str2_length == 0) return Smi::FromInt(str1_length);
-  }
-
-  int end = str1_length < str2_length ? str1_length : str2_length;
-
-  // No need to flatten if we are going to find the answer on the first
-  // character.  At this point we know there is at least one character
-  // in each string, due to the trivial case handling above.
-  int d = str1->Get(0) - str2->Get(0);
-  if (d != 0) return Smi::FromInt(d);
-
-  str1 = String::Flatten(str1);
-  str2 = String::Flatten(str2);
-
-  DisallowHeapAllocation no_gc;
-  String::FlatContent flat1 = str1->GetFlatContent();
-  String::FlatContent flat2 = str2->GetFlatContent();
-
-  for (int i = 0; i < end; i++) {
-    if (flat1.Get(i) != flat2.Get(i)) {
-      return Smi::FromInt(flat1.Get(i) - flat2.Get(i));
-    }
-  }
-
-  return Smi::FromInt(str1_length - str2_length);
+  return String::LastIndexOf(isolate, args.at<Object>(0), args.at<Object>(1),
+                             isolate->factory()->undefined_value());
 }
 
-
 RUNTIME_FUNCTION(Runtime_SubString) {
   HandleScope scope(isolate);
   DCHECK(args.length() == 3);
diff --git a/src/runtime/runtime-test.cc b/src/runtime/runtime-test.cc
index 0d6cb0e..8100d2c 100644
--- a/src/runtime/runtime-test.cc
+++ b/src/runtime/runtime-test.cc
@@ -8,6 +8,7 @@
 
 #include "src/arguments.h"
 #include "src/compiler-dispatcher/optimizing-compile-dispatcher.h"
+#include "src/compiler.h"
 #include "src/deoptimizer.h"
 #include "src/frames-inl.h"
 #include "src/full-codegen/full-codegen.h"
@@ -419,8 +420,8 @@
   SealHandleScope shs(isolate);
   DCHECK(args.length() == 2 || args.length() == 3);
 #ifdef DEBUG
-  CONVERT_SMI_ARG_CHECKED(interval, 0);
-  CONVERT_SMI_ARG_CHECKED(timeout, 1);
+  CONVERT_INT32_ARG_CHECKED(interval, 0);
+  CONVERT_INT32_ARG_CHECKED(timeout, 1);
   isolate->heap()->set_allocation_timeout(timeout);
   FLAG_gc_interval = interval;
   if (args.length() == 3) {
@@ -456,7 +457,6 @@
   }
   args[0]->Print(os);
   if (args[0]->IsHeapObject()) {
-    os << "\n";
     HeapObject::cast(args[0])->map()->Print(os);
   }
 #else
@@ -768,7 +768,34 @@
   if (!maybe_compiled_module.ToHandle(&compiled_module)) {
     return isolate->heap()->undefined_value();
   }
-  return *wasm::CreateCompiledModuleObject(isolate, compiled_module);
+  return *wasm::CreateCompiledModuleObject(isolate, compiled_module,
+                                           wasm::ModuleOrigin::kWasmOrigin);
+}
+
+RUNTIME_FUNCTION(Runtime_ValidateWasmInstancesChain) {
+  HandleScope shs(isolate);
+  DCHECK(args.length() == 2);
+  CONVERT_ARG_HANDLE_CHECKED(JSObject, module_obj, 0);
+  CONVERT_ARG_HANDLE_CHECKED(Smi, instance_count, 1);
+  wasm::testing::ValidateInstancesChain(isolate, module_obj,
+                                        instance_count->value());
+  return isolate->heap()->ToBoolean(true);
+}
+
+RUNTIME_FUNCTION(Runtime_ValidateWasmModuleState) {
+  HandleScope shs(isolate);
+  DCHECK(args.length() == 1);
+  CONVERT_ARG_HANDLE_CHECKED(JSObject, module_obj, 0);
+  wasm::testing::ValidateModuleState(isolate, module_obj);
+  return isolate->heap()->ToBoolean(true);
+}
+
+RUNTIME_FUNCTION(Runtime_ValidateWasmOrphanedInstance) {
+  HandleScope shs(isolate);
+  DCHECK(args.length() == 1);
+  CONVERT_ARG_HANDLE_CHECKED(JSObject, instance_obj, 0);
+  wasm::testing::ValidateOrphanedInstance(isolate, instance_obj);
+  return isolate->heap()->ToBoolean(true);
 }
 
 }  // namespace internal
diff --git a/src/runtime/runtime-typedarray.cc b/src/runtime/runtime-typedarray.cc
index 04bf368..ba422bf 100644
--- a/src/runtime/runtime-typedarray.cc
+++ b/src/runtime/runtime-typedarray.cc
@@ -200,7 +200,6 @@
   size_t length = 0;
   if (source->IsJSTypedArray() &&
       JSTypedArray::cast(*source)->type() == array_type) {
-    length_obj = handle(JSTypedArray::cast(*source)->length(), isolate);
     length = JSTypedArray::cast(*source)->length_value();
   } else {
     CHECK(TryNumberToSize(*length_obj, &length));
@@ -246,6 +245,7 @@
   Handle<Object> byte_length_obj(
       isolate->factory()->NewNumberFromSize(byte_length));
   holder->set_byte_length(*byte_length_obj);
+  length_obj = isolate->factory()->NewNumberFromSize(length);
   holder->set_length(*length_obj);
 
   Handle<FixedTypedArrayBase> elements =
@@ -419,217 +419,5 @@
                                     obj->type() == kExternalInt32Array);
 }
 
-
-inline static bool NeedToFlipBytes(bool is_little_endian) {
-#ifdef V8_TARGET_LITTLE_ENDIAN
-  return !is_little_endian;
-#else
-  return is_little_endian;
-#endif
-}
-
-
-template <int n>
-inline void CopyBytes(uint8_t* target, uint8_t* source) {
-  for (int i = 0; i < n; i++) {
-    *(target++) = *(source++);
-  }
-}
-
-
-template <int n>
-inline void FlipBytes(uint8_t* target, uint8_t* source) {
-  source = source + (n - 1);
-  for (int i = 0; i < n; i++) {
-    *(target++) = *(source--);
-  }
-}
-
-
-template <typename T>
-inline static bool DataViewGetValue(Isolate* isolate,
-                                    Handle<JSDataView> data_view,
-                                    Handle<Object> byte_offset_obj,
-                                    bool is_little_endian, T* result) {
-  size_t byte_offset = 0;
-  if (!TryNumberToSize(*byte_offset_obj, &byte_offset)) {
-    return false;
-  }
-  Handle<JSArrayBuffer> buffer(JSArrayBuffer::cast(data_view->buffer()));
-
-  size_t data_view_byte_offset = NumberToSize(data_view->byte_offset());
-  size_t data_view_byte_length = NumberToSize(data_view->byte_length());
-  if (byte_offset + sizeof(T) > data_view_byte_length ||
-      byte_offset + sizeof(T) < byte_offset) {  // overflow
-    return false;
-  }
-
-  union Value {
-    T data;
-    uint8_t bytes[sizeof(T)];
-  };
-
-  Value value;
-  size_t buffer_offset = data_view_byte_offset + byte_offset;
-  DCHECK(NumberToSize(buffer->byte_length()) >= buffer_offset + sizeof(T));
-  uint8_t* source =
-      static_cast<uint8_t*>(buffer->backing_store()) + buffer_offset;
-  if (NeedToFlipBytes(is_little_endian)) {
-    FlipBytes<sizeof(T)>(value.bytes, source);
-  } else {
-    CopyBytes<sizeof(T)>(value.bytes, source);
-  }
-  *result = value.data;
-  return true;
-}
-
-
-template <typename T>
-static bool DataViewSetValue(Isolate* isolate, Handle<JSDataView> data_view,
-                             Handle<Object> byte_offset_obj,
-                             bool is_little_endian, T data) {
-  size_t byte_offset = 0;
-  if (!TryNumberToSize(*byte_offset_obj, &byte_offset)) {
-    return false;
-  }
-  Handle<JSArrayBuffer> buffer(JSArrayBuffer::cast(data_view->buffer()));
-
-  size_t data_view_byte_offset = NumberToSize(data_view->byte_offset());
-  size_t data_view_byte_length = NumberToSize(data_view->byte_length());
-  if (byte_offset + sizeof(T) > data_view_byte_length ||
-      byte_offset + sizeof(T) < byte_offset) {  // overflow
-    return false;
-  }
-
-  union Value {
-    T data;
-    uint8_t bytes[sizeof(T)];
-  };
-
-  Value value;
-  value.data = data;
-  size_t buffer_offset = data_view_byte_offset + byte_offset;
-  DCHECK(NumberToSize(buffer->byte_length()) >= buffer_offset + sizeof(T));
-  uint8_t* target =
-      static_cast<uint8_t*>(buffer->backing_store()) + buffer_offset;
-  if (NeedToFlipBytes(is_little_endian)) {
-    FlipBytes<sizeof(T)>(target, value.bytes);
-  } else {
-    CopyBytes<sizeof(T)>(target, value.bytes);
-  }
-  return true;
-}
-
-
-#define DATA_VIEW_GETTER(TypeName, Type, Converter)                        \
-  RUNTIME_FUNCTION(Runtime_DataViewGet##TypeName) {                        \
-    HandleScope scope(isolate);                                            \
-    DCHECK(args.length() == 3);                                            \
-    CONVERT_ARG_HANDLE_CHECKED(JSDataView, holder, 0);                     \
-    CONVERT_NUMBER_ARG_HANDLE_CHECKED(offset, 1);                          \
-    CONVERT_BOOLEAN_ARG_CHECKED(is_little_endian, 2);                      \
-    Type result;                                                           \
-    if (DataViewGetValue(isolate, holder, offset, is_little_endian,        \
-                         &result)) {                                       \
-      return *isolate->factory()->Converter(result);                       \
-    } else {                                                               \
-      THROW_NEW_ERROR_RETURN_FAILURE(                                      \
-          isolate,                                                         \
-          NewRangeError(MessageTemplate::kInvalidDataViewAccessorOffset)); \
-    }                                                                      \
-  }
-
-DATA_VIEW_GETTER(Uint8, uint8_t, NewNumberFromUint)
-DATA_VIEW_GETTER(Int8, int8_t, NewNumberFromInt)
-DATA_VIEW_GETTER(Uint16, uint16_t, NewNumberFromUint)
-DATA_VIEW_GETTER(Int16, int16_t, NewNumberFromInt)
-DATA_VIEW_GETTER(Uint32, uint32_t, NewNumberFromUint)
-DATA_VIEW_GETTER(Int32, int32_t, NewNumberFromInt)
-DATA_VIEW_GETTER(Float32, float, NewNumber)
-DATA_VIEW_GETTER(Float64, double, NewNumber)
-
-#undef DATA_VIEW_GETTER
-
-
-template <typename T>
-static T DataViewConvertValue(double value);
-
-
-template <>
-int8_t DataViewConvertValue<int8_t>(double value) {
-  return static_cast<int8_t>(DoubleToInt32(value));
-}
-
-
-template <>
-int16_t DataViewConvertValue<int16_t>(double value) {
-  return static_cast<int16_t>(DoubleToInt32(value));
-}
-
-
-template <>
-int32_t DataViewConvertValue<int32_t>(double value) {
-  return DoubleToInt32(value);
-}
-
-
-template <>
-uint8_t DataViewConvertValue<uint8_t>(double value) {
-  return static_cast<uint8_t>(DoubleToUint32(value));
-}
-
-
-template <>
-uint16_t DataViewConvertValue<uint16_t>(double value) {
-  return static_cast<uint16_t>(DoubleToUint32(value));
-}
-
-
-template <>
-uint32_t DataViewConvertValue<uint32_t>(double value) {
-  return DoubleToUint32(value);
-}
-
-
-template <>
-float DataViewConvertValue<float>(double value) {
-  return static_cast<float>(value);
-}
-
-
-template <>
-double DataViewConvertValue<double>(double value) {
-  return value;
-}
-
-
-#define DATA_VIEW_SETTER(TypeName, Type)                                   \
-  RUNTIME_FUNCTION(Runtime_DataViewSet##TypeName) {                        \
-    HandleScope scope(isolate);                                            \
-    DCHECK(args.length() == 4);                                            \
-    CONVERT_ARG_HANDLE_CHECKED(JSDataView, holder, 0);                     \
-    CONVERT_NUMBER_ARG_HANDLE_CHECKED(offset, 1);                          \
-    CONVERT_NUMBER_ARG_HANDLE_CHECKED(value, 2);                           \
-    CONVERT_BOOLEAN_ARG_CHECKED(is_little_endian, 3);                      \
-    Type v = DataViewConvertValue<Type>(value->Number());                  \
-    if (DataViewSetValue(isolate, holder, offset, is_little_endian, v)) {  \
-      return isolate->heap()->undefined_value();                           \
-    } else {                                                               \
-      THROW_NEW_ERROR_RETURN_FAILURE(                                      \
-          isolate,                                                         \
-          NewRangeError(MessageTemplate::kInvalidDataViewAccessorOffset)); \
-    }                                                                      \
-  }
-
-DATA_VIEW_SETTER(Uint8, uint8_t)
-DATA_VIEW_SETTER(Int8, int8_t)
-DATA_VIEW_SETTER(Uint16, uint16_t)
-DATA_VIEW_SETTER(Int16, int16_t)
-DATA_VIEW_SETTER(Uint32, uint32_t)
-DATA_VIEW_SETTER(Int32, int32_t)
-DATA_VIEW_SETTER(Float32, float)
-DATA_VIEW_SETTER(Float64, double)
-
-#undef DATA_VIEW_SETTER
 }  // namespace internal
 }  // namespace v8
diff --git a/src/runtime/runtime-wasm.cc b/src/runtime/runtime-wasm.cc
index 37608e6..ab69046 100644
--- a/src/runtime/runtime-wasm.cc
+++ b/src/runtime/runtime-wasm.cc
@@ -18,17 +18,11 @@
 namespace v8 {
 namespace internal {
 
-namespace {
-const int kWasmMemArrayBuffer = 2;
-}
-
-RUNTIME_FUNCTION(Runtime_WasmGrowMemory) {
+RUNTIME_FUNCTION(Runtime_WasmMemorySize) {
   HandleScope scope(isolate);
-  DCHECK_EQ(1, args.length());
-  uint32_t delta_pages = 0;
-  CHECK(args[0]->ToUint32(&delta_pages));
-  Handle<JSObject> module_object;
+  DCHECK_EQ(0, args.length());
 
+  Handle<JSObject> module_instance;
   {
     // Get the module JSObject
     DisallowHeapAllocation no_allocation;
@@ -37,77 +31,33 @@
         Memory::Address_at(entry + StandardFrameConstants::kCallerPCOffset);
     Code* code =
         isolate->inner_pointer_to_code_cache()->GetCacheEntry(pc)->code;
-    FixedArray* deopt_data = code->deoptimization_data();
-    DCHECK(deopt_data->length() == 2);
-    module_object = Handle<JSObject>::cast(handle(deopt_data->get(0), isolate));
-    CHECK(!module_object->IsNull(isolate));
+    Object* owning_instance = wasm::GetOwningWasmInstance(code);
+    CHECK_NOT_NULL(owning_instance);
+    module_instance = handle(JSObject::cast(owning_instance), isolate);
   }
+  return *isolate->factory()->NewNumberFromInt(
+      wasm::GetInstanceMemorySize(isolate, module_instance));
+}
 
-  Address old_mem_start, new_mem_start;
-  uint32_t old_size, new_size;
-
-  // Get mem buffer associated with module object
-  Handle<Object> obj(module_object->GetInternalField(kWasmMemArrayBuffer),
-                     isolate);
-
-  if (obj->IsUndefined(isolate)) {
-    // If module object does not have linear memory associated with it,
-    // Allocate new array buffer of given size.
-    old_mem_start = nullptr;
-    old_size = 0;
-    // TODO(gdeepti): Fix bounds check to take into account size of memtype.
-    new_size = delta_pages * wasm::WasmModule::kPageSize;
-    if (delta_pages > wasm::WasmModule::kMaxMemPages) {
-      return *isolate->factory()->NewNumberFromInt(-1);
-    }
-    new_mem_start =
-        static_cast<Address>(isolate->array_buffer_allocator()->Allocate(
-            static_cast<uint32_t>(new_size)));
-    if (new_mem_start == NULL) {
-      return *isolate->factory()->NewNumberFromInt(-1);
-    }
-#if DEBUG
-    // Double check the API allocator actually zero-initialized the memory.
-    for (size_t i = old_size; i < new_size; i++) {
-      DCHECK_EQ(0, new_mem_start[i]);
-    }
-#endif
-  } else {
-    Handle<JSArrayBuffer> old_buffer = Handle<JSArrayBuffer>::cast(obj);
-    old_mem_start = static_cast<Address>(old_buffer->backing_store());
-    old_size = old_buffer->byte_length()->Number();
-    // If the old memory was zero-sized, we should have been in the
-    // "undefined" case above.
-    DCHECK_NOT_NULL(old_mem_start);
-    DCHECK_NE(0, old_size);
-
-    new_size = old_size + delta_pages * wasm::WasmModule::kPageSize;
-    if (new_size >
-        wasm::WasmModule::kMaxMemPages * wasm::WasmModule::kPageSize) {
-      return *isolate->factory()->NewNumberFromInt(-1);
-    }
-    new_mem_start = static_cast<Address>(realloc(old_mem_start, new_size));
-    if (new_mem_start == NULL) {
-      return *isolate->factory()->NewNumberFromInt(-1);
-    }
-    old_buffer->set_is_external(true);
-    isolate->heap()->UnregisterArrayBuffer(*old_buffer);
-    // Zero initializing uninitialized memory from realloc
-    memset(new_mem_start + old_size, 0, new_size - old_size);
+RUNTIME_FUNCTION(Runtime_WasmGrowMemory) {
+  HandleScope scope(isolate);
+  DCHECK_EQ(1, args.length());
+  CONVERT_UINT32_ARG_CHECKED(delta_pages, 0);
+  Handle<JSObject> module_instance;
+  {
+    // Get the module JSObject
+    DisallowHeapAllocation no_allocation;
+    const Address entry = Isolate::c_entry_fp(isolate->thread_local_top());
+    Address pc =
+        Memory::Address_at(entry + StandardFrameConstants::kCallerPCOffset);
+    Code* code =
+        isolate->inner_pointer_to_code_cache()->GetCacheEntry(pc)->code;
+    Object* owning_instance = wasm::GetOwningWasmInstance(code);
+    CHECK_NOT_NULL(owning_instance);
+    module_instance = handle(JSObject::cast(owning_instance), isolate);
   }
-
-  Handle<JSArrayBuffer> buffer = isolate->factory()->NewJSArrayBuffer();
-  JSArrayBuffer::Setup(buffer, isolate, false, new_mem_start, new_size);
-  buffer->set_is_neuterable(false);
-
-  // Set new buffer to be wasm memory
-  module_object->SetInternalField(kWasmMemArrayBuffer, *buffer);
-
-  CHECK(wasm::UpdateWasmModuleMemory(module_object, old_mem_start,
-                                     new_mem_start, old_size, new_size));
-
-  return *isolate->factory()->NewNumberFromInt(old_size /
-                                               wasm::WasmModule::kPageSize);
+  return *isolate->factory()->NewNumberFromInt(
+      wasm::GrowInstanceMemory(isolate, module_instance, delta_pages));
 }
 
 RUNTIME_FUNCTION(Runtime_WasmThrowTypeError) {
@@ -116,5 +66,28 @@
   THROW_NEW_ERROR_RETURN_FAILURE(
       isolate, NewTypeError(MessageTemplate::kWasmTrapTypeError));
 }
+
+RUNTIME_FUNCTION(Runtime_WasmThrow) {
+  HandleScope scope(isolate);
+  DCHECK_EQ(2, args.length());
+  CONVERT_SMI_ARG_CHECKED(lower, 0);
+  CONVERT_SMI_ARG_CHECKED(upper, 1);
+
+  const int32_t thrown_value = (upper << 16) | lower;
+
+  return isolate->Throw(*isolate->factory()->NewNumberFromInt(thrown_value));
+}
+
+RUNTIME_FUNCTION(Runtime_WasmGetCaughtExceptionValue) {
+  HandleScope scope(isolate);
+  DCHECK_EQ(1, args.length());
+  Object* exception = args[0];
+  // The unwinder will only deliver exceptions to wasm if the exception is a
+  // Number or a Smi (which we have just converted to a Number.) This logic
+  // lives in Isolate::is_catchable_by_wasm(Object*).
+  CHECK(exception->IsNumber());
+  return exception;
+}
+
 }  // namespace internal
 }  // namespace v8
diff --git a/src/runtime/runtime.cc b/src/runtime/runtime.cc
index 151e240..9d1cd39 100644
--- a/src/runtime/runtime.cc
+++ b/src/runtime/runtime.cc
@@ -5,6 +5,7 @@
 #include "src/runtime/runtime.h"
 
 #include "src/assembler.h"
+#include "src/base/hashmap.h"
 #include "src/contexts.h"
 #include "src/handles-inl.h"
 #include "src/heap/heap.h"
@@ -57,30 +58,61 @@
 #undef I
 #undef F
 
+namespace {
 
-void Runtime::InitializeIntrinsicFunctionNames(Isolate* isolate,
-                                               Handle<NameDictionary> dict) {
-  DCHECK(dict->NumberOfElements() == 0);
-  HandleScope scope(isolate);
-  for (int i = 0; i < kNumFunctions; ++i) {
-    const char* name = kIntrinsicFunctions[i].name;
-    if (name == NULL) continue;
-    Handle<NameDictionary> new_dict = NameDictionary::Add(
-        dict, isolate->factory()->InternalizeUtf8String(name),
-        Handle<Smi>(Smi::FromInt(i), isolate), PropertyDetails::Empty());
-    // The dictionary does not need to grow.
-    CHECK(new_dict.is_identical_to(dict));
+V8_DECLARE_ONCE(initialize_function_name_map_once);
+static const base::CustomMatcherHashMap* kRuntimeFunctionNameMap;
+
+struct IntrinsicFunctionIdentifier {
+  IntrinsicFunctionIdentifier(const unsigned char* data, const int length)
+      : data_(data), length_(length) {}
+
+  static bool Match(void* key1, void* key2) {
+    const IntrinsicFunctionIdentifier* lhs =
+        static_cast<IntrinsicFunctionIdentifier*>(key1);
+    const IntrinsicFunctionIdentifier* rhs =
+        static_cast<IntrinsicFunctionIdentifier*>(key2);
+    if (lhs->length_ != rhs->length_) return false;
+    return CompareCharsUnsigned(reinterpret_cast<const uint8_t*>(lhs->data_),
+                                reinterpret_cast<const uint8_t*>(rhs->data_),
+                                rhs->length_) == 0;
   }
+
+  uint32_t Hash() {
+    return StringHasher::HashSequentialString<uint8_t>(
+        data_, length_, v8::internal::kZeroHashSeed);
+  }
+
+  const unsigned char* data_;
+  const int length_;
+};
+
+void InitializeIntrinsicFunctionNames() {
+  base::CustomMatcherHashMap* function_name_map =
+      new base::CustomMatcherHashMap(IntrinsicFunctionIdentifier::Match);
+  for (size_t i = 0; i < arraysize(kIntrinsicFunctions); ++i) {
+    const Runtime::Function* function = &kIntrinsicFunctions[i];
+    IntrinsicFunctionIdentifier* identifier = new IntrinsicFunctionIdentifier(
+        reinterpret_cast<const unsigned char*>(function->name),
+        static_cast<int>(strlen(function->name)));
+    base::HashMap::Entry* entry =
+        function_name_map->InsertNew(identifier, identifier->Hash());
+    entry->value = const_cast<Runtime::Function*>(function);
+  }
+  kRuntimeFunctionNameMap = function_name_map;
 }
 
+}  // namespace
 
-const Runtime::Function* Runtime::FunctionForName(Handle<String> name) {
-  Heap* heap = name->GetHeap();
-  int entry = heap->intrinsic_function_names()->FindEntry(name);
-  if (entry != kNotFound) {
-    Object* smi_index = heap->intrinsic_function_names()->ValueAt(entry);
-    int function_index = Smi::cast(smi_index)->value();
-    return &(kIntrinsicFunctions[function_index]);
+const Runtime::Function* Runtime::FunctionForName(const unsigned char* name,
+                                                  int length) {
+  base::CallOnce(&initialize_function_name_map_once,
+                 &InitializeIntrinsicFunctionNames);
+  IntrinsicFunctionIdentifier identifier(name, length);
+  base::HashMap::Entry* entry =
+      kRuntimeFunctionNameMap->Lookup(&identifier, identifier.Hash());
+  if (entry) {
+    return reinterpret_cast<Function*>(entry->value);
   }
   return NULL;
 }
diff --git a/src/runtime/runtime.h b/src/runtime/runtime.h
index 38eb51d..cbdaf0f 100644
--- a/src/runtime/runtime.h
+++ b/src/runtime/runtime.h
@@ -11,7 +11,7 @@
 #include "src/base/platform/time.h"
 #include "src/objects.h"
 #include "src/unicode.h"
-#include "src/zone.h"
+#include "src/zone/zone.h"
 
 namespace v8 {
 namespace internal {
@@ -204,12 +204,10 @@
 #define FOR_EACH_INTRINSIC_ERROR(F) F(ErrorToString, 1, 1)
 
 #define FOR_EACH_INTRINSIC_FORIN(F) \
-  F(ForInDone, 2, 1)                \
   F(ForInEnumerate, 1, 1)           \
   F(ForInFilter, 2, 1)              \
   F(ForInHasProperty, 2, 1)         \
-  F(ForInNext, 4, 1)                \
-  F(ForInStep, 1, 1)
+  F(ForInNext, 4, 1)
 
 #define FOR_EACH_INTRINSIC_INTERPRETER(F) \
   F(InterpreterNewClosure, 2, 1)          \
@@ -262,6 +260,7 @@
   F(GetImplFromInitializedIntlObject, 1, 1)  \
   F(CreateDateTimeFormat, 3, 1)              \
   F(InternalDateFormat, 2, 1)                \
+  F(InternalDateFormatToParts, 2, 1)         \
   F(InternalDateParse, 2, 1)                 \
   F(CreateNumberFormat, 3, 1)                \
   F(InternalNumberFormat, 2, 1)              \
@@ -291,6 +290,7 @@
   F(CheckIsBootstrapping, 0, 1)                     \
   F(CreateListFromArrayLike, 1, 1)                  \
   F(EnqueueMicrotask, 1, 1)                         \
+  F(EnqueuePromiseResolveThenableJob, 6, 1)         \
   F(GetAndResetRuntimeCallStats, -1 /* <= 2 */, 1)  \
   F(ExportExperimentalFromRuntime, 1, 1)            \
   F(ExportFromRuntime, 1, 1)                        \
@@ -304,6 +304,7 @@
   F(NewTypeError, 2, 1)                             \
   F(OrdinaryHasInstance, 2, 1)                      \
   F(PromiseRejectEvent, 3, 1)                       \
+  F(PromiseRejectEventFromStack, 2, 1)              \
   F(PromiseRevokeReject, 1, 1)                      \
   F(PromoteScheduledException, 0, 1)                \
   F(ReThrow, 1, 1)                                  \
@@ -394,6 +395,7 @@
   F(IsJSGlobalProxy, 1, 1)                           \
   F(DefineAccessorPropertyUnchecked, 5, 1)           \
   F(DefineDataPropertyInLiteral, 5, 1)               \
+  F(DefineDataProperty, 5, 1)                        \
   F(GetDataProperty, 2, 1)                           \
   F(GetConstructorName, 1, 1)                        \
   F(HasFastPackedElements, 1, 1)                     \
@@ -416,7 +418,10 @@
   F(HasInPrototypeChain, 2, 1)                       \
   F(CreateIterResultObject, 2, 1)                    \
   F(IsAccessCheckNeeded, 1, 1)                       \
-  F(CreateDataProperty, 3, 1)
+  F(CreateDataProperty, 3, 1)                        \
+  F(LoadModuleExport, 1, 1)                          \
+  F(LoadModuleImport, 2, 1)                          \
+  F(StoreModuleExport, 2, 1)
 
 #define FOR_EACH_INTRINSIC_OPERATORS(F) \
   F(Multiply, 2, 1)                     \
@@ -475,8 +480,9 @@
   F(NewClosure_Tenured, 1, 1)           \
   F(NewScriptContext, 2, 1)             \
   F(NewFunctionContext, 1, 1)           \
-  F(PushWithContext, 2, 1)              \
-  F(PushCatchContext, 3, 1)             \
+  F(PushModuleContext, 3, 1)            \
+  F(PushWithContext, 3, 1)              \
+  F(PushCatchContext, 4, 1)             \
   F(PushBlockContext, 2, 1)             \
   F(DeleteLookupSlot, 1, 1)             \
   F(LoadLookupSlot, 1, 1)               \
@@ -797,8 +803,7 @@
 #define FOR_EACH_INTRINSIC_STRINGS(F)     \
   F(StringReplaceOneCharWithString, 3, 1) \
   F(StringIndexOf, 3, 1)                  \
-  F(StringLastIndexOf, 3, 1)              \
-  F(StringLocaleCompare, 2, 1)            \
+  F(StringLastIndexOf, 2, 1)              \
   F(SubString, 3, 1)                      \
   F(StringAdd, 2, 1)                      \
   F(InternalizeString, 1, 1)              \
@@ -888,7 +893,10 @@
   F(SerializeWasmModule, 1, 1)                \
   F(DeserializeWasmModule, 1, 1)              \
   F(IsAsmWasmCode, 1, 1)                      \
-  F(IsNotAsmWasmCode, 1, 1)
+  F(IsNotAsmWasmCode, 1, 1)                   \
+  F(ValidateWasmInstancesChain, 2, 1)         \
+  F(ValidateWasmModuleState, 1, 1)            \
+  F(ValidateWasmOrphanedInstance, 1, 1)
 
 #define FOR_EACH_INTRINSIC_TYPEDARRAY(F)     \
   F(ArrayBufferGetByteLength, 1, 1)          \
@@ -905,27 +913,14 @@
   F(IsTypedArray, 1, 1)                      \
   F(IsSharedTypedArray, 1, 1)                \
   F(IsSharedIntegerTypedArray, 1, 1)         \
-  F(IsSharedInteger32TypedArray, 1, 1)       \
-  F(DataViewGetUint8, 3, 1)                  \
-  F(DataViewGetInt8, 3, 1)                   \
-  F(DataViewGetUint16, 3, 1)                 \
-  F(DataViewGetInt16, 3, 1)                  \
-  F(DataViewGetUint32, 3, 1)                 \
-  F(DataViewGetInt32, 3, 1)                  \
-  F(DataViewGetFloat32, 3, 1)                \
-  F(DataViewGetFloat64, 3, 1)                \
-  F(DataViewSetUint8, 4, 1)                  \
-  F(DataViewSetInt8, 4, 1)                   \
-  F(DataViewSetUint16, 4, 1)                 \
-  F(DataViewSetInt16, 4, 1)                  \
-  F(DataViewSetUint32, 4, 1)                 \
-  F(DataViewSetInt32, 4, 1)                  \
-  F(DataViewSetFloat32, 4, 1)                \
-  F(DataViewSetFloat64, 4, 1)
+  F(IsSharedInteger32TypedArray, 1, 1)
 
 #define FOR_EACH_INTRINSIC_WASM(F) \
   F(WasmGrowMemory, 1, 1)          \
-  F(WasmThrowTypeError, 0, 1)
+  F(WasmMemorySize, 0, 1)          \
+  F(WasmThrowTypeError, 0, 1)      \
+  F(WasmThrow, 2, 1)               \
+  F(WasmGetCaughtExceptionValue, 1, 1)
 
 #define FOR_EACH_INTRINSIC_RETURN_PAIR(F) \
   F(LoadLookupSlotForCall, 1, 2)
@@ -935,30 +930,26 @@
 
 // Most intrinsics are implemented in the runtime/ directory, but ICs are
 // implemented in ic.cc for now.
-#define FOR_EACH_INTRINSIC_IC(F)                 \
-  F(BinaryOpIC_Miss, 2, 1)                       \
-  F(BinaryOpIC_MissWithAllocationSite, 3, 1)     \
-  F(CallIC_Miss, 3, 1)                           \
-  F(CompareIC_Miss, 3, 1)                        \
-  F(ElementsTransitionAndStoreIC_Miss, 5, 1)     \
-  F(KeyedLoadIC_Miss, 4, 1)                      \
-  F(KeyedLoadIC_MissFromStubFailure, 4, 1)       \
-  F(KeyedStoreIC_Miss, 5, 1)                     \
-  F(KeyedStoreIC_MissFromStubFailure, 5, 1)      \
-  F(KeyedStoreIC_Slow, 5, 1)                     \
-  F(LoadElementWithInterceptor, 2, 1)            \
-  F(LoadGlobalIC_Miss, 2, 1)                     \
-  F(LoadGlobalIC_Slow, 2, 1)                     \
-  F(LoadIC_Miss, 4, 1)                           \
-  F(LoadIC_MissFromStubFailure, 4, 1)            \
-  F(LoadPropertyWithInterceptor, 3, 1)           \
-  F(LoadPropertyWithInterceptorOnly, 3, 1)       \
-  F(StoreCallbackProperty, 6, 1)                 \
-  F(StoreIC_Miss, 5, 1)                          \
-  F(StoreIC_MissFromStubFailure, 5, 1)           \
-  F(TransitionStoreIC_MissFromStubFailure, 6, 1) \
-  F(StorePropertyWithInterceptor, 3, 1)          \
-  F(ToBooleanIC_Miss, 1, 1)                      \
+#define FOR_EACH_INTRINSIC_IC(F)             \
+  F(BinaryOpIC_Miss, 2, 1)                   \
+  F(BinaryOpIC_MissWithAllocationSite, 3, 1) \
+  F(CallIC_Miss, 3, 1)                       \
+  F(CompareIC_Miss, 3, 1)                    \
+  F(ElementsTransitionAndStoreIC_Miss, 6, 1) \
+  F(KeyedLoadIC_Miss, 4, 1)                  \
+  F(KeyedLoadIC_MissFromStubFailure, 4, 1)   \
+  F(KeyedStoreIC_Miss, 5, 1)                 \
+  F(KeyedStoreIC_Slow, 5, 1)                 \
+  F(LoadElementWithInterceptor, 2, 1)        \
+  F(LoadGlobalIC_Miss, 2, 1)                 \
+  F(LoadGlobalIC_Slow, 2, 1)                 \
+  F(LoadIC_Miss, 4, 1)                       \
+  F(LoadPropertyWithInterceptor, 3, 1)       \
+  F(LoadPropertyWithInterceptorOnly, 3, 1)   \
+  F(StoreCallbackProperty, 6, 1)             \
+  F(StoreIC_Miss, 5, 1)                      \
+  F(StorePropertyWithInterceptor, 3, 1)      \
+  F(ToBooleanIC_Miss, 1, 1)                  \
   F(Unreachable, 0, 1)
 
 #define FOR_EACH_INTRINSIC_RETURN_OBJECT(F) \
@@ -1044,13 +1035,8 @@
 
   static const int kNotFound = -1;
 
-  // Add internalized strings for all the intrinsic function names to a
-  // StringDictionary.
-  static void InitializeIntrinsicFunctionNames(Isolate* isolate,
-                                               Handle<NameDictionary> dict);
-
-  // Get the intrinsic function with the given name, which must be internalized.
-  static const Function* FunctionForName(Handle<String> name);
+  // Get the intrinsic function with the given name.
+  static const Function* FunctionForName(const unsigned char* name, int length);
 
   // Get the intrinsic function with the given FunctionId.
   static const Function* FunctionForId(FunctionId id);
diff --git a/src/s390/code-stubs-s390.cc b/src/s390/code-stubs-s390.cc
index ce80384..b1bf02d 100644
--- a/src/s390/code-stubs-s390.cc
+++ b/src/s390/code-stubs-s390.cc
@@ -1726,7 +1726,6 @@
   // r4 : feedback vector
   // r5 : slot in feedback vector (Smi)
   Label initialize, done, miss, megamorphic, not_array_function;
-  Label done_initialize_count, done_increment_count;
 
   DCHECK_EQ(*TypeFeedbackVector::MegamorphicSentinel(masm->isolate()),
             masm->isolate()->heap()->megamorphic_symbol());
@@ -1749,7 +1748,7 @@
   Register weak_value = r9;
   __ LoadP(weak_value, FieldMemOperand(r7, WeakCell::kValueOffset));
   __ CmpP(r3, weak_value);
-  __ beq(&done_increment_count, Label::kNear);
+  __ beq(&done, Label::kNear);
   __ CompareRoot(r7, Heap::kmegamorphic_symbolRootIndex);
   __ beq(&done, Label::kNear);
   __ LoadP(feedback_map, FieldMemOperand(r7, HeapObject::kMapOffset));
@@ -1772,7 +1771,7 @@
   __ LoadNativeContextSlot(Context::ARRAY_FUNCTION_INDEX, r7);
   __ CmpP(r3, r7);
   __ bne(&megamorphic);
-  __ b(&done_increment_count, Label::kNear);
+  __ b(&done, Label::kNear);
 
   __ bind(&miss);
 
@@ -1802,32 +1801,22 @@
   // slot.
   CreateAllocationSiteStub create_stub(masm->isolate());
   CallStubInRecordCallTarget(masm, &create_stub);
-  __ b(&done_initialize_count, Label::kNear);
+  __ b(&done, Label::kNear);
 
   __ bind(&not_array_function);
 
   CreateWeakCellStub weak_cell_stub(masm->isolate());
   CallStubInRecordCallTarget(masm, &weak_cell_stub);
 
-  __ bind(&done_initialize_count);
-  // Initialize the call counter.
-  __ LoadSmiLiteral(r7, Smi::FromInt(1));
-  __ SmiToPtrArrayOffset(r6, r5);
-  __ AddP(r6, r4, r6);
-  __ StoreP(r7, FieldMemOperand(r6, count_offset), r0);
-  __ b(&done, Label::kNear);
+  __ bind(&done);
 
-  __ bind(&done_increment_count);
-
-  // Increment the call count for monomorphic function calls.
+  // Increment the call count for all function calls.
   __ SmiToPtrArrayOffset(r7, r5);
   __ AddP(r7, r4, r7);
 
   __ LoadP(r6, FieldMemOperand(r7, count_offset));
   __ AddSmiLiteral(r6, r6, Smi::FromInt(1), r0);
   __ StoreP(r6, FieldMemOperand(r7, count_offset), r0);
-
-  __ bind(&done);
 }
 
 void CallConstructStub::Generate(MacroAssembler* masm) {
@@ -1873,6 +1862,17 @@
   __ Jump(isolate()->builtins()->Construct(), RelocInfo::CODE_TARGET);
 }
 
+// Note: feedback_vector and slot are clobbered after the call.
+static void IncrementCallCount(MacroAssembler* masm, Register feedback_vector,
+                               Register slot, Register temp) {
+  const int count_offset = FixedArray::kHeaderSize + kPointerSize;
+  __ SmiToPtrArrayOffset(temp, slot);
+  __ AddP(feedback_vector, feedback_vector, temp);
+  __ LoadP(slot, FieldMemOperand(feedback_vector, count_offset));
+  __ AddSmiLiteral(slot, slot, Smi::FromInt(1), temp);
+  __ StoreP(slot, FieldMemOperand(feedback_vector, count_offset), temp);
+}
+
 void CallICStub::HandleArrayCase(MacroAssembler* masm, Label* miss) {
   // r3 - function
   // r5 - slot id
@@ -1885,12 +1885,7 @@
   __ mov(r2, Operand(arg_count()));
 
   // Increment the call count for monomorphic function calls.
-  const int count_offset = FixedArray::kHeaderSize + kPointerSize;
-  __ SmiToPtrArrayOffset(r7, r5);
-  __ AddP(r4, r4, r7);
-  __ LoadP(r5, FieldMemOperand(r4, count_offset));
-  __ AddSmiLiteral(r5, r5, Smi::FromInt(1), r0);
-  __ StoreP(r5, FieldMemOperand(r4, count_offset), r0);
+  IncrementCallCount(masm, r4, r5, r1);
 
   __ LoadRR(r4, r6);
   __ LoadRR(r5, r3);
@@ -1902,7 +1897,7 @@
   // r3 - function
   // r5 - slot id (Smi)
   // r4 - vector
-  Label extra_checks_or_miss, call, call_function;
+  Label extra_checks_or_miss, call, call_function, call_count_incremented;
   int argc = arg_count();
   ParameterCount actual(argc);
 
@@ -1933,13 +1928,11 @@
   // convincing us that we have a monomorphic JSFunction.
   __ JumpIfSmi(r3, &extra_checks_or_miss);
 
-  // Increment the call count for monomorphic function calls.
-  const int count_offset = FixedArray::kHeaderSize + kPointerSize;
-  __ LoadP(r5, FieldMemOperand(r8, count_offset));
-  __ AddSmiLiteral(r5, r5, Smi::FromInt(1), r0);
-  __ StoreP(r5, FieldMemOperand(r8, count_offset), r0);
-
   __ bind(&call_function);
+
+  // Increment the call count for monomorphic function calls.
+  IncrementCallCount(masm, r4, r5, r1);
+
   __ mov(r2, Operand(argc));
   __ Jump(masm->isolate()->builtins()->CallFunction(convert_mode(),
                                                     tail_call_mode()),
@@ -1979,6 +1972,11 @@
   __ StoreP(ip, FieldMemOperand(r8, FixedArray::kHeaderSize), r0);
 
   __ bind(&call);
+
+  // Increment the call count for megamorphic function calls.
+  IncrementCallCount(masm, r4, r5, r1);
+
+  __ bind(&call_count_incremented);
   __ mov(r2, Operand(argc));
   __ Jump(masm->isolate()->builtins()->Call(convert_mode(), tail_call_mode()),
           RelocInfo::CODE_TARGET);
@@ -2005,10 +2003,6 @@
   __ CmpP(r6, ip);
   __ bne(&miss);
 
-  // Initialize the call counter.
-  __ LoadSmiLiteral(r7, Smi::FromInt(1));
-  __ StoreP(r7, FieldMemOperand(r8, count_offset), r0);
-
   // Store the function. Use a stub since we need a frame for allocation.
   // r4 - vector
   // r5 - slot
@@ -2016,9 +2010,13 @@
   {
     FrameScope scope(masm, StackFrame::INTERNAL);
     CreateWeakCellStub create_stub(masm->isolate());
+    __ Push(r4);
+    __ Push(r5);
     __ Push(cp, r3);
     __ CallStub(&create_stub);
     __ Pop(cp, r3);
+    __ Pop(r5);
+    __ Pop(r4);
   }
 
   __ b(&call_function);
@@ -2028,7 +2026,7 @@
   __ bind(&miss);
   GenerateMiss(masm);
 
-  __ b(&call);
+  __ b(&call_count_incremented);
 }
 
 void CallICStub::GenerateMiss(MacroAssembler* masm) {
@@ -2204,297 +2202,6 @@
   __ bind(&done);
 }
 
-void SubStringStub::Generate(MacroAssembler* masm) {
-  Label runtime;
-
-  // Stack frame on entry.
-  //  lr: return address
-  //  sp[0]: to
-  //  sp[4]: from
-  //  sp[8]: string
-
-  // This stub is called from the native-call %_SubString(...), so
-  // nothing can be assumed about the arguments. It is tested that:
-  //  "string" is a sequential string,
-  //  both "from" and "to" are smis, and
-  //  0 <= from <= to <= string.length.
-  // If any of these assumptions fail, we call the runtime system.
-
-  const int kToOffset = 0 * kPointerSize;
-  const int kFromOffset = 1 * kPointerSize;
-  const int kStringOffset = 2 * kPointerSize;
-
-  __ LoadP(r4, MemOperand(sp, kToOffset));
-  __ LoadP(r5, MemOperand(sp, kFromOffset));
-
-  // If either to or from had the smi tag bit set, then fail to generic runtime
-  __ JumpIfNotSmi(r4, &runtime);
-  __ JumpIfNotSmi(r5, &runtime);
-  __ SmiUntag(r4);
-  __ SmiUntag(r5);
-  // Both r4 and r5 are untagged integers.
-
-  // We want to bailout to runtime here if From is negative.
-  __ blt(&runtime);  // From < 0.
-
-  __ CmpLogicalP(r5, r4);
-  __ bgt(&runtime);  // Fail if from > to.
-  __ SubP(r4, r4, r5);
-
-  // Make sure first argument is a string.
-  __ LoadP(r2, MemOperand(sp, kStringOffset));
-  __ JumpIfSmi(r2, &runtime);
-  Condition is_string = masm->IsObjectStringType(r2, r3);
-  __ b(NegateCondition(is_string), &runtime);
-
-  Label single_char;
-  __ CmpP(r4, Operand(1));
-  __ b(eq, &single_char);
-
-  // Short-cut for the case of trivial substring.
-  Label return_r2;
-  // r2: original string
-  // r4: result string length
-  __ LoadP(r6, FieldMemOperand(r2, String::kLengthOffset));
-  __ SmiUntag(r0, r6);
-  __ CmpLogicalP(r4, r0);
-  // Return original string.
-  __ beq(&return_r2);
-  // Longer than original string's length or negative: unsafe arguments.
-  __ bgt(&runtime);
-  // Shorter than original string's length: an actual substring.
-
-  // Deal with different string types: update the index if necessary
-  // and put the underlying string into r7.
-  // r2: original string
-  // r3: instance type
-  // r4: length
-  // r5: from index (untagged)
-  Label underlying_unpacked, sliced_string, seq_or_external_string;
-  // If the string is not indirect, it can only be sequential or external.
-  STATIC_ASSERT(kIsIndirectStringMask == (kSlicedStringTag & kConsStringTag));
-  STATIC_ASSERT(kIsIndirectStringMask != 0);
-  __ mov(r0, Operand(kIsIndirectStringMask));
-  __ AndP(r0, r3);
-  __ beq(&seq_or_external_string);
-
-  __ mov(r0, Operand(kSlicedNotConsMask));
-  __ AndP(r0, r3);
-  __ bne(&sliced_string);
-  // Cons string.  Check whether it is flat, then fetch first part.
-  __ LoadP(r7, FieldMemOperand(r2, ConsString::kSecondOffset));
-  __ CompareRoot(r7, Heap::kempty_stringRootIndex);
-  __ bne(&runtime);
-  __ LoadP(r7, FieldMemOperand(r2, ConsString::kFirstOffset));
-  // Update instance type.
-  __ LoadP(r3, FieldMemOperand(r7, HeapObject::kMapOffset));
-  __ LoadlB(r3, FieldMemOperand(r3, Map::kInstanceTypeOffset));
-  __ b(&underlying_unpacked);
-
-  __ bind(&sliced_string);
-  // Sliced string.  Fetch parent and correct start index by offset.
-  __ LoadP(r7, FieldMemOperand(r2, SlicedString::kParentOffset));
-  __ LoadP(r6, FieldMemOperand(r2, SlicedString::kOffsetOffset));
-  __ SmiUntag(r3, r6);
-  __ AddP(r5, r3);  // Add offset to index.
-  // Update instance type.
-  __ LoadP(r3, FieldMemOperand(r7, HeapObject::kMapOffset));
-  __ LoadlB(r3, FieldMemOperand(r3, Map::kInstanceTypeOffset));
-  __ b(&underlying_unpacked);
-
-  __ bind(&seq_or_external_string);
-  // Sequential or external string.  Just move string to the expected register.
-  __ LoadRR(r7, r2);
-
-  __ bind(&underlying_unpacked);
-
-  if (FLAG_string_slices) {
-    Label copy_routine;
-    // r7: underlying subject string
-    // r3: instance type of underlying subject string
-    // r4: length
-    // r5: adjusted start index (untagged)
-    __ CmpP(r4, Operand(SlicedString::kMinLength));
-    // Short slice.  Copy instead of slicing.
-    __ blt(&copy_routine);
-    // Allocate new sliced string.  At this point we do not reload the instance
-    // type including the string encoding because we simply rely on the info
-    // provided by the original string.  It does not matter if the original
-    // string's encoding is wrong because we always have to recheck encoding of
-    // the newly created string's parent anyways due to externalized strings.
-    Label two_byte_slice, set_slice_header;
-    STATIC_ASSERT((kStringEncodingMask & kOneByteStringTag) != 0);
-    STATIC_ASSERT((kStringEncodingMask & kTwoByteStringTag) == 0);
-    __ mov(r0, Operand(kStringEncodingMask));
-    __ AndP(r0, r3);
-    __ beq(&two_byte_slice);
-    __ AllocateOneByteSlicedString(r2, r4, r8, r9, &runtime);
-    __ b(&set_slice_header);
-    __ bind(&two_byte_slice);
-    __ AllocateTwoByteSlicedString(r2, r4, r8, r9, &runtime);
-    __ bind(&set_slice_header);
-    __ SmiTag(r5);
-    __ StoreP(r7, FieldMemOperand(r2, SlicedString::kParentOffset));
-    __ StoreP(r5, FieldMemOperand(r2, SlicedString::kOffsetOffset));
-    __ b(&return_r2);
-
-    __ bind(&copy_routine);
-  }
-
-  // r7: underlying subject string
-  // r3: instance type of underlying subject string
-  // r4: length
-  // r5: adjusted start index (untagged)
-  Label two_byte_sequential, sequential_string, allocate_result;
-  STATIC_ASSERT(kExternalStringTag != 0);
-  STATIC_ASSERT(kSeqStringTag == 0);
-  __ mov(r0, Operand(kExternalStringTag));
-  __ AndP(r0, r3);
-  __ beq(&sequential_string);
-
-  // Handle external string.
-  // Rule out short external strings.
-  STATIC_ASSERT(kShortExternalStringTag != 0);
-  __ mov(r0, Operand(kShortExternalStringTag));
-  __ AndP(r0, r3);
-  __ bne(&runtime);
-  __ LoadP(r7, FieldMemOperand(r7, ExternalString::kResourceDataOffset));
-  // r7 already points to the first character of underlying string.
-  __ b(&allocate_result);
-
-  __ bind(&sequential_string);
-  // Locate first character of underlying subject string.
-  STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqOneByteString::kHeaderSize);
-  __ AddP(r7, Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
-
-  __ bind(&allocate_result);
-  // Sequential acii string.  Allocate the result.
-  STATIC_ASSERT((kOneByteStringTag & kStringEncodingMask) != 0);
-  __ mov(r0, Operand(kStringEncodingMask));
-  __ AndP(r0, r3);
-  __ beq(&two_byte_sequential);
-
-  // Allocate and copy the resulting one-byte string.
-  __ AllocateOneByteString(r2, r4, r6, r8, r9, &runtime);
-
-  // Locate first character of substring to copy.
-  __ AddP(r7, r5);
-  // Locate first character of result.
-  __ AddP(r3, r2, Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
-
-  // r2: result string
-  // r3: first character of result string
-  // r4: result string length
-  // r7: first character of substring to copy
-  STATIC_ASSERT((SeqOneByteString::kHeaderSize & kObjectAlignmentMask) == 0);
-  StringHelper::GenerateCopyCharacters(masm, r3, r7, r4, r5,
-                                       String::ONE_BYTE_ENCODING);
-  __ b(&return_r2);
-
-  // Allocate and copy the resulting two-byte string.
-  __ bind(&two_byte_sequential);
-  __ AllocateTwoByteString(r2, r4, r6, r8, r9, &runtime);
-
-  // Locate first character of substring to copy.
-  __ ShiftLeftP(r3, r5, Operand(1));
-  __ AddP(r7, r3);
-  // Locate first character of result.
-  __ AddP(r3, r2, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
-
-  // r2: result string.
-  // r3: first character of result.
-  // r4: result length.
-  // r7: first character of substring to copy.
-  STATIC_ASSERT((SeqTwoByteString::kHeaderSize & kObjectAlignmentMask) == 0);
-  StringHelper::GenerateCopyCharacters(masm, r3, r7, r4, r5,
-                                       String::TWO_BYTE_ENCODING);
-
-  __ bind(&return_r2);
-  Counters* counters = isolate()->counters();
-  __ IncrementCounter(counters->sub_string_native(), 1, r5, r6);
-  __ Drop(3);
-  __ Ret();
-
-  // Just jump to runtime to create the sub string.
-  __ bind(&runtime);
-  __ TailCallRuntime(Runtime::kSubString);
-
-  __ bind(&single_char);
-  // r2: original string
-  // r3: instance type
-  // r4: length
-  // r5: from index (untagged)
-  __ SmiTag(r5, r5);
-  StringCharAtGenerator generator(r2, r5, r4, r2, &runtime, &runtime, &runtime,
-                                  RECEIVER_IS_STRING);
-  generator.GenerateFast(masm);
-  __ Drop(3);
-  __ Ret();
-  generator.SkipSlow(masm, &runtime);
-}
-
-void ToStringStub::Generate(MacroAssembler* masm) {
-  // The ToString stub takes one argument in r2.
-  Label done;
-  Label is_number;
-  __ JumpIfSmi(r2, &is_number);
-
-  __ CompareObjectType(r2, r3, r3, FIRST_NONSTRING_TYPE);
-  // r2: receiver
-  // r3: receiver instance type
-  __ blt(&done);
-
-  Label not_heap_number;
-  __ CmpP(r3, Operand(HEAP_NUMBER_TYPE));
-  __ bne(&not_heap_number);
-  __ bind(&is_number);
-  NumberToStringStub stub(isolate());
-  __ TailCallStub(&stub);
-  __ bind(&not_heap_number);
-
-  Label not_oddball;
-  __ CmpP(r3, Operand(ODDBALL_TYPE));
-  __ bne(&not_oddball);
-  __ LoadP(r2, FieldMemOperand(r2, Oddball::kToStringOffset));
-  __ Ret();
-  __ bind(&not_oddball);
-
-  __ push(r2);  // Push argument.
-  __ TailCallRuntime(Runtime::kToString);
-
-  __ bind(&done);
-  __ Ret();
-}
-
-void ToNameStub::Generate(MacroAssembler* masm) {
-  // The ToName stub takes one argument in r2.
-  Label is_number;
-  __ JumpIfSmi(r2, &is_number);
-
-  STATIC_ASSERT(FIRST_NAME_TYPE == FIRST_TYPE);
-  __ CompareObjectType(r2, r3, r3, LAST_NAME_TYPE);
-  // r2: receiver
-  // r3: receiver instance type
-  __ Ret(le);
-
-  Label not_heap_number;
-  __ CmpP(r3, Operand(HEAP_NUMBER_TYPE));
-  __ bne(&not_heap_number);
-  __ bind(&is_number);
-  NumberToStringStub stub(isolate());
-  __ TailCallStub(&stub);
-  __ bind(&not_heap_number);
-
-  Label not_oddball;
-  __ CmpP(r3, Operand(ODDBALL_TYPE));
-  __ bne(&not_oddball);
-  __ LoadP(r2, FieldMemOperand(r2, Oddball::kToStringOffset));
-  __ Ret();
-  __ bind(&not_oddball);
-
-  __ push(r2);  // Push argument.
-  __ TailCallRuntime(Runtime::kToName);
-}
 
 void StringHelper::GenerateFlatOneByteStringEquals(MacroAssembler* masm,
                                                    Register left,
@@ -3357,18 +3064,6 @@
   Label need_incremental;
   Label need_incremental_pop_scratch;
 
-  DCHECK((~Page::kPageAlignmentMask & 0xffff) == 0);
-  __ AndP(regs_.scratch0(), regs_.object(), Operand(~Page::kPageAlignmentMask));
-  __ LoadP(
-      regs_.scratch1(),
-      MemOperand(regs_.scratch0(), MemoryChunk::kWriteBarrierCounterOffset));
-  __ SubP(regs_.scratch1(), regs_.scratch1(), Operand(1));
-  __ StoreP(
-      regs_.scratch1(),
-      MemOperand(regs_.scratch0(), MemoryChunk::kWriteBarrierCounterOffset));
-  __ CmpP(regs_.scratch1(), Operand::Zero());  // S390, we could do better here
-  __ blt(&need_incremental);
-
   // Let's look at the color of the object:  If it is not black we don't have
   // to inform the incremental marker.
   __ JumpIfBlack(regs_.object(), regs_.scratch0(), regs_.scratch1(), &on_black);
@@ -3785,7 +3480,7 @@
   __ LoadP(receiver_map, MemOperand(pointer_reg, kPointerSize * 2));
 
   // Load the map into the correct register.
-  DCHECK(feedback.is(VectorStoreTransitionDescriptor::MapRegister()));
+  DCHECK(feedback.is(StoreTransitionDescriptor::MapRegister()));
   __ LoadRR(feedback, too_far);
 
   __ AddP(ip, receiver_map, Operand(Code::kHeaderSize - kHeapObjectTag));
@@ -4521,7 +4216,7 @@
     // Fall back to %AllocateInNewSpace (if not too big).
     Label too_big_for_new_space;
     __ bind(&allocate);
-    __ CmpP(r9, Operand(Page::kMaxRegularHeapObjectSize));
+    __ CmpP(r9, Operand(kMaxRegularHeapObjectSize));
     __ bgt(&too_big_for_new_space);
     {
       FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
@@ -4896,7 +4591,7 @@
   // Fall back to %AllocateInNewSpace (if not too big).
   Label too_big_for_new_space;
   __ bind(&allocate);
-  __ CmpP(r9, Operand(Page::kMaxRegularHeapObjectSize));
+  __ CmpP(r9, Operand(kMaxRegularHeapObjectSize));
   __ bgt(&too_big_for_new_space);
   {
     FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
diff --git a/src/s390/interface-descriptors-s390.cc b/src/s390/interface-descriptors-s390.cc
index 4cdcd54..ca40a0c 100644
--- a/src/s390/interface-descriptors-s390.cc
+++ b/src/s390/interface-descriptors-s390.cc
@@ -38,11 +38,9 @@
 
 const Register StoreWithVectorDescriptor::VectorRegister() { return r5; }
 
-const Register VectorStoreTransitionDescriptor::SlotRegister() { return r6; }
-const Register VectorStoreTransitionDescriptor::VectorRegister() { return r5; }
-const Register VectorStoreTransitionDescriptor::MapRegister() { return r7; }
-
-const Register StoreTransitionDescriptor::MapRegister() { return r5; }
+const Register StoreTransitionDescriptor::SlotRegister() { return r6; }
+const Register StoreTransitionDescriptor::VectorRegister() { return r5; }
+const Register StoreTransitionDescriptor::MapRegister() { return r7; }
 
 const Register StoreGlobalViaContextDescriptor::SlotRegister() { return r4; }
 const Register StoreGlobalViaContextDescriptor::ValueRegister() { return r2; }
@@ -324,7 +322,7 @@
   data->InitializePlatformSpecific(arraysize(registers), registers);
 }
 
-void ApiCallbackDescriptorBase::InitializePlatformSpecific(
+void ApiCallbackDescriptor::InitializePlatformSpecific(
     CallInterfaceDescriptorData* data) {
   Register registers[] = {
       r2,  // callee
@@ -359,7 +357,19 @@
       r2,  // argument count (not including receiver)
       r5,  // new target
       r3,  // constructor to call
-      r4   // address of the first argument
+      r4,  // allocation site feedback if available, undefined otherwise
+      r6   // address of the first argument
+  };
+  data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
+void InterpreterPushArgsAndConstructArrayDescriptor::InitializePlatformSpecific(
+    CallInterfaceDescriptorData* data) {
+  Register registers[] = {
+      r2,  // argument count (not including receiver)
+      r3,  // target to call checked to be Array function
+      r4,  // allocation site feedback if available, undefined otherwise
+      r5   // address of the first argument
   };
   data->InitializePlatformSpecific(arraysize(registers), registers);
 }
diff --git a/src/s390/macro-assembler-s390.cc b/src/s390/macro-assembler-s390.cc
index 8b708de..769d3dc 100644
--- a/src/s390/macro-assembler-s390.cc
+++ b/src/s390/macro-assembler-s390.cc
@@ -251,10 +251,7 @@
 void MacroAssembler::InNewSpace(Register object, Register scratch,
                                 Condition cond, Label* branch) {
   DCHECK(cond == eq || cond == ne);
-  // TODO(joransiu): check if we can merge mov Operand into AndP.
-  const int mask =
-      (1 << MemoryChunk::IN_FROM_SPACE) | (1 << MemoryChunk::IN_TO_SPACE);
-  CheckPageFlag(object, scratch, mask, cond, branch);
+  CheckPageFlag(object, scratch, MemoryChunk::kIsInNewSpaceMask, cond, branch);
 }
 
 void MacroAssembler::RecordWriteField(
@@ -1709,7 +1706,7 @@
 void MacroAssembler::Allocate(int object_size, Register result,
                               Register scratch1, Register scratch2,
                               Label* gc_required, AllocationFlags flags) {
-  DCHECK(object_size <= Page::kMaxRegularHeapObjectSize);
+  DCHECK(object_size <= kMaxRegularHeapObjectSize);
   DCHECK((flags & ALLOCATION_FOLDED) == 0);
   if (!FLAG_inline_new) {
     if (emit_debug_code()) {
@@ -1965,7 +1962,7 @@
 void MacroAssembler::FastAllocate(int object_size, Register result,
                                   Register scratch1, Register scratch2,
                                   AllocationFlags flags) {
-  DCHECK(object_size <= Page::kMaxRegularHeapObjectSize);
+  DCHECK(object_size <= kMaxRegularHeapObjectSize);
   DCHECK(!AreAliased(result, scratch1, scratch2, ip));
 
   // Make object size into bytes.
diff --git a/src/s390/macro-assembler-s390.h b/src/s390/macro-assembler-s390.h
index b8ed3a0..7f2d042 100644
--- a/src/s390/macro-assembler-s390.h
+++ b/src/s390/macro-assembler-s390.h
@@ -194,6 +194,18 @@
   void Ret() { b(r14); }
   void Ret(Condition cond) { b(cond, r14); }
 
+  // Emit code that loads |parameter_index|'th parameter from the stack to
+  // the register according to the CallInterfaceDescriptor definition.
+  // |sp_to_ra_offset_in_words| specifies the number of words pushed
+  // below the caller's sp.
+  template <class Descriptor>
+  void LoadParameterFromStack(
+      Register reg, typename Descriptor::ParameterIndices parameter_index,
+      int sp_to_ra_offset_in_words = 0) {
+    DCHECK(Descriptor::kPassLastArgsOnStack);
+    UNIMPLEMENTED();
+  }
+
   // Emit code to discard a non-negative number of pointer-sized elements
   // from the stack, clobbering only the sp register.
   void Drop(int count);
diff --git a/src/s390/simulator-s390.cc b/src/s390/simulator-s390.cc
index 91db782..78bc939 100644
--- a/src/s390/simulator-s390.cc
+++ b/src/s390/simulator-s390.cc
@@ -660,8 +660,8 @@
   last_debugger_input_ = input;
 }
 
-void Simulator::FlushICache(base::HashMap* i_cache, void* start_addr,
-                            size_t size) {
+void Simulator::FlushICache(base::CustomMatcherHashMap* i_cache,
+                            void* start_addr, size_t size) {
   intptr_t start = reinterpret_cast<intptr_t>(start_addr);
   int intra_line = (start & CachePage::kLineMask);
   start -= intra_line;
@@ -681,7 +681,8 @@
   }
 }
 
-CachePage* Simulator::GetCachePage(base::HashMap* i_cache, void* page) {
+CachePage* Simulator::GetCachePage(base::CustomMatcherHashMap* i_cache,
+                                   void* page) {
   base::HashMap::Entry* entry = i_cache->LookupOrInsert(page, ICacheHash(page));
   if (entry->value == NULL) {
     CachePage* new_page = new CachePage();
@@ -691,7 +692,8 @@
 }
 
 // Flush from start up to and not including start + size.
-void Simulator::FlushOnePage(base::HashMap* i_cache, intptr_t start, int size) {
+void Simulator::FlushOnePage(base::CustomMatcherHashMap* i_cache,
+                             intptr_t start, int size) {
   DCHECK(size <= CachePage::kPageSize);
   DCHECK(AllOnOnePage(start, size - 1));
   DCHECK((start & CachePage::kLineMask) == 0);
@@ -703,7 +705,8 @@
   memset(valid_bytemap, CachePage::LINE_INVALID, size >> CachePage::kLineShift);
 }
 
-void Simulator::CheckICache(base::HashMap* i_cache, Instruction* instr) {
+void Simulator::CheckICache(base::CustomMatcherHashMap* i_cache,
+                            Instruction* instr) {
   intptr_t address = reinterpret_cast<intptr_t>(instr);
   void* page = reinterpret_cast<void*>(address & (~CachePage::kPageMask));
   void* line = reinterpret_cast<void*>(address & (~CachePage::kLineMask));
@@ -1469,7 +1472,7 @@
 Simulator::Simulator(Isolate* isolate) : isolate_(isolate) {
   i_cache_ = isolate_->simulator_i_cache();
   if (i_cache_ == NULL) {
-    i_cache_ = new base::HashMap(&ICacheMatch);
+    i_cache_ = new base::CustomMatcherHashMap(&ICacheMatch);
     isolate_->set_simulator_i_cache(i_cache_);
   }
   Initialize(isolate);
@@ -1609,7 +1612,8 @@
 };
 
 // static
-void Simulator::TearDown(base::HashMap* i_cache, Redirection* first) {
+void Simulator::TearDown(base::CustomMatcherHashMap* i_cache,
+                         Redirection* first) {
   Redirection::DeleteChain(first);
   if (i_cache != nullptr) {
     for (base::HashMap::Entry* entry = i_cache->Start(); entry != nullptr;
diff --git a/src/s390/simulator-s390.h b/src/s390/simulator-s390.h
index 7af00ee..1ce6bf7 100644
--- a/src/s390/simulator-s390.h
+++ b/src/s390/simulator-s390.h
@@ -211,7 +211,7 @@
   // Call on program start.
   static void Initialize(Isolate* isolate);
 
-  static void TearDown(base::HashMap* i_cache, Redirection* first);
+  static void TearDown(base::CustomMatcherHashMap* i_cache, Redirection* first);
 
   // V8 generally calls into generated JS code with 5 parameters and into
   // generated RegExp code with 7 parameters. This is a convenience function,
@@ -233,7 +233,8 @@
   char* last_debugger_input() { return last_debugger_input_; }
 
   // ICache checking.
-  static void FlushICache(base::HashMap* i_cache, void* start, size_t size);
+  static void FlushICache(base::CustomMatcherHashMap* i_cache, void* start,
+                          size_t size);
 
   // Returns true if pc register contains one of the 'special_values' defined
   // below (bad_lr, end_sim_pc).
@@ -445,9 +446,12 @@
   void ExecuteInstruction(Instruction* instr, bool auto_incr_pc = true);
 
   // ICache.
-  static void CheckICache(base::HashMap* i_cache, Instruction* instr);
-  static void FlushOnePage(base::HashMap* i_cache, intptr_t start, int size);
-  static CachePage* GetCachePage(base::HashMap* i_cache, void* page);
+  static void CheckICache(base::CustomMatcherHashMap* i_cache,
+                          Instruction* instr);
+  static void FlushOnePage(base::CustomMatcherHashMap* i_cache, intptr_t start,
+                           int size);
+  static CachePage* GetCachePage(base::CustomMatcherHashMap* i_cache,
+                                 void* page);
 
   // Runtime call support.
   static void* RedirectExternalReference(
@@ -482,7 +486,7 @@
   char* last_debugger_input_;
 
   // Icache simulation
-  base::HashMap* i_cache_;
+  base::CustomMatcherHashMap* i_cache_;
 
   // Registered breakpoints.
   Instruction* break_pc_;
diff --git a/src/safepoint-table.h b/src/safepoint-table.h
index fbb0152..e0e9d95 100644
--- a/src/safepoint-table.h
+++ b/src/safepoint-table.h
@@ -8,7 +8,7 @@
 #include "src/allocation.h"
 #include "src/heap/heap.h"
 #include "src/v8memory.h"
-#include "src/zone.h"
+#include "src/zone/zone.h"
 
 namespace v8 {
 namespace internal {
diff --git a/src/signature.h b/src/signature.h
index 3fa5f82..97238b6 100644
--- a/src/signature.h
+++ b/src/signature.h
@@ -5,7 +5,7 @@
 #ifndef V8_SIGNATURE_H_
 #define V8_SIGNATURE_H_
 
-#include "src/zone.h"
+#include "src/zone/zone.h"
 
 namespace v8 {
 namespace internal {
diff --git a/src/small-pointer-list.h b/src/small-pointer-list.h
index 9ece249..ac5ecaa 100644
--- a/src/small-pointer-list.h
+++ b/src/small-pointer-list.h
@@ -7,7 +7,7 @@
 
 #include "src/base/logging.h"
 #include "src/globals.h"
-#include "src/zone.h"
+#include "src/zone/zone.h"
 
 namespace v8 {
 namespace internal {
diff --git a/src/snapshot/code-serializer.cc b/src/snapshot/code-serializer.cc
index 8d2f5d9..16044a5 100644
--- a/src/snapshot/code-serializer.cc
+++ b/src/snapshot/code-serializer.cc
@@ -98,6 +98,10 @@
     UNREACHABLE();
   }
 
+  if (ElideObject(obj)) {
+    return SerializeObject(*isolate()->factory()->undefined_value(),
+                           how_to_code, where_to_point, skip);
+  }
   // Past this point we should not see any (context-specific) maps anymore.
   CHECK(!obj->IsMap());
   // There should be no references to the global object embedded.
diff --git a/src/snapshot/code-serializer.h b/src/snapshot/code-serializer.h
index e82a7d5..b3c54d1 100644
--- a/src/snapshot/code-serializer.h
+++ b/src/snapshot/code-serializer.h
@@ -36,6 +36,7 @@
     UNREACHABLE();
   }
 
+  virtual bool ElideObject(Object* obj) { return false; }
   void SerializeGeneric(HeapObject* heap_object, HowToCode how_to_code,
                         WhereToPoint where_to_point);
 
@@ -73,6 +74,8 @@
     }
   }
 
+  bool ElideObject(Object* obj) override { return obj->IsWeakCell(); };
+
  private:
   WasmCompiledModuleSerializer(Isolate* isolate, uint32_t source_hash)
       : CodeSerializer(isolate, source_hash) {}
diff --git a/src/snapshot/deserializer.cc b/src/snapshot/deserializer.cc
index 7a2df28..b90a2c5 100644
--- a/src/snapshot/deserializer.cc
+++ b/src/snapshot/deserializer.cc
@@ -414,7 +414,7 @@
     LargeObjectSpace* lo_space = isolate_->heap()->lo_space();
     Executability exec = static_cast<Executability>(source_.Get());
     AllocationResult result = lo_space->AllocateRaw(size, exec);
-    HeapObject* obj = HeapObject::cast(result.ToObjectChecked());
+    HeapObject* obj = result.ToObjectChecked();
     deserialized_large_objects_.Add(obj);
     return obj->address();
   } else if (space_index == MAP_SPACE) {
diff --git a/src/snapshot/natives.h b/src/snapshot/natives.h
index e447515..a9dc306 100644
--- a/src/snapshot/natives.h
+++ b/src/snapshot/natives.h
@@ -22,8 +22,15 @@
   TEST
 };
 
+// Extra handling for V8_EXPORT_PRIVATE in combination with USING_V8_SHARED
+// since definition of methods of classes marked as dllimport is not allowed.
 template <NativeType type>
+#ifdef USING_V8_SHARED
 class NativesCollection {
+#else
+class V8_EXPORT_PRIVATE NativesCollection {
+#endif  // USING_V8_SHARED
+
  public:
   // The following methods are implemented in js2c-generated code:
 
diff --git a/src/snapshot/serializer-common.cc b/src/snapshot/serializer-common.cc
index bb3cc5c..adfd6e4 100644
--- a/src/snapshot/serializer-common.cc
+++ b/src/snapshot/serializer-common.cc
@@ -14,7 +14,7 @@
 ExternalReferenceEncoder::ExternalReferenceEncoder(Isolate* isolate) {
   map_ = isolate->external_reference_map();
   if (map_ != NULL) return;
-  map_ = new base::HashMap(base::HashMap::PointersMatch);
+  map_ = new base::HashMap();
   ExternalReferenceTable* table = ExternalReferenceTable::instance(isolate);
   for (int i = 0; i < table->size(); ++i) {
     Address addr = table->address(i);
diff --git a/src/snapshot/serializer.cc b/src/snapshot/serializer.cc
index d7a7f89..f622a5b 100644
--- a/src/snapshot/serializer.cc
+++ b/src/snapshot/serializer.cc
@@ -403,9 +403,8 @@
         ExternalTwoByteString::cast(string)->resource()->data());
   }
 
-  AllocationSpace space = (allocation_size > Page::kMaxRegularHeapObjectSize)
-                              ? LO_SPACE
-                              : OLD_SPACE;
+  AllocationSpace space =
+      (allocation_size > kMaxRegularHeapObjectSize) ? LO_SPACE : OLD_SPACE;
   SerializePrologue(space, allocation_size, map);
 
   // Output the rest of the imaginary string.
diff --git a/src/snapshot/serializer.h b/src/snapshot/serializer.h
index ff2c6a9..0f87774 100644
--- a/src/snapshot/serializer.h
+++ b/src/snapshot/serializer.h
@@ -38,7 +38,7 @@
  private:
   class NameMap {
    public:
-    NameMap() : impl_(base::HashMap::PointersMatch) {}
+    NameMap() : impl_() {}
 
     ~NameMap() {
       for (base::HashMap::Entry* p = impl_.Start(); p != NULL;
diff --git a/src/snapshot/snapshot-common.cc b/src/snapshot/snapshot-common.cc
index fed45d1..959ac56 100644
--- a/src/snapshot/snapshot-common.cc
+++ b/src/snapshot/snapshot-common.cc
@@ -31,19 +31,6 @@
   return index < num_contexts;
 }
 
-
-uint32_t Snapshot::SizeOfFirstPage(Isolate* isolate, AllocationSpace space) {
-  DCHECK(space >= FIRST_PAGED_SPACE && space <= LAST_PAGED_SPACE);
-  if (!isolate->snapshot_available()) {
-    return static_cast<uint32_t>(MemoryAllocator::PageAreaSize(space));
-  }
-  uint32_t size;
-  int offset = kFirstPageSizesOffset + (space - FIRST_PAGED_SPACE) * kInt32Size;
-  memcpy(&size, isolate->snapshot_blob()->data + offset, kInt32Size);
-  return size;
-}
-
-
 bool Snapshot::Initialize(Isolate* isolate) {
   if (!isolate->snapshot_available()) return false;
   base::ElapsedTimer timer;
@@ -89,25 +76,8 @@
   return Handle<Context>::cast(result);
 }
 
-void UpdateMaxRequirementPerPage(
-    uint32_t* requirements,
-    Vector<const SerializedData::Reservation> reservations) {
-  int space = 0;
-  uint32_t current_requirement = 0;
-  for (const auto& reservation : reservations) {
-    current_requirement += reservation.chunk_size();
-    if (reservation.is_last()) {
-      requirements[space] = std::max(requirements[space], current_requirement);
-      current_requirement = 0;
-      space++;
-    }
-  }
-  DCHECK_EQ(i::Serializer::kNumberOfSpaces, space);
-}
-
-void CalculateFirstPageSizes(const SnapshotData* startup_snapshot,
-                             const List<SnapshotData*>* context_snapshots,
-                             uint32_t* sizes_out) {
+void ProfileDeserialization(const SnapshotData* startup_snapshot,
+                            const List<SnapshotData*>* context_snapshots) {
   if (FLAG_profile_deserialization) {
     int startup_total = 0;
     PrintF("Deserialization will reserve:\n");
@@ -123,36 +93,6 @@
       PrintF("%10d bytes per context #%d\n", context_total, i);
     }
   }
-
-  uint32_t startup_requirements[i::Serializer::kNumberOfSpaces];
-  uint32_t context_requirements[i::Serializer::kNumberOfSpaces];
-  for (int space = 0; space < i::Serializer::kNumberOfSpaces; space++) {
-    startup_requirements[space] = 0;
-    context_requirements[space] = 0;
-  }
-
-  UpdateMaxRequirementPerPage(startup_requirements,
-                              startup_snapshot->Reservations());
-  for (const auto& context_snapshot : *context_snapshots) {
-    UpdateMaxRequirementPerPage(context_requirements,
-                                context_snapshot->Reservations());
-  }
-
-  for (int space = 0; space < i::Serializer::kNumberOfSpaces; space++) {
-    // If the space requirement for a page is less than a page size, we consider
-    // limiting the size of the first page in order to save memory on startup.
-    uint32_t required = startup_requirements[space] +
-                        2 * context_requirements[space] +
-                        Page::kObjectStartOffset;
-    // Add a small allowance to the code space for small scripts.
-    if (space == CODE_SPACE) required += 32 * KB;
-
-    if (space >= FIRST_PAGED_SPACE && space <= LAST_PAGED_SPACE) {
-      uint32_t max_size =
-          MemoryAllocator::PageAreaSize(static_cast<AllocationSpace>(space));
-      sizes_out[space - FIRST_PAGED_SPACE] = std::min(required, max_size);
-    }
-  }
 }
 
 v8::StartupData Snapshot::CreateSnapshotBlob(
@@ -166,13 +106,9 @@
     total_length += context_snapshot->RawData().length();
   }
 
-  uint32_t first_page_sizes[kNumPagedSpaces];
-  CalculateFirstPageSizes(startup_snapshot, context_snapshots,
-                          first_page_sizes);
+  ProfileDeserialization(startup_snapshot, context_snapshots);
 
   char* data = new char[total_length];
-  memcpy(data + kFirstPageSizesOffset, first_page_sizes,
-         kNumPagedSpaces * kInt32Size);
   memcpy(data + kNumberOfContextsOffset, &num_contexts, kInt32Size);
   int payload_offset = StartupSnapshotOffset(num_contexts);
   int payload_length = startup_snapshot->RawData().length();
diff --git a/src/snapshot/snapshot.h b/src/snapshot/snapshot.h
index a541592..49a6092 100644
--- a/src/snapshot/snapshot.h
+++ b/src/snapshot/snapshot.h
@@ -67,9 +67,6 @@
 
   static bool EmbedsScript(Isolate* isolate);
 
-  static uint32_t SizeOfFirstPage(Isolate* isolate, AllocationSpace space);
-
-
   // To be implemented by the snapshot source.
   static const v8::StartupData* DefaultSnapshotBlob();
 
@@ -88,21 +85,16 @@
                                                int index);
 
   // Snapshot blob layout:
-  // [0 - 5] pre-calculated first page sizes for paged spaces
-  // [6] number of contexts N
-  // [7] offset to context 0
-  // [8] offset to context 1
+  // [0] number of contexts N
+  // [1] offset to context 0
+  // [2] offset to context 1
   // ...
   // ... offset to context N - 1
   // ... startup snapshot data
   // ... context 0 snapshot data
   // ... context 1 snapshot data
 
-  static const int kNumPagedSpaces = LAST_PAGED_SPACE - FIRST_PAGED_SPACE + 1;
-
-  static const int kFirstPageSizesOffset = 0;
-  static const int kNumberOfContextsOffset =
-      kFirstPageSizesOffset + kNumPagedSpaces * kInt32Size;
+  static const int kNumberOfContextsOffset = 0;
   static const int kFirstContextOffsetOffset =
       kNumberOfContextsOffset + kInt32Size;
 
diff --git a/src/snapshot/startup-serializer.h b/src/snapshot/startup-serializer.h
index cc66f71..9c1c3b9 100644
--- a/src/snapshot/startup-serializer.h
+++ b/src/snapshot/startup-serializer.h
@@ -32,8 +32,7 @@
  private:
   class PartialCacheIndexMap : public AddressMapBase {
    public:
-    PartialCacheIndexMap()
-        : map_(base::HashMap::PointersMatch), next_index_(0) {}
+    PartialCacheIndexMap() : map_(), next_index_(0) {}
 
     // Lookup object in the map. Return its index if found, or create
     // a new entry with new_index as value, and return kInvalidIndex.
diff --git a/src/source-position-table.h b/src/source-position-table.h
index 76ae4a0..74c3b9e 100644
--- a/src/source-position-table.h
+++ b/src/source-position-table.h
@@ -8,7 +8,7 @@
 #include "src/assert-scope.h"
 #include "src/checks.h"
 #include "src/handles.h"
-#include "src/zone-containers.h"
+#include "src/zone/zone-containers.h"
 
 namespace v8 {
 namespace internal {
diff --git a/src/tracing/trace-event.cc b/src/tracing/trace-event.cc
index 3e0a0fa..440af19 100644
--- a/src/tracing/trace-event.cc
+++ b/src/tracing/trace-event.cc
@@ -6,6 +6,7 @@
 
 #include <string.h>
 
+#include "src/counters.h"
 #include "src/isolate.h"
 #include "src/v8.h"
 
@@ -26,9 +27,11 @@
     v8::internal::tracing::AddTraceEvent(
         TRACE_EVENT_PHASE_END, p_data_->category_group_enabled, p_data_->name,
         v8::internal::tracing::kGlobalScope, v8::internal::tracing::kNoId,
-        v8::internal::tracing::kNoId, TRACE_EVENT_FLAG_COPY,
-        "runtime-call-stat",
-        TRACE_STR_COPY(p_data_->isolate->trace_event_stats_table()->Dump()));
+        v8::internal::tracing::kNoId, TRACE_EVENT_FLAG_NONE,
+        "runtime-call-stats", TRACE_STR_COPY(p_data_->isolate->counters()
+                                                 ->runtime_call_stats()
+                                                 ->Dump()
+                                                 .c_str()));
   } else {
     v8::internal::tracing::AddTraceEvent(
         TRACE_EVENT_PHASE_END, p_data_->category_group_enabled, p_data_->name,
@@ -37,14 +40,14 @@
   }
 }
 
-void CallStatsScopedTracer::Initialize(Isolate* isolate,
+void CallStatsScopedTracer::Initialize(v8::internal::Isolate* isolate,
                                        const uint8_t* category_group_enabled,
                                        const char* name) {
   data_.isolate = isolate;
   data_.category_group_enabled = category_group_enabled;
   data_.name = name;
   p_data_ = &data_;
-  TraceEventStatsTable* table = isolate->trace_event_stats_table();
+  RuntimeCallStats* table = isolate->counters()->runtime_call_stats();
   has_parent_scope_ = table->InUse();
   if (!has_parent_scope_) table->Reset();
   v8::internal::tracing::AddTraceEvent(
@@ -53,88 +56,6 @@
       TRACE_EVENT_FLAG_NONE, v8::internal::tracing::kNoId);
 }
 
-void TraceEventStatsTable::Enter(Isolate* isolate,
-                                 TraceEventCallStatsTimer* timer,
-                                 CounterId counter_id) {
-  TraceEventStatsTable* table = isolate->trace_event_stats_table();
-  RuntimeCallCounter* counter = &(table->*counter_id);
-  timer->Start(counter, table->current_timer_);
-  table->current_timer_ = timer;
-}
-
-void TraceEventStatsTable::Leave(Isolate* isolate,
-                                 TraceEventCallStatsTimer* timer) {
-  TraceEventStatsTable* table = isolate->trace_event_stats_table();
-  if (table->current_timer_ == timer) {
-    table->current_timer_ = timer->Stop();
-  }
-}
-
-void TraceEventStatsTable::Reset() {
-  in_use_ = true;
-  current_timer_ = nullptr;
-
-#define RESET_COUNTER(name) this->name.Reset();
-  FOR_EACH_MANUAL_COUNTER(RESET_COUNTER)
-#undef RESET_COUNTER
-
-#define RESET_COUNTER(name, nargs, result_size) this->Runtime_##name.Reset();
-  FOR_EACH_INTRINSIC(RESET_COUNTER)
-#undef RESET_COUNTER
-
-#define RESET_COUNTER(name) this->Builtin_##name.Reset();
-  BUILTIN_LIST_C(RESET_COUNTER)
-#undef RESET_COUNTER
-
-#define RESET_COUNTER(name) this->API_##name.Reset();
-  FOR_EACH_API_COUNTER(RESET_COUNTER)
-#undef RESET_COUNTER
-
-#define RESET_COUNTER(name) this->Handler_##name.Reset();
-  FOR_EACH_HANDLER_COUNTER(RESET_COUNTER)
-#undef RESET_COUNTER
-}
-
-const char* TraceEventStatsTable::Dump() {
-  buffer_.str(std::string());
-  buffer_.clear();
-  buffer_ << "{";
-#define DUMP_COUNTER(name) \
-  if (this->name.count > 0) this->name.Dump(buffer_);
-  FOR_EACH_MANUAL_COUNTER(DUMP_COUNTER)
-#undef DUMP_COUNTER
-
-#define DUMP_COUNTER(name, nargs, result_size) \
-  if (this->Runtime_##name.count > 0) this->Runtime_##name.Dump(buffer_);
-  FOR_EACH_INTRINSIC(DUMP_COUNTER)
-#undef DUMP_COUNTER
-
-#define DUMP_COUNTER(name) \
-  if (this->Builtin_##name.count > 0) this->Builtin_##name.Dump(buffer_);
-  BUILTIN_LIST_C(DUMP_COUNTER)
-#undef DUMP_COUNTER
-
-#define DUMP_COUNTER(name) \
-  if (this->API_##name.count > 0) this->API_##name.Dump(buffer_);
-  FOR_EACH_API_COUNTER(DUMP_COUNTER)
-#undef DUMP_COUNTER
-
-#define DUMP_COUNTER(name) \
-  if (this->Handler_##name.count > 0) this->Handler_##name.Dump(buffer_);
-  FOR_EACH_HANDLER_COUNTER(DUMP_COUNTER)
-#undef DUMP_COUNTER
-  buffer_ << "\"END\":[]}";
-  const std::string& buffer_str = buffer_.str();
-  size_t length = buffer_str.size();
-  if (length > len_) {
-    buffer_c_str_.reset(new char[length + 1]);
-    len_ = length;
-  }
-  strncpy(buffer_c_str_.get(), buffer_str.c_str(), length + 1);
-  in_use_ = false;
-  return buffer_c_str_.get();
-}
-
 }  // namespace tracing
 }  // namespace internal
 }  // namespace v8
diff --git a/src/tracing/trace-event.h b/src/tracing/trace-event.h
index 25ccd80..35d2e15 100644
--- a/src/tracing/trace-event.h
+++ b/src/tracing/trace-event.h
@@ -6,12 +6,12 @@
 #define SRC_TRACING_TRACE_EVENT_H_
 
 #include <stddef.h>
+#include <memory>
 
 #include "base/trace_event/common/trace_event_common.h"
 #include "include/v8-platform.h"
 #include "src/base/atomicops.h"
 #include "src/base/macros.h"
-#include "src/counters.h"
 
 // This header file defines implementation details of how the trace macros in
 // trace_event_common.h collect and store trace events. Anything not
@@ -121,8 +121,7 @@
 //                    const uint8_t* arg_types,
 //                    const uint64_t* arg_values,
 //                    unsigned int flags)
-#define TRACE_EVENT_API_ADD_TRACE_EVENT \
-  v8::internal::tracing::TraceEventHelper::GetCurrentPlatform()->AddTraceEvent
+#define TRACE_EVENT_API_ADD_TRACE_EVENT v8::internal::tracing::AddTraceEventImpl
 
 // Set the duration field of a COMPLETE trace event.
 // void TRACE_EVENT_API_UPDATE_TRACE_EVENT_DURATION(
@@ -281,7 +280,7 @@
     uint64_t cid_;                                                         \
   };                                                                       \
   INTERNAL_TRACE_EVENT_UID(ScopedContext)                                  \
-  INTERNAL_TRACE_EVENT_UID(scoped_context)(context.raw_id());
+  INTERNAL_TRACE_EVENT_UID(scoped_context)(context);
 
 #define TRACE_EVENT_RUNTIME_CALL_STATS_TRACING_ENABLED() \
   base::NoBarrier_Load(&v8::internal::tracing::kRuntimeCallStatsTracingEnabled)
@@ -289,9 +288,6 @@
 #define TRACE_EVENT_CALL_STATS_SCOPED(isolate, category_group, name) \
   INTERNAL_TRACE_EVENT_CALL_STATS_SCOPED(isolate, category_group, name)
 
-#define TRACE_EVENT_RUNTIME_CALL_STATS_TRACING_SCOPED(isolate, counter_id) \
-  INTERNAL_TRACE_EVENT_RUNTIME_CALL_STATS_TRACING_SCOPED(isolate, counter_id)
-
 #define INTERNAL_TRACE_EVENT_CALL_STATS_SCOPED(isolate, category_group, name)  \
   {                                                                            \
     INTERNAL_TRACE_EVENT_GET_CATEGORY_INFO(                                    \
@@ -309,13 +305,11 @@
                     name);                                                     \
   }
 
-#define INTERNAL_TRACE_EVENT_RUNTIME_CALL_STATS_TRACING_SCOPED(isolate,    \
-                                                               counter_id) \
-  v8::internal::tracing::CounterScope INTERNAL_TRACE_EVENT_UID(scope)(     \
-      isolate, counter_id);
-
 namespace v8 {
 namespace internal {
+
+class Isolate;
+
 namespace tracing {
 
 // Specify these values when the corresponding argument of AddTraceEvent is not
@@ -460,6 +454,28 @@
   const char* str_;
 };
 
+static V8_INLINE uint64_t AddTraceEventImpl(
+    char phase, const uint8_t* category_group_enabled, const char* name,
+    const char* scope, uint64_t id, uint64_t bind_id, int32_t num_args,
+    const char** arg_names, const uint8_t* arg_types,
+    const uint64_t* arg_values, unsigned int flags) {
+  std::unique_ptr<ConvertableToTraceFormat> arg_convertables[2];
+  if (num_args > 0 && arg_types[0] == TRACE_VALUE_TYPE_CONVERTABLE) {
+    arg_convertables[0].reset(reinterpret_cast<ConvertableToTraceFormat*>(
+        static_cast<intptr_t>(arg_values[0])));
+  }
+  if (num_args > 1 && arg_types[1] == TRACE_VALUE_TYPE_CONVERTABLE) {
+    arg_convertables[1].reset(reinterpret_cast<ConvertableToTraceFormat*>(
+        static_cast<intptr_t>(arg_values[1])));
+  }
+  DCHECK(num_args <= 2);
+  v8::Platform* platform =
+      v8::internal::tracing::TraceEventHelper::GetCurrentPlatform();
+  return platform->AddTraceEvent(phase, category_group_enabled, name, scope, id,
+                                 bind_id, num_args, arg_names, arg_types,
+                                 arg_values, arg_convertables, flags);
+}
+
 // Define SetTraceValue for each allowed type. It stores the type and
 // value in the return arguments. This allows this API to avoid declaring any
 // structures so that it is portable to third_party libraries.
@@ -500,6 +516,19 @@
 #undef INTERNAL_DECLARE_SET_TRACE_VALUE
 #undef INTERNAL_DECLARE_SET_TRACE_VALUE_INT
 
+static V8_INLINE void SetTraceValue(ConvertableToTraceFormat* convertable_value,
+                                    unsigned char* type, uint64_t* value) {
+  *type = TRACE_VALUE_TYPE_CONVERTABLE;
+  *value = static_cast<uint64_t>(reinterpret_cast<intptr_t>(convertable_value));
+}
+
+template <typename T>
+static V8_INLINE typename std::enable_if<
+    std::is_convertible<T*, ConvertableToTraceFormat*>::value>::type
+SetTraceValue(std::unique_ptr<T> ptr, unsigned char* type, uint64_t* value) {
+  SetTraceValue(ptr.release(), type, value);
+}
+
 // These AddTraceEvent template
 // function is defined here instead of in the macro, because the arg_values
 // could be temporary objects, such as std::string. In order to store
@@ -512,36 +541,38 @@
                                         uint64_t id, uint64_t bind_id,
                                         unsigned int flags) {
   return TRACE_EVENT_API_ADD_TRACE_EVENT(phase, category_group_enabled, name,
-                                         scope, id, bind_id, kZeroNumArgs, NULL,
-                                         NULL, NULL, flags);
+                                         scope, id, bind_id, kZeroNumArgs,
+                                         nullptr, nullptr, nullptr, flags);
 }
 
 template <class ARG1_TYPE>
 static V8_INLINE uint64_t AddTraceEvent(
     char phase, const uint8_t* category_group_enabled, const char* name,
     const char* scope, uint64_t id, uint64_t bind_id, unsigned int flags,
-    const char* arg1_name, const ARG1_TYPE& arg1_val) {
+    const char* arg1_name, ARG1_TYPE&& arg1_val) {
   const int num_args = 1;
-  uint8_t arg_types[1];
-  uint64_t arg_values[1];
-  SetTraceValue(arg1_val, &arg_types[0], &arg_values[0]);
+  uint8_t arg_type;
+  uint64_t arg_value;
+  SetTraceValue(std::forward<ARG1_TYPE>(arg1_val), &arg_type, &arg_value);
   return TRACE_EVENT_API_ADD_TRACE_EVENT(
       phase, category_group_enabled, name, scope, id, bind_id, num_args,
-      &arg1_name, arg_types, arg_values, flags);
+      &arg1_name, &arg_type, &arg_value, flags);
 }
 
 template <class ARG1_TYPE, class ARG2_TYPE>
 static V8_INLINE uint64_t AddTraceEvent(
     char phase, const uint8_t* category_group_enabled, const char* name,
     const char* scope, uint64_t id, uint64_t bind_id, unsigned int flags,
-    const char* arg1_name, const ARG1_TYPE& arg1_val, const char* arg2_name,
-    const ARG2_TYPE& arg2_val) {
+    const char* arg1_name, ARG1_TYPE&& arg1_val, const char* arg2_name,
+    ARG2_TYPE&& arg2_val) {
   const int num_args = 2;
   const char* arg_names[2] = {arg1_name, arg2_name};
   unsigned char arg_types[2];
   uint64_t arg_values[2];
-  SetTraceValue(arg1_val, &arg_types[0], &arg_values[0]);
-  SetTraceValue(arg2_val, &arg_types[1], &arg_values[1]);
+  SetTraceValue(std::forward<ARG1_TYPE>(arg1_val), &arg_types[0],
+                &arg_values[0]);
+  SetTraceValue(std::forward<ARG2_TYPE>(arg2_val), &arg_types[1],
+                &arg_values[1]);
   return TRACE_EVENT_API_ADD_TRACE_EVENT(
       phase, category_group_enabled, name, scope, id, bind_id, num_args,
       arg_names, arg_types, arg_values, flags);
@@ -634,136 +665,21 @@
     }
   }
 
-  void Initialize(Isolate* isolate, const uint8_t* category_group_enabled,
-                  const char* name);
+  void Initialize(v8::internal::Isolate* isolate,
+                  const uint8_t* category_group_enabled, const char* name);
 
  private:
   void AddEndTraceEvent();
   struct Data {
     const uint8_t* category_group_enabled;
     const char* name;
-    Isolate* isolate;
+    v8::internal::Isolate* isolate;
   };
   bool has_parent_scope_;
   Data* p_data_;
   Data data_;
 };
 
-// TraceEventCallStatsTimer is used to keep track of the stack of currently
-// active timers used for properly measuring the own time of a
-// RuntimeCallCounter.
-class TraceEventCallStatsTimer {
- public:
-  TraceEventCallStatsTimer() : counter_(nullptr), parent_(nullptr) {}
-  RuntimeCallCounter* counter() { return counter_; }
-  base::ElapsedTimer timer() { return timer_; }
-
- private:
-  friend class TraceEventStatsTable;
-
-  V8_INLINE void Start(RuntimeCallCounter* counter,
-                       TraceEventCallStatsTimer* parent) {
-    counter_ = counter;
-    parent_ = parent;
-    timer_.Start();
-  }
-
-  V8_INLINE TraceEventCallStatsTimer* Stop() {
-    base::TimeDelta delta = timer_.Elapsed();
-    timer_.Stop();
-    counter_->count++;
-    counter_->time += delta;
-    if (parent_ != nullptr) {
-      // Adjust parent timer so that it does not include sub timer's time.
-      parent_->counter_->time -= delta;
-    }
-    return parent_;
-  }
-
-  RuntimeCallCounter* counter_;
-  TraceEventCallStatsTimer* parent_;
-  base::ElapsedTimer timer_;
-};
-
-class TraceEventStatsTable {
- public:
-  typedef RuntimeCallCounter TraceEventStatsTable::*CounterId;
-
-#define CALL_RUNTIME_COUNTER(name) \
-  RuntimeCallCounter name = RuntimeCallCounter(#name);
-  FOR_EACH_MANUAL_COUNTER(CALL_RUNTIME_COUNTER)
-#undef CALL_RUNTIME_COUNTER
-#define CALL_RUNTIME_COUNTER(name, nargs, ressize) \
-  RuntimeCallCounter Runtime_##name = RuntimeCallCounter(#name);
-  FOR_EACH_INTRINSIC(CALL_RUNTIME_COUNTER)
-#undef CALL_RUNTIME_COUNTER
-#define CALL_BUILTIN_COUNTER(name) \
-  RuntimeCallCounter Builtin_##name = RuntimeCallCounter(#name);
-  BUILTIN_LIST_C(CALL_BUILTIN_COUNTER)
-#undef CALL_BUILTIN_COUNTER
-#define CALL_BUILTIN_COUNTER(name) \
-  RuntimeCallCounter API_##name = RuntimeCallCounter("API_" #name);
-  FOR_EACH_API_COUNTER(CALL_BUILTIN_COUNTER)
-#undef CALL_BUILTIN_COUNTER
-#define CALL_BUILTIN_COUNTER(name) \
-  RuntimeCallCounter Handler_##name = RuntimeCallCounter(#name);
-  FOR_EACH_HANDLER_COUNTER(CALL_BUILTIN_COUNTER)
-#undef CALL_BUILTIN_COUNTER
-
-  // Starting measuring the time for a function. This will establish the
-  // connection to the parent counter for properly calculating the own times.
-  static void Enter(Isolate* isolate, TraceEventCallStatsTimer* timer,
-                    CounterId counter_id);
-
-  // Leave a scope for a measured runtime function. This will properly add
-  // the time delta to the current_counter and subtract the delta from its
-  // parent.
-  static void Leave(Isolate* isolate, TraceEventCallStatsTimer* timer);
-
-  void Reset();
-  const char* Dump();
-
-  TraceEventStatsTable() {
-    Reset();
-    in_use_ = false;
-  }
-
-  TraceEventCallStatsTimer* current_timer() { return current_timer_; }
-  bool InUse() { return in_use_; }
-
- private:
-  std::stringstream buffer_;
-  std::unique_ptr<char[]> buffer_c_str_;
-  size_t len_ = 0;
-  // Counter to track recursive time events.
-  TraceEventCallStatsTimer* current_timer_ = nullptr;
-  bool in_use_;
-};
-
-class CounterScope {
- public:
-  CounterScope(Isolate* isolate, TraceEventStatsTable::CounterId counter_id)
-      : isolate_(nullptr) {
-    if (V8_UNLIKELY(TRACE_EVENT_RUNTIME_CALL_STATS_TRACING_ENABLED())) {
-      isolate_ = isolate;
-      TraceEventStatsTable::Enter(isolate_, &timer_, counter_id);
-    }
-  }
-  ~CounterScope() {
-    // A non-nullptr isolate_ means the stats table already entered the scope
-    // and started the timer, we need to leave the scope and reset the timer
-    // even when we stop tracing, otherwise we have the risk to have a dangling
-    // pointer.
-    if (V8_UNLIKELY(isolate_ != nullptr)) {
-      TraceEventStatsTable::Leave(isolate_, &timer_);
-    }
-  }
-
- private:
-  Isolate* isolate_;
-  TraceEventCallStatsTimer timer_;
-};
-
 }  // namespace tracing
 }  // namespace internal
 }  // namespace v8
diff --git a/src/type-cache.h b/src/type-cache.h
deleted file mode 100644
index f83f3bd..0000000
--- a/src/type-cache.h
+++ /dev/null
@@ -1,174 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_TYPE_CACHE_H_
-#define V8_TYPE_CACHE_H_
-
-#include "src/types.h"
-
-namespace v8 {
-namespace internal {
-
-class TypeCache final {
- private:
-  // This has to be first for the initialization magic to work.
-  base::AccountingAllocator allocator;
-  Zone zone_;
-
- public:
-  static TypeCache const& Get();
-
-  TypeCache() : zone_(&allocator) {}
-
-  Type* const kInt8 =
-      CreateNative(CreateRange<int8_t>(), Type::UntaggedIntegral8());
-  Type* const kUint8 =
-      CreateNative(CreateRange<uint8_t>(), Type::UntaggedIntegral8());
-  Type* const kUint8Clamped = kUint8;
-  Type* const kInt16 =
-      CreateNative(CreateRange<int16_t>(), Type::UntaggedIntegral16());
-  Type* const kUint16 =
-      CreateNative(CreateRange<uint16_t>(), Type::UntaggedIntegral16());
-  Type* const kInt32 =
-      CreateNative(Type::Signed32(), Type::UntaggedIntegral32());
-  Type* const kUint32 =
-      CreateNative(Type::Unsigned32(), Type::UntaggedIntegral32());
-  Type* const kFloat32 = CreateNative(Type::Number(), Type::UntaggedFloat32());
-  Type* const kFloat64 = CreateNative(Type::Number(), Type::UntaggedFloat64());
-
-  Type* const kSmi = CreateNative(Type::SignedSmall(), Type::TaggedSigned());
-  Type* const kHoleySmi = Type::Union(kSmi, Type::Hole(), zone());
-  Type* const kHeapNumber = CreateNative(Type::Number(), Type::TaggedPointer());
-
-  Type* const kSingletonZero = CreateRange(0.0, 0.0);
-  Type* const kSingletonOne = CreateRange(1.0, 1.0);
-  Type* const kSingletonTen = CreateRange(10.0, 10.0);
-  Type* const kSingletonMinusOne = CreateRange(-1.0, -1.0);
-  Type* const kZeroOrUndefined =
-      Type::Union(kSingletonZero, Type::Undefined(), zone());
-  Type* const kTenOrUndefined =
-      Type::Union(kSingletonTen, Type::Undefined(), zone());
-  Type* const kMinusOneOrZero = CreateRange(-1.0, 0.0);
-  Type* const kMinusOneToOne = CreateRange(-1.0, 1.0);
-  Type* const kZeroOrOne = CreateRange(0.0, 1.0);
-  Type* const kZeroOrOneOrNaN = Type::Union(kZeroOrOne, Type::NaN(), zone());
-  Type* const kZeroToThirtyOne = CreateRange(0.0, 31.0);
-  Type* const kZeroToThirtyTwo = CreateRange(0.0, 32.0);
-  Type* const kZeroish =
-      Type::Union(kSingletonZero, Type::MinusZeroOrNaN(), zone());
-  Type* const kInteger = CreateRange(-V8_INFINITY, V8_INFINITY);
-  Type* const kIntegerOrMinusZero =
-      Type::Union(kInteger, Type::MinusZero(), zone());
-  Type* const kIntegerOrMinusZeroOrNaN =
-      Type::Union(kIntegerOrMinusZero, Type::NaN(), zone());
-  Type* const kPositiveInteger = CreateRange(0.0, V8_INFINITY);
-  Type* const kPositiveIntegerOrMinusZero =
-      Type::Union(kPositiveInteger, Type::MinusZero(), zone());
-  Type* const kPositiveIntegerOrMinusZeroOrNaN =
-      Type::Union(kPositiveIntegerOrMinusZero, Type::NaN(), zone());
-
-  Type* const kAdditiveSafeInteger =
-      CreateRange(-4503599627370496.0, 4503599627370496.0);
-  Type* const kSafeInteger = CreateRange(-kMaxSafeInteger, kMaxSafeInteger);
-  Type* const kAdditiveSafeIntegerOrMinusZero =
-      Type::Union(kAdditiveSafeInteger, Type::MinusZero(), zone());
-  Type* const kSafeIntegerOrMinusZero =
-      Type::Union(kSafeInteger, Type::MinusZero(), zone());
-  Type* const kPositiveSafeInteger = CreateRange(0.0, kMaxSafeInteger);
-
-  Type* const kUntaggedUndefined =
-      Type::Intersect(Type::Undefined(), Type::Untagged(), zone());
-
-  // Asm.js related types.
-  Type* const kAsmSigned = kInt32;
-  Type* const kAsmUnsigned = kUint32;
-  Type* const kAsmInt = Type::Union(kAsmSigned, kAsmUnsigned, zone());
-  Type* const kAsmFixnum = Type::Intersect(kAsmSigned, kAsmUnsigned, zone());
-  Type* const kAsmFloat = kFloat32;
-  Type* const kAsmDouble = kFloat64;
-  Type* const kAsmFloatQ = Type::Union(kAsmFloat, kUntaggedUndefined, zone());
-  Type* const kAsmDoubleQ = Type::Union(kAsmDouble, kUntaggedUndefined, zone());
-  // Not part of the Asm.js type hierarchy, but represents a part of what
-  // intish encompasses.
-  Type* const kAsmIntQ = Type::Union(kAsmInt, kUntaggedUndefined, zone());
-  Type* const kAsmFloatDoubleQ = Type::Union(kAsmFloatQ, kAsmDoubleQ, zone());
-  // Asm.js size unions.
-  Type* const kAsmSize8 = Type::Union(kInt8, kUint8, zone());
-  Type* const kAsmSize16 = Type::Union(kInt16, kUint16, zone());
-  Type* const kAsmSize32 =
-      Type::Union(Type::Union(kInt32, kUint32, zone()), kAsmFloat, zone());
-  Type* const kAsmSize64 = kFloat64;
-  // Asm.js other types.
-  Type* const kAsmComparable = Type::Union(
-      kAsmSigned,
-      Type::Union(kAsmUnsigned, Type::Union(kAsmDouble, kAsmFloat, zone()),
-                  zone()),
-      zone());
-  Type* const kAsmIntArrayElement =
-      Type::Union(Type::Union(kInt8, kUint8, zone()),
-                  Type::Union(Type::Union(kInt16, kUint16, zone()),
-                              Type::Union(kInt32, kUint32, zone()), zone()),
-                  zone());
-
-  // The FixedArray::length property always containts a smi in the range
-  // [0, FixedArray::kMaxLength].
-  Type* const kFixedArrayLengthType = CreateNative(
-      CreateRange(0.0, FixedArray::kMaxLength), Type::TaggedSigned());
-
-  // The FixedDoubleArray::length property always containts a smi in the range
-  // [0, FixedDoubleArray::kMaxLength].
-  Type* const kFixedDoubleArrayLengthType = CreateNative(
-      CreateRange(0.0, FixedDoubleArray::kMaxLength), Type::TaggedSigned());
-
-  // The JSArray::length property always contains a tagged number in the range
-  // [0, kMaxUInt32].
-  Type* const kJSArrayLengthType =
-      CreateNative(Type::Unsigned32(), Type::Tagged());
-
-  // The JSTyped::length property always contains a tagged number in the range
-  // [0, kMaxSmiValue].
-  Type* const kJSTypedArrayLengthType =
-      CreateNative(Type::UnsignedSmall(), Type::TaggedSigned());
-
-  // The String::length property always contains a smi in the range
-  // [0, String::kMaxLength].
-  Type* const kStringLengthType =
-      CreateNative(CreateRange(0.0, String::kMaxLength), Type::TaggedSigned());
-
-#define TYPED_ARRAY(TypeName, type_name, TYPE_NAME, ctype, size) \
-  Type* const k##TypeName##Array = CreateArray(k##TypeName);
-  TYPED_ARRAYS(TYPED_ARRAY)
-#undef TYPED_ARRAY
-
- private:
-  Type* CreateArray(Type* element) { return Type::Array(element, zone()); }
-
-  Type* CreateArrayFunction(Type* array) {
-    Type* arg1 = Type::Union(Type::Unsigned32(), Type::Object(), zone());
-    Type* arg2 = Type::Union(Type::Unsigned32(), Type::Undefined(), zone());
-    Type* arg3 = arg2;
-    return Type::Function(array, arg1, arg2, arg3, zone());
-  }
-
-  Type* CreateNative(Type* semantic, Type* representation) {
-    return Type::Intersect(semantic, representation, zone());
-  }
-
-  template <typename T>
-  Type* CreateRange() {
-    return CreateRange(std::numeric_limits<T>::min(),
-                       std::numeric_limits<T>::max());
-  }
-
-  Type* CreateRange(double min, double max) {
-    return Type::Range(min, max, zone());
-  }
-
-  Zone* zone() { return &zone_; }
-};
-
-}  // namespace internal
-}  // namespace v8
-
-#endif  // V8_TYPE_CACHE_H_
diff --git a/src/type-feedback-vector-inl.h b/src/type-feedback-vector-inl.h
index 771021f..f70f018 100644
--- a/src/type-feedback-vector-inl.h
+++ b/src/type-feedback-vector-inl.h
@@ -5,6 +5,7 @@
 #ifndef V8_TYPE_FEEDBACK_VECTOR_INL_H_
 #define V8_TYPE_FEEDBACK_VECTOR_INL_H_
 
+#include "src/globals.h"
 #include "src/type-feedback-vector.h"
 
 namespace v8 {
@@ -52,7 +53,13 @@
 int TypeFeedbackMetadata::GetSlotSize(FeedbackVectorSlotKind kind) {
   DCHECK_NE(FeedbackVectorSlotKind::INVALID, kind);
   DCHECK_NE(FeedbackVectorSlotKind::KINDS_NUMBER, kind);
-  return kind == FeedbackVectorSlotKind::GENERAL ? 1 : 2;
+  if (kind == FeedbackVectorSlotKind::GENERAL ||
+      kind == FeedbackVectorSlotKind::INTERPRETER_BINARYOP_IC ||
+      kind == FeedbackVectorSlotKind::INTERPRETER_COMPARE_IC) {
+    return 1;
+  }
+
+  return 2;
 }
 
 bool TypeFeedbackMetadata::SlotRequiresName(FeedbackVectorSlotKind kind) {
@@ -65,6 +72,8 @@
     case FeedbackVectorSlotKind::KEYED_LOAD_IC:
     case FeedbackVectorSlotKind::STORE_IC:
     case FeedbackVectorSlotKind::KEYED_STORE_IC:
+    case FeedbackVectorSlotKind::INTERPRETER_BINARYOP_IC:
+    case FeedbackVectorSlotKind::INTERPRETER_COMPARE_IC:
     case FeedbackVectorSlotKind::GENERAL:
     case FeedbackVectorSlotKind::INVALID:
       return false;
@@ -77,22 +86,20 @@
 }
 
 bool TypeFeedbackVector::is_empty() const {
-  if (length() == 0) return true;
-  DCHECK(length() > kReservedIndexCount);
-  return false;
+  return length() == kReservedIndexCount;
 }
 
-
 int TypeFeedbackVector::slot_count() const {
-  if (length() == 0) return 0;
-  DCHECK(length() > kReservedIndexCount);
   return length() - kReservedIndexCount;
 }
 
 
 TypeFeedbackMetadata* TypeFeedbackVector::metadata() const {
-  return is_empty() ? TypeFeedbackMetadata::cast(GetHeap()->empty_fixed_array())
-                    : TypeFeedbackMetadata::cast(get(kMetadataIndex));
+  return TypeFeedbackMetadata::cast(get(kMetadataIndex));
+}
+
+int TypeFeedbackVector::invocation_count() const {
+  return Smi::cast(get(kInvocationCountIndex))->value();
 }
 
 // Conversion from an integer index to either a slot or an ic slot.
@@ -113,23 +120,93 @@
   set(GetIndex(slot), value, mode);
 }
 
+// Helper function to transform the feedback to BinaryOperationHint.
+BinaryOperationHint BinaryOperationHintFromFeedback(int type_feedback) {
+  switch (type_feedback) {
+    case BinaryOperationFeedback::kNone:
+      return BinaryOperationHint::kNone;
+    case BinaryOperationFeedback::kSignedSmall:
+      return BinaryOperationHint::kSignedSmall;
+    case BinaryOperationFeedback::kNumber:
+      return BinaryOperationHint::kNumberOrOddball;
+    case BinaryOperationFeedback::kString:
+      return BinaryOperationHint::kString;
+    case BinaryOperationFeedback::kAny:
+    default:
+      return BinaryOperationHint::kAny;
+  }
+  UNREACHABLE();
+  return BinaryOperationHint::kNone;
+}
 
-void TypeFeedbackVector::ComputeCounts(int* with_type_info, int* generic) {
+// Helper function to transform the feedback to CompareOperationHint.
+CompareOperationHint CompareOperationHintFromFeedback(int type_feedback) {
+  switch (type_feedback) {
+    case CompareOperationFeedback::kNone:
+      return CompareOperationHint::kNone;
+    case CompareOperationFeedback::kSignedSmall:
+      return CompareOperationHint::kSignedSmall;
+    case CompareOperationFeedback::kNumber:
+      return CompareOperationHint::kNumber;
+    default:
+      return CompareOperationHint::kAny;
+  }
+  UNREACHABLE();
+  return CompareOperationHint::kNone;
+}
+
+void TypeFeedbackVector::ComputeCounts(int* with_type_info, int* generic,
+                                       int* vector_ic_count,
+                                       bool code_is_interpreted) {
   Object* uninitialized_sentinel =
       TypeFeedbackVector::RawUninitializedSentinel(GetIsolate());
   Object* megamorphic_sentinel =
       *TypeFeedbackVector::MegamorphicSentinel(GetIsolate());
   int with = 0;
   int gen = 0;
+  int total = 0;
   TypeFeedbackMetadataIterator iter(metadata());
   while (iter.HasNext()) {
     FeedbackVectorSlot slot = iter.Next();
     FeedbackVectorSlotKind kind = iter.kind();
 
     Object* obj = Get(slot);
-    if (obj != uninitialized_sentinel &&
-        kind != FeedbackVectorSlotKind::GENERAL) {
-      if (obj->IsWeakCell() || obj->IsFixedArray() || obj->IsString()) {
+    if (kind == FeedbackVectorSlotKind::GENERAL) {
+      continue;
+    }
+    total++;
+
+    if (obj != uninitialized_sentinel) {
+      if (kind == FeedbackVectorSlotKind::INTERPRETER_COMPARE_IC ||
+          kind == FeedbackVectorSlotKind::INTERPRETER_BINARYOP_IC) {
+        // If we are not running interpreted code, we need to ignore
+        // the special ic slots for binaryop/compare used by the
+        // interpreter.
+        // TODO(mvstanton): Remove code_is_interpreted when full code
+        // is retired from service.
+        if (!code_is_interpreted) continue;
+
+        DCHECK(obj->IsSmi());
+        int op_feedback = static_cast<int>(Smi::cast(obj)->value());
+        if (kind == FeedbackVectorSlotKind::INTERPRETER_COMPARE_IC) {
+          CompareOperationHint hint =
+              CompareOperationHintFromFeedback(op_feedback);
+          if (hint == CompareOperationHint::kAny) {
+            gen++;
+          } else if (hint != CompareOperationHint::kNone) {
+            with++;
+          }
+        } else {
+          DCHECK(kind == FeedbackVectorSlotKind::INTERPRETER_BINARYOP_IC);
+          BinaryOperationHint hint =
+              BinaryOperationHintFromFeedback(op_feedback);
+          if (hint == BinaryOperationHint::kAny) {
+            gen++;
+          } else if (hint != BinaryOperationHint::kNone) {
+            with++;
+          }
+        }
+      } else if (obj->IsWeakCell() || obj->IsFixedArray() || obj->IsString()) {
         with++;
       } else if (obj == megamorphic_sentinel) {
         gen++;
@@ -139,6 +216,7 @@
 
   *with_type_info = with;
   *generic = gen;
+  *vector_ic_count = total;
 }
 
 Handle<Symbol> TypeFeedbackVector::UninitializedSentinel(Isolate* isolate) {
diff --git a/src/type-feedback-vector.cc b/src/type-feedback-vector.cc
index 61f5e8b..30bc2d4 100644
--- a/src/type-feedback-vector.cc
+++ b/src/type-feedback-vector.cc
@@ -102,9 +102,7 @@
 
   Handle<UnseededNumberDictionary> names;
   if (name_count) {
-    names = UnseededNumberDictionary::New(
-        isolate, base::bits::RoundUpToPowerOfTwo32(name_count), TENURED,
-        USE_CUSTOM_MINIMUM_CAPACITY);
+    names = UnseededNumberDictionary::New(isolate, name_count, TENURED);
   }
 
   int name_index = 0;
@@ -114,7 +112,10 @@
     if (SlotRequiresName(kind)) {
       Handle<String> name = spec->GetName(name_index);
       DCHECK(!name.is_null());
-      names = UnseededNumberDictionary::AtNumberPut(names, i, name);
+      Handle<UnseededNumberDictionary> new_names =
+          UnseededNumberDictionary::AtNumberPut(names, i, name);
+      DCHECK_EQ(*new_names, *names);
+      names = new_names;
       name_index++;
     }
   }
@@ -202,6 +203,10 @@
       return "STORE_IC";
     case FeedbackVectorSlotKind::KEYED_STORE_IC:
       return "KEYED_STORE_IC";
+    case FeedbackVectorSlotKind::INTERPRETER_BINARYOP_IC:
+      return "INTERPRETER_BINARYOP_IC";
+    case FeedbackVectorSlotKind::INTERPRETER_COMPARE_IC:
+      return "INTERPRETER_COMPARE_IC";
     case FeedbackVectorSlotKind::GENERAL:
       return "STUB";
     case FeedbackVectorSlotKind::KINDS_NUMBER:
@@ -230,11 +235,13 @@
   const int slot_count = metadata->slot_count();
   const int length = slot_count + kReservedIndexCount;
   if (length == kReservedIndexCount) {
-    return Handle<TypeFeedbackVector>::cast(factory->empty_fixed_array());
+    return Handle<TypeFeedbackVector>::cast(
+        factory->empty_type_feedback_vector());
   }
 
   Handle<FixedArray> array = factory->NewFixedArray(length, TENURED);
   array->set(kMetadataIndex, *metadata);
+  array->set(kInvocationCountIndex, Smi::FromInt(0));
 
   DisallowHeapAllocation no_gc;
 
@@ -250,12 +257,18 @@
     Object* value;
     if (kind == FeedbackVectorSlotKind::LOAD_GLOBAL_IC) {
       value = *factory->empty_weak_cell();
+    } else if (kind == FeedbackVectorSlotKind::INTERPRETER_COMPARE_IC ||
+               kind == FeedbackVectorSlotKind::INTERPRETER_BINARYOP_IC) {
+      value = Smi::FromInt(0);
     } else {
       value = *uninitialized_sentinel;
     }
     array->set(index, value, SKIP_WRITE_BARRIER);
+
+    value = kind == FeedbackVectorSlotKind::CALL_IC ? Smi::FromInt(0)
+                                                    : *uninitialized_sentinel;
     for (int j = 1; j < entry_size; j++) {
-      array->set(index + j, *uninitialized_sentinel, SKIP_WRITE_BARRIER);
+      array->set(index + j, value, SKIP_WRITE_BARRIER);
     }
     i += entry_size;
   }
@@ -334,6 +347,13 @@
           nexus.Clear(shared->code());
           break;
         }
+        case FeedbackVectorSlotKind::INTERPRETER_BINARYOP_IC:
+        case FeedbackVectorSlotKind::INTERPRETER_COMPARE_IC: {
+          DCHECK(Get(slot)->IsSmi());
+          // Don't clear these Smi slots; they hold interpreter feedback.
+          // Set(slot, Smi::FromInt(0));
+          break;
+        }
         case FeedbackVectorSlotKind::GENERAL: {
           if (obj->IsHeapObject()) {
             InstanceType instance_type =
@@ -620,16 +640,25 @@
 
 int CallICNexus::ExtractCallCount() {
   Object* call_count = GetFeedbackExtra();
-  if (call_count->IsSmi()) {
-    int value = Smi::cast(call_count)->value();
-    return value;
-  }
-  return -1;
+  CHECK(call_count->IsSmi());
+  int value = Smi::cast(call_count)->value();
+  return value;
 }
 
+float CallICNexus::ComputeCallFrequency() {
+  double const invocation_count = vector()->invocation_count();
+  double const call_count = ExtractCallCount();
+  return static_cast<float>(call_count / invocation_count);
+}
 
 void CallICNexus::Clear(Code* host) { CallIC::Clear(GetIsolate(), host, this); }
 
+void CallICNexus::ConfigureUninitialized() {
+  Isolate* isolate = GetIsolate();
+  SetFeedback(*TypeFeedbackVector::UninitializedSentinel(isolate),
+              SKIP_WRITE_BARRIER);
+  SetFeedbackExtra(Smi::FromInt(0), SKIP_WRITE_BARRIER);
+}
 
 void CallICNexus::ConfigureMonomorphicArray() {
   Object* feedback = GetFeedback();
@@ -650,10 +679,13 @@
 
 
 void CallICNexus::ConfigureMegamorphic() {
-  FeedbackNexus::ConfigureMegamorphic();
+  SetFeedback(*TypeFeedbackVector::MegamorphicSentinel(GetIsolate()),
+              SKIP_WRITE_BARRIER);
+  Smi* count = Smi::cast(GetFeedbackExtra());
+  int new_count = count->value() + 1;
+  SetFeedbackExtra(Smi::FromInt(new_count), SKIP_WRITE_BARRIER);
 }
 
-
 void CallICNexus::ConfigureMegamorphic(int call_count) {
   SetFeedback(*TypeFeedbackVector::MegamorphicSentinel(GetIsolate()),
               SKIP_WRITE_BARRIER);
@@ -1020,5 +1052,38 @@
   }
   return IsPropertyNameFeedback(feedback) ? PROPERTY : ELEMENT;
 }
+
+InlineCacheState BinaryOpICNexus::StateFromFeedback() const {
+  BinaryOperationHint hint = GetBinaryOperationFeedback();
+  if (hint == BinaryOperationHint::kNone) {
+    return UNINITIALIZED;
+  } else if (hint == BinaryOperationHint::kAny) {
+    return GENERIC;
+  }
+
+  return MONOMORPHIC;
+}
+
+InlineCacheState CompareICNexus::StateFromFeedback() const {
+  CompareOperationHint hint = GetCompareOperationFeedback();
+  if (hint == CompareOperationHint::kNone) {
+    return UNINITIALIZED;
+  } else if (hint == CompareOperationHint::kAny) {
+    return GENERIC;
+  }
+
+  return MONOMORPHIC;
+}
+
+BinaryOperationHint BinaryOpICNexus::GetBinaryOperationFeedback() const {
+  int feedback = Smi::cast(GetFeedback())->value();
+  return BinaryOperationHintFromFeedback(feedback);
+}
+
+CompareOperationHint CompareICNexus::GetCompareOperationFeedback() const {
+  int feedback = Smi::cast(GetFeedback())->value();
+  return CompareOperationHintFromFeedback(feedback);
+}
+
 }  // namespace internal
 }  // namespace v8
diff --git a/src/type-feedback-vector.h b/src/type-feedback-vector.h
index 5355ee7..af69499 100644
--- a/src/type-feedback-vector.h
+++ b/src/type-feedback-vector.h
@@ -10,7 +10,8 @@
 #include "src/base/logging.h"
 #include "src/elements-kind.h"
 #include "src/objects.h"
-#include "src/zone-containers.h"
+#include "src/type-hints.h"
+#include "src/zone/zone-containers.h"
 
 namespace v8 {
 namespace internal {
@@ -27,6 +28,8 @@
   KEYED_LOAD_IC,
   STORE_IC,
   KEYED_STORE_IC,
+  INTERPRETER_BINARYOP_IC,
+  INTERPRETER_COMPARE_IC,
 
   // This is a general purpose slot that occupies one feedback vector element.
   GENERAL,
@@ -67,6 +70,14 @@
     return AddSlot(FeedbackVectorSlotKind::KEYED_STORE_IC);
   }
 
+  FeedbackVectorSlot AddInterpreterBinaryOpICSlot() {
+    return AddSlot(FeedbackVectorSlotKind::INTERPRETER_BINARYOP_IC);
+  }
+
+  FeedbackVectorSlot AddInterpreterCompareICSlot() {
+    return AddSlot(FeedbackVectorSlotKind::INTERPRETER_COMPARE_IC);
+  }
+
   FeedbackVectorSlot AddGeneralSlot() {
     return AddSlot(FeedbackVectorSlotKind::GENERAL);
   }
@@ -207,7 +218,7 @@
   static const char* Kind2String(FeedbackVectorSlotKind kind);
 
  private:
-  static const int kFeedbackVectorSlotKindBits = 4;
+  static const int kFeedbackVectorSlotKindBits = 5;
   STATIC_ASSERT(static_cast<int>(FeedbackVectorSlotKind::KINDS_NUMBER) <
                 (1 << kFeedbackVectorSlotKindBits));
 
@@ -222,11 +233,10 @@
 
 // The shape of the TypeFeedbackVector is an array with:
 // 0: feedback metadata
-// 1: ics_with_types
-// 2: ics_with_generic_info
-// 3: feedback slot #0
+// 1: invocation count
+// 2: feedback slot #0
 // ...
-// 3 + slot_count - 1: feedback slot #(slot_count-1)
+// 2 + slot_count - 1: feedback slot #(slot_count-1)
 //
 class TypeFeedbackVector : public FixedArray {
  public:
@@ -234,9 +244,11 @@
   static inline TypeFeedbackVector* cast(Object* obj);
 
   static const int kMetadataIndex = 0;
-  static const int kReservedIndexCount = 1;
+  static const int kInvocationCountIndex = 1;
+  static const int kReservedIndexCount = 2;
 
-  inline void ComputeCounts(int* with_type_info, int* generic);
+  inline void ComputeCounts(int* with_type_info, int* generic,
+                            int* vector_ic_count, bool code_is_interpreted);
 
   inline bool is_empty() const;
 
@@ -244,6 +256,7 @@
   inline int slot_count() const;
 
   inline TypeFeedbackMetadata* metadata() const;
+  inline int invocation_count() const;
 
   // Conversion from a slot to an integer index to the underlying array.
   static int GetIndex(FeedbackVectorSlot slot) {
@@ -461,6 +474,7 @@
 
   void Clear(Code* host);
 
+  void ConfigureUninitialized() override;
   void ConfigureMonomorphicArray();
   void ConfigureMonomorphic(Handle<JSFunction> function);
   void ConfigureMegamorphic() final;
@@ -481,6 +495,10 @@
   }
 
   int ExtractCallCount();
+
+  // Compute the call frequency based on the call count and the invocation
+  // count (taken from the type feedback vector).
+  float ComputeCallFrequency();
 };
 
 
@@ -548,6 +566,10 @@
       : FeedbackNexus(vector, slot) {
     DCHECK_EQ(FeedbackVectorSlotKind::KEYED_LOAD_IC, vector->GetKind(slot));
   }
+  explicit KeyedLoadICNexus(Isolate* isolate)
+      : FeedbackNexus(
+            TypeFeedbackVector::DummyVector(isolate),
+            FeedbackVectorSlot(TypeFeedbackVector::kDummyKeyedLoadICSlot)) {}
   KeyedLoadICNexus(TypeFeedbackVector* vector, FeedbackVectorSlot slot)
       : FeedbackNexus(vector, slot) {
     DCHECK_EQ(FeedbackVectorSlotKind::KEYED_LOAD_IC, vector->GetKind(slot));
@@ -630,6 +652,72 @@
   InlineCacheState StateFromFeedback() const override;
   Name* FindFirstName() const override;
 };
+
+class BinaryOpICNexus final : public FeedbackNexus {
+ public:
+  BinaryOpICNexus(Handle<TypeFeedbackVector> vector, FeedbackVectorSlot slot)
+      : FeedbackNexus(vector, slot) {
+    DCHECK_EQ(FeedbackVectorSlotKind::INTERPRETER_BINARYOP_IC,
+              vector->GetKind(slot));
+  }
+  BinaryOpICNexus(TypeFeedbackVector* vector, FeedbackVectorSlot slot)
+      : FeedbackNexus(vector, slot) {
+    DCHECK_EQ(FeedbackVectorSlotKind::INTERPRETER_BINARYOP_IC,
+              vector->GetKind(slot));
+  }
+
+  void Clear(Code* host);
+
+  InlineCacheState StateFromFeedback() const final;
+  BinaryOperationHint GetBinaryOperationFeedback() const;
+
+  int ExtractMaps(MapHandleList* maps) const final {
+    // BinaryOpICs don't record map feedback.
+    return 0;
+  }
+  MaybeHandle<Object> FindHandlerForMap(Handle<Map> map) const final {
+    return MaybeHandle<Code>();
+  }
+  bool FindHandlers(List<Handle<Object>>* code_list,
+                    int length = -1) const final {
+    return length == 0;
+  }
+};
+
+class CompareICNexus final : public FeedbackNexus {
+ public:
+  CompareICNexus(Handle<TypeFeedbackVector> vector, FeedbackVectorSlot slot)
+      : FeedbackNexus(vector, slot) {
+    DCHECK_EQ(FeedbackVectorSlotKind::INTERPRETER_COMPARE_IC,
+              vector->GetKind(slot));
+  }
+  CompareICNexus(TypeFeedbackVector* vector, FeedbackVectorSlot slot)
+      : FeedbackNexus(vector, slot) {
+    DCHECK_EQ(FeedbackVectorSlotKind::INTERPRETER_COMPARE_IC,
+              vector->GetKind(slot));
+  }
+
+  void Clear(Code* host);
+
+  InlineCacheState StateFromFeedback() const final;
+  CompareOperationHint GetCompareOperationFeedback() const;
+
+  int ExtractMaps(MapHandleList* maps) const final {
+    // CompareICs don't record map feedback.
+    return 0;
+  }
+  MaybeHandle<Object> FindHandlerForMap(Handle<Map> map) const final {
+    return MaybeHandle<Code>();
+  }
+  bool FindHandlers(List<Handle<Object>>* code_list,
+                    int length = -1) const final {
+    return length == 0;
+  }
+};
+
+inline BinaryOperationHint BinaryOperationHintFromFeedback(int type_feedback);
+inline CompareOperationHint CompareOperationHintFromFeedback(int type_feedback);
+
 }  // namespace internal
 }  // namespace v8
 
diff --git a/src/compiler/type-hints.cc b/src/type-hints.cc
similarity index 95%
rename from src/compiler/type-hints.cc
rename to src/type-hints.cc
index a07a870..ff00eef 100644
--- a/src/compiler/type-hints.cc
+++ b/src/type-hints.cc
@@ -2,11 +2,10 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
-#include "src/compiler/type-hints.h"
+#include "src/type-hints.h"
 
 namespace v8 {
 namespace internal {
-namespace compiler {
 
 std::ostream& operator<<(std::ostream& os, BinaryOperationHint hint) {
   switch (hint) {
@@ -18,6 +17,8 @@
       return os << "Signed32";
     case BinaryOperationHint::kNumberOrOddball:
       return os << "NumberOrOddball";
+    case BinaryOperationHint::kString:
+      return os << "String";
     case BinaryOperationHint::kAny:
       return os << "Any";
   }
@@ -86,6 +87,5 @@
   return os;
 }
 
-}  // namespace compiler
 }  // namespace internal
 }  // namespace v8
diff --git a/src/compiler/type-hints.h b/src/type-hints.h
similarity index 90%
rename from src/compiler/type-hints.h
rename to src/type-hints.h
index ad94491..cdf4709 100644
--- a/src/compiler/type-hints.h
+++ b/src/type-hints.h
@@ -2,15 +2,14 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
-#ifndef V8_COMPILER_TYPE_HINTS_H_
-#define V8_COMPILER_TYPE_HINTS_H_
+#ifndef V8_TYPE_HINTS_H_
+#define V8_TYPE_HINTS_H_
 
 #include "src/base/flags.h"
 #include "src/utils.h"
 
 namespace v8 {
 namespace internal {
-namespace compiler {
 
 // Type hints for an binary operation.
 enum class BinaryOperationHint : uint8_t {
@@ -18,6 +17,7 @@
   kSignedSmall,
   kSigned32,
   kNumberOrOddball,
+  kString,
   kAny
 };
 
@@ -66,8 +66,7 @@
 
 DEFINE_OPERATORS_FOR_FLAGS(ToBooleanHints)
 
-}  // namespace compiler
 }  // namespace internal
 }  // namespace v8
 
-#endif  // V8_COMPILER_TYPE_HINTS_H_
+#endif  // V8_TYPE_HINTS_H_
diff --git a/src/type-info.cc b/src/type-info.cc
index 8289d91..ce0ab6c 100644
--- a/src/type-info.cc
+++ b/src/type-info.cc
@@ -6,7 +6,6 @@
 
 #include "src/ast/ast.h"
 #include "src/code-stubs.h"
-#include "src/compiler.h"
 #include "src/ic/ic.h"
 #include "src/ic/stub-cache.h"
 #include "src/objects-inl.h"
@@ -192,58 +191,129 @@
   return Handle<AllocationSite>::null();
 }
 
+namespace {
 
-void TypeFeedbackOracle::CompareType(TypeFeedbackId id,
-                                     Type** left_type,
-                                     Type** right_type,
-                                     Type** combined_type) {
+AstType* CompareOpHintToType(CompareOperationHint hint) {
+  switch (hint) {
+    case CompareOperationHint::kNone:
+      return AstType::None();
+    case CompareOperationHint::kSignedSmall:
+      return AstType::SignedSmall();
+    case CompareOperationHint::kNumber:
+      return AstType::Number();
+    case CompareOperationHint::kNumberOrOddball:
+      return AstType::NumberOrOddball();
+    case CompareOperationHint::kAny:
+      return AstType::Any();
+  }
+  UNREACHABLE();
+  return AstType::None();
+}
+
+AstType* BinaryOpHintToType(BinaryOperationHint hint) {
+  switch (hint) {
+    case BinaryOperationHint::kNone:
+      return AstType::None();
+    case BinaryOperationHint::kSignedSmall:
+      return AstType::SignedSmall();
+    case BinaryOperationHint::kSigned32:
+      return AstType::Signed32();
+    case BinaryOperationHint::kNumberOrOddball:
+      return AstType::Number();
+    case BinaryOperationHint::kString:
+      return AstType::String();
+    case BinaryOperationHint::kAny:
+      return AstType::Any();
+  }
+  UNREACHABLE();
+  return AstType::None();
+}
+
+}  // namespace
+
+void TypeFeedbackOracle::CompareType(TypeFeedbackId id, FeedbackVectorSlot slot,
+                                     AstType** left_type, AstType** right_type,
+                                     AstType** combined_type) {
   Handle<Object> info = GetInfo(id);
+  // A check for a valid slot is not sufficient here. InstanceOf collects
+  // type feedback in a General slot.
   if (!info->IsCode()) {
-    // For some comparisons we don't have ICs, e.g. LiteralCompareTypeof.
-    *left_type = *right_type = *combined_type = Type::None();
+    // For some comparisons we don't have type feedback, e.g.
+    // LiteralCompareTypeof.
+    *left_type = *right_type = *combined_type = AstType::None();
     return;
   }
-  Handle<Code> code = Handle<Code>::cast(info);
 
+  // Feedback from Ignition. The feedback slot will be allocated and initialized
+  // to AstType::None() even when ignition is not enabled. So it is safe to get
+  // feedback from the type feedback vector.
+  DCHECK(!slot.IsInvalid());
+  CompareICNexus nexus(feedback_vector_, slot);
+  *left_type = *right_type = *combined_type =
+      CompareOpHintToType(nexus.GetCompareOperationFeedback());
+
+  // Merge the feedback from full-codegen if available.
+  Handle<Code> code = Handle<Code>::cast(info);
   Handle<Map> map;
   Map* raw_map = code->FindFirstMap();
   if (raw_map != NULL) Map::TryUpdate(handle(raw_map)).ToHandle(&map);
 
   if (code->is_compare_ic_stub()) {
     CompareICStub stub(code->stub_key(), isolate());
-    *left_type = CompareICState::StateToType(zone(), stub.left());
-    *right_type = CompareICState::StateToType(zone(), stub.right());
-    *combined_type = CompareICState::StateToType(zone(), stub.state(), map);
+    AstType* left_type_from_ic =
+        CompareICState::StateToType(zone(), stub.left());
+    *left_type = AstType::Union(*left_type, left_type_from_ic, zone());
+    AstType* right_type_from_ic =
+        CompareICState::StateToType(zone(), stub.right());
+    *right_type = AstType::Union(*right_type, right_type_from_ic, zone());
+    AstType* combined_type_from_ic =
+        CompareICState::StateToType(zone(), stub.state(), map);
+    *combined_type =
+        AstType::Union(*combined_type, combined_type_from_ic, zone());
   }
 }
 
-
-void TypeFeedbackOracle::BinaryType(TypeFeedbackId id,
-                                    Type** left,
-                                    Type** right,
-                                    Type** result,
+void TypeFeedbackOracle::BinaryType(TypeFeedbackId id, FeedbackVectorSlot slot,
+                                    AstType** left, AstType** right,
+                                    AstType** result,
                                     Maybe<int>* fixed_right_arg,
                                     Handle<AllocationSite>* allocation_site,
                                     Token::Value op) {
   Handle<Object> object = GetInfo(id);
-  if (!object->IsCode()) {
-    // For some binary ops we don't have ICs, e.g. Token::COMMA, but for the
-    // operations covered by the BinaryOpIC we should always have them.
+  if (slot.IsInvalid()) {
+    // For some binary ops we don't have ICs or feedback slots,
+    // e.g. Token::COMMA, but for the operations covered by the BinaryOpIC we
+    // should always have them.
+    DCHECK(!object->IsCode());
     DCHECK(op < BinaryOpICState::FIRST_TOKEN ||
            op > BinaryOpICState::LAST_TOKEN);
-    *left = *right = *result = Type::None();
+    *left = *right = *result = AstType::None();
     *fixed_right_arg = Nothing<int>();
     *allocation_site = Handle<AllocationSite>::null();
     return;
   }
+
+  // Feedback from Ignition. The feedback slot will be allocated and initialized
+  // to AstType::None() even when ignition is not enabled. So it is safe to get
+  // feedback from the type feedback vector.
+  DCHECK(!slot.IsInvalid());
+  BinaryOpICNexus nexus(feedback_vector_, slot);
+  *left = *right = *result =
+      BinaryOpHintToType(nexus.GetBinaryOperationFeedback());
+  *fixed_right_arg = Nothing<int>();
+  *allocation_site = Handle<AllocationSite>::null();
+
+  if (!object->IsCode()) return;
+
+  // Merge the feedback from full-codegen if available.
   Handle<Code> code = Handle<Code>::cast(object);
   DCHECK_EQ(Code::BINARY_OP_IC, code->kind());
   BinaryOpICState state(isolate(), code->extra_ic_state());
   DCHECK_EQ(op, state.op());
 
-  *left = state.GetLeftType();
-  *right = state.GetRightType();
-  *result = state.GetResultType();
+  *left = AstType::Union(*left, state.GetLeftType(), zone());
+  *right = AstType::Union(*right, state.GetRightType(), zone());
+  *result = AstType::Union(*result, state.GetResultType(), zone());
   *fixed_right_arg = state.fixed_right_arg();
 
   AllocationSite* first_allocation_site = code->FindFirstAllocationSite();
@@ -254,14 +324,24 @@
   }
 }
 
-
-Type* TypeFeedbackOracle::CountType(TypeFeedbackId id) {
+AstType* TypeFeedbackOracle::CountType(TypeFeedbackId id,
+                                       FeedbackVectorSlot slot) {
   Handle<Object> object = GetInfo(id);
-  if (!object->IsCode()) return Type::None();
+  if (slot.IsInvalid()) {
+    DCHECK(!object->IsCode());
+    return AstType::None();
+  }
+
+  DCHECK(!slot.IsInvalid());
+  BinaryOpICNexus nexus(feedback_vector_, slot);
+  AstType* type = BinaryOpHintToType(nexus.GetBinaryOperationFeedback());
+
+  if (!object->IsCode()) return type;
+
   Handle<Code> code = Handle<Code>::cast(object);
   DCHECK_EQ(Code::BINARY_OP_IC, code->kind());
   BinaryOpICState state(isolate(), code->extra_ic_state());
-  return state.GetLeftType();
+  return AstType::Union(type, state.GetLeftType(), zone());
 }
 
 
diff --git a/src/type-info.h b/src/type-info.h
index 4e8dc54..06a0c9e 100644
--- a/src/type-info.h
+++ b/src/type-info.h
@@ -6,11 +6,11 @@
 #define V8_TYPE_INFO_H_
 
 #include "src/allocation.h"
+#include "src/ast/ast-types.h"
 #include "src/contexts.h"
 #include "src/globals.h"
 #include "src/parsing/token.h"
-#include "src/types.h"
-#include "src/zone.h"
+#include "src/zone/zone.h"
 
 namespace v8 {
 namespace internal {
@@ -77,20 +77,16 @@
   uint16_t ToBooleanTypes(TypeFeedbackId id);
 
   // Get type information for arithmetic operations and compares.
-  void BinaryType(TypeFeedbackId id,
-                  Type** left,
-                  Type** right,
-                  Type** result,
+  void BinaryType(TypeFeedbackId id, FeedbackVectorSlot slot, AstType** left,
+                  AstType** right, AstType** result,
                   Maybe<int>* fixed_right_arg,
                   Handle<AllocationSite>* allocation_site,
                   Token::Value operation);
 
-  void CompareType(TypeFeedbackId id,
-                   Type** left,
-                   Type** right,
-                   Type** combined);
+  void CompareType(TypeFeedbackId id, FeedbackVectorSlot slot, AstType** left,
+                   AstType** right, AstType** combined);
 
-  Type* CountType(TypeFeedbackId id);
+  AstType* CountType(TypeFeedbackId id, FeedbackVectorSlot slot);
 
   Zone* zone() const { return zone_; }
   Isolate* isolate() const { return isolate_; }
diff --git a/src/types.h b/src/types.h
deleted file mode 100644
index 746cca7..0000000
--- a/src/types.h
+++ /dev/null
@@ -1,982 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_TYPES_H_
-#define V8_TYPES_H_
-
-#include "src/conversions.h"
-#include "src/handles.h"
-#include "src/objects.h"
-#include "src/ostreams.h"
-
-namespace v8 {
-namespace internal {
-
-// SUMMARY
-//
-// A simple type system for compiler-internal use. It is based entirely on
-// union types, and all subtyping hence amounts to set inclusion. Besides the
-// obvious primitive types and some predefined unions, the type language also
-// can express class types (a.k.a. specific maps) and singleton types (i.e.,
-// concrete constants).
-//
-// Types consist of two dimensions: semantic (value range) and representation.
-// Both are related through subtyping.
-//
-//
-// SEMANTIC DIMENSION
-//
-// The following equations and inequations hold for the semantic axis:
-//
-//   None <= T
-//   T <= Any
-//
-//   Number = Signed32 \/ Unsigned32 \/ Double
-//   Smi <= Signed32
-//   Name = String \/ Symbol
-//   UniqueName = InternalizedString \/ Symbol
-//   InternalizedString < String
-//
-//   Receiver = Object \/ Proxy
-//   Array < Object
-//   Function < Object
-//   RegExp < Object
-//   OtherUndetectable < Object
-//   DetectableReceiver = Receiver - OtherUndetectable
-//
-//   Class(map) < T   iff instance_type(map) < T
-//   Constant(x) < T  iff instance_type(map(x)) < T
-//   Array(T) < Array
-//   Function(R, S, T0, T1, ...) < Function
-//   Context(T) < Internal
-//
-// Both structural Array and Function types are invariant in all parameters;
-// relaxing this would make Union and Intersect operations more involved.
-// There is no subtyping relation between Array, Function, or Context types
-// and respective Constant types, since these types cannot be reconstructed
-// for arbitrary heap values.
-// Note also that Constant(x) < Class(map(x)) does _not_ hold, since x's map can
-// change! (Its instance type cannot, however.)
-// TODO(rossberg): the latter is not currently true for proxies, because of fix,
-// but will hold once we implement direct proxies.
-// However, we also define a 'temporal' variant of the subtyping relation that
-// considers the _current_ state only, i.e., Constant(x) <_now Class(map(x)).
-//
-//
-// REPRESENTATIONAL DIMENSION
-//
-// For the representation axis, the following holds:
-//
-//   None <= R
-//   R <= Any
-//
-//   UntaggedInt = UntaggedInt1 \/ UntaggedInt8 \/
-//                 UntaggedInt16 \/ UntaggedInt32
-//   UntaggedFloat = UntaggedFloat32 \/ UntaggedFloat64
-//   UntaggedNumber = UntaggedInt \/ UntaggedFloat
-//   Untagged = UntaggedNumber \/ UntaggedPtr
-//   Tagged = TaggedInt \/ TaggedPtr
-//
-// Subtyping relates the two dimensions, for example:
-//
-//   Number <= Tagged \/ UntaggedNumber
-//   Object <= TaggedPtr \/ UntaggedPtr
-//
-// That holds because the semantic type constructors defined by the API create
-// types that allow for all possible representations, and dually, the ones for
-// representation types initially include all semantic ranges. Representations
-// can then e.g. be narrowed for a given semantic type using intersection:
-//
-//   SignedSmall /\ TaggedInt       (a 'smi')
-//   Number /\ TaggedPtr            (a heap number)
-//
-//
-// RANGE TYPES
-//
-// A range type represents a continuous integer interval by its minimum and
-// maximum value.  Either value may be an infinity, in which case that infinity
-// itself is also included in the range.   A range never contains NaN or -0.
-//
-// If a value v happens to be an integer n, then Constant(v) is considered a
-// subtype of Range(n, n) (and therefore also a subtype of any larger range).
-// In order to avoid large unions, however, it is usually a good idea to use
-// Range rather than Constant.
-//
-//
-// PREDICATES
-//
-// There are two main functions for testing types:
-//
-//   T1->Is(T2)     -- tests whether T1 is included in T2 (i.e., T1 <= T2)
-//   T1->Maybe(T2)  -- tests whether T1 and T2 overlap (i.e., T1 /\ T2 =/= 0)
-//
-// Typically, the former is to be used to select representations (e.g., via
-// T->Is(SignedSmall())), and the latter to check whether a specific case needs
-// handling (e.g., via T->Maybe(Number())).
-//
-// There is no functionality to discover whether a type is a leaf in the
-// lattice. That is intentional. It should always be possible to refine the
-// lattice (e.g., splitting up number types further) without invalidating any
-// existing assumptions or tests.
-// Consequently, do not normally use Equals for type tests, always use Is!
-//
-// The NowIs operator implements state-sensitive subtying, as described above.
-// Any compilation decision based on such temporary properties requires runtime
-// guarding!
-//
-//
-// PROPERTIES
-//
-// Various formal properties hold for constructors, operators, and predicates
-// over types. For example, constructors are injective and subtyping is a
-// complete partial order.
-//
-// See test/cctest/test-types.cc for a comprehensive executable specification,
-// especially with respect to the properties of the more exotic 'temporal'
-// constructors and predicates (those prefixed 'Now').
-//
-//
-// IMPLEMENTATION
-//
-// Internally, all 'primitive' types, and their unions, are represented as
-// bitsets. Bit 0 is reserved for tagging. Class is a heap pointer to the
-// respective map. Only structured types require allocation.
-// Note that the bitset representation is closed under both Union and Intersect.
-
-
-// -----------------------------------------------------------------------------
-// Values for bitset types
-
-// clang-format off
-
-#define MASK_BITSET_TYPE_LIST(V) \
-  V(Representation, 0xffc00000u) \
-  V(Semantic,       0x003ffffeu)
-
-#define REPRESENTATION(k) ((k) & BitsetType::kRepresentation)
-#define SEMANTIC(k)       ((k) & BitsetType::kSemantic)
-
-#define REPRESENTATION_BITSET_TYPE_LIST(V)    \
-  V(None,               0)                    \
-  V(UntaggedBit,        1u << 22 | kSemantic) \
-  V(UntaggedIntegral8,  1u << 23 | kSemantic) \
-  V(UntaggedIntegral16, 1u << 24 | kSemantic) \
-  V(UntaggedIntegral32, 1u << 25 | kSemantic) \
-  V(UntaggedFloat32,    1u << 26 | kSemantic) \
-  V(UntaggedFloat64,    1u << 27 | kSemantic) \
-  V(UntaggedSimd128,    1u << 28 | kSemantic) \
-  V(UntaggedPointer,    1u << 29 | kSemantic) \
-  V(TaggedSigned,       1u << 30 | kSemantic) \
-  V(TaggedPointer,      1u << 31 | kSemantic) \
-  \
-  V(UntaggedIntegral,   kUntaggedBit | kUntaggedIntegral8 |        \
-                        kUntaggedIntegral16 | kUntaggedIntegral32) \
-  V(UntaggedFloat,      kUntaggedFloat32 | kUntaggedFloat64)       \
-  V(UntaggedNumber,     kUntaggedIntegral | kUntaggedFloat)        \
-  V(Untagged,           kUntaggedNumber | kUntaggedPointer)        \
-  V(Tagged,             kTaggedSigned | kTaggedPointer)
-
-#define INTERNAL_BITSET_TYPE_LIST(V)                                      \
-  V(OtherUnsigned31, 1u << 1 | REPRESENTATION(kTagged | kUntaggedNumber)) \
-  V(OtherUnsigned32, 1u << 2 | REPRESENTATION(kTagged | kUntaggedNumber)) \
-  V(OtherSigned32,   1u << 3 | REPRESENTATION(kTagged | kUntaggedNumber)) \
-  V(OtherNumber,     1u << 4 | REPRESENTATION(kTagged | kUntaggedNumber))
-
-#define SEMANTIC_BITSET_TYPE_LIST(V) \
-  V(Negative31,          1u << 5  | REPRESENTATION(kTagged | kUntaggedNumber)) \
-  V(Null,                1u << 6  | REPRESENTATION(kTaggedPointer)) \
-  V(Undefined,           1u << 7  | REPRESENTATION(kTaggedPointer)) \
-  V(Boolean,             1u << 8  | REPRESENTATION(kTaggedPointer)) \
-  V(Unsigned30,          1u << 9  | REPRESENTATION(kTagged | kUntaggedNumber)) \
-  V(MinusZero,           1u << 10 | REPRESENTATION(kTagged | kUntaggedNumber)) \
-  V(NaN,                 1u << 11 | REPRESENTATION(kTagged | kUntaggedNumber)) \
-  V(Symbol,              1u << 12 | REPRESENTATION(kTaggedPointer)) \
-  V(InternalizedString,  1u << 13 | REPRESENTATION(kTaggedPointer)) \
-  V(OtherString,         1u << 14 | REPRESENTATION(kTaggedPointer)) \
-  V(Simd,                1u << 15 | REPRESENTATION(kTaggedPointer)) \
-  V(OtherObject,         1u << 17 | REPRESENTATION(kTaggedPointer)) \
-  V(OtherUndetectable,   1u << 16 | REPRESENTATION(kTaggedPointer)) \
-  V(Proxy,               1u << 18 | REPRESENTATION(kTaggedPointer)) \
-  V(Function,            1u << 19 | REPRESENTATION(kTaggedPointer)) \
-  V(Hole,                1u << 20 | REPRESENTATION(kTaggedPointer)) \
-  V(OtherInternal,       1u << 21 | REPRESENTATION(kTagged | kUntagged)) \
-  \
-  V(Signed31,                   kUnsigned30 | kNegative31) \
-  V(Signed32,                   kSigned31 | kOtherUnsigned31 | kOtherSigned32) \
-  V(Signed32OrMinusZero,        kSigned32 | kMinusZero) \
-  V(Signed32OrMinusZeroOrNaN,   kSigned32 | kMinusZero | kNaN) \
-  V(Negative32,                 kNegative31 | kOtherSigned32) \
-  V(Unsigned31,                 kUnsigned30 | kOtherUnsigned31) \
-  V(Unsigned32,                 kUnsigned30 | kOtherUnsigned31 | \
-                                kOtherUnsigned32) \
-  V(Unsigned32OrMinusZero,      kUnsigned32 | kMinusZero) \
-  V(Unsigned32OrMinusZeroOrNaN, kUnsigned32 | kMinusZero | kNaN) \
-  V(Integral32,                 kSigned32 | kUnsigned32) \
-  V(PlainNumber,                kIntegral32 | kOtherNumber) \
-  V(OrderedNumber,              kPlainNumber | kMinusZero) \
-  V(MinusZeroOrNaN,             kMinusZero | kNaN) \
-  V(Number,                     kOrderedNumber | kNaN) \
-  V(String,                     kInternalizedString | kOtherString) \
-  V(UniqueName,                 kSymbol | kInternalizedString) \
-  V(Name,                       kSymbol | kString) \
-  V(BooleanOrNumber,            kBoolean | kNumber) \
-  V(BooleanOrNullOrNumber,      kBooleanOrNumber | kNull) \
-  V(BooleanOrNullOrUndefined,   kBoolean | kNull | kUndefined) \
-  V(NullOrNumber,               kNull | kNumber) \
-  V(NullOrUndefined,            kNull | kUndefined) \
-  V(Undetectable,               kNullOrUndefined | kOtherUndetectable) \
-  V(NumberOrOddball,            kNumber | kNullOrUndefined | kBoolean | kHole) \
-  V(NumberOrSimdOrString,       kNumber | kSimd | kString) \
-  V(NumberOrString,             kNumber | kString) \
-  V(NumberOrUndefined,          kNumber | kUndefined) \
-  V(PlainPrimitive,             kNumberOrString | kBoolean | kNullOrUndefined) \
-  V(Primitive,                  kSymbol | kSimd | kPlainPrimitive) \
-  V(DetectableReceiver,         kFunction | kOtherObject | kProxy) \
-  V(Object,                     kFunction | kOtherObject | kOtherUndetectable) \
-  V(Receiver,                   kObject | kProxy) \
-  V(StringOrReceiver,           kString | kReceiver) \
-  V(Unique,                     kBoolean | kUniqueName | kNull | kUndefined | \
-                                kReceiver) \
-  V(Internal,                   kHole | kOtherInternal) \
-  V(NonInternal,                kPrimitive | kReceiver) \
-  V(NonNumber,                  kUnique | kString | kInternal) \
-  V(Any,                        0xfffffffeu)
-
-// clang-format on
-
-/*
- * The following diagrams show how integers (in the mathematical sense) are
- * divided among the different atomic numerical types.
- *
- *   ON    OS32     N31     U30     OU31    OU32     ON
- * ______[_______[_______[_______[_______[_______[_______
- *     -2^31   -2^30     0      2^30    2^31    2^32
- *
- * E.g., OtherUnsigned32 (OU32) covers all integers from 2^31 to 2^32-1.
- *
- * Some of the atomic numerical bitsets are internal only (see
- * INTERNAL_BITSET_TYPE_LIST).  To a types user, they should only occur in
- * union with certain other bitsets.  For instance, OtherNumber should only
- * occur as part of PlainNumber.
- */
-
-#define PROPER_BITSET_TYPE_LIST(V) \
-  REPRESENTATION_BITSET_TYPE_LIST(V) \
-  SEMANTIC_BITSET_TYPE_LIST(V)
-
-#define BITSET_TYPE_LIST(V)          \
-  MASK_BITSET_TYPE_LIST(V)           \
-  REPRESENTATION_BITSET_TYPE_LIST(V) \
-  INTERNAL_BITSET_TYPE_LIST(V)       \
-  SEMANTIC_BITSET_TYPE_LIST(V)
-
-class Type;
-
-// -----------------------------------------------------------------------------
-// Bitset types (internal).
-
-class BitsetType {
- public:
-  typedef uint32_t bitset;  // Internal
-
-  enum : uint32_t {
-#define DECLARE_TYPE(type, value) k##type = (value),
-    BITSET_TYPE_LIST(DECLARE_TYPE)
-#undef DECLARE_TYPE
-        kUnusedEOL = 0
-  };
-
-  static bitset SignedSmall();
-  static bitset UnsignedSmall();
-
-  bitset Bitset() {
-    return static_cast<bitset>(reinterpret_cast<uintptr_t>(this) ^ 1u);
-  }
-
-  static bool IsInhabited(bitset bits) {
-    return SEMANTIC(bits) != kNone && REPRESENTATION(bits) != kNone;
-  }
-
-  static bool SemanticIsInhabited(bitset bits) {
-    return SEMANTIC(bits) != kNone;
-  }
-
-  static bool Is(bitset bits1, bitset bits2) {
-    return (bits1 | bits2) == bits2;
-  }
-
-  static double Min(bitset);
-  static double Max(bitset);
-
-  static bitset Glb(Type* type);  // greatest lower bound that's a bitset
-  static bitset Glb(double min, double max);
-  static bitset Lub(Type* type);  // least upper bound that's a bitset
-  static bitset Lub(i::Map* map);
-  static bitset Lub(i::Object* value);
-  static bitset Lub(double value);
-  static bitset Lub(double min, double max);
-  static bitset ExpandInternals(bitset bits);
-
-  static const char* Name(bitset);
-  static void Print(std::ostream& os, bitset);  // NOLINT
-#ifdef DEBUG
-  static void Print(bitset);
-#endif
-
-  static bitset NumberBits(bitset bits);
-
-  static bool IsBitset(Type* type) {
-    return reinterpret_cast<uintptr_t>(type) & 1;
-  }
-
-  static Type* NewForTesting(bitset bits) { return New(bits); }
-
- private:
-  friend class Type;
-
-  static Type* New(bitset bits) {
-    return reinterpret_cast<Type*>(static_cast<uintptr_t>(bits | 1u));
-  }
-
-  struct Boundary {
-    bitset internal;
-    bitset external;
-    double min;
-  };
-  static const Boundary BoundariesArray[];
-  static inline const Boundary* Boundaries();
-  static inline size_t BoundariesSize();
-};
-
-// -----------------------------------------------------------------------------
-// Superclass for non-bitset types (internal).
-class TypeBase {
- protected:
-  friend class Type;
-
-  enum Kind {
-    kClass,
-    kConstant,
-    kContext,
-    kArray,
-    kFunction,
-    kTuple,
-    kUnion,
-    kRange
-  };
-
-  Kind kind() const { return kind_; }
-  explicit TypeBase(Kind kind) : kind_(kind) {}
-
-  static bool IsKind(Type* type, Kind kind) {
-    if (BitsetType::IsBitset(type)) return false;
-    TypeBase* base = reinterpret_cast<TypeBase*>(type);
-    return base->kind() == kind;
-  }
-
-  // The hacky conversion to/from Type*.
-  static Type* AsType(TypeBase* type) { return reinterpret_cast<Type*>(type); }
-  static TypeBase* FromType(Type* type) {
-    return reinterpret_cast<TypeBase*>(type);
-  }
-
- private:
-  Kind kind_;
-};
-
-// -----------------------------------------------------------------------------
-// Class types.
-
-class ClassType : public TypeBase {
- public:
-  i::Handle<i::Map> Map() { return map_; }
-
- private:
-  friend class Type;
-  friend class BitsetType;
-
-  static Type* New(i::Handle<i::Map> map, Zone* zone) {
-    return AsType(new (zone->New(sizeof(ClassType)))
-                      ClassType(BitsetType::Lub(*map), map));
-  }
-
-  static ClassType* cast(Type* type) {
-    DCHECK(IsKind(type, kClass));
-    return static_cast<ClassType*>(FromType(type));
-  }
-
-  ClassType(BitsetType::bitset bitset, i::Handle<i::Map> map)
-      : TypeBase(kClass), bitset_(bitset), map_(map) {}
-
-  BitsetType::bitset Lub() { return bitset_; }
-
-  BitsetType::bitset bitset_;
-  Handle<i::Map> map_;
-};
-
-// -----------------------------------------------------------------------------
-// Constant types.
-
-class ConstantType : public TypeBase {
- public:
-  i::Handle<i::Object> Value() { return object_; }
-
- private:
-  friend class Type;
-  friend class BitsetType;
-
-  static Type* New(i::Handle<i::Object> value, Zone* zone) {
-    BitsetType::bitset bitset = BitsetType::Lub(*value);
-    return AsType(new (zone->New(sizeof(ConstantType)))
-                      ConstantType(bitset, value));
-  }
-
-  static ConstantType* cast(Type* type) {
-    DCHECK(IsKind(type, kConstant));
-    return static_cast<ConstantType*>(FromType(type));
-  }
-
-  ConstantType(BitsetType::bitset bitset, i::Handle<i::Object> object)
-      : TypeBase(kConstant), bitset_(bitset), object_(object) {}
-
-  BitsetType::bitset Lub() { return bitset_; }
-
-  BitsetType::bitset bitset_;
-  Handle<i::Object> object_;
-};
-// TODO(neis): Also cache value if numerical.
-// TODO(neis): Allow restricting the representation.
-
-// -----------------------------------------------------------------------------
-// Range types.
-
-class RangeType : public TypeBase {
- public:
-  struct Limits {
-    double min;
-    double max;
-    Limits(double min, double max) : min(min), max(max) {}
-    explicit Limits(RangeType* range) : min(range->Min()), max(range->Max()) {}
-    bool IsEmpty();
-    static Limits Empty() { return Limits(1, 0); }
-    static Limits Intersect(Limits lhs, Limits rhs);
-    static Limits Union(Limits lhs, Limits rhs);
-  };
-
-  double Min() { return limits_.min; }
-  double Max() { return limits_.max; }
-
- private:
-  friend class Type;
-  friend class BitsetType;
-  friend class UnionType;
-
-  static Type* New(double min, double max, BitsetType::bitset representation,
-                   Zone* zone) {
-    return New(Limits(min, max), representation, zone);
-  }
-
-  static bool IsInteger(double x) {
-    return nearbyint(x) == x && !i::IsMinusZero(x);  // Allows for infinities.
-  }
-
-  static Type* New(Limits lim, BitsetType::bitset representation, Zone* zone) {
-    DCHECK(IsInteger(lim.min) && IsInteger(lim.max));
-    DCHECK(lim.min <= lim.max);
-    DCHECK(REPRESENTATION(representation) == representation);
-    BitsetType::bitset bits =
-        SEMANTIC(BitsetType::Lub(lim.min, lim.max)) | representation;
-
-    return AsType(new (zone->New(sizeof(RangeType))) RangeType(bits, lim));
-  }
-
-  static RangeType* cast(Type* type) {
-    DCHECK(IsKind(type, kRange));
-    return static_cast<RangeType*>(FromType(type));
-  }
-
-  RangeType(BitsetType::bitset bitset, Limits limits)
-      : TypeBase(kRange), bitset_(bitset), limits_(limits) {}
-
-  BitsetType::bitset Lub() { return bitset_; }
-
-  BitsetType::bitset bitset_;
-  Limits limits_;
-};
-
-// -----------------------------------------------------------------------------
-// Context types.
-
-class ContextType : public TypeBase {
- public:
-  Type* Outer() { return outer_; }
-
- private:
-  friend class Type;
-
-  static Type* New(Type* outer, Zone* zone) {
-    return AsType(new (zone->New(sizeof(ContextType))) ContextType(outer));
-  }
-
-  static ContextType* cast(Type* type) {
-    DCHECK(IsKind(type, kContext));
-    return static_cast<ContextType*>(FromType(type));
-  }
-
-  explicit ContextType(Type* outer) : TypeBase(kContext), outer_(outer) {}
-
-  Type* outer_;
-};
-
-// -----------------------------------------------------------------------------
-// Array types.
-
-class ArrayType : public TypeBase {
- public:
-  Type* Element() { return element_; }
-
- private:
-  friend class Type;
-
-  explicit ArrayType(Type* element) : TypeBase(kArray), element_(element) {}
-
-  static Type* New(Type* element, Zone* zone) {
-    return AsType(new (zone->New(sizeof(ArrayType))) ArrayType(element));
-  }
-
-  static ArrayType* cast(Type* type) {
-    DCHECK(IsKind(type, kArray));
-    return static_cast<ArrayType*>(FromType(type));
-  }
-
-  Type* element_;
-};
-
-// -----------------------------------------------------------------------------
-// Superclass for types with variable number of type fields.
-class StructuralType : public TypeBase {
- public:
-  int LengthForTesting() { return Length(); }
-
- protected:
-  friend class Type;
-
-  int Length() { return length_; }
-
-  Type* Get(int i) {
-    DCHECK(0 <= i && i < this->Length());
-    return elements_[i];
-  }
-
-  void Set(int i, Type* type) {
-    DCHECK(0 <= i && i < this->Length());
-    elements_[i] = type;
-  }
-
-  void Shrink(int length) {
-    DCHECK(2 <= length && length <= this->Length());
-    length_ = length;
-  }
-
-  StructuralType(Kind kind, int length, i::Zone* zone)
-      : TypeBase(kind), length_(length) {
-    elements_ = reinterpret_cast<Type**>(zone->New(sizeof(Type*) * length));
-  }
-
- private:
-  int length_;
-  Type** elements_;
-};
-
-// -----------------------------------------------------------------------------
-// Function types.
-
-class FunctionType : public StructuralType {
- public:
-  int Arity() { return this->Length() - 2; }
-  Type* Result() { return this->Get(0); }
-  Type* Receiver() { return this->Get(1); }
-  Type* Parameter(int i) { return this->Get(2 + i); }
-
-  void InitParameter(int i, Type* type) { this->Set(2 + i, type); }
-
- private:
-  friend class Type;
-
-  FunctionType(Type* result, Type* receiver, int arity, Zone* zone)
-      : StructuralType(kFunction, 2 + arity, zone) {
-    Set(0, result);
-    Set(1, receiver);
-  }
-
-  static Type* New(Type* result, Type* receiver, int arity, Zone* zone) {
-    return AsType(new (zone->New(sizeof(FunctionType)))
-                      FunctionType(result, receiver, arity, zone));
-  }
-
-  static FunctionType* cast(Type* type) {
-    DCHECK(IsKind(type, kFunction));
-    return static_cast<FunctionType*>(FromType(type));
-  }
-};
-
-// -----------------------------------------------------------------------------
-// Tuple types.
-
-class TupleType : public StructuralType {
- public:
-  int Arity() { return this->Length(); }
-  Type* Element(int i) { return this->Get(i); }
-
-  void InitElement(int i, Type* type) { this->Set(i, type); }
-
- private:
-  friend class Type;
-
-  TupleType(int length, Zone* zone) : StructuralType(kTuple, length, zone) {}
-
-  static Type* New(int length, Zone* zone) {
-    return AsType(new (zone->New(sizeof(TupleType))) TupleType(length, zone));
-  }
-
-  static TupleType* cast(Type* type) {
-    DCHECK(IsKind(type, kTuple));
-    return static_cast<TupleType*>(FromType(type));
-  }
-};
-
-// -----------------------------------------------------------------------------
-// Union types (internal).
-// A union is a structured type with the following invariants:
-// - its length is at least 2
-// - at most one field is a bitset, and it must go into index 0
-// - no field is a union
-// - no field is a subtype of any other field
-class UnionType : public StructuralType {
- private:
-  friend Type;
-  friend BitsetType;
-
-  UnionType(int length, Zone* zone) : StructuralType(kUnion, length, zone) {}
-
-  static Type* New(int length, Zone* zone) {
-    return AsType(new (zone->New(sizeof(UnionType))) UnionType(length, zone));
-  }
-
-  static UnionType* cast(Type* type) {
-    DCHECK(IsKind(type, kUnion));
-    return static_cast<UnionType*>(FromType(type));
-  }
-
-  bool Wellformed();
-};
-
-class Type {
- public:
-  typedef BitsetType::bitset bitset;  // Internal
-
-// Constructors.
-#define DEFINE_TYPE_CONSTRUCTOR(type, value) \
-  static Type* type() { return BitsetType::New(BitsetType::k##type); }
-  PROPER_BITSET_TYPE_LIST(DEFINE_TYPE_CONSTRUCTOR)
-#undef DEFINE_TYPE_CONSTRUCTOR
-
-  static Type* SignedSmall() {
-    return BitsetType::New(BitsetType::SignedSmall());
-  }
-  static Type* UnsignedSmall() {
-    return BitsetType::New(BitsetType::UnsignedSmall());
-  }
-
-  static Type* Class(i::Handle<i::Map> map, Zone* zone) {
-    return ClassType::New(map, zone);
-  }
-  static Type* Constant(i::Handle<i::Object> value, Zone* zone) {
-    return ConstantType::New(value, zone);
-  }
-  static Type* Range(double min, double max, Zone* zone) {
-    return RangeType::New(min, max, REPRESENTATION(BitsetType::kTagged |
-                                                   BitsetType::kUntaggedNumber),
-                          zone);
-  }
-  static Type* Context(Type* outer, Zone* zone) {
-    return ContextType::New(outer, zone);
-  }
-  static Type* Array(Type* element, Zone* zone) {
-    return ArrayType::New(element, zone);
-  }
-  static Type* Function(Type* result, Type* receiver, int arity, Zone* zone) {
-    return FunctionType::New(result, receiver, arity, zone);
-  }
-  static Type* Function(Type* result, Zone* zone) {
-    return Function(result, Any(), 0, zone);
-  }
-  static Type* Function(Type* result, Type* param0, Zone* zone) {
-    Type* function = Function(result, Any(), 1, zone);
-    function->AsFunction()->InitParameter(0, param0);
-    return function;
-  }
-  static Type* Function(Type* result, Type* param0, Type* param1, Zone* zone) {
-    Type* function = Function(result, Any(), 2, zone);
-    function->AsFunction()->InitParameter(0, param0);
-    function->AsFunction()->InitParameter(1, param1);
-    return function;
-  }
-  static Type* Function(Type* result, Type* param0, Type* param1, Type* param2,
-                        Zone* zone) {
-    Type* function = Function(result, Any(), 3, zone);
-    function->AsFunction()->InitParameter(0, param0);
-    function->AsFunction()->InitParameter(1, param1);
-    function->AsFunction()->InitParameter(2, param2);
-    return function;
-  }
-  static Type* Function(Type* result, int arity, Type** params, Zone* zone) {
-    Type* function = Function(result, Any(), arity, zone);
-    for (int i = 0; i < arity; ++i) {
-      function->AsFunction()->InitParameter(i, params[i]);
-    }
-    return function;
-  }
-  static Type* Tuple(Type* first, Type* second, Type* third, Zone* zone) {
-    Type* tuple = TupleType::New(3, zone);
-    tuple->AsTuple()->InitElement(0, first);
-    tuple->AsTuple()->InitElement(1, second);
-    tuple->AsTuple()->InitElement(2, third);
-    return tuple;
-  }
-
-#define CONSTRUCT_SIMD_TYPE(NAME, Name, name, lane_count, lane_type) \
-  static Type* Name(Isolate* isolate, Zone* zone);
-  SIMD128_TYPES(CONSTRUCT_SIMD_TYPE)
-#undef CONSTRUCT_SIMD_TYPE
-
-  static Type* Union(Type* type1, Type* type2, Zone* zone);
-  static Type* Intersect(Type* type1, Type* type2, Zone* zone);
-
-  static Type* Of(double value, Zone* zone) {
-    return BitsetType::New(BitsetType::ExpandInternals(BitsetType::Lub(value)));
-  }
-  static Type* Of(i::Object* value, Zone* zone) {
-    return BitsetType::New(BitsetType::ExpandInternals(BitsetType::Lub(value)));
-  }
-  static Type* Of(i::Handle<i::Object> value, Zone* zone) {
-    return Of(*value, zone);
-  }
-
-  // Extraction of components.
-  static Type* Representation(Type* t, Zone* zone);
-  static Type* Semantic(Type* t, Zone* zone);
-
-  // Predicates.
-  bool IsInhabited() { return BitsetType::IsInhabited(this->BitsetLub()); }
-
-  bool Is(Type* that) { return this == that || this->SlowIs(that); }
-  bool Maybe(Type* that);
-  bool Equals(Type* that) { return this->Is(that) && that->Is(this); }
-
-  // Equivalent to Constant(val)->Is(this), but avoiding allocation.
-  bool Contains(i::Object* val);
-  bool Contains(i::Handle<i::Object> val) { return this->Contains(*val); }
-
-  // State-dependent versions of the above that consider subtyping between
-  // a constant and its map class.
-  static Type* NowOf(i::Object* value, Zone* zone);
-  static Type* NowOf(i::Handle<i::Object> value, Zone* zone) {
-    return NowOf(*value, zone);
-  }
-  bool NowIs(Type* that);
-  bool NowContains(i::Object* val);
-  bool NowContains(i::Handle<i::Object> val) { return this->NowContains(*val); }
-
-  bool NowStable();
-
-  // Inspection.
-  bool IsRange() { return IsKind(TypeBase::kRange); }
-  bool IsClass() { return IsKind(TypeBase::kClass); }
-  bool IsConstant() { return IsKind(TypeBase::kConstant); }
-  bool IsContext() { return IsKind(TypeBase::kContext); }
-  bool IsArray() { return IsKind(TypeBase::kArray); }
-  bool IsFunction() { return IsKind(TypeBase::kFunction); }
-  bool IsTuple() { return IsKind(TypeBase::kTuple); }
-
-  ClassType* AsClass() { return ClassType::cast(this); }
-  ConstantType* AsConstant() { return ConstantType::cast(this); }
-  RangeType* AsRange() { return RangeType::cast(this); }
-  ContextType* AsContext() { return ContextType::cast(this); }
-  ArrayType* AsArray() { return ArrayType::cast(this); }
-  FunctionType* AsFunction() { return FunctionType::cast(this); }
-  TupleType* AsTuple() { return TupleType::cast(this); }
-
-  // Minimum and maximum of a numeric type.
-  // These functions do not distinguish between -0 and +0.  If the type equals
-  // kNaN, they return NaN; otherwise kNaN is ignored.  Only call these
-  // functions on subtypes of Number.
-  double Min();
-  double Max();
-
-  // Extracts a range from the type: if the type is a range or a union
-  // containing a range, that range is returned; otherwise, NULL is returned.
-  Type* GetRange();
-
-  static bool IsInteger(i::Object* x);
-  static bool IsInteger(double x) {
-    return nearbyint(x) == x && !i::IsMinusZero(x);  // Allows for infinities.
-  }
-
-  int NumClasses();
-  int NumConstants();
-
-  template <class T>
-  class Iterator {
-   public:
-    bool Done() const { return index_ < 0; }
-    i::Handle<T> Current();
-    void Advance();
-
-   private:
-    friend class Type;
-
-    Iterator() : index_(-1) {}
-    explicit Iterator(Type* type) : type_(type), index_(-1) { Advance(); }
-
-    inline bool matches(Type* type);
-    inline Type* get_type();
-
-    Type* type_;
-    int index_;
-  };
-
-  Iterator<i::Map> Classes() {
-    if (this->IsBitset()) return Iterator<i::Map>();
-    return Iterator<i::Map>(this);
-  }
-  Iterator<i::Object> Constants() {
-    if (this->IsBitset()) return Iterator<i::Object>();
-    return Iterator<i::Object>(this);
-  }
-
-  // Printing.
-
-  enum PrintDimension { BOTH_DIMS, SEMANTIC_DIM, REPRESENTATION_DIM };
-
-  void PrintTo(std::ostream& os, PrintDimension dim = BOTH_DIMS);  // NOLINT
-
-#ifdef DEBUG
-  void Print();
-#endif
-
-  // Helpers for testing.
-  bool IsBitsetForTesting() { return IsBitset(); }
-  bool IsUnionForTesting() { return IsUnion(); }
-  bitset AsBitsetForTesting() { return AsBitset(); }
-  UnionType* AsUnionForTesting() { return AsUnion(); }
-
- private:
-  // Friends.
-  template <class>
-  friend class Iterator;
-  friend BitsetType;
-  friend UnionType;
-
-  // Internal inspection.
-  bool IsKind(TypeBase::Kind kind) { return TypeBase::IsKind(this, kind); }
-
-  bool IsNone() { return this == None(); }
-  bool IsAny() { return this == Any(); }
-  bool IsBitset() { return BitsetType::IsBitset(this); }
-  bool IsUnion() { return IsKind(TypeBase::kUnion); }
-
-  bitset AsBitset() {
-    DCHECK(this->IsBitset());
-    return reinterpret_cast<BitsetType*>(this)->Bitset();
-  }
-  UnionType* AsUnion() { return UnionType::cast(this); }
-
-  bitset Representation();
-
-  // Auxiliary functions.
-  bool SemanticMaybe(Type* that);
-
-  bitset BitsetGlb() { return BitsetType::Glb(this); }
-  bitset BitsetLub() { return BitsetType::Lub(this); }
-
-  bool SlowIs(Type* that);
-  bool SemanticIs(Type* that);
-
-  static bool Overlap(RangeType* lhs, RangeType* rhs);
-  static bool Contains(RangeType* lhs, RangeType* rhs);
-  static bool Contains(RangeType* range, ConstantType* constant);
-  static bool Contains(RangeType* range, i::Object* val);
-
-  static int UpdateRange(Type* type, UnionType* result, int size, Zone* zone);
-
-  static RangeType::Limits IntersectRangeAndBitset(Type* range, Type* bits,
-                                                   Zone* zone);
-  static RangeType::Limits ToLimits(bitset bits, Zone* zone);
-
-  bool SimplyEquals(Type* that);
-
-  static int AddToUnion(Type* type, UnionType* result, int size, Zone* zone);
-  static int IntersectAux(Type* type, Type* other, UnionType* result, int size,
-                          RangeType::Limits* limits, Zone* zone);
-  static Type* NormalizeUnion(Type* unioned, int size, Zone* zone);
-  static Type* NormalizeRangeAndBitset(Type* range, bitset* bits, Zone* zone);
-};
-
-// -----------------------------------------------------------------------------
-// Type bounds. A simple struct to represent a pair of lower/upper types.
-
-struct Bounds {
-  Type* lower;
-  Type* upper;
-
-  Bounds()
-      :  // Make sure accessing uninitialized bounds crashes big-time.
-        lower(nullptr),
-        upper(nullptr) {}
-  explicit Bounds(Type* t) : lower(t), upper(t) {}
-  Bounds(Type* l, Type* u) : lower(l), upper(u) { DCHECK(lower->Is(upper)); }
-
-  // Unrestricted bounds.
-  static Bounds Unbounded() { return Bounds(Type::None(), Type::Any()); }
-
-  // Meet: both b1 and b2 are known to hold.
-  static Bounds Both(Bounds b1, Bounds b2, Zone* zone) {
-    Type* lower = Type::Union(b1.lower, b2.lower, zone);
-    Type* upper = Type::Intersect(b1.upper, b2.upper, zone);
-    // Lower bounds are considered approximate, correct as necessary.
-    if (!lower->Is(upper)) lower = upper;
-    return Bounds(lower, upper);
-  }
-
-  // Join: either b1 or b2 is known to hold.
-  static Bounds Either(Bounds b1, Bounds b2, Zone* zone) {
-    Type* lower = Type::Intersect(b1.lower, b2.lower, zone);
-    Type* upper = Type::Union(b1.upper, b2.upper, zone);
-    return Bounds(lower, upper);
-  }
-
-  static Bounds NarrowLower(Bounds b, Type* t, Zone* zone) {
-    Type* lower = Type::Union(b.lower, t, zone);
-    // Lower bounds are considered approximate, correct as necessary.
-    if (!lower->Is(b.upper)) lower = b.upper;
-    return Bounds(lower, b.upper);
-  }
-  static Bounds NarrowUpper(Bounds b, Type* t, Zone* zone) {
-    Type* lower = b.lower;
-    Type* upper = Type::Intersect(b.upper, t, zone);
-    // Lower bounds are considered approximate, correct as necessary.
-    if (!lower->Is(upper)) lower = upper;
-    return Bounds(lower, upper);
-  }
-
-  bool Narrows(Bounds that) {
-    return that.lower->Is(this->lower) && this->upper->Is(that.upper);
-  }
-};
-
-}  // namespace internal
-}  // namespace v8
-
-#endif  // V8_TYPES_H_
diff --git a/src/unicode-decoder.h b/src/unicode-decoder.h
index c030841..35d23a2 100644
--- a/src/unicode-decoder.h
+++ b/src/unicode-decoder.h
@@ -7,10 +7,11 @@
 
 #include <sys/types.h>
 #include "src/globals.h"
+#include "src/utils.h"
 
 namespace unibrow {
 
-class Utf8DecoderBase {
+class V8_EXPORT_PRIVATE Utf8DecoderBase {
  public:
   // Initialization done in subclass.
   inline Utf8DecoderBase();
diff --git a/src/unicode.cc b/src/unicode.cc
index db98be8..015f8a2 100644
--- a/src/unicode.cc
+++ b/src/unicode.cc
@@ -190,8 +190,7 @@
   }
 }
 
-
-static inline size_t NonASCIISequenceLength(byte first) {
+static inline uint8_t NonASCIISequenceLength(byte first) {
   // clang-format off
   static const uint8_t lengths[256] = {
       // The first 128 entries correspond to ASCII characters.
@@ -229,80 +228,137 @@
 // This method decodes an UTF-8 value according to RFC 3629.
 uchar Utf8::CalculateValue(const byte* str, size_t max_length, size_t* cursor) {
   size_t length = NonASCIISequenceLength(str[0]);
-  if (length == 0 || max_length < length) {
-    *cursor += 1;
-    return kBadChar;
+
+  // Check continuation characters.
+  size_t max_count = std::min(length, max_length);
+  size_t count = 1;
+  while (count < max_count && IsContinuationCharacter(str[count])) {
+    count++;
   }
-  if (length == 2) {
-    if (!IsContinuationCharacter(str[1])) {
-      *cursor += 1;
-      return kBadChar;
-    }
-    *cursor += 2;
-    return ((str[0] << 6) + str[1]) - 0x00003080;
-  }
+  *cursor += count;
+
+  // There must be enough continuation characters.
+  if (count != length) return kBadChar;
+
+  // Check overly long sequences & other conditions.
   if (length == 3) {
-    switch (str[0]) {
-      case 0xE0:
-        // Overlong three-byte sequence.
-        if (str[1] < 0xA0 || str[1] > 0xBF) {
-          *cursor += 1;
-          return kBadChar;
-        }
-        break;
-      case 0xED:
-        // High and low surrogate halves.
-        if (str[1] < 0x80 || str[1] > 0x9F) {
-          *cursor += 1;
-          return kBadChar;
-        }
-        break;
-      default:
-        if (!IsContinuationCharacter(str[1])) {
-          *cursor += 1;
-          return kBadChar;
-        }
-    }
-    if (!IsContinuationCharacter(str[2])) {
-      *cursor += 1;
+    if (str[0] == 0xE0 && (str[1] < 0xA0 || str[1] > 0xBF)) {
+      // Overlong three-byte sequence?
+      return kBadChar;
+    } else if (str[0] == 0xED && (str[1] < 0x80 || str[1] > 0x9F)) {
+      // High and low surrogate halves?
       return kBadChar;
     }
-    *cursor += 3;
-    return ((str[0] << 12) + (str[1] << 6) + str[2]) - 0x000E2080;
-  }
-  DCHECK(length == 4);
-  switch (str[0]) {
-    case 0xF0:
+  } else if (length == 4) {
+    if (str[0] == 0xF0 && (str[1] < 0x90 || str[1] > 0xBF)) {
       // Overlong four-byte sequence.
-      if (str[1] < 0x90 || str[1] > 0xBF) {
-        *cursor += 1;
-        return kBadChar;
-      }
-      break;
-    case 0xF4:
+      return kBadChar;
+    } else if (str[0] == 0xF4 && (str[1] < 0x80 || str[1] > 0x8F)) {
       // Code points outside of the unicode range.
-      if (str[1] < 0x80 || str[1] > 0x8F) {
-        *cursor += 1;
-        return kBadChar;
-      }
-      break;
-    default:
-      if (!IsContinuationCharacter(str[1])) {
-        *cursor += 1;
-        return kBadChar;
-      }
+      return kBadChar;
+    }
   }
-  if (!IsContinuationCharacter(str[2])) {
-    *cursor += 1;
+
+  // All errors have been handled, so we only have to assemble the result.
+  switch (length) {
+    case 1:
+      return str[0];
+    case 2:
+      return ((str[0] << 6) + str[1]) - 0x00003080;
+    case 3:
+      return ((str[0] << 12) + (str[1] << 6) + str[2]) - 0x000E2080;
+    case 4:
+      return ((str[0] << 18) + (str[1] << 12) + (str[2] << 6) + str[3]) -
+             0x03C82080;
+  }
+
+  UNREACHABLE();
+  return kBadChar;
+}
+
+uchar Utf8::ValueOfIncremental(byte next, Utf8IncrementalBuffer* buffer) {
+  DCHECK_NOT_NULL(buffer);
+
+  // The common case: 1-byte Utf8 (and no incomplete char in the buffer)
+  if (V8_LIKELY(next <= kMaxOneByteChar && *buffer == 0)) {
+    return static_cast<uchar>(next);
+  }
+
+  if (*buffer == 0) {
+    // We're at the start of a new character.
+    uint32_t kind = NonASCIISequenceLength(next);
+    if (kind >= 2 && kind <= 4) {
+      // Start of 2..4 byte character, and no buffer.
+
+      // The mask for the lower bits depends on the kind, and is
+      // 0x1F, 0x0F, 0x07 for kinds 2, 3, 4 respectively. We can get that
+      // with one shift.
+      uint8_t mask = 0x7f >> kind;
+
+      // Store the kind in the top nibble, and kind - 1 (i.e., remaining bytes)
+      // in the 2nd nibble, and the value in the bottom three bytes. The 2nd
+      // nibble is intended as a counter of how many bytes are still needed.
+      *buffer = kind << 28 | (kind - 1) << 24 | (next & mask);
+      return kIncomplete;
+    } else {
+      // No buffer, and not the start of a 1-byte char (handled at the
+      // beginning), and not the start of a 2..4 byte char? Bad char.
+      *buffer = 0;
+      return kBadChar;
+    }
+  } else if (*buffer <= 0xff) {
+    // We have one unprocessed byte left (from the last else case in this if
+    // statement).
+    uchar previous = *buffer;
+    *buffer = 0;
+    uchar t = ValueOfIncremental(previous, buffer);
+    if (t == kIncomplete) {
+      // If we have an incomplete character, process both the previous and the
+      // next byte at once.
+      return ValueOfIncremental(next, buffer);
+    } else {
+      // Otherwise, process the previous byte and save the next byte for next
+      // time.
+      DCHECK_EQ(0, *buffer);
+      *buffer = next;
+      return t;
+    }
+  } else if (IsContinuationCharacter(next)) {
+    // We're inside of a character, as described by buffer.
+
+    // Extract the sequence length (top nibble) and remaining-byte counter.
+    uint8_t bytes_expected = *buffer >> 28;
+    uint8_t bytes_left = (*buffer >> 24) & 0x0f;
+    bytes_left--;
+    // Update the value.
+    uint32_t value = ((*buffer & 0xffffff) << 6) | (next & 0x3F);
+    if (bytes_left) {
+      *buffer = (bytes_expected << 28 | bytes_left << 24 | value);
+      return kIncomplete;
+    } else {
+      *buffer = 0;
+      bool sequence_was_too_long = (bytes_expected == 2 && value < 0x80) ||
+                                   (bytes_expected == 3 && value < 0x800);
+      return sequence_was_too_long ? kBadChar : value;
+    }
+  } else {
+    // Within a character, but not a continuation character? Then the
+    // previous char was a bad char. But we need to save the current
+    // one.
+    *buffer = next;
     return kBadChar;
   }
-  if (!IsContinuationCharacter(str[3])) {
-    *cursor += 1;
-    return kBadChar;
+}
+
+uchar Utf8::ValueOfIncrementalFinish(Utf8IncrementalBuffer* buffer) {
+  DCHECK_NOT_NULL(buffer);
+  if (*buffer == 0) {
+    return kBufferEmpty;
+  } else {
+    // Process left-over chars. An incomplete char at the end maps to kBadChar.
+    uchar t = ValueOfIncremental(0, buffer);
+    return (t == kIncomplete) ? kBadChar : t;
   }
-  *cursor += 4;
-  return ((str[0] << 18) + (str[1] << 12) + (str[2] << 6) + str[3]) -
-         0x03C82080;
 }
 
 bool Utf8::Validate(const byte* bytes, size_t length) {
diff --git a/src/unicode.h b/src/unicode.h
index 35717bc..1299a8f 100644
--- a/src/unicode.h
+++ b/src/unicode.h
@@ -141,6 +141,8 @@
   // The unicode replacement character, used to signal invalid unicode
   // sequences (e.g. an orphan surrogate) when converting to a UTF-8 encoding.
   static const uchar kBadChar = 0xFFFD;
+  static const uchar kBufferEmpty = 0x0;
+  static const uchar kIncomplete = 0xFFFFFFFC;  // any non-valid code point.
   static const unsigned kMaxEncodedSize   = 4;
   static const unsigned kMaxOneByteChar   = 0x7f;
   static const unsigned kMaxTwoByteChar   = 0x7ff;
@@ -156,6 +158,11 @@
   static const unsigned kMax16BitCodeUnitSize  = 3;
   static inline uchar ValueOf(const byte* str, size_t length, size_t* cursor);
 
+  typedef uint32_t Utf8IncrementalBuffer;
+  static uchar ValueOfIncremental(byte next_byte,
+                                  Utf8IncrementalBuffer* buffer);
+  static uchar ValueOfIncrementalFinish(Utf8IncrementalBuffer* buffer);
+
   // Excludes non-characters from the set of valid code points.
   static inline bool IsValidCharacter(uchar c);
 
diff --git a/src/utils.cc b/src/utils.cc
index 16b5b7c..ef640c3 100644
--- a/src/utils.cc
+++ b/src/utils.cc
@@ -387,8 +387,8 @@
   }
 }
 
-
-MemCopyUint8Function memcopy_uint8_function = &MemCopyUint8Wrapper;
+V8_EXPORT_PRIVATE MemCopyUint8Function memcopy_uint8_function =
+    &MemCopyUint8Wrapper;
 MemCopyUint16Uint8Function memcopy_uint16_uint8_function =
     &MemCopyUint16Uint8Wrapper;
 // Defined in codegen-arm.cc.
@@ -398,7 +398,8 @@
     Isolate* isolate, MemCopyUint16Uint8Function stub);
 
 #elif V8_OS_POSIX && V8_HOST_ARCH_MIPS
-MemCopyUint8Function memcopy_uint8_function = &MemCopyUint8Wrapper;
+V8_EXPORT_PRIVATE MemCopyUint8Function memcopy_uint8_function =
+    &MemCopyUint8Wrapper;
 // Defined in codegen-mips.cc.
 MemCopyUint8Function CreateMemCopyUint8Function(Isolate* isolate,
                                                 MemCopyUint8Function stub);
diff --git a/src/utils.h b/src/utils.h
index 8eca392..314ea9b 100644
--- a/src/utils.h
+++ b/src/utils.h
@@ -234,6 +234,10 @@
 }
 
 inline double Pow(double x, double y) {
+  if (y == 0.0) return 1.0;
+  if (std::isnan(y) || ((x == 1 || x == -1) && std::isinf(y))) {
+    return std::numeric_limits<double>::quiet_NaN();
+  }
 #if (defined(__MINGW64_VERSION_MAJOR) &&                              \
      (!defined(__MINGW64_VERSION_RC) || __MINGW64_VERSION_RC < 1)) || \
     defined(V8_OS_AIX)
@@ -433,7 +437,7 @@
 const int kMinComplexMemCopy = 64;
 
 // Copy memory area. No restrictions.
-void MemMove(void* dest, const void* src, size_t size);
+V8_EXPORT_PRIVATE void MemMove(void* dest, const void* src, size_t size);
 typedef void (*MemMoveFunction)(void* dest, const void* src, size_t size);
 
 // Keep the distinction of "move" vs. "copy" for the benefit of other
@@ -444,7 +448,7 @@
 #elif defined(V8_HOST_ARCH_ARM)
 typedef void (*MemCopyUint8Function)(uint8_t* dest, const uint8_t* src,
                                      size_t size);
-extern MemCopyUint8Function memcopy_uint8_function;
+V8_EXPORT_PRIVATE extern MemCopyUint8Function memcopy_uint8_function;
 V8_INLINE void MemCopyUint8Wrapper(uint8_t* dest, const uint8_t* src,
                                    size_t chars) {
   memcpy(dest, src, chars);
@@ -455,7 +459,8 @@
   (*memcopy_uint8_function)(reinterpret_cast<uint8_t*>(dest),
                             reinterpret_cast<const uint8_t*>(src), size);
 }
-V8_INLINE void MemMove(void* dest, const void* src, size_t size) {
+V8_EXPORT_PRIVATE V8_INLINE void MemMove(void* dest, const void* src,
+                                         size_t size) {
   memmove(dest, src, size);
 }
 
@@ -473,7 +478,7 @@
 #elif defined(V8_HOST_ARCH_MIPS)
 typedef void (*MemCopyUint8Function)(uint8_t* dest, const uint8_t* src,
                                      size_t size);
-extern MemCopyUint8Function memcopy_uint8_function;
+V8_EXPORT_PRIVATE extern MemCopyUint8Function memcopy_uint8_function;
 V8_INLINE void MemCopyUint8Wrapper(uint8_t* dest, const uint8_t* src,
                                    size_t chars) {
   memcpy(dest, src, chars);
@@ -484,7 +489,8 @@
   (*memcopy_uint8_function)(reinterpret_cast<uint8_t*>(dest),
                             reinterpret_cast<const uint8_t*>(src), size);
 }
-V8_INLINE void MemMove(void* dest, const void* src, size_t size) {
+V8_EXPORT_PRIVATE V8_INLINE void MemMove(void* dest, const void* src,
+                                         size_t size) {
   memmove(dest, src, size);
 }
 #else
@@ -492,7 +498,8 @@
 V8_INLINE void MemCopy(void* dest, const void* src, size_t size) {
   memcpy(dest, src, size);
 }
-V8_INLINE void MemMove(void* dest, const void* src, size_t size) {
+V8_EXPORT_PRIVATE V8_INLINE void MemMove(void* dest, const void* src,
+                                         size_t size) {
   memmove(dest, src, size);
 }
 const int kMinComplexMemCopy = 16 * kPointerSize;
diff --git a/src/v8.gyp b/src/v8.gyp
index 1adb2fe..9a38247 100644
--- a/src/v8.gyp
+++ b/src/v8.gyp
@@ -34,10 +34,11 @@
     'warmup_script%': "",
     'v8_extra_library_files%': [],
     'v8_experimental_extra_library_files%': [],
+    'v8_enable_inspector%': 0,
     'mksnapshot_exec': '<(PRODUCT_DIR)/<(EXECUTABLE_PREFIX)mksnapshot<(EXECUTABLE_SUFFIX)',
     'mkpeephole_exec': '<(PRODUCT_DIR)/<(EXECUTABLE_PREFIX)mkpeephole<(EXECUTABLE_SUFFIX)',
   },
-  'includes': ['../gypfiles/toolchain.gypi', '../gypfiles/features.gypi'],
+  'includes': ['../gypfiles/toolchain.gypi', '../gypfiles/features.gypi', 'inspector/inspector.gypi'],
   'targets': [
     {
       'target_name': 'v8',
@@ -60,12 +61,10 @@
             '..',
           ],
           'defines': [
-            'V8_SHARED',
             'BUILDING_V8_SHARED',
           ],
           'direct_dependent_settings': {
             'defines': [
-              'V8_SHARED',
               'USING_V8_SHARED',
             ],
           },
@@ -163,12 +162,10 @@
         }],
         ['component=="shared_library"', {
           'defines': [
-            'V8_SHARED',
             'BUILDING_V8_SHARED',
           ],
           'direct_dependent_settings': {
             'defines': [
-              'V8_SHARED',
               'USING_V8_SHARED',
             ],
           },
@@ -258,7 +255,6 @@
         ['component=="shared_library"', {
           'defines': [
             'BUILDING_V8_SHARED',
-            'V8_SHARED',
           ],
         }],
       ]
@@ -285,12 +281,10 @@
             }],
             ['component=="shared_library"', {
               'defines': [
-                'V8_SHARED',
                 'BUILDING_V8_SHARED',
               ],
               'direct_dependent_settings': {
                 'defines': [
-                  'V8_SHARED',
                   'USING_V8_SHARED',
                 ],
               },
@@ -451,10 +445,14 @@
         'ast/ast-numbering.h',
         'ast/ast-traversal-visitor.h',
         'ast/ast-type-bounds.h',
+        'ast/ast-types.cc',
+        'ast/ast-types.h',
         'ast/ast-value-factory.cc',
         'ast/ast-value-factory.h',
         'ast/ast.cc',
         'ast/ast.h',
+        'ast/compile-time-value.cc',
+        'ast/compile-time-value.h',
         'ast/context-slot-cache.cc',
         'ast/context-slot-cache.h',
         'ast/modules.cc',
@@ -462,7 +460,6 @@
         'ast/prettyprinter.cc',
         'ast/prettyprinter.h',
         'ast/scopeinfo.cc',
-        'ast/scopeinfo.h',
         'ast/scopes.cc',
         'ast/scopes.h',
         'ast/variables.cc',
@@ -498,12 +495,14 @@
         'builtins/builtins-handler.cc',
         'builtins/builtins-internal.cc',
         'builtins/builtins-interpreter.cc',
+        'builtins/builtins-iterator.cc',
         'builtins/builtins-json.cc',
         'builtins/builtins-math.cc',
         'builtins/builtins-number.cc',
         'builtins/builtins-object.cc',
         'builtins/builtins-proxy.cc',
         'builtins/builtins-reflect.cc',
+        'builtins/builtins-regexp.cc',
         'builtins/builtins-sharedarraybuffer.cc',
         'builtins/builtins-string.cc',
         'builtins/builtins-symbol.cc',
@@ -534,6 +533,8 @@
         'compilation-cache.h',
         'compilation-dependencies.cc',
         'compilation-dependencies.h',
+        'compilation-info.cc',
+        'compilation-info.h',
         'compilation-statistics.cc',
         'compilation-statistics.h',
         'compiler/access-builder.cc',
@@ -583,14 +584,14 @@
         'compiler/effect-control-linearizer.h',
         'compiler/escape-analysis.cc',
         'compiler/escape-analysis.h',
-        "compiler/escape-analysis-reducer.cc",
-        "compiler/escape-analysis-reducer.h",
+        'compiler/escape-analysis-reducer.cc',
+        'compiler/escape-analysis-reducer.h',
         'compiler/frame.cc',
         'compiler/frame.h',
         'compiler/frame-elider.cc',
         'compiler/frame-elider.h',
-        "compiler/frame-states.cc",
-        "compiler/frame-states.h",
+        'compiler/frame-states.cc',
+        'compiler/frame-states.h',
         'compiler/gap-resolver.cc',
         'compiler/gap-resolver.h',
         'compiler/graph-reducer.cc',
@@ -661,6 +662,8 @@
         'compiler/machine-operator-reducer.h',
         'compiler/machine-operator.cc',
         'compiler/machine-operator.h',
+        'compiler/machine-graph-verifier.cc',
+        'compiler/machine-graph-verifier.h',
         'compiler/memory-optimizer.cc',
         'compiler/memory-optimizer.h',
         'compiler/move-optimizer.cc',
@@ -720,10 +723,14 @@
         'compiler/store-store-elimination.h',
         'compiler/tail-call-optimization.cc',
         'compiler/tail-call-optimization.h',
+        'compiler/types.cc',
+        'compiler/types.h',
+        'compiler/type-cache.cc',
+        'compiler/type-cache.h',
         'compiler/type-hint-analyzer.cc',
         'compiler/type-hint-analyzer.h',
-        'compiler/type-hints.cc',
-        'compiler/type-hints.h',
+        'compiler/typed-optimization.cc',
+        'compiler/typed-optimization.h',
         'compiler/typer.cc',
         'compiler/typer.h',
         'compiler/unwinding-info-writer.h',
@@ -949,6 +956,7 @@
         'ic/call-optimization.h',
         'ic/handler-compiler.cc',
         'ic/handler-compiler.h',
+        'ic/handler-configuration.h',
         'ic/ic-inl.h',
         'ic/ic-state.cc',
         'ic/ic-state.h',
@@ -978,6 +986,8 @@
         'interpreter/bytecode-generator.h',
         'interpreter/bytecode-label.cc',
         'interpreter/bytecode-label.h',
+        'interpreter/bytecode-operands.cc',
+        'interpreter/bytecode-operands.h',
         'interpreter/bytecode-peephole-optimizer.cc',
         'interpreter/bytecode-peephole-optimizer.h',
         'interpreter/bytecode-peephole-table.h',
@@ -985,7 +995,6 @@
         'interpreter/bytecode-pipeline.h',
         'interpreter/bytecode-register.cc',
         'interpreter/bytecode-register.h',
-        'interpreter/bytecode-register-allocator.cc',
         'interpreter/bytecode-register-allocator.h',
         'interpreter/bytecode-register-optimizer.cc',
         'interpreter/bytecode-register-optimizer.h',
@@ -1023,6 +1032,9 @@
         'log-utils.h',
         'log.cc',
         'log.h',
+        'lookup-cache-inl.h',
+        'lookup-cache.cc',
+        'lookup-cache.h',
         'lookup.cc',
         'lookup.h',
         'macro-assembler.h',
@@ -1040,6 +1052,8 @@
         'objects.h',
         'ostreams.cc',
         'ostreams.h',
+        'parsing/duplicate-finder.cc',
+        'parsing/duplicate-finder.h',
         'parsing/expression-classifier.h',
         'parsing/func-name-inferrer.cc',
         'parsing/func-name-inferrer.h',
@@ -1091,6 +1105,8 @@
         'profiler/strings-storage.h',
         'profiler/tick-sample.cc',
         'profiler/tick-sample.h',
+        'profiler/tracing-cpu-profiler.cc',
+        'profiler/tracing-cpu-profiler.h',
         'profiler/unbound-queue-inl.h',
         'profiler/unbound-queue.h',
         'property-descriptor.cc',
@@ -1199,15 +1215,13 @@
         'transitions-inl.h',
         'transitions.cc',
         'transitions.h',
-        'type-cache.cc',
-        'type-cache.h',
         'type-feedback-vector-inl.h',
         'type-feedback-vector.cc',
         'type-feedback-vector.h',
+        'type-hints.cc',
+        'type-hints.h',
         'type-info.cc',
         'type-info.h',
-        'types.cc',
-        'types.h',
         'unicode-inl.h',
         'unicode.cc',
         'unicode.h',
@@ -1235,8 +1249,6 @@
         'wasm/ast-decoder.cc',
         'wasm/ast-decoder.h',
         'wasm/decoder.h',
-        'wasm/encoder.cc',
-        'wasm/encoder.h',
         'wasm/leb-helper.h',
         'wasm/module-decoder.cc',
         'wasm/module-decoder.h',
@@ -1253,16 +1265,22 @@
         'wasm/wasm-macro-gen.h',
         'wasm/wasm-module.cc',
         'wasm/wasm-module.h',
+        'wasm/wasm-module-builder.cc',
+        'wasm/wasm-module-builder.h',
         'wasm/wasm-interpreter.cc',
         'wasm/wasm-interpreter.h',
         'wasm/wasm-opcodes.cc',
         'wasm/wasm-opcodes.h',
         'wasm/wasm-result.cc',
         'wasm/wasm-result.h',
-        'zone.cc',
-        'zone.h',
-        'zone-allocator.h',
-        'zone-containers.h',
+        'zone/accounting-allocator.cc',
+        'zone/accounting-allocator.h',
+        'zone/zone-segment.cc',
+        'zone/zone-segment.h',
+        'zone/zone.cc',
+        'zone/zone.h',
+        'zone/zone-allocator.h',
+        'zone/zone-containers.h',
       ],
       'conditions': [
         ['want_separate_host_toolset==1', {
@@ -1399,6 +1417,8 @@
             'ia32/interface-descriptors-ia32.cc',
             'ia32/macro-assembler-ia32.cc',
             'ia32/macro-assembler-ia32.h',
+            'ia32/simulator-ia32.cc',
+            'ia32/simulator-ia32.h',
             'builtins/ia32/builtins-ia32.cc',
             'compiler/ia32/code-generator-ia32.cc',
             'compiler/ia32/instruction-codes-ia32.h',
@@ -1438,6 +1458,8 @@
             'x87/interface-descriptors-x87.cc',
             'x87/macro-assembler-x87.cc',
             'x87/macro-assembler-x87.h',
+            'x87/simulator-x87.cc',
+            'x87/simulator-x87.h',
             'builtins/x87/builtins-x87.cc',
             'compiler/x87/code-generator-x87.cc',
             'compiler/x87/instruction-codes-x87.h',
@@ -1546,9 +1568,15 @@
             'regexp/mips64/regexp-macro-assembler-mips64.h',
           ],
         }],
-        ['v8_target_arch=="x64" or v8_target_arch=="x32"', {
+        ['v8_target_arch=="x64"', {
           'sources': [  ### gcmole(arch:x64) ###
             'builtins/x64/builtins-x64.cc',
+            'compiler/x64/code-generator-x64.cc',
+            'compiler/x64/instruction-codes-x64.h',
+            'compiler/x64/instruction-scheduler-x64.cc',
+            'compiler/x64/instruction-selector-x64.cc',
+            'compiler/x64/unwinding-info-writer-x64.h',
+            'compiler/x64/unwinding-info-writer-x64.cc',
             'crankshaft/x64/lithium-codegen-x64.cc',
             'crankshaft/x64/lithium-codegen-x64.h',
             'crankshaft/x64/lithium-gap-resolver-x64.cc',
@@ -1565,11 +1593,15 @@
             'x64/cpu-x64.cc',
             'x64/deoptimizer-x64.cc',
             'x64/disasm-x64.cc',
+            'x64/eh-frame-x64.cc',
             'x64/frames-x64.cc',
             'x64/frames-x64.h',
             'x64/interface-descriptors-x64.cc',
             'x64/macro-assembler-x64.cc',
             'x64/macro-assembler-x64.h',
+            'x64/simulator-x64.cc',
+            'x64/simulator-x64.h',
+            'x64/sse-instr.h',
             'debug/x64/debug-x64.cc',
             'full-codegen/x64/full-codegen-x64.cc',
             'ic/x64/access-compiler-x64.cc',
@@ -1579,17 +1611,7 @@
             'ic/x64/stub-cache-x64.cc',
             'regexp/x64/regexp-macro-assembler-x64.cc',
             'regexp/x64/regexp-macro-assembler-x64.h',
-          ],
-        }],
-        ['v8_target_arch=="x64"', {
-          'sources': [
-            'compiler/x64/code-generator-x64.cc',
-            'compiler/x64/instruction-codes-x64.h',
-            'compiler/x64/instruction-scheduler-x64.cc',
-            'compiler/x64/instruction-selector-x64.cc',
-            'compiler/x64/unwinding-info-writer-x64.h',
-            'compiler/x64/unwinding-info-writer-x64.cc',
-            'x64/eh-frame-x64.cc',
+            'third_party/valgrind/valgrind.h',
           ],
         }],
         ['v8_target_arch=="ppc" or v8_target_arch=="ppc64"', {
@@ -1691,7 +1713,6 @@
         ['component=="shared_library"', {
           'defines': [
             'BUILDING_V8_SHARED',
-            'V8_SHARED',
           ],
         }],
         ['v8_postmortem_support=="true"', {
@@ -1723,6 +1744,16 @@
             'i18n.h',
           ],
         }],
+        ['v8_enable_inspector==1', {
+          'sources': [
+            '<@(inspector_all_sources)'
+          ],
+          'dependencies': [
+            'inspector/inspector.gyp:protocol_generated_sources',
+            'inspector/inspector.gyp:inspector_injected_script',
+            'inspector/inspector.gyp:inspector_debugger_script',
+          ],
+        }],
         ['OS=="win" and v8_enable_i18n_support==1', {
           'dependencies': [
             '<(icu_gyp_path):icudata',
@@ -1740,8 +1771,6 @@
         '..',
       ],
       'sources': [
-        'base/accounting-allocator.cc',
-        'base/accounting-allocator.h',
         'base/adapters.h',
         'base/atomic-utils.h',
         'base/atomicops.h',
@@ -1775,6 +1804,7 @@
         'base/functional.cc',
         'base/functional.h',
         'base/hashmap.h',
+        'base/hashmap-entry.h',
         'base/ieee754.cc',
         'base/ieee754.h',
         'base/iterator.h',
@@ -2171,17 +2201,16 @@
           'js/regexp.js',
           'js/arraybuffer.js',
           'js/typedarray.js',
-          'js/iterator-prototype.js',
           'js/collection.js',
           'js/weak-collection.js',
           'js/collection-iterator.js',
           'js/promise.js',
           'js/messages.js',
           'js/array-iterator.js',
-          'js/string-iterator.js',
           'js/templates.js',
           'js/spread.js',
           'js/proxy.js',
+          'js/async-await.js',
           'debug/mirrors.js',
           'debug/debug.js',
           'debug/liveedit.js',
@@ -2192,7 +2221,6 @@
           'js/harmony-atomics.js',
           'js/harmony-simd.js',
           'js/harmony-string-padding.js',
-          'js/harmony-async-await.js'
         ],
         'libraries_bin_file': '<(SHARED_INTERMEDIATE_DIR)/libraries.bin',
         'libraries_experimental_bin_file': '<(SHARED_INTERMEDIATE_DIR)/libraries-experimental.bin',
@@ -2202,8 +2230,8 @@
           ['v8_enable_i18n_support==1', {
             'library_files': ['js/i18n.js'],
             'experimental_library_files': [
+              'js/datetime-format-to-parts.js',
               'js/icu-case-mapping.js',
-              'js/intl-extra.js',
              ],
           }],
         ],
@@ -2402,7 +2430,10 @@
         '..',
        ],
       'sources': [
+        'interpreter/bytecode-operands.h',
+        'interpreter/bytecode-operands.cc',
         'interpreter/bytecode-peephole-table.h',
+        'interpreter/bytecode-traits.h',
         'interpreter/bytecodes.h',
         'interpreter/bytecodes.cc',
         'interpreter/mkpeephole.cc'
diff --git a/src/value-serializer.cc b/src/value-serializer.cc
index 0af4838..1d2e36d 100644
--- a/src/value-serializer.cc
+++ b/src/value-serializer.cc
@@ -7,16 +7,19 @@
 #include <type_traits>
 
 #include "src/base/logging.h"
+#include "src/conversions.h"
 #include "src/factory.h"
 #include "src/handles-inl.h"
 #include "src/isolate.h"
 #include "src/objects-inl.h"
 #include "src/objects.h"
+#include "src/transitions.h"
 
 namespace v8 {
 namespace internal {
 
 static const uint32_t kLatestVersion = 9;
+static const int kPretenureThreshold = 100 * KB;
 
 template <typename T>
 static size_t BytesNeededForVarint(T value) {
@@ -82,12 +85,54 @@
   // Regular expression, UTF-8 encoding. byteLength:uint32_t, raw data,
   // flags:uint32_t.
   kRegExp = 'R',
+  // Beginning of a JS map.
+  kBeginJSMap = ';',
+  // End of a JS map. length:uint32_t.
+  kEndJSMap = ':',
+  // Beginning of a JS set.
+  kBeginJSSet = '\'',
+  // End of a JS set. length:uint32_t.
+  kEndJSSet = ',',
+  // Array buffer. byteLength:uint32_t, then raw data.
+  kArrayBuffer = 'B',
+  // Array buffer (transferred). transferID:uint32_t
+  kArrayBufferTransfer = 't',
+  // View into an array buffer.
+  // subtag:ArrayBufferViewTag, byteOffset:uint32_t, byteLength:uint32_t
+  // For typed arrays, byteOffset and byteLength must be divisible by the size
+  // of the element.
+  // Note: kArrayBufferView is special, and should have an ArrayBuffer (or an
+  // ObjectReference to one) serialized just before it. This is a quirk arising
+  // from the previous stack-based implementation.
+  kArrayBufferView = 'V',
+  // Shared array buffer (transferred). transferID:uint32_t
+  kSharedArrayBufferTransfer = 'u',
 };
 
-ValueSerializer::ValueSerializer(Isolate* isolate)
+namespace {
+
+enum class ArrayBufferViewTag : uint8_t {
+  kInt8Array = 'b',
+  kUint8Array = 'B',
+  kUint8ClampedArray = 'C',
+  kInt16Array = 'w',
+  kUint16Array = 'W',
+  kInt32Array = 'd',
+  kUint32Array = 'D',
+  kFloat32Array = 'f',
+  kFloat64Array = 'F',
+  kDataView = '?',
+};
+
+}  // namespace
+
+ValueSerializer::ValueSerializer(Isolate* isolate,
+                                 v8::ValueSerializer::Delegate* delegate)
     : isolate_(isolate),
+      delegate_(delegate),
       zone_(isolate->allocator()),
-      id_map_(isolate->heap(), &zone_) {}
+      id_map_(isolate->heap(), &zone_),
+      array_buffer_transfer_map_(isolate->heap(), &zone_) {}
 
 ValueSerializer::~ValueSerializer() {}
 
@@ -150,6 +195,11 @@
                  reinterpret_cast<const uint8_t*>(chars.end()));
 }
 
+void ValueSerializer::WriteRawBytes(const void* source, size_t length) {
+  const uint8_t* begin = reinterpret_cast<const uint8_t*>(source);
+  buffer_.insert(buffer_.end(), begin, begin + length);
+}
+
 uint8_t* ValueSerializer::ReserveRawBytes(size_t bytes) {
   if (!bytes) return nullptr;
   auto old_size = buffer_.size();
@@ -157,6 +207,20 @@
   return &buffer_[old_size];
 }
 
+void ValueSerializer::WriteUint32(uint32_t value) {
+  WriteVarint<uint32_t>(value);
+}
+
+void ValueSerializer::WriteUint64(uint64_t value) {
+  WriteVarint<uint64_t>(value);
+}
+
+void ValueSerializer::TransferArrayBuffer(uint32_t transfer_id,
+                                          Handle<JSArrayBuffer> array_buffer) {
+  DCHECK(!array_buffer_transfer_map_.Find(array_buffer));
+  array_buffer_transfer_map_.Set(array_buffer, transfer_id);
+}
+
 Maybe<bool> ValueSerializer::WriteObject(Handle<Object> object) {
   if (object->IsSmi()) {
     WriteSmi(Smi::cast(*object));
@@ -172,15 +236,33 @@
     case MUTABLE_HEAP_NUMBER_TYPE:
       WriteHeapNumber(HeapNumber::cast(*object));
       return Just(true);
+    case JS_TYPED_ARRAY_TYPE:
+    case JS_DATA_VIEW_TYPE: {
+      // Despite being JSReceivers, these have their wrapped buffer serialized
+      // first. That makes this logic a little quirky, because it needs to
+      // happen before we assign object IDs.
+      // TODO(jbroman): It may be possible to avoid materializing a typed
+      // array's buffer here.
+      Handle<JSArrayBufferView> view = Handle<JSArrayBufferView>::cast(object);
+      if (!id_map_.Find(view)) {
+        Handle<JSArrayBuffer> buffer(
+            view->IsJSTypedArray()
+                ? Handle<JSTypedArray>::cast(view)->GetBuffer()
+                : handle(JSArrayBuffer::cast(view->buffer()), isolate_));
+        if (!WriteJSReceiver(buffer).FromMaybe(false)) return Nothing<bool>();
+      }
+      return WriteJSReceiver(view);
+    }
     default:
       if (object->IsString()) {
         WriteString(Handle<String>::cast(object));
         return Just(true);
       } else if (object->IsJSReceiver()) {
         return WriteJSReceiver(Handle<JSReceiver>::cast(object));
+      } else {
+        ThrowDataCloneError(MessageTemplate::kDataCloneError, object);
+        return Nothing<bool>();
       }
-      UNIMPLEMENTED();
-      return Nothing<bool>();
   }
 }
 
@@ -267,20 +349,27 @@
 
   // Eliminate callable and exotic objects, which should not be serialized.
   InstanceType instance_type = receiver->map()->instance_type();
-  if (receiver->IsCallable() || instance_type <= LAST_SPECIAL_RECEIVER_TYPE) {
+  if (receiver->IsCallable() || (instance_type <= LAST_SPECIAL_RECEIVER_TYPE &&
+                                 instance_type != JS_SPECIAL_API_OBJECT_TYPE)) {
+    ThrowDataCloneError(MessageTemplate::kDataCloneError, receiver);
     return Nothing<bool>();
   }
 
   // If we are at the end of the stack, abort. This function may recurse.
-  if (StackLimitCheck(isolate_).HasOverflowed()) return Nothing<bool>();
+  STACK_CHECK(isolate_, Nothing<bool>());
 
   HandleScope scope(isolate_);
   switch (instance_type) {
     case JS_ARRAY_TYPE:
       return WriteJSArray(Handle<JSArray>::cast(receiver));
     case JS_OBJECT_TYPE:
-    case JS_API_OBJECT_TYPE:
-      return WriteJSObject(Handle<JSObject>::cast(receiver));
+    case JS_API_OBJECT_TYPE: {
+      Handle<JSObject> js_object = Handle<JSObject>::cast(receiver);
+      return js_object->GetInternalFieldCount() ? WriteHostObject(js_object)
+                                                : WriteJSObject(js_object);
+    }
+    case JS_SPECIAL_API_OBJECT_TYPE:
+      return WriteHostObject(Handle<JSObject>::cast(receiver));
     case JS_DATE_TYPE:
       WriteJSDate(JSDate::cast(*receiver));
       return Just(true);
@@ -289,21 +378,76 @@
     case JS_REGEXP_TYPE:
       WriteJSRegExp(JSRegExp::cast(*receiver));
       return Just(true);
+    case JS_MAP_TYPE:
+      return WriteJSMap(Handle<JSMap>::cast(receiver));
+    case JS_SET_TYPE:
+      return WriteJSSet(Handle<JSSet>::cast(receiver));
+    case JS_ARRAY_BUFFER_TYPE:
+      return WriteJSArrayBuffer(JSArrayBuffer::cast(*receiver));
+    case JS_TYPED_ARRAY_TYPE:
+    case JS_DATA_VIEW_TYPE:
+      return WriteJSArrayBufferView(JSArrayBufferView::cast(*receiver));
     default:
-      UNIMPLEMENTED();
-      break;
+      ThrowDataCloneError(MessageTemplate::kDataCloneError, receiver);
+      return Nothing<bool>();
   }
   return Nothing<bool>();
 }
 
 Maybe<bool> ValueSerializer::WriteJSObject(Handle<JSObject> object) {
+  DCHECK_GT(object->map()->instance_type(), LAST_CUSTOM_ELEMENTS_RECEIVER);
+  const bool can_serialize_fast =
+      object->HasFastProperties() && object->elements()->length() == 0;
+  if (!can_serialize_fast) return WriteJSObjectSlow(object);
+
+  Handle<Map> map(object->map(), isolate_);
+  WriteTag(SerializationTag::kBeginJSObject);
+
+  // Write out fast properties as long as they are only data properties and the
+  // map doesn't change.
+  uint32_t properties_written = 0;
+  bool map_changed = false;
+  for (int i = 0; i < map->NumberOfOwnDescriptors(); i++) {
+    Handle<Name> key(map->instance_descriptors()->GetKey(i), isolate_);
+    if (!key->IsString()) continue;
+    PropertyDetails details = map->instance_descriptors()->GetDetails(i);
+    if (details.IsDontEnum()) continue;
+
+    Handle<Object> value;
+    if (V8_LIKELY(!map_changed)) map_changed = *map != object->map();
+    if (V8_LIKELY(!map_changed && details.type() == DATA)) {
+      FieldIndex field_index = FieldIndex::ForDescriptor(*map, i);
+      value = JSObject::FastPropertyAt(object, details.representation(),
+                                       field_index);
+    } else {
+      // This logic should essentially match WriteJSObjectPropertiesSlow.
+      // If the property is no longer found, do not serialize it.
+      // This could happen if a getter deleted the property.
+      LookupIterator it(isolate_, object, key, LookupIterator::OWN);
+      if (!it.IsFound()) continue;
+      if (!Object::GetProperty(&it).ToHandle(&value)) return Nothing<bool>();
+    }
+
+    if (!WriteObject(key).FromMaybe(false) ||
+        !WriteObject(value).FromMaybe(false)) {
+      return Nothing<bool>();
+    }
+    properties_written++;
+  }
+
+  WriteTag(SerializationTag::kEndJSObject);
+  WriteVarint<uint32_t>(properties_written);
+  return Just(true);
+}
+
+Maybe<bool> ValueSerializer::WriteJSObjectSlow(Handle<JSObject> object) {
   WriteTag(SerializationTag::kBeginJSObject);
   Handle<FixedArray> keys;
   uint32_t properties_written;
   if (!KeyAccumulator::GetKeys(object, KeyCollectionMode::kOwnOnly,
                                ENUMERABLE_STRINGS)
            .ToHandle(&keys) ||
-      !WriteJSObjectProperties(object, keys).To(&properties_written)) {
+      !WriteJSObjectPropertiesSlow(object, keys).To(&properties_written)) {
     return Nothing<bool>();
   }
   WriteTag(SerializationTag::kEndJSObject);
@@ -331,7 +475,46 @@
     // format changes.
     WriteTag(SerializationTag::kBeginDenseJSArray);
     WriteVarint<uint32_t>(length);
-    for (uint32_t i = 0; i < length; i++) {
+    uint32_t i = 0;
+
+    // Fast paths. Note that FAST_ELEMENTS in particular can bail due to the
+    // structure of the elements changing.
+    switch (array->GetElementsKind()) {
+      case FAST_SMI_ELEMENTS: {
+        Handle<FixedArray> elements(FixedArray::cast(array->elements()),
+                                    isolate_);
+        for (; i < length; i++) WriteSmi(Smi::cast(elements->get(i)));
+        break;
+      }
+      case FAST_DOUBLE_ELEMENTS: {
+        Handle<FixedDoubleArray> elements(
+            FixedDoubleArray::cast(array->elements()), isolate_);
+        for (; i < length; i++) {
+          WriteTag(SerializationTag::kDouble);
+          WriteDouble(elements->get_scalar(i));
+        }
+        break;
+      }
+      case FAST_ELEMENTS: {
+        Handle<Object> old_length(array->length(), isolate_);
+        for (; i < length; i++) {
+          if (array->length() != *old_length ||
+              array->GetElementsKind() != FAST_ELEMENTS) {
+            // Fall back to slow path.
+            break;
+          }
+          Handle<Object> element(FixedArray::cast(array->elements())->get(i),
+                                 isolate_);
+          if (!WriteObject(element).FromMaybe(false)) return Nothing<bool>();
+        }
+        break;
+      }
+      default:
+        break;
+    }
+
+    // If there are elements remaining, serialize them slowly.
+    for (; i < length; i++) {
       // Serializing the array's elements can have arbitrary side effects, so we
       // cannot rely on still having fast elements, even if it did to begin
       // with.
@@ -342,6 +525,7 @@
         return Nothing<bool>();
       }
     }
+
     KeyAccumulator accumulator(isolate_, KeyCollectionMode::kOwnOnly,
                                ENUMERABLE_STRINGS);
     if (!accumulator.CollectOwnPropertyNames(array, array).FromMaybe(false)) {
@@ -350,7 +534,7 @@
     Handle<FixedArray> keys =
         accumulator.GetKeys(GetKeysConversion::kConvertToString);
     uint32_t properties_written;
-    if (!WriteJSObjectProperties(array, keys).To(&properties_written)) {
+    if (!WriteJSObjectPropertiesSlow(array, keys).To(&properties_written)) {
       return Nothing<bool>();
     }
     WriteTag(SerializationTag::kEndDenseJSArray);
@@ -364,7 +548,7 @@
     if (!KeyAccumulator::GetKeys(array, KeyCollectionMode::kOwnOnly,
                                  ENUMERABLE_STRINGS)
              .ToHandle(&keys) ||
-        !WriteJSObjectProperties(array, keys).To(&properties_written)) {
+        !WriteJSObjectPropertiesSlow(array, keys).To(&properties_written)) {
       return Nothing<bool>();
     }
     WriteTag(SerializationTag::kEndSparseJSArray);
@@ -401,6 +585,7 @@
                           v8::String::NO_NULL_TERMINATION);
   } else {
     DCHECK(inner_value->IsSymbol());
+    ThrowDataCloneError(MessageTemplate::kDataCloneError, value);
     return Nothing<bool>();
   }
   return Just(true);
@@ -417,7 +602,135 @@
   WriteVarint(static_cast<uint32_t>(regexp->GetFlags()));
 }
 
-Maybe<uint32_t> ValueSerializer::WriteJSObjectProperties(
+Maybe<bool> ValueSerializer::WriteJSMap(Handle<JSMap> map) {
+  // First copy the key-value pairs, since getters could mutate them.
+  Handle<OrderedHashMap> table(OrderedHashMap::cast(map->table()));
+  int length = table->NumberOfElements() * 2;
+  Handle<FixedArray> entries = isolate_->factory()->NewFixedArray(length);
+  {
+    DisallowHeapAllocation no_gc;
+    Oddball* the_hole = isolate_->heap()->the_hole_value();
+    int capacity = table->UsedCapacity();
+    int result_index = 0;
+    for (int i = 0; i < capacity; i++) {
+      Object* key = table->KeyAt(i);
+      if (key == the_hole) continue;
+      entries->set(result_index++, key);
+      entries->set(result_index++, table->ValueAt(i));
+    }
+    DCHECK_EQ(result_index, length);
+  }
+
+  // Then write it out.
+  WriteTag(SerializationTag::kBeginJSMap);
+  for (int i = 0; i < length; i++) {
+    if (!WriteObject(handle(entries->get(i), isolate_)).FromMaybe(false)) {
+      return Nothing<bool>();
+    }
+  }
+  WriteTag(SerializationTag::kEndJSMap);
+  WriteVarint<uint32_t>(length);
+  return Just(true);
+}
+
+Maybe<bool> ValueSerializer::WriteJSSet(Handle<JSSet> set) {
+  // First copy the element pointers, since getters could mutate them.
+  Handle<OrderedHashSet> table(OrderedHashSet::cast(set->table()));
+  int length = table->NumberOfElements();
+  Handle<FixedArray> entries = isolate_->factory()->NewFixedArray(length);
+  {
+    DisallowHeapAllocation no_gc;
+    Oddball* the_hole = isolate_->heap()->the_hole_value();
+    int capacity = table->UsedCapacity();
+    int result_index = 0;
+    for (int i = 0; i < capacity; i++) {
+      Object* key = table->KeyAt(i);
+      if (key == the_hole) continue;
+      entries->set(result_index++, key);
+    }
+    DCHECK_EQ(result_index, length);
+  }
+
+  // Then write it out.
+  WriteTag(SerializationTag::kBeginJSSet);
+  for (int i = 0; i < length; i++) {
+    if (!WriteObject(handle(entries->get(i), isolate_)).FromMaybe(false)) {
+      return Nothing<bool>();
+    }
+  }
+  WriteTag(SerializationTag::kEndJSSet);
+  WriteVarint<uint32_t>(length);
+  return Just(true);
+}
+
+Maybe<bool> ValueSerializer::WriteJSArrayBuffer(JSArrayBuffer* array_buffer) {
+  uint32_t* transfer_entry = array_buffer_transfer_map_.Find(array_buffer);
+  if (transfer_entry) {
+    DCHECK(array_buffer->was_neutered() || array_buffer->is_shared());
+    WriteTag(array_buffer->is_shared()
+                 ? SerializationTag::kSharedArrayBufferTransfer
+                 : SerializationTag::kArrayBufferTransfer);
+    WriteVarint(*transfer_entry);
+    return Just(true);
+  }
+
+  if (array_buffer->is_shared()) {
+    ThrowDataCloneError(
+        MessageTemplate::kDataCloneErrorSharedArrayBufferNotTransferred);
+    return Nothing<bool>();
+  }
+  if (array_buffer->was_neutered()) {
+    ThrowDataCloneError(MessageTemplate::kDataCloneErrorNeuteredArrayBuffer);
+    return Nothing<bool>();
+  }
+  double byte_length = array_buffer->byte_length()->Number();
+  if (byte_length > std::numeric_limits<uint32_t>::max()) {
+    ThrowDataCloneError(MessageTemplate::kDataCloneError, handle(array_buffer));
+    return Nothing<bool>();
+  }
+  WriteTag(SerializationTag::kArrayBuffer);
+  WriteVarint<uint32_t>(byte_length);
+  WriteRawBytes(array_buffer->backing_store(), byte_length);
+  return Just(true);
+}
+
+Maybe<bool> ValueSerializer::WriteJSArrayBufferView(JSArrayBufferView* view) {
+  WriteTag(SerializationTag::kArrayBufferView);
+  ArrayBufferViewTag tag = ArrayBufferViewTag::kInt8Array;
+  if (view->IsJSTypedArray()) {
+    switch (JSTypedArray::cast(view)->type()) {
+#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype, size) \
+  case kExternal##Type##Array:                          \
+    tag = ArrayBufferViewTag::k##Type##Array;           \
+    break;
+      TYPED_ARRAYS(TYPED_ARRAY_CASE)
+#undef TYPED_ARRAY_CASE
+    }
+  } else {
+    DCHECK(view->IsJSDataView());
+    tag = ArrayBufferViewTag::kDataView;
+  }
+  WriteVarint(static_cast<uint8_t>(tag));
+  WriteVarint(NumberToUint32(view->byte_offset()));
+  WriteVarint(NumberToUint32(view->byte_length()));
+  return Just(true);
+}
+
+Maybe<bool> ValueSerializer::WriteHostObject(Handle<JSObject> object) {
+  if (!delegate_) {
+    isolate_->Throw(*isolate_->factory()->NewError(
+        isolate_->error_function(), MessageTemplate::kDataCloneError, object));
+    return Nothing<bool>();
+  }
+  v8::Isolate* v8_isolate = reinterpret_cast<v8::Isolate*>(isolate_);
+  Maybe<bool> result =
+      delegate_->WriteHostObject(v8_isolate, Utils::ToLocal(object));
+  RETURN_VALUE_IF_SCHEDULED_EXCEPTION(isolate_, Nothing<bool>());
+  DCHECK(!result.IsNothing());
+  return result;
+}
+
+Maybe<uint32_t> ValueSerializer::WriteJSObjectPropertiesSlow(
     Handle<JSObject> object, Handle<FixedArray> keys) {
   uint32_t properties_written = 0;
   int length = keys->length();
@@ -445,25 +758,56 @@
   return Just(properties_written);
 }
 
+void ValueSerializer::ThrowDataCloneError(
+    MessageTemplate::Template template_index) {
+  return ThrowDataCloneError(template_index,
+                             isolate_->factory()->empty_string());
+}
+
+void ValueSerializer::ThrowDataCloneError(
+    MessageTemplate::Template template_index, Handle<Object> arg0) {
+  Handle<String> message =
+      MessageTemplate::FormatMessage(isolate_, template_index, arg0);
+  if (delegate_) {
+    delegate_->ThrowDataCloneError(Utils::ToLocal(message));
+  } else {
+    isolate_->Throw(
+        *isolate_->factory()->NewError(isolate_->error_function(), message));
+  }
+  if (isolate_->has_scheduled_exception()) {
+    isolate_->PromoteScheduledException();
+  }
+}
+
 ValueDeserializer::ValueDeserializer(Isolate* isolate,
-                                     Vector<const uint8_t> data)
+                                     Vector<const uint8_t> data,
+                                     v8::ValueDeserializer::Delegate* delegate)
     : isolate_(isolate),
+      delegate_(delegate),
       position_(data.start()),
       end_(data.start() + data.length()),
-      id_map_(Handle<SeededNumberDictionary>::cast(
-          isolate->global_handles()->Create(
-              *SeededNumberDictionary::New(isolate, 0)))) {}
+      pretenure_(data.length() > kPretenureThreshold ? TENURED : NOT_TENURED),
+      id_map_(Handle<FixedArray>::cast(isolate->global_handles()->Create(
+          isolate_->heap()->empty_fixed_array()))) {}
 
 ValueDeserializer::~ValueDeserializer() {
   GlobalHandles::Destroy(Handle<Object>::cast(id_map_).location());
+
+  Handle<Object> transfer_map_handle;
+  if (array_buffer_transfer_map_.ToHandle(&transfer_map_handle)) {
+    GlobalHandles::Destroy(transfer_map_handle.location());
+  }
 }
 
 Maybe<bool> ValueDeserializer::ReadHeader() {
   if (position_ < end_ &&
       *position_ == static_cast<uint8_t>(SerializationTag::kVersion)) {
     ReadTag().ToChecked();
-    if (!ReadVarint<uint32_t>().To(&version_)) return Nothing<bool>();
-    if (version_ > kLatestVersion) return Nothing<bool>();
+    if (!ReadVarint<uint32_t>().To(&version_) || version_ > kLatestVersion) {
+      isolate_->Throw(*isolate_->factory()->NewError(
+          MessageTemplate::kDataCloneDeserializationVersionError));
+      return Nothing<bool>();
+    }
   }
   return Just(true);
 }
@@ -511,7 +855,7 @@
     if (position_ >= end_) return Nothing<T>();
     uint8_t byte = *position_;
     if (V8_LIKELY(shift < sizeof(T) * 8)) {
-      value |= (byte & 0x7f) << shift;
+      value |= static_cast<T>(byte & 0x7f) << shift;
       shift += 7;
     }
     has_another_byte = byte & 0x80;
@@ -551,7 +895,67 @@
   return Just(Vector<const uint8_t>(start, size));
 }
 
+bool ValueDeserializer::ReadUint32(uint32_t* value) {
+  return ReadVarint<uint32_t>().To(value);
+}
+
+bool ValueDeserializer::ReadUint64(uint64_t* value) {
+  return ReadVarint<uint64_t>().To(value);
+}
+
+bool ValueDeserializer::ReadDouble(double* value) {
+  return ReadDouble().To(value);
+}
+
+bool ValueDeserializer::ReadRawBytes(size_t length, const void** data) {
+  if (length > static_cast<size_t>(end_ - position_)) return false;
+  *data = position_;
+  position_ += length;
+  return true;
+}
+
+void ValueDeserializer::TransferArrayBuffer(
+    uint32_t transfer_id, Handle<JSArrayBuffer> array_buffer) {
+  if (array_buffer_transfer_map_.is_null()) {
+    array_buffer_transfer_map_ =
+        Handle<SeededNumberDictionary>::cast(isolate_->global_handles()->Create(
+            *SeededNumberDictionary::New(isolate_, 0)));
+  }
+  Handle<SeededNumberDictionary> dictionary =
+      array_buffer_transfer_map_.ToHandleChecked();
+  const bool used_as_prototype = false;
+  Handle<SeededNumberDictionary> new_dictionary =
+      SeededNumberDictionary::AtNumberPut(dictionary, transfer_id, array_buffer,
+                                          used_as_prototype);
+  if (!new_dictionary.is_identical_to(dictionary)) {
+    GlobalHandles::Destroy(Handle<Object>::cast(dictionary).location());
+    array_buffer_transfer_map_ = Handle<SeededNumberDictionary>::cast(
+        isolate_->global_handles()->Create(*new_dictionary));
+  }
+}
+
 MaybeHandle<Object> ValueDeserializer::ReadObject() {
+  MaybeHandle<Object> result = ReadObjectInternal();
+
+  // ArrayBufferView is special in that it consumes the value before it, even
+  // after format version 0.
+  Handle<Object> object;
+  SerializationTag tag;
+  if (result.ToHandle(&object) && V8_UNLIKELY(object->IsJSArrayBuffer()) &&
+      PeekTag().To(&tag) && tag == SerializationTag::kArrayBufferView) {
+    ConsumeTag(SerializationTag::kArrayBufferView);
+    result = ReadJSArrayBufferView(Handle<JSArrayBuffer>::cast(object));
+  }
+
+  if (result.is_null() && !isolate_->has_pending_exception()) {
+    isolate_->Throw(*isolate_->factory()->NewError(
+        MessageTemplate::kDataCloneDeserializationError));
+  }
+
+  return result;
+}
+
+MaybeHandle<Object> ValueDeserializer::ReadObjectInternal() {
   SerializationTag tag;
   if (!ReadTag().To(&tag)) return MaybeHandle<Object>();
   switch (tag) {
@@ -570,17 +974,19 @@
     case SerializationTag::kInt32: {
       Maybe<int32_t> number = ReadZigZag<int32_t>();
       if (number.IsNothing()) return MaybeHandle<Object>();
-      return isolate_->factory()->NewNumberFromInt(number.FromJust());
+      return isolate_->factory()->NewNumberFromInt(number.FromJust(),
+                                                   pretenure_);
     }
     case SerializationTag::kUint32: {
       Maybe<uint32_t> number = ReadVarint<uint32_t>();
       if (number.IsNothing()) return MaybeHandle<Object>();
-      return isolate_->factory()->NewNumberFromUint(number.FromJust());
+      return isolate_->factory()->NewNumberFromUint(number.FromJust(),
+                                                    pretenure_);
     }
     case SerializationTag::kDouble: {
       Maybe<double> number = ReadDouble();
       if (number.IsNothing()) return MaybeHandle<Object>();
-      return isolate_->factory()->NewNumber(number.FromJust());
+      return isolate_->factory()->NewNumber(number.FromJust(), pretenure_);
     }
     case SerializationTag::kUtf8String:
       return ReadUtf8String();
@@ -606,8 +1012,25 @@
       return ReadJSValue(tag);
     case SerializationTag::kRegExp:
       return ReadJSRegExp();
+    case SerializationTag::kBeginJSMap:
+      return ReadJSMap();
+    case SerializationTag::kBeginJSSet:
+      return ReadJSSet();
+    case SerializationTag::kArrayBuffer:
+      return ReadJSArrayBuffer();
+    case SerializationTag::kArrayBufferTransfer: {
+      const bool is_shared = false;
+      return ReadTransferredJSArrayBuffer(is_shared);
+    }
+    case SerializationTag::kSharedArrayBufferTransfer: {
+      const bool is_shared = true;
+      return ReadTransferredJSArrayBuffer(is_shared);
+    }
     default:
-      return MaybeHandle<Object>();
+      // TODO(jbroman): Introduce an explicit tag for host objects to avoid
+      // having to treat every unknown tag as a potential host object.
+      position_--;
+      return ReadHostObject();
   }
 }
 
@@ -620,7 +1043,7 @@
       !ReadRawBytes(utf8_length).To(&utf8_bytes))
     return MaybeHandle<String>();
   return isolate_->factory()->NewStringFromUtf8(
-      Vector<const char>::cast(utf8_bytes));
+      Vector<const char>::cast(utf8_bytes), pretenure_);
 }
 
 MaybeHandle<String> ValueDeserializer::ReadTwoByteString() {
@@ -636,7 +1059,7 @@
   // string on the heap (regardless of alignment).
   Handle<SeqTwoByteString> string;
   if (!isolate_->factory()
-           ->NewRawTwoByteString(byte_length / sizeof(uc16))
+           ->NewRawTwoByteString(byte_length / sizeof(uc16), pretenure_)
            .ToHandle(&string))
     return MaybeHandle<String>();
 
@@ -646,19 +1069,59 @@
   return string;
 }
 
+bool ValueDeserializer::ReadExpectedString(Handle<String> expected) {
+  // In the case of failure, the position in the stream is reset.
+  const uint8_t* original_position = position_;
+
+  SerializationTag tag;
+  uint32_t byte_length;
+  Vector<const uint8_t> bytes;
+  if (!ReadTag().To(&tag) || !ReadVarint<uint32_t>().To(&byte_length) ||
+      byte_length >
+          static_cast<uint32_t>(std::numeric_limits<int32_t>::max()) ||
+      !ReadRawBytes(byte_length).To(&bytes)) {
+    position_ = original_position;
+    return false;
+  }
+
+  expected = String::Flatten(expected);
+  DisallowHeapAllocation no_gc;
+  String::FlatContent flat = expected->GetFlatContent();
+
+  // If the bytes are verbatim what is in the flattened string, then the string
+  // is successfully consumed.
+  if (tag == SerializationTag::kUtf8String && flat.IsOneByte()) {
+    Vector<const uint8_t> chars = flat.ToOneByteVector();
+    if (byte_length == chars.length() &&
+        String::IsAscii(chars.begin(), chars.length()) &&
+        memcmp(bytes.begin(), chars.begin(), byte_length) == 0) {
+      return true;
+    }
+  } else if (tag == SerializationTag::kTwoByteString && flat.IsTwoByte()) {
+    Vector<const uc16> chars = flat.ToUC16Vector();
+    if (byte_length == static_cast<unsigned>(chars.length()) * sizeof(uc16) &&
+        memcmp(bytes.begin(), chars.begin(), byte_length) == 0) {
+      return true;
+    }
+  }
+
+  position_ = original_position;
+  return false;
+}
+
 MaybeHandle<JSObject> ValueDeserializer::ReadJSObject() {
   // If we are at the end of the stack, abort. This function may recurse.
-  if (StackLimitCheck(isolate_).HasOverflowed()) return MaybeHandle<JSObject>();
+  STACK_CHECK(isolate_, MaybeHandle<JSObject>());
 
   uint32_t id = next_id_++;
   HandleScope scope(isolate_);
   Handle<JSObject> object =
-      isolate_->factory()->NewJSObject(isolate_->object_function());
+      isolate_->factory()->NewJSObject(isolate_->object_function(), pretenure_);
   AddObjectWithID(id, object);
 
   uint32_t num_properties;
   uint32_t expected_num_properties;
-  if (!ReadJSObjectProperties(object, SerializationTag::kEndJSObject)
+  if (!ReadJSObjectProperties(object, SerializationTag::kEndJSObject, true)
            .To(&num_properties) ||
       !ReadVarint<uint32_t>().To(&expected_num_properties) ||
       num_properties != expected_num_properties) {
@@ -671,21 +1134,22 @@
 
 MaybeHandle<JSArray> ValueDeserializer::ReadSparseJSArray() {
   // If we are at the end of the stack, abort. This function may recurse.
-  if (StackLimitCheck(isolate_).HasOverflowed()) return MaybeHandle<JSArray>();
+  STACK_CHECK(isolate_, MaybeHandle<JSArray>());
 
   uint32_t length;
   if (!ReadVarint<uint32_t>().To(&length)) return MaybeHandle<JSArray>();
 
   uint32_t id = next_id_++;
   HandleScope scope(isolate_);
-  Handle<JSArray> array = isolate_->factory()->NewJSArray(0);
+  Handle<JSArray> array = isolate_->factory()->NewJSArray(
+      0, TERMINAL_FAST_ELEMENTS_KIND, pretenure_);
   JSArray::SetLength(array, length);
   AddObjectWithID(id, array);
 
   uint32_t num_properties;
   uint32_t expected_num_properties;
   uint32_t expected_length;
-  if (!ReadJSObjectProperties(array, SerializationTag::kEndSparseJSArray)
+  if (!ReadJSObjectProperties(array, SerializationTag::kEndSparseJSArray, false)
            .To(&num_properties) ||
       !ReadVarint<uint32_t>().To(&expected_num_properties) ||
       !ReadVarint<uint32_t>().To(&expected_length) ||
@@ -699,7 +1163,7 @@
 
 MaybeHandle<JSArray> ValueDeserializer::ReadDenseJSArray() {
   // If we are at the end of the stack, abort. This function may recurse.
-  if (StackLimitCheck(isolate_).HasOverflowed()) return MaybeHandle<JSArray>();
+  STACK_CHECK(isolate_, MaybeHandle<JSArray>());
 
   uint32_t length;
   if (!ReadVarint<uint32_t>().To(&length)) return MaybeHandle<JSArray>();
@@ -707,7 +1171,8 @@
   uint32_t id = next_id_++;
   HandleScope scope(isolate_);
   Handle<JSArray> array = isolate_->factory()->NewJSArray(
-      FAST_HOLEY_ELEMENTS, length, length, INITIALIZE_ARRAY_ELEMENTS_WITH_HOLE);
+      FAST_HOLEY_ELEMENTS, length, length, INITIALIZE_ARRAY_ELEMENTS_WITH_HOLE,
+      pretenure_);
   AddObjectWithID(id, array);
 
   Handle<FixedArray> elements(FixedArray::cast(array->elements()), isolate_);
@@ -722,7 +1187,7 @@
   uint32_t num_properties;
   uint32_t expected_num_properties;
   uint32_t expected_length;
-  if (!ReadJSObjectProperties(array, SerializationTag::kEndDenseJSArray)
+  if (!ReadJSObjectProperties(array, SerializationTag::kEndDenseJSArray, false)
            .To(&num_properties) ||
       !ReadVarint<uint32_t>().To(&expected_num_properties) ||
       !ReadVarint<uint32_t>().To(&expected_length) ||
@@ -752,29 +1217,30 @@
   Handle<JSValue> value;
   switch (tag) {
     case SerializationTag::kTrueObject:
-      value = Handle<JSValue>::cast(
-          isolate_->factory()->NewJSObject(isolate_->boolean_function()));
+      value = Handle<JSValue>::cast(isolate_->factory()->NewJSObject(
+          isolate_->boolean_function(), pretenure_));
       value->set_value(isolate_->heap()->true_value());
       break;
     case SerializationTag::kFalseObject:
-      value = Handle<JSValue>::cast(
-          isolate_->factory()->NewJSObject(isolate_->boolean_function()));
+      value = Handle<JSValue>::cast(isolate_->factory()->NewJSObject(
+          isolate_->boolean_function(), pretenure_));
       value->set_value(isolate_->heap()->false_value());
       break;
     case SerializationTag::kNumberObject: {
       double number;
       if (!ReadDouble().To(&number)) return MaybeHandle<JSValue>();
-      value = Handle<JSValue>::cast(
-          isolate_->factory()->NewJSObject(isolate_->number_function()));
-      Handle<Object> number_object = isolate_->factory()->NewNumber(number);
+      value = Handle<JSValue>::cast(isolate_->factory()->NewJSObject(
+          isolate_->number_function(), pretenure_));
+      Handle<Object> number_object =
+          isolate_->factory()->NewNumber(number, pretenure_);
       value->set_value(*number_object);
       break;
     }
     case SerializationTag::kStringObject: {
       Handle<String> string;
       if (!ReadUtf8String().ToHandle(&string)) return MaybeHandle<JSValue>();
-      value = Handle<JSValue>::cast(
-          isolate_->factory()->NewJSObject(isolate_->string_function()));
+      value = Handle<JSValue>::cast(isolate_->factory()->NewJSObject(
+          isolate_->string_function(), pretenure_));
       value->set_value(*string);
       break;
     }
@@ -801,9 +1267,296 @@
   return regexp;
 }
 
+MaybeHandle<JSMap> ValueDeserializer::ReadJSMap() {
+  // If we are at the end of the stack, abort. This function may recurse.
+  STACK_CHECK(isolate_, MaybeHandle<JSMap>());
+
+  HandleScope scope(isolate_);
+  uint32_t id = next_id_++;
+  Handle<JSMap> map = isolate_->factory()->NewJSMap();
+  AddObjectWithID(id, map);
+
+  Handle<JSFunction> map_set = isolate_->map_set();
+  uint32_t length = 0;
+  while (true) {
+    SerializationTag tag;
+    if (!PeekTag().To(&tag)) return MaybeHandle<JSMap>();
+    if (tag == SerializationTag::kEndJSMap) {
+      ConsumeTag(SerializationTag::kEndJSMap);
+      break;
+    }
+
+    Handle<Object> argv[2];
+    if (!ReadObject().ToHandle(&argv[0]) || !ReadObject().ToHandle(&argv[1]) ||
+        Execution::Call(isolate_, map_set, map, arraysize(argv), argv)
+            .is_null()) {
+      return MaybeHandle<JSMap>();
+    }
+    length += 2;
+  }
+
+  uint32_t expected_length;
+  if (!ReadVarint<uint32_t>().To(&expected_length) ||
+      length != expected_length) {
+    return MaybeHandle<JSMap>();
+  }
+  DCHECK(HasObjectWithID(id));
+  return scope.CloseAndEscape(map);
+}
+
+MaybeHandle<JSSet> ValueDeserializer::ReadJSSet() {
+  // If we are at the end of the stack, abort. This function may recurse.
+  STACK_CHECK(isolate_, MaybeHandle<JSSet>());
+
+  HandleScope scope(isolate_);
+  uint32_t id = next_id_++;
+  Handle<JSSet> set = isolate_->factory()->NewJSSet();
+  AddObjectWithID(id, set);
+  Handle<JSFunction> set_add = isolate_->set_add();
+  uint32_t length = 0;
+  while (true) {
+    SerializationTag tag;
+    if (!PeekTag().To(&tag)) return MaybeHandle<JSSet>();
+    if (tag == SerializationTag::kEndJSSet) {
+      ConsumeTag(SerializationTag::kEndJSSet);
+      break;
+    }
+
+    Handle<Object> argv[1];
+    if (!ReadObject().ToHandle(&argv[0]) ||
+        Execution::Call(isolate_, set_add, set, arraysize(argv), argv)
+            .is_null()) {
+      return MaybeHandle<JSSet>();
+    }
+    length++;
+  }
+
+  uint32_t expected_length;
+  if (!ReadVarint<uint32_t>().To(&expected_length) ||
+      length != expected_length) {
+    return MaybeHandle<JSSet>();
+  }
+  DCHECK(HasObjectWithID(id));
+  return scope.CloseAndEscape(set);
+}
+
+MaybeHandle<JSArrayBuffer> ValueDeserializer::ReadJSArrayBuffer() {
+  uint32_t id = next_id_++;
+  uint32_t byte_length;
+  Vector<const uint8_t> bytes;
+  if (!ReadVarint<uint32_t>().To(&byte_length) ||
+      byte_length > static_cast<size_t>(end_ - position_)) {
+    return MaybeHandle<JSArrayBuffer>();
+  }
+  const bool should_initialize = false;
+  Handle<JSArrayBuffer> array_buffer =
+      isolate_->factory()->NewJSArrayBuffer(SharedFlag::kNotShared, pretenure_);
+  JSArrayBuffer::SetupAllocatingData(array_buffer, isolate_, byte_length,
+                                     should_initialize);
+  memcpy(array_buffer->backing_store(), position_, byte_length);
+  position_ += byte_length;
+  AddObjectWithID(id, array_buffer);
+  return array_buffer;
+}
+
+MaybeHandle<JSArrayBuffer> ValueDeserializer::ReadTransferredJSArrayBuffer(
+    bool is_shared) {
+  uint32_t id = next_id_++;
+  uint32_t transfer_id;
+  Handle<SeededNumberDictionary> transfer_map;
+  if (!ReadVarint<uint32_t>().To(&transfer_id) ||
+      !array_buffer_transfer_map_.ToHandle(&transfer_map)) {
+    return MaybeHandle<JSArrayBuffer>();
+  }
+  int index = transfer_map->FindEntry(isolate_, transfer_id);
+  if (index == SeededNumberDictionary::kNotFound) {
+    return MaybeHandle<JSArrayBuffer>();
+  }
+  Handle<JSArrayBuffer> array_buffer(
+      JSArrayBuffer::cast(transfer_map->ValueAt(index)), isolate_);
+  DCHECK_EQ(is_shared, array_buffer->is_shared());
+  AddObjectWithID(id, array_buffer);
+  return array_buffer;
+}
+
+MaybeHandle<JSArrayBufferView> ValueDeserializer::ReadJSArrayBufferView(
+    Handle<JSArrayBuffer> buffer) {
+  uint32_t buffer_byte_length = NumberToUint32(buffer->byte_length());
+  uint8_t tag = 0;
+  uint32_t byte_offset = 0;
+  uint32_t byte_length = 0;
+  if (!ReadVarint<uint8_t>().To(&tag) ||
+      !ReadVarint<uint32_t>().To(&byte_offset) ||
+      !ReadVarint<uint32_t>().To(&byte_length) ||
+      byte_offset > buffer_byte_length ||
+      byte_length > buffer_byte_length - byte_offset) {
+    return MaybeHandle<JSArrayBufferView>();
+  }
+  uint32_t id = next_id_++;
+  ExternalArrayType external_array_type = kExternalInt8Array;
+  unsigned element_size = 0;
+  switch (static_cast<ArrayBufferViewTag>(tag)) {
+    case ArrayBufferViewTag::kDataView: {
+      Handle<JSDataView> data_view =
+          isolate_->factory()->NewJSDataView(buffer, byte_offset, byte_length);
+      AddObjectWithID(id, data_view);
+      return data_view;
+    }
+#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype, size) \
+  case ArrayBufferViewTag::k##Type##Array:              \
+    external_array_type = kExternal##Type##Array;       \
+    element_size = size;                                \
+    break;
+      TYPED_ARRAYS(TYPED_ARRAY_CASE)
+#undef TYPED_ARRAY_CASE
+  }
+  if (byte_offset % element_size != 0 || byte_length % element_size != 0) {
+    return MaybeHandle<JSArrayBufferView>();
+  }
+  Handle<JSTypedArray> typed_array = isolate_->factory()->NewJSTypedArray(
+      external_array_type, buffer, byte_offset, byte_length / element_size,
+      pretenure_);
+  AddObjectWithID(id, typed_array);
+  return typed_array;
+}
+
+MaybeHandle<JSObject> ValueDeserializer::ReadHostObject() {
+  if (!delegate_) return MaybeHandle<JSObject>();
+  STACK_CHECK(isolate_, MaybeHandle<JSObject>());
+  uint32_t id = next_id_++;
+  v8::Isolate* v8_isolate = reinterpret_cast<v8::Isolate*>(isolate_);
+  v8::Local<v8::Object> object;
+  if (!delegate_->ReadHostObject(v8_isolate).ToLocal(&object)) {
+    RETURN_EXCEPTION_IF_SCHEDULED_EXCEPTION(isolate_, JSObject);
+    return MaybeHandle<JSObject>();
+  }
+  Handle<JSObject> js_object =
+      Handle<JSObject>::cast(Utils::OpenHandle(*object));
+  AddObjectWithID(id, js_object);
+  return js_object;
+}
+
+// Copies a vector of property values into an object, given the map that should
+// be used.
+static void CommitProperties(Handle<JSObject> object, Handle<Map> map,
+                             const std::vector<Handle<Object>>& properties) {
+  JSObject::AllocateStorageForMap(object, map);
+  DCHECK(!object->map()->is_dictionary_map());
+
+  DisallowHeapAllocation no_gc;
+  DescriptorArray* descriptors = object->map()->instance_descriptors();
+  for (unsigned i = 0; i < properties.size(); i++) {
+    object->WriteToField(i, descriptors->GetDetails(i), *properties[i]);
+  }
+}
+
 Maybe<uint32_t> ValueDeserializer::ReadJSObjectProperties(
-    Handle<JSObject> object, SerializationTag end_tag) {
-  for (uint32_t num_properties = 0;; num_properties++) {
+    Handle<JSObject> object, SerializationTag end_tag,
+    bool can_use_transitions) {
+  uint32_t num_properties = 0;
+
+  // Fast path (following map transitions).
+  if (can_use_transitions) {
+    bool transitioning = true;
+    Handle<Map> map(object->map(), isolate_);
+    DCHECK(!map->is_dictionary_map());
+    DCHECK(map->instance_descriptors()->IsEmpty());
+    std::vector<Handle<Object>> properties;
+    properties.reserve(8);
+
+    while (transitioning) {
+      // If there are no more properties, finish.
+      SerializationTag tag;
+      if (!PeekTag().To(&tag)) return Nothing<uint32_t>();
+      if (tag == end_tag) {
+        ConsumeTag(end_tag);
+        CommitProperties(object, map, properties);
+        CHECK_LT(properties.size(), std::numeric_limits<uint32_t>::max());
+        return Just(static_cast<uint32_t>(properties.size()));
+      }
+
+      // Determine the key to be used and the target map to transition to, if
+      // possible. Transitioning may abort if the key is not a string, or if no
+      // transition was found.
+      Handle<Object> key;
+      Handle<Map> target;
+      Handle<String> expected_key = TransitionArray::ExpectedTransitionKey(map);
+      if (!expected_key.is_null() && ReadExpectedString(expected_key)) {
+        key = expected_key;
+        target = TransitionArray::ExpectedTransitionTarget(map);
+      } else {
+        if (!ReadObject().ToHandle(&key)) return Nothing<uint32_t>();
+        if (key->IsString()) {
+          key =
+              isolate_->factory()->InternalizeString(Handle<String>::cast(key));
+          target = TransitionArray::FindTransitionToField(
+              map, Handle<String>::cast(key));
+          transitioning = !target.is_null();
+        } else {
+          transitioning = false;
+        }
+      }
+
+      // Read the value that corresponds to it.
+      Handle<Object> value;
+      if (!ReadObject().ToHandle(&value)) return Nothing<uint32_t>();
+
+      // If still transitioning and the value fits the field representation
+      // (though generalization may be required), store the property value so
+      // that we can copy them all at once. Otherwise, stop transitioning.
+      if (transitioning) {
+        int descriptor = static_cast<int>(properties.size());
+        PropertyDetails details =
+            target->instance_descriptors()->GetDetails(descriptor);
+        Representation expected_representation = details.representation();
+        if (value->FitsRepresentation(expected_representation)) {
+          if (expected_representation.IsHeapObject() &&
+              !target->instance_descriptors()
+                   ->GetFieldType(descriptor)
+                   ->NowContains(value)) {
+            Handle<FieldType> value_type =
+                value->OptimalType(isolate_, expected_representation);
+            Map::GeneralizeFieldType(target, descriptor,
+                                     expected_representation, value_type);
+          }
+          DCHECK(target->instance_descriptors()
+                     ->GetFieldType(descriptor)
+                     ->NowContains(value));
+          properties.push_back(value);
+          map = target;
+          continue;
+        } else {
+          transitioning = false;
+        }
+      }
+
+      // Fell out of transitioning fast path. Commit the properties gathered so
+      // far, and then start setting properties slowly instead.
+      DCHECK(!transitioning);
+      CHECK_LT(properties.size(), std::numeric_limits<uint32_t>::max());
+      CommitProperties(object, map, properties);
+      num_properties = static_cast<uint32_t>(properties.size());
+
+      bool success;
+      LookupIterator it = LookupIterator::PropertyOrElement(
+          isolate_, object, key, &success, LookupIterator::OWN);
+      if (!success ||
+          JSObject::DefineOwnPropertyIgnoreAttributes(&it, value, NONE)
+              .is_null()) {
+        return Nothing<uint32_t>();
+      }
+      num_properties++;
+    }
+
+    // At this point, transitioning should be done, but at least one property
+    // should have been written (in the zero-property case, there is an early
+    // return).
+    DCHECK(!transitioning);
+    DCHECK_GE(num_properties, 1u);
+  }
+
+  // Slow path.
+  for (;; num_properties++) {
     SerializationTag tag;
     if (!PeekTag().To(&tag)) return Nothing<uint32_t>();
     if (tag == end_tag) {
@@ -828,15 +1581,16 @@
 }
 
 bool ValueDeserializer::HasObjectWithID(uint32_t id) {
-  return id_map_->Has(isolate_, id);
+  return id < static_cast<unsigned>(id_map_->length()) &&
+         !id_map_->get(id)->IsTheHole(isolate_);
 }
 
 MaybeHandle<JSReceiver> ValueDeserializer::GetObjectWithID(uint32_t id) {
-  int index = id_map_->FindEntry(isolate_, id);
-  if (index == SeededNumberDictionary::kNotFound) {
+  if (id >= static_cast<unsigned>(id_map_->length())) {
     return MaybeHandle<JSReceiver>();
   }
-  Object* value = id_map_->ValueAt(index);
+  Object* value = id_map_->get(id);
+  if (value->IsTheHole(isolate_)) return MaybeHandle<JSReceiver>();
   DCHECK(value->IsJSReceiver());
   return Handle<JSReceiver>(JSReceiver::cast(value), isolate_);
 }
@@ -844,16 +1598,13 @@
 void ValueDeserializer::AddObjectWithID(uint32_t id,
                                         Handle<JSReceiver> object) {
   DCHECK(!HasObjectWithID(id));
-  const bool used_as_prototype = false;
-  Handle<SeededNumberDictionary> new_dictionary =
-      SeededNumberDictionary::AtNumberPut(id_map_, id, object,
-                                          used_as_prototype);
+  Handle<FixedArray> new_array = FixedArray::SetAndGrow(id_map_, id, object);
 
   // If the dictionary was reallocated, update the global handle.
-  if (!new_dictionary.is_identical_to(id_map_)) {
+  if (!new_array.is_identical_to(id_map_)) {
     GlobalHandles::Destroy(Handle<Object>::cast(id_map_).location());
-    id_map_ = Handle<SeededNumberDictionary>::cast(
-        isolate_->global_handles()->Create(*new_dictionary));
+    id_map_ = Handle<FixedArray>::cast(
+        isolate_->global_handles()->Create(*new_array));
   }
 }
 
@@ -878,8 +1629,7 @@
 
 MaybeHandle<Object>
 ValueDeserializer::ReadObjectUsingEntireBufferForLegacyFormat() {
-  if (version_ > 0) return MaybeHandle<Object>();
-
+  DCHECK_EQ(version_, 0);
   HandleScope scope(isolate_);
   std::vector<Handle<Object>> stack;
   while (position_ < end_) {
@@ -901,8 +1651,8 @@
 
         size_t begin_properties =
             stack.size() - 2 * static_cast<size_t>(num_properties);
-        Handle<JSObject> js_object =
-            isolate_->factory()->NewJSObject(isolate_->object_function());
+        Handle<JSObject> js_object = isolate_->factory()->NewJSObject(
+            isolate_->object_function(), pretenure_);
         if (num_properties &&
             !SetPropertiesFromKeyValuePairs(
                  isolate_, js_object, &stack[begin_properties], num_properties)
@@ -926,7 +1676,8 @@
           return MaybeHandle<Object>();
         }
 
-        Handle<JSArray> js_array = isolate_->factory()->NewJSArray(0);
+        Handle<JSArray> js_array = isolate_->factory()->NewJSArray(
+            0, TERMINAL_FAST_ELEMENTS_KIND, pretenure_);
         JSArray::SetLength(js_array, length);
         size_t begin_properties =
             stack.size() - 2 * static_cast<size_t>(num_properties);
@@ -941,9 +1692,12 @@
         new_object = js_array;
         break;
       }
-      case SerializationTag::kEndDenseJSArray:
+      case SerializationTag::kEndDenseJSArray: {
         // This was already broken in Chromium, and apparently wasn't missed.
+        isolate_->Throw(*isolate_->factory()->NewError(
+            MessageTemplate::kDataCloneDeserializationError));
         return MaybeHandle<Object>();
+      }
       default:
         if (!ReadObject().ToHandle(&new_object)) return MaybeHandle<Object>();
         break;
@@ -959,7 +1713,11 @@
 #endif
   position_ = end_;
 
-  if (stack.size() != 1) return MaybeHandle<Object>();
+  if (stack.size() != 1) {
+    isolate_->Throw(*isolate_->factory()->NewError(
+        MessageTemplate::kDataCloneDeserializationError));
+    return MaybeHandle<Object>();
+  }
   return scope.CloseAndEscape(stack[0]);
 }
 
diff --git a/src/value-serializer.h b/src/value-serializer.h
index ab9c664..27ce0c1 100644
--- a/src/value-serializer.h
+++ b/src/value-serializer.h
@@ -12,16 +12,21 @@
 #include "src/base/compiler-specific.h"
 #include "src/base/macros.h"
 #include "src/identity-map.h"
+#include "src/messages.h"
 #include "src/vector.h"
-#include "src/zone.h"
+#include "src/zone/zone.h"
 
 namespace v8 {
 namespace internal {
 
 class HeapNumber;
 class Isolate;
+class JSArrayBuffer;
+class JSArrayBufferView;
 class JSDate;
+class JSMap;
 class JSRegExp;
+class JSSet;
 class JSValue;
 class Object;
 class Oddball;
@@ -37,7 +42,7 @@
  */
 class ValueSerializer {
  public:
-  explicit ValueSerializer(Isolate* isolate);
+  ValueSerializer(Isolate* isolate, v8::ValueSerializer::Delegate* delegate);
   ~ValueSerializer();
 
   /*
@@ -56,6 +61,23 @@
    */
   std::vector<uint8_t> ReleaseBuffer() { return std::move(buffer_); }
 
+  /*
+   * Marks an ArrayBuffer as having its contents transferred out of band.
+   * Pass the corresponding JSArrayBuffer in the deserializing context to
+   * ValueDeserializer::TransferArrayBuffer.
+   */
+  void TransferArrayBuffer(uint32_t transfer_id,
+                           Handle<JSArrayBuffer> array_buffer);
+
+  /*
+   * Publicly exposed wire format writing methods.
+   * These are intended for use within the delegate's WriteHostObject method.
+   */
+  void WriteUint32(uint32_t value);
+  void WriteUint64(uint64_t value);
+  void WriteRawBytes(const void* source, size_t length);
+  void WriteDouble(double value);
+
  private:
   // Writing the wire format.
   void WriteTag(SerializationTag tag);
@@ -63,7 +85,6 @@
   void WriteVarint(T value);
   template <typename T>
   void WriteZigZag(T value);
-  void WriteDouble(double value);
   void WriteOneByteString(Vector<const uint8_t> chars);
   void WriteTwoByteString(Vector<const uc16> chars);
   uint8_t* ReserveRawBytes(size_t bytes);
@@ -75,20 +96,35 @@
   void WriteString(Handle<String> string);
   Maybe<bool> WriteJSReceiver(Handle<JSReceiver> receiver) WARN_UNUSED_RESULT;
   Maybe<bool> WriteJSObject(Handle<JSObject> object) WARN_UNUSED_RESULT;
+  Maybe<bool> WriteJSObjectSlow(Handle<JSObject> object) WARN_UNUSED_RESULT;
   Maybe<bool> WriteJSArray(Handle<JSArray> array) WARN_UNUSED_RESULT;
   void WriteJSDate(JSDate* date);
   Maybe<bool> WriteJSValue(Handle<JSValue> value) WARN_UNUSED_RESULT;
   void WriteJSRegExp(JSRegExp* regexp);
+  Maybe<bool> WriteJSMap(Handle<JSMap> map) WARN_UNUSED_RESULT;
+  Maybe<bool> WriteJSSet(Handle<JSSet> map) WARN_UNUSED_RESULT;
+  Maybe<bool> WriteJSArrayBuffer(JSArrayBuffer* array_buffer);
+  Maybe<bool> WriteJSArrayBufferView(JSArrayBufferView* array_buffer);
+  Maybe<bool> WriteHostObject(Handle<JSObject> object) WARN_UNUSED_RESULT;
 
   /*
    * Reads the specified keys from the object and writes key-value pairs to the
    * buffer. Returns the number of keys actually written, which may be smaller
    * if some keys are not own properties when accessed.
    */
-  Maybe<uint32_t> WriteJSObjectProperties(
+  Maybe<uint32_t> WriteJSObjectPropertiesSlow(
       Handle<JSObject> object, Handle<FixedArray> keys) WARN_UNUSED_RESULT;
 
+  /*
+   * Asks the delegate to handle an error that occurred during data cloning, by
+   * throwing an exception appropriate for the host.
+   */
+  void ThrowDataCloneError(MessageTemplate::Template template_index);
+  V8_NOINLINE void ThrowDataCloneError(MessageTemplate::Template template_index,
+                                       Handle<Object> arg0);
+
   Isolate* const isolate_;
+  v8::ValueSerializer::Delegate* const delegate_;
   std::vector<uint8_t> buffer_;
   Zone zone_;
 
@@ -98,6 +134,9 @@
   IdentityMap<uint32_t> id_map_;
   uint32_t next_id_ = 0;
 
+  // A similar map, for transferred array buffers.
+  IdentityMap<uint32_t> array_buffer_transfer_map_;
+
   DISALLOW_COPY_AND_ASSIGN(ValueSerializer);
 };
 
@@ -107,7 +146,8 @@
  */
 class ValueDeserializer {
  public:
-  ValueDeserializer(Isolate* isolate, Vector<const uint8_t> data);
+  ValueDeserializer(Isolate* isolate, Vector<const uint8_t> data,
+                    v8::ValueDeserializer::Delegate* delegate);
   ~ValueDeserializer();
 
   /*
@@ -116,6 +156,13 @@
   Maybe<bool> ReadHeader() WARN_UNUSED_RESULT;
 
   /*
+   * Reads the underlying wire format version. Likely mostly to be useful to
+   * legacy code reading old wire format versions. Must be called after
+   * ReadHeader.
+   */
+  uint32_t GetWireFormatVersion() const { return version_; }
+
+  /*
    * Deserializes a V8 object from the buffer.
    */
   MaybeHandle<Object> ReadObject() WARN_UNUSED_RESULT;
@@ -130,6 +177,22 @@
   MaybeHandle<Object> ReadObjectUsingEntireBufferForLegacyFormat()
       WARN_UNUSED_RESULT;
 
+  /*
+   * Accepts the array buffer corresponding to the one passed previously to
+   * ValueSerializer::TransferArrayBuffer.
+   */
+  void TransferArrayBuffer(uint32_t transfer_id,
+                           Handle<JSArrayBuffer> array_buffer);
+
+  /*
+   * Publicly exposed wire format reading methods.
+   * These are intended for use within the delegate's ReadHostObject method.
+   */
+  bool ReadUint32(uint32_t* value) WARN_UNUSED_RESULT;
+  bool ReadUint64(uint64_t* value) WARN_UNUSED_RESULT;
+  bool ReadDouble(double* value) WARN_UNUSED_RESULT;
+  bool ReadRawBytes(size_t length, const void** data) WARN_UNUSED_RESULT;
+
  private:
   // Reading the wire format.
   Maybe<SerializationTag> PeekTag() const WARN_UNUSED_RESULT;
@@ -142,6 +205,14 @@
   Maybe<double> ReadDouble() WARN_UNUSED_RESULT;
   Maybe<Vector<const uint8_t>> ReadRawBytes(int size) WARN_UNUSED_RESULT;
 
+  // Reads a string if it matches the one provided.
+  // Returns true if this was the case. Otherwise, nothing is consumed.
+  bool ReadExpectedString(Handle<String> expected) WARN_UNUSED_RESULT;
+
+  // Like ReadObject, but skips logic for special cases in simulating the
+  // "stack machine".
+  MaybeHandle<Object> ReadObjectInternal() WARN_UNUSED_RESULT;
+
   // Reading V8 objects of specific kinds.
   // The tag is assumed to have already been read.
   MaybeHandle<String> ReadUtf8String() WARN_UNUSED_RESULT;
@@ -152,13 +223,22 @@
   MaybeHandle<JSDate> ReadJSDate() WARN_UNUSED_RESULT;
   MaybeHandle<JSValue> ReadJSValue(SerializationTag tag) WARN_UNUSED_RESULT;
   MaybeHandle<JSRegExp> ReadJSRegExp() WARN_UNUSED_RESULT;
+  MaybeHandle<JSMap> ReadJSMap() WARN_UNUSED_RESULT;
+  MaybeHandle<JSSet> ReadJSSet() WARN_UNUSED_RESULT;
+  MaybeHandle<JSArrayBuffer> ReadJSArrayBuffer() WARN_UNUSED_RESULT;
+  MaybeHandle<JSArrayBuffer> ReadTransferredJSArrayBuffer(bool is_shared)
+      WARN_UNUSED_RESULT;
+  MaybeHandle<JSArrayBufferView> ReadJSArrayBufferView(
+      Handle<JSArrayBuffer> buffer) WARN_UNUSED_RESULT;
+  MaybeHandle<JSObject> ReadHostObject() WARN_UNUSED_RESULT;
 
   /*
    * Reads key-value pairs into the object until the specified end tag is
    * encountered. If successful, returns the number of properties read.
    */
   Maybe<uint32_t> ReadJSObjectProperties(Handle<JSObject> object,
-                                         SerializationTag end_tag);
+                                         SerializationTag end_tag,
+                                         bool can_use_transitions);
 
   // Manipulating the map from IDs to reified objects.
   bool HasObjectWithID(uint32_t id);
@@ -166,12 +246,17 @@
   void AddObjectWithID(uint32_t id, Handle<JSReceiver> object);
 
   Isolate* const isolate_;
+  v8::ValueDeserializer::Delegate* const delegate_;
   const uint8_t* position_;
   const uint8_t* const end_;
+  PretenureFlag pretenure_;
   uint32_t version_ = 0;
-  Handle<SeededNumberDictionary> id_map_;  // Always a global handle.
   uint32_t next_id_ = 0;
 
+  // Always global handles.
+  Handle<FixedArray> id_map_;
+  MaybeHandle<SeededNumberDictionary> array_buffer_transfer_map_;
+
   DISALLOW_COPY_AND_ASSIGN(ValueDeserializer);
 };
 
diff --git a/src/wasm/ast-decoder.cc b/src/wasm/ast-decoder.cc
index 0f19250..02d1db5 100644
--- a/src/wasm/ast-decoder.cc
+++ b/src/wasm/ast-decoder.cc
@@ -7,7 +7,7 @@
 #include "src/bit-vector.h"
 #include "src/flags.h"
 #include "src/handles.h"
-#include "src/zone-containers.h"
+#include "src/zone/zone-containers.h"
 
 #include "src/wasm/ast-decoder.h"
 #include "src/wasm/decoder.h"
@@ -36,6 +36,8 @@
     error("Invalid opcode (enable with --" #flag ")"); \
     break;                                             \
   }
+// TODO(titzer): this is only for intermediate migration.
+#define IMPLICIT_FUNCTION_END 1
 
 // An SsaEnv environment carries the current local variable renaming
 // as well as the current effect and control dependency in the TF graph.
@@ -68,62 +70,82 @@
   LocalType type;
 };
 
+struct TryInfo : public ZoneObject {
+  SsaEnv* catch_env;
+  TFNode* exception;
+
+  explicit TryInfo(SsaEnv* c) : catch_env(c), exception(nullptr) {}
+};
+
+struct MergeValues {
+  uint32_t arity;
+  union {
+    Value* array;
+    Value first;
+  } vals;  // Either multiple values or a single value.
+
+  Value& first() {
+    DCHECK_GT(arity, 0u);
+    return arity == 1 ? vals.first : vals.array[0];
+  }
+};
+
+static Value* NO_VALUE = nullptr;
+
+enum ControlKind { kControlIf, kControlBlock, kControlLoop, kControlTry };
+
 // An entry on the control stack (i.e. if, block, loop).
 struct Control {
   const byte* pc;
-  int stack_depth;         // stack height at the beginning of the construct.
-  SsaEnv* end_env;         // end environment for the construct.
-  SsaEnv* false_env;       // false environment (only for if).
-  SsaEnv* catch_env;       // catch environment (only for try with catch).
-  SsaEnv* finish_try_env;  // the environment where a try with finally lives.
-  TFNode* node;            // result node for the construct.
-  LocalType type;          // result type for the construct.
-  bool is_loop;            // true if this is the inner label of a loop.
+  ControlKind kind;
+  int stack_depth;    // stack height at the beginning of the construct.
+  SsaEnv* end_env;    // end environment for the construct.
+  SsaEnv* false_env;  // false environment (only for if).
+  TryInfo* try_info;  // Information used for compiling try statements.
+  int32_t previous_catch;  // The previous Control (on the stack) with a catch.
 
-  bool is_if() const { return *pc == kExprIf; }
+  // Values merged into the end of this control construct.
+  MergeValues merge;
 
-  bool is_try() const {
-    return *pc == kExprTryCatch || *pc == kExprTryCatchFinally ||
-           *pc == kExprTryFinally;
-  }
-
-  bool has_catch() const {
-    return *pc == kExprTryCatch || *pc == kExprTryCatchFinally;
-  }
-
-  bool has_finally() const {
-    return *pc == kExprTryCatchFinally || *pc == kExprTryFinally;
-  }
+  inline bool is_if() const { return kind == kControlIf; }
+  inline bool is_block() const { return kind == kControlBlock; }
+  inline bool is_loop() const { return kind == kControlLoop; }
+  inline bool is_try() const { return kind == kControlTry; }
 
   // Named constructors.
-  static Control Block(const byte* pc, int stack_depth, SsaEnv* end_env) {
-    return {pc,      stack_depth, end_env, nullptr, nullptr,
-            nullptr, nullptr,     kAstEnd, false};
+  static Control Block(const byte* pc, int stack_depth, SsaEnv* end_env,
+                       int32_t previous_catch) {
+    return {pc,      kControlBlock, stack_depth,    end_env,
+            nullptr, nullptr,       previous_catch, {0, {NO_VALUE}}};
   }
 
   static Control If(const byte* pc, int stack_depth, SsaEnv* end_env,
-                    SsaEnv* false_env) {
-    return {pc,      stack_depth, end_env,  false_env, nullptr,
-            nullptr, nullptr,     kAstStmt, false};
+                    SsaEnv* false_env, int32_t previous_catch) {
+    return {pc,        kControlIf, stack_depth,    end_env,
+            false_env, nullptr,    previous_catch, {0, {NO_VALUE}}};
   }
 
-  static Control Loop(const byte* pc, int stack_depth, SsaEnv* end_env) {
-    return {pc,      stack_depth, end_env, nullptr, nullptr,
-            nullptr, nullptr,     kAstEnd, true};
+  static Control Loop(const byte* pc, int stack_depth, SsaEnv* end_env,
+                      int32_t previous_catch) {
+    return {pc,      kControlLoop, stack_depth,    end_env,
+            nullptr, nullptr,      previous_catch, {0, {NO_VALUE}}};
   }
 
   static Control Try(const byte* pc, int stack_depth, SsaEnv* end_env,
-                     SsaEnv* catch_env, SsaEnv* finish_try_env) {
-    return {pc,      stack_depth, end_env, nullptr, catch_env, finish_try_env,
-            nullptr, kAstEnd,     false};
+                     Zone* zone, SsaEnv* catch_env, int32_t previous_catch) {
+    DCHECK_NOT_NULL(catch_env);
+    TryInfo* try_info = new (zone) TryInfo(catch_env);
+    return {pc,      kControlTry, stack_depth,    end_env,
+            nullptr, try_info,    previous_catch, {0, {NO_VALUE}}};
   }
 };
 
 // Macros that build nodes only if there is a graph and the current SSA
 // environment is reachable from start. This avoids problems with malformed
 // TF graphs when decoding inputs that have unreachable code.
-#define BUILD(func, ...) (build() ? builder_->func(__VA_ARGS__) : nullptr)
-#define BUILD0(func) (build() ? builder_->func() : nullptr)
+#define BUILD(func, ...) \
+  (build() ? CheckForException(builder_->func(__VA_ARGS__)) : nullptr)
+#define BUILD0(func) (build() ? CheckForException(builder_->func()) : nullptr)
 
 // Generic Wasm bytecode decoder with utilities for decoding operands,
 // lengths, etc.
@@ -150,17 +172,18 @@
       }
       return true;
     }
-    error(pc, pc + 1, "invalid local index");
+    error(pc, pc + 1, "invalid local index: %u", operand.index);
     return false;
   }
 
   inline bool Validate(const byte* pc, GlobalIndexOperand& operand) {
     ModuleEnv* m = module_;
     if (m && m->module && operand.index < m->module->globals.size()) {
-      operand.type = m->module->globals[operand.index].type;
+      operand.global = &m->module->globals[operand.index];
+      operand.type = operand.global->type;
       return true;
     }
-    error(pc, pc + 1, "invalid global index");
+    error(pc, pc + 1, "invalid global index: %u", operand.index);
     return false;
   }
 
@@ -175,16 +198,9 @@
 
   inline bool Validate(const byte* pc, CallFunctionOperand& operand) {
     if (Complete(pc, operand)) {
-      uint32_t expected = static_cast<uint32_t>(operand.sig->parameter_count());
-      if (operand.arity != expected) {
-        error(pc, pc + 1,
-              "arity mismatch in direct function call (expected %u, got %u)",
-              expected, operand.arity);
-        return false;
-      }
       return true;
     }
-    error(pc, pc + 1, "invalid function index");
+    error(pc, pc + 1, "invalid function index: %u", operand.index);
     return false;
   }
 
@@ -199,161 +215,28 @@
 
   inline bool Validate(const byte* pc, CallIndirectOperand& operand) {
     if (Complete(pc, operand)) {
-      uint32_t expected = static_cast<uint32_t>(operand.sig->parameter_count());
-      if (operand.arity != expected) {
-        error(pc, pc + 1,
-              "arity mismatch in indirect function call (expected %u, got %u)",
-              expected, operand.arity);
-        return false;
-      }
       return true;
     }
-    error(pc, pc + 1, "invalid signature index");
-    return false;
-  }
-
-  inline bool Complete(const byte* pc, CallImportOperand& operand) {
-    ModuleEnv* m = module_;
-    if (m && m->module && operand.index < m->module->import_table.size()) {
-      operand.sig = m->module->import_table[operand.index].sig;
-      return true;
-    }
-    return false;
-  }
-
-  inline bool Validate(const byte* pc, CallImportOperand& operand) {
-    if (Complete(pc, operand)) {
-      uint32_t expected = static_cast<uint32_t>(operand.sig->parameter_count());
-      if (operand.arity != expected) {
-        error(pc, pc + 1, "arity mismatch in import call (expected %u, got %u)",
-              expected, operand.arity);
-        return false;
-      }
-      return true;
-    }
-    error(pc, pc + 1, "invalid signature index");
+    error(pc, pc + 1, "invalid signature index: #%u", operand.index);
     return false;
   }
 
   inline bool Validate(const byte* pc, BreakDepthOperand& operand,
                        ZoneVector<Control>& control) {
-    if (operand.arity > 1) {
-      error(pc, pc + 1, "invalid arity for br or br_if");
-      return false;
-    }
     if (operand.depth < control.size()) {
       operand.target = &control[control.size() - operand.depth - 1];
       return true;
     }
-    error(pc, pc + 1, "invalid break depth");
+    error(pc, pc + 1, "invalid break depth: %u", operand.depth);
     return false;
   }
 
   bool Validate(const byte* pc, BranchTableOperand& operand,
                 size_t block_depth) {
-    if (operand.arity > 1) {
-      error(pc, pc + 1, "invalid arity for break");
-      return false;
-    }
-    // Verify table.
-    for (uint32_t i = 0; i < operand.table_count + 1; ++i) {
-      uint32_t target = operand.read_entry(this, i);
-      if (target >= block_depth) {
-        error(operand.table + i * 2, "improper branch in br_table");
-        return false;
-      }
-    }
+    // TODO(titzer): add extra redundant validation for br_table here?
     return true;
   }
 
-  unsigned OpcodeArity(const byte* pc) {
-#define DECLARE_ARITY(name, ...)                          \
-  static const LocalType kTypes_##name[] = {__VA_ARGS__}; \
-  static const int kArity_##name =                        \
-      static_cast<int>(arraysize(kTypes_##name) - 1);
-
-    FOREACH_SIGNATURE(DECLARE_ARITY);
-#undef DECLARE_ARITY
-
-    switch (static_cast<WasmOpcode>(*pc)) {
-      case kExprI8Const:
-      case kExprI32Const:
-      case kExprI64Const:
-      case kExprF64Const:
-      case kExprF32Const:
-      case kExprGetLocal:
-      case kExprGetGlobal:
-      case kExprNop:
-      case kExprUnreachable:
-      case kExprEnd:
-      case kExprBlock:
-      case kExprThrow:
-      case kExprTryCatch:
-      case kExprTryCatchFinally:
-      case kExprTryFinally:
-      case kExprFinally:
-      case kExprLoop:
-        return 0;
-
-      case kExprSetGlobal:
-      case kExprSetLocal:
-      case kExprElse:
-      case kExprCatch:
-        return 1;
-
-      case kExprBr: {
-        BreakDepthOperand operand(this, pc);
-        return operand.arity;
-      }
-      case kExprBrIf: {
-        BreakDepthOperand operand(this, pc);
-        return 1 + operand.arity;
-      }
-      case kExprBrTable: {
-        BranchTableOperand operand(this, pc);
-        return 1 + operand.arity;
-      }
-
-      case kExprIf:
-        return 1;
-      case kExprSelect:
-        return 3;
-
-      case kExprCallFunction: {
-        CallFunctionOperand operand(this, pc);
-        return operand.arity;
-      }
-      case kExprCallIndirect: {
-        CallIndirectOperand operand(this, pc);
-        return 1 + operand.arity;
-      }
-      case kExprCallImport: {
-        CallImportOperand operand(this, pc);
-        return operand.arity;
-      }
-      case kExprReturn: {
-        ReturnArityOperand operand(this, pc);
-        return operand.arity;
-      }
-
-#define DECLARE_OPCODE_CASE(name, opcode, sig) \
-  case kExpr##name:                            \
-    return kArity_##sig;
-
-        FOREACH_LOAD_MEM_OPCODE(DECLARE_OPCODE_CASE)
-        FOREACH_STORE_MEM_OPCODE(DECLARE_OPCODE_CASE)
-        FOREACH_MISC_MEM_OPCODE(DECLARE_OPCODE_CASE)
-        FOREACH_SIMPLE_OPCODE(DECLARE_OPCODE_CASE)
-        FOREACH_SIMPLE_MEM_OPCODE(DECLARE_OPCODE_CASE)
-        FOREACH_ASMJS_COMPAT_OPCODE(DECLARE_OPCODE_CASE)
-        FOREACH_SIMD_OPCODE(DECLARE_OPCODE_CASE)
-#undef DECLARE_OPCODE_CASE
-      default:
-        UNREACHABLE();
-        return 0;
-    }
-  }
-
   unsigned OpcodeLength(const byte* pc) {
     switch (static_cast<WasmOpcode>(*pc)) {
 #define DECLARE_OPCODE_CASE(name, opcode, sig) case kExpr##name:
@@ -361,7 +244,7 @@
       FOREACH_STORE_MEM_OPCODE(DECLARE_OPCODE_CASE)
 #undef DECLARE_OPCODE_CASE
       {
-        MemoryAccessOperand operand(this, pc);
+        MemoryAccessOperand operand(this, pc, UINT32_MAX);
         return 1 + operand.length;
       }
       case kExprBr:
@@ -383,12 +266,17 @@
         CallIndirectOperand operand(this, pc);
         return 1 + operand.length;
       }
-      case kExprCallImport: {
-        CallImportOperand operand(this, pc);
+
+      case kExprTry:
+      case kExprIf:  // fall thru
+      case kExprLoop:
+      case kExprBlock: {
+        BlockTypeOperand operand(this, pc);
         return 1 + operand.length;
       }
 
       case kExprSetLocal:
+      case kExprTeeLocal:
       case kExprGetLocal:
       case kExprCatch: {
         LocalIndexOperand operand(this, pc);
@@ -396,7 +284,8 @@
       }
       case kExprBrTable: {
         BranchTableOperand operand(this, pc);
-        return 1 + operand.length;
+        BranchTableIterator iterator(this, operand);
+        return 1 + iterator.length();
       }
       case kExprI32Const: {
         ImmI32Operand operand(this, pc);
@@ -412,17 +301,14 @@
         return 5;
       case kExprF64Const:
         return 9;
-      case kExprReturn: {
-        ReturnArityOperand operand(this, pc);
-        return 1 + operand.length;
-      }
-
       default:
         return 1;
     }
   }
 };
 
+static const int32_t kNullCatch = -1;
+
 // The full WASM decoder for bytecode. Both verifies bytecode and generates
 // a TurboFan IR graph.
 class WasmFullDecoder : public WasmDecoder {
@@ -434,7 +320,9 @@
         base_(body.base),
         local_type_vec_(zone),
         stack_(zone),
-        control_(zone) {
+        control_(zone),
+        last_end_found_(false),
+        current_catch_(kNullCatch) {
     local_types_ = &local_type_vec_;
   }
 
@@ -447,7 +335,7 @@
     control_.clear();
 
     if (end_ < pc_) {
-      error(pc_, "function body end < start");
+      error("function body end < start");
       return false;
     }
 
@@ -457,23 +345,55 @@
 
     if (failed()) return TraceFailed();
 
+#if IMPLICIT_FUNCTION_END
+    // With implicit end support (old style), the function block
+    // remains on the stack. Other control blocks are an error.
+    if (control_.size() > 1) {
+      error(pc_, control_.back().pc, "unterminated control structure");
+      return TraceFailed();
+    }
+
+    // Assume an implicit end to the function body block.
+    if (control_.size() == 1) {
+      Control* c = &control_.back();
+      if (ssa_env_->go()) {
+        FallThruTo(c);
+      }
+
+      if (c->end_env->go()) {
+        // Push the end values onto the stack.
+        stack_.resize(c->stack_depth);
+        if (c->merge.arity == 1) {
+          stack_.push_back(c->merge.vals.first);
+        } else {
+          for (unsigned i = 0; i < c->merge.arity; i++) {
+            stack_.push_back(c->merge.vals.array[i]);
+          }
+        }
+
+        TRACE("  @%-8d #xx:%-20s|", startrel(pc_), "ImplicitReturn");
+        SetEnv("function:end", c->end_env);
+        DoReturn();
+        TRACE("\n");
+      }
+    }
+#else
     if (!control_.empty()) {
       error(pc_, control_.back().pc, "unterminated control structure");
       return TraceFailed();
     }
 
-    if (ssa_env_->go()) {
-      TRACE("  @%-6d #xx:%-20s|", startrel(pc_), "ImplicitReturn");
-      DoReturn();
-      if (failed()) return TraceFailed();
-      TRACE("\n");
+    if (!last_end_found_) {
+      error("function body must end with \"end\" opcode.");
+      return false;
     }
+#endif
 
     if (FLAG_trace_wasm_decode_time) {
       double ms = decode_timer.Elapsed().InMillisecondsF();
-      PrintF("wasm-decode ok (%0.3f ms)\n\n", ms);
+      PrintF("wasm-decode %s (%0.3f ms)\n\n", ok() ? "ok" : "failed", ms);
     } else {
-      TRACE("wasm-decode ok\n\n");
+      TRACE("wasm-decode %s\n\n", ok() ? "ok" : "failed");
     }
 
     return true;
@@ -526,6 +446,11 @@
   ZoneVector<LocalType> local_type_vec_;  // types of local variables.
   ZoneVector<Value> stack_;               // stack of values.
   ZoneVector<Control> control_;           // stack of blocks, loops, and ifs.
+  bool last_end_found_;
+
+  int32_t current_catch_;
+
+  TryInfo* current_try_info() { return control_[current_catch_].try_info; }
 
   inline bool build() { return builder_ && ssa_env_->go(); }
 
@@ -574,6 +499,8 @@
         return builder_->Float32Constant(0);
       case kAstF64:
         return builder_->Float64Constant(0);
+      case kAstS128:
+        return builder_->DefaultS128Value();
       default:
         UNREACHABLE();
         return nullptr;
@@ -603,8 +530,13 @@
     }
     // Decode local declarations, if any.
     uint32_t entries = consume_u32v("local decls count");
+    TRACE("local decls count: %u\n", entries);
     while (entries-- > 0 && pc_ < limit_) {
       uint32_t count = consume_u32v("local count");
+      if (count > kMaxNumWasmLocals) {
+        error(pc_ - 1, "local count too large");
+        return;
+      }
       byte code = consume_u8("local type");
       LocalType type;
       switch (code) {
@@ -620,6 +552,9 @@
         case kLocalF64:
           type = kAstF64;
           break;
+        case kLocalS128:
+          type = kAstS128;
+          break;
         default:
           error(pc_ - 1, "invalid local type");
           return;
@@ -636,82 +571,68 @@
           reinterpret_cast<const void*>(limit_), baserel(pc_),
           static_cast<int>(limit_ - start_), builder_ ? "graph building" : "");
 
+    {
+      // Set up initial function block.
+      SsaEnv* break_env = ssa_env_;
+      SetEnv("initial env", Steal(break_env));
+      PushBlock(break_env);
+      Control* c = &control_.back();
+      c->merge.arity = static_cast<uint32_t>(sig_->return_count());
+
+      if (c->merge.arity == 1) {
+        c->merge.vals.first = {pc_, nullptr, sig_->GetReturn(0)};
+      } else if (c->merge.arity > 1) {
+        c->merge.vals.array = zone_->NewArray<Value>(c->merge.arity);
+        for (unsigned i = 0; i < c->merge.arity; i++) {
+          c->merge.vals.array[i] = {pc_, nullptr, sig_->GetReturn(i)};
+        }
+      }
+    }
+
     if (pc_ >= limit_) return;  // Nothing to do.
 
     while (true) {  // decoding loop.
       unsigned len = 1;
       WasmOpcode opcode = static_cast<WasmOpcode>(*pc_);
-      TRACE("  @%-6d #%02x:%-20s|", startrel(pc_), opcode,
-            WasmOpcodes::ShortOpcodeName(opcode));
+      if (!WasmOpcodes::IsPrefixOpcode(opcode)) {
+        TRACE("  @%-8d #%02x:%-20s|", startrel(pc_), opcode,
+              WasmOpcodes::ShortOpcodeName(opcode));
+      }
 
       FunctionSig* sig = WasmOpcodes::Signature(opcode);
       if (sig) {
-        // Fast case of a simple operator.
-        TFNode* node;
-        switch (sig->parameter_count()) {
-          case 1: {
-            Value val = Pop(0, sig->GetParam(0));
-            node = BUILD(Unop, opcode, val.node, position());
-            break;
-          }
-          case 2: {
-            Value rval = Pop(1, sig->GetParam(1));
-            Value lval = Pop(0, sig->GetParam(0));
-            node = BUILD(Binop, opcode, lval.node, rval.node, position());
-            break;
-          }
-          default:
-            UNREACHABLE();
-            node = nullptr;
-            break;
-        }
-        Push(GetReturnType(sig), node);
+        BuildSimpleOperator(opcode, sig);
       } else {
         // Complex bytecode.
         switch (opcode) {
           case kExprNop:
-            Push(kAstStmt, nullptr);
             break;
           case kExprBlock: {
             // The break environment is the outer environment.
+            BlockTypeOperand operand(this, pc_);
             SsaEnv* break_env = ssa_env_;
             PushBlock(break_env);
             SetEnv("block:start", Steal(break_env));
+            SetBlockType(&control_.back(), operand);
+            len = 1 + operand.length;
             break;
           }
           case kExprThrow: {
             CHECK_PROTOTYPE_OPCODE(wasm_eh_prototype);
-            Pop(0, kAstI32);
-
-            // TODO(jpp): start exception propagation.
+            Value value = Pop(0, kAstI32);
+            BUILD(Throw, value.node);
             break;
           }
-          case kExprTryCatch: {
+          case kExprTry: {
             CHECK_PROTOTYPE_OPCODE(wasm_eh_prototype);
+            BlockTypeOperand operand(this, pc_);
             SsaEnv* outer_env = ssa_env_;
             SsaEnv* try_env = Steal(outer_env);
-            SsaEnv* catch_env = Split(try_env);
-            PushTry(outer_env, catch_env, nullptr);
+            SsaEnv* catch_env = UnreachableEnv();
+            PushTry(outer_env, catch_env);
             SetEnv("try_catch:start", try_env);
-            break;
-          }
-          case kExprTryCatchFinally: {
-            CHECK_PROTOTYPE_OPCODE(wasm_eh_prototype);
-            SsaEnv* outer_env = ssa_env_;
-            SsaEnv* try_env = Steal(outer_env);
-            SsaEnv* catch_env = Split(try_env);
-            SsaEnv* finally_env = Split(try_env);
-            PushTry(finally_env, catch_env, outer_env);
-            SetEnv("try_catch_finally:start", try_env);
-            break;
-          }
-          case kExprTryFinally: {
-            CHECK_PROTOTYPE_OPCODE(wasm_eh_prototype);
-            SsaEnv* outer_env = ssa_env_;
-            SsaEnv* try_env = Steal(outer_env);
-            SsaEnv* finally_env = Split(outer_env);
-            PushTry(finally_env, nullptr, outer_env);
-            SetEnv("try_finally:start", try_env);
+            SetBlockType(&control_.back(), operand);
+            len = 1 + operand.length;
             break;
           }
           case kExprCatch: {
@@ -720,97 +641,57 @@
             len = 1 + operand.length;
 
             if (control_.empty()) {
-              error(pc_, "catch does not match a any try");
+              error("catch does not match any try");
               break;
             }
 
             Control* c = &control_.back();
-            if (!c->has_catch()) {
-              error(pc_, "catch does not match a try with catch");
+            if (!c->is_try()) {
+              error("catch does not match any try");
               break;
             }
 
-            if (c->catch_env == nullptr) {
+            if (c->try_info->catch_env == nullptr) {
               error(pc_, "catch already present for try with catch");
               break;
             }
 
-            Goto(ssa_env_, c->end_env);
+            if (ssa_env_->go()) {
+              MergeValuesInto(c);
+            }
+            stack_.resize(c->stack_depth);
 
-            SsaEnv* catch_env = c->catch_env;
-            c->catch_env = nullptr;
+            DCHECK_NOT_NULL(c->try_info);
+            SsaEnv* catch_env = c->try_info->catch_env;
+            c->try_info->catch_env = nullptr;
             SetEnv("catch:begin", catch_env);
+            current_catch_ = c->previous_catch;
 
             if (Validate(pc_, operand)) {
-              // TODO(jpp): figure out how thrown value is propagated. It is
-              // unlikely to be a value on the stack.
               if (ssa_env_->locals) {
-                ssa_env_->locals[operand.index] = nullptr;
+                TFNode* exception_as_i32 =
+                    BUILD(Catch, c->try_info->exception, position());
+                ssa_env_->locals[operand.index] = exception_as_i32;
               }
             }
 
-            PopUpTo(c->stack_depth);
-
-            break;
-          }
-          case kExprFinally: {
-            CHECK_PROTOTYPE_OPCODE(wasm_eh_prototype);
-            if (control_.empty()) {
-              error(pc_, "finally does not match a any try");
-              break;
-            }
-
-            Control* c = &control_.back();
-            if (c->has_catch() && c->catch_env != nullptr) {
-              error(pc_, "missing catch for try with catch and finally");
-              break;
-            }
-
-            if (!c->has_finally()) {
-              error(pc_, "finally does not match a try with finally");
-              break;
-            }
-
-            if (c->finish_try_env == nullptr) {
-              error(pc_, "finally already present for try with finally");
-              break;
-            }
-
-            // ssa_env_ is either the env for either the try or the catch, but
-            // it does not matter: either way we need to direct the control flow
-            // to the end_env, which is the env for the finally.
-            // c->finish_try_env is the the environment enclosing the try block.
-            Goto(ssa_env_, c->end_env);
-
-            PopUpTo(c->stack_depth);
-
-            // The current environment becomes end_env, and finish_try_env
-            // becomes the new end_env. This ensures that any control flow
-            // leaving a try block up to now will do so by branching to the
-            // finally block. Setting the end_env to be finish_try_env ensures
-            // that kExprEnd below can handle the try block as it would any
-            // other block construct.
-            SsaEnv* finally_env = c->end_env;
-            c->end_env = c->finish_try_env;
-            SetEnv("finally:begin", finally_env);
-            c->finish_try_env = nullptr;
-
             break;
           }
           case kExprLoop: {
-            // The break environment is the outer environment.
-            SsaEnv* break_env = ssa_env_;
-            PushBlock(break_env);
-            SsaEnv* finish_try_env = Steal(break_env);
+            BlockTypeOperand operand(this, pc_);
+            SsaEnv* finish_try_env = Steal(ssa_env_);
             // The continue environment is the inner environment.
             PrepareForLoop(pc_, finish_try_env);
             SetEnv("loop:start", Split(finish_try_env));
             ssa_env_->SetNotMerged();
             PushLoop(finish_try_env);
+            SetBlockType(&control_.back(), operand);
+            len = 1 + operand.length;
             break;
           }
           case kExprIf: {
             // Condition on top of stack. Split environments for branches.
+            BlockTypeOperand operand(this, pc_);
             Value cond = Pop(0, kAstI32);
             TFNode* if_true = nullptr;
             TFNode* if_false = nullptr;
@@ -822,11 +703,13 @@
             true_env->control = if_true;
             PushIf(end_env, false_env);
             SetEnv("if:true", true_env);
+            SetBlockType(&control_.back(), operand);
+            len = 1 + operand.length;
             break;
           }
           case kExprElse: {
             if (control_.empty()) {
-              error(pc_, "else does not match any if");
+              error("else does not match any if");
               break;
             }
             Control* c = &control_.back();
@@ -838,31 +721,38 @@
               error(pc_, c->pc, "else already present for if");
               break;
             }
-            Value val = PopUpTo(c->stack_depth);
-            MergeInto(c->end_env, &c->node, &c->type, val);
+            FallThruTo(c);
             // Switch to environment for false branch.
+            stack_.resize(c->stack_depth);
             SetEnv("if_else:false", c->false_env);
             c->false_env = nullptr;  // record that an else is already seen
             break;
           }
           case kExprEnd: {
             if (control_.empty()) {
-              error(pc_, "end does not match any if or block");
-              break;
+              error("end does not match any if, try, or block");
+              return;
             }
             const char* name = "block:end";
             Control* c = &control_.back();
-            Value val = PopUpTo(c->stack_depth);
-            if (c->is_loop) {
-              // Loops always push control in pairs.
-              control_.pop_back();
-              c = &control_.back();
-              name = "loop:end";
-            } else if (c->is_if()) {
+            if (c->is_loop()) {
+              // A loop just leaves the values on the stack.
+              TypeCheckLoopFallThru(c);
+              PopControl();
+              SetEnv("loop:end", ssa_env_);
+              break;
+            }
+            if (c->is_if()) {
               if (c->false_env != nullptr) {
                 // End the true branch of a one-armed if.
                 Goto(c->false_env, c->end_env);
-                val = {val.pc, nullptr, kAstStmt};
+                if (ssa_env_->go() && stack_.size() != c->stack_depth) {
+                  error("end of if expected empty stack");
+                  stack_.resize(c->stack_depth);
+                }
+                if (c->merge.arity > 0) {
+                  error("non-void one-armed if");
+                }
                 name = "if:merge";
               } else {
                 // End the false branch of a two-armed if.
@@ -871,28 +761,41 @@
             } else if (c->is_try()) {
               name = "try:end";
 
-              // try blocks do not yield a value.
-              val = {val.pc, nullptr, kAstStmt};
-
-              // validate that catch/finally were seen.
-              if (c->catch_env != nullptr) {
-                error(pc_, "missing catch in try with catch");
-                break;
-              }
-
-              if (c->finish_try_env != nullptr) {
-                error(pc_, "missing finally in try with finally");
+              // validate that catch was seen.
+              if (c->try_info->catch_env != nullptr) {
+                error(pc_, "missing catch in try");
                 break;
               }
             }
-
-            if (ssa_env_->go()) {
-              MergeInto(c->end_env, &c->node, &c->type, val);
-            }
+            FallThruTo(c);
             SetEnv(name, c->end_env);
+
+            // Push the end values onto the stack.
             stack_.resize(c->stack_depth);
-            Push(c->type, c->node);
-            control_.pop_back();
+            if (c->merge.arity == 1) {
+              stack_.push_back(c->merge.vals.first);
+            } else {
+              for (unsigned i = 0; i < c->merge.arity; i++) {
+                stack_.push_back(c->merge.vals.array[i]);
+              }
+            }
+
+            PopControl();
+
+            if (control_.empty()) {
+              // If the last (implicit) control was popped, check we are at end.
+              if (pc_ + 1 != end_) {
+                error(pc_, pc_ + 1, "trailing code after function end");
+              }
+              last_end_found_ = true;
+              if (ssa_env_->go()) {
+                // The result of the block is the return value.
+                TRACE("  @%-8d #xx:%-20s|", startrel(pc_), "ImplicitReturn");
+                DoReturn();
+                TRACE("\n");
+              }
+              return;
+            }
             break;
           }
           case kExprSelect: {
@@ -901,7 +804,7 @@
             Value tval = Pop();
             if (tval.type == kAstStmt || tval.type != fval.type) {
               if (tval.type != kAstEnd && fval.type != kAstEnd) {
-                error(pc_, "type mismatch in select");
+                error("type mismatch in select");
                 break;
               }
             }
@@ -923,39 +826,33 @@
           }
           case kExprBr: {
             BreakDepthOperand operand(this, pc_);
-            Value val = {pc_, nullptr, kAstStmt};
-            if (operand.arity) val = Pop();
             if (Validate(pc_, operand, control_)) {
-              BreakTo(operand.target, val);
+              BreakTo(operand.depth);
             }
             len = 1 + operand.length;
-            Push(kAstEnd, nullptr);
+            EndControl();
             break;
           }
           case kExprBrIf: {
             BreakDepthOperand operand(this, pc_);
-            Value cond = Pop(operand.arity, kAstI32);
-            Value val = {pc_, nullptr, kAstStmt};
-            if (operand.arity == 1) val = Pop();
-            if (Validate(pc_, operand, control_)) {
+            Value cond = Pop(0, kAstI32);
+            if (ok() && Validate(pc_, operand, control_)) {
               SsaEnv* fenv = ssa_env_;
               SsaEnv* tenv = Split(fenv);
               fenv->SetNotMerged();
               BUILD(Branch, cond.node, &tenv->control, &fenv->control);
               ssa_env_ = tenv;
-              BreakTo(operand.target, val);
+              BreakTo(operand.depth);
               ssa_env_ = fenv;
             }
             len = 1 + operand.length;
-            Push(kAstStmt, nullptr);
             break;
           }
           case kExprBrTable: {
             BranchTableOperand operand(this, pc_);
+            BranchTableIterator iterator(this, operand);
             if (Validate(pc_, operand, control_.size())) {
-              Value key = Pop(operand.arity, kAstI32);
-              Value val = {pc_, nullptr, kAstStmt};
-              if (operand.arity == 1) val = Pop();
+              Value key = Pop(0, kAstI32);
               if (failed()) break;
 
               SsaEnv* break_env = ssa_env_;
@@ -965,42 +862,43 @@
 
                 SsaEnv* copy = Steal(break_env);
                 ssa_env_ = copy;
-                for (uint32_t i = 0; i < operand.table_count + 1; ++i) {
-                  uint16_t target = operand.read_entry(this, i);
+                while (iterator.has_next()) {
+                  uint32_t i = iterator.cur_index();
+                  const byte* pos = iterator.pc();
+                  uint32_t target = iterator.next();
+                  if (target >= control_.size()) {
+                    error(pos, "improper branch in br_table");
+                    break;
+                  }
                   ssa_env_ = Split(copy);
                   ssa_env_->control = (i == operand.table_count)
                                           ? BUILD(IfDefault, sw)
                                           : BUILD(IfValue, i, sw);
-                  int depth = target;
-                  Control* c = &control_[control_.size() - depth - 1];
-                  MergeInto(c->end_env, &c->node, &c->type, val);
+                  BreakTo(target);
                 }
               } else {
                 // Only a default target. Do the equivalent of br.
-                uint16_t target = operand.read_entry(this, 0);
-                int depth = target;
-                Control* c = &control_[control_.size() - depth - 1];
-                MergeInto(c->end_env, &c->node, &c->type, val);
+                const byte* pos = iterator.pc();
+                uint32_t target = iterator.next();
+                if (target >= control_.size()) {
+                  error(pos, "improper branch in br_table");
+                  break;
+                }
+                BreakTo(target);
               }
               // br_table ends the control flow like br.
               ssa_env_ = break_env;
-              Push(kAstStmt, nullptr);
             }
-            len = 1 + operand.length;
+            len = 1 + iterator.length();
             break;
           }
           case kExprReturn: {
-            ReturnArityOperand operand(this, pc_);
-            if (operand.arity != sig_->return_count()) {
-              error(pc_, pc_ + 1, "arity mismatch in return");
-            }
             DoReturn();
-            len = 1 + operand.length;
             break;
           }
           case kExprUnreachable: {
-            Push(kAstEnd, BUILD(Unreachable, position()));
-            ssa_env_->Kill(SsaEnv::kControlEnd);
+            BUILD(Unreachable, position());
+            EndControl();
             break;
           }
           case kExprI8Const: {
@@ -1050,11 +948,24 @@
             if (Validate(pc_, operand)) {
               Value val = Pop(0, local_type_vec_[operand.index]);
               if (ssa_env_->locals) ssa_env_->locals[operand.index] = val.node;
+            }
+            len = 1 + operand.length;
+            break;
+          }
+          case kExprTeeLocal: {
+            LocalIndexOperand operand(this, pc_);
+            if (Validate(pc_, operand)) {
+              Value val = Pop(0, local_type_vec_[operand.index]);
+              if (ssa_env_->locals) ssa_env_->locals[operand.index] = val.node;
               Push(val.type, val.node);
             }
             len = 1 + operand.length;
             break;
           }
+          case kExprDrop: {
+            Pop();
+            break;
+          }
           case kExprGetGlobal: {
             GlobalIndexOperand operand(this, pc_);
             if (Validate(pc_, operand)) {
@@ -1066,9 +977,13 @@
           case kExprSetGlobal: {
             GlobalIndexOperand operand(this, pc_);
             if (Validate(pc_, operand)) {
-              Value val = Pop(0, operand.type);
-              BUILD(SetGlobal, operand.index, val.node);
-              Push(val.type, val.node);
+              if (operand.global->mutability) {
+                Value val = Pop(0, operand.type);
+                BUILD(SetGlobal, operand.index, val.node);
+              } else {
+                error(pc_, pc_ + 1, "immutable global #%u cannot be assigned",
+                      operand.index);
+              }
             }
             len = 1 + operand.length;
             break;
@@ -1088,7 +1003,6 @@
           case kExprI32LoadMem:
             len = DecodeLoadMem(kAstI32, MachineType::Int32());
             break;
-
           case kExprI64LoadMem8S:
             len = DecodeLoadMem(kAstI64, MachineType::Int8());
             break;
@@ -1143,17 +1057,24 @@
           case kExprF64StoreMem:
             len = DecodeStoreMem(kAstF64, MachineType::Float64());
             break;
-
+          case kExprGrowMemory:
+            if (module_->origin != kAsmJsOrigin) {
+              Value val = Pop(0, kAstI32);
+              Push(kAstI32, BUILD(GrowMemory, val.node));
+            } else {
+              error("grow_memory is not supported for asmjs modules");
+            }
+            break;
           case kExprMemorySize:
-            Push(kAstI32, BUILD(MemSize, 0));
+            Push(kAstI32, BUILD(CurrentMemoryPages));
             break;
           case kExprCallFunction: {
             CallFunctionOperand operand(this, pc_);
             if (Validate(pc_, operand)) {
               TFNode** buffer = PopArgs(operand.sig);
-              TFNode* call =
-                  BUILD(CallDirect, operand.index, buffer, position());
-              Push(GetReturnType(operand.sig), call);
+              TFNode** rets = nullptr;
+              BUILD(CallDirect, operand.index, buffer, &rets, position());
+              PushReturns(operand.sig, rets);
             }
             len = 1 + operand.length;
             break;
@@ -1161,23 +1082,12 @@
           case kExprCallIndirect: {
             CallIndirectOperand operand(this, pc_);
             if (Validate(pc_, operand)) {
-              TFNode** buffer = PopArgs(operand.sig);
               Value index = Pop(0, kAstI32);
-              if (buffer) buffer[0] = index.node;
-              TFNode* call =
-                  BUILD(CallIndirect, operand.index, buffer, position());
-              Push(GetReturnType(operand.sig), call);
-            }
-            len = 1 + operand.length;
-            break;
-          }
-          case kExprCallImport: {
-            CallImportOperand operand(this, pc_);
-            if (Validate(pc_, operand)) {
               TFNode** buffer = PopArgs(operand.sig);
-              TFNode* call =
-                  BUILD(CallImport, operand.index, buffer, position());
-              Push(GetReturnType(operand.sig), call);
+              if (buffer) buffer[0] = index.node;
+              TFNode** rets = nullptr;
+              BUILD(CallIndirect, operand.index, buffer, &rets, position());
+              PushReturns(operand.sig, rets);
             }
             len = 1 + operand.length;
             break;
@@ -1187,20 +1097,34 @@
             len++;
             byte simd_index = *(pc_ + 1);
             opcode = static_cast<WasmOpcode>(opcode << 8 | simd_index);
-            DecodeSimdOpcode(opcode);
+            TRACE("  @%-4d #%02x #%02x:%-20s|", startrel(pc_), kSimdPrefix,
+                  simd_index, WasmOpcodes::ShortOpcodeName(opcode));
+            len += DecodeSimdOpcode(opcode);
             break;
           }
-          default:
-            error("Invalid opcode");
-            return;
+          default: {
+            // Deal with special asmjs opcodes.
+            if (module_ && module_->origin == kAsmJsOrigin) {
+              sig = WasmOpcodes::AsmjsSignature(opcode);
+              if (sig) {
+                BuildSimpleOperator(opcode, sig);
+              }
+            } else {
+              error("Invalid opcode");
+              return;
+            }
+          }
         }
-      }  // end complex bytecode
+      }
 
 #if DEBUG
       if (FLAG_trace_wasm_decoder) {
         for (size_t i = 0; i < stack_.size(); ++i) {
           Value& val = stack_[i];
           WasmOpcode opcode = static_cast<WasmOpcode>(*val.pc);
+          if (WasmOpcodes::IsPrefixOpcode(opcode)) {
+            opcode = static_cast<WasmOpcode>(opcode << 8 | *(val.pc + 1));
+          }
           PrintF(" %c@%d:%s", WasmOpcodes::ShortNameOf(val.type),
                  static_cast<int>(val.pc - start_),
                  WasmOpcodes::ShortOpcodeName(opcode));
@@ -1215,7 +1139,8 @@
               PrintF("[%u]", operand.index);
               break;
             }
-            case kExprSetLocal: {
+            case kExprSetLocal:  // fallthru
+            case kExprTeeLocal: {
               LocalIndexOperand operand(this, val.pc);
               PrintF("[%u]", operand.index);
               break;
@@ -1234,7 +1159,21 @@
         return;
       }
     }  // end decode loop
-  }    // end DecodeFunctionBody()
+  }
+
+  void EndControl() { ssa_env_->Kill(SsaEnv::kControlEnd); }
+
+  void SetBlockType(Control* c, BlockTypeOperand& operand) {
+    c->merge.arity = operand.arity;
+    if (c->merge.arity == 1) {
+      c->merge.vals.first = {pc_, nullptr, operand.read_entry(0)};
+    } else if (c->merge.arity > 1) {
+      c->merge.vals.array = zone_->NewArray<Value>(c->merge.arity);
+      for (unsigned i = 0; i < c->merge.arity; i++) {
+        c->merge.vals.array[i] = {pc_, nullptr, operand.read_entry(i)};
+      }
+    }
+  }
 
   TFNode** PopArgs(FunctionSig* sig) {
     if (build()) {
@@ -1260,27 +1199,35 @@
 
   void PushBlock(SsaEnv* end_env) {
     const int stack_depth = static_cast<int>(stack_.size());
-    control_.emplace_back(Control::Block(pc_, stack_depth, end_env));
+    control_.emplace_back(
+        Control::Block(pc_, stack_depth, end_env, current_catch_));
   }
 
   void PushLoop(SsaEnv* end_env) {
     const int stack_depth = static_cast<int>(stack_.size());
-    control_.emplace_back(Control::Loop(pc_, stack_depth, end_env));
+    control_.emplace_back(
+        Control::Loop(pc_, stack_depth, end_env, current_catch_));
   }
 
   void PushIf(SsaEnv* end_env, SsaEnv* false_env) {
     const int stack_depth = static_cast<int>(stack_.size());
-    control_.emplace_back(Control::If(pc_, stack_depth, end_env, false_env));
+    control_.emplace_back(
+        Control::If(pc_, stack_depth, end_env, false_env, current_catch_));
   }
 
-  void PushTry(SsaEnv* end_env, SsaEnv* catch_env, SsaEnv* finish_try_env) {
+  void PushTry(SsaEnv* end_env, SsaEnv* catch_env) {
     const int stack_depth = static_cast<int>(stack_.size());
-    control_.emplace_back(
-        Control::Try(pc_, stack_depth, end_env, catch_env, finish_try_env));
+    control_.emplace_back(Control::Try(pc_, stack_depth, end_env, zone_,
+                                       catch_env, current_catch_));
+    current_catch_ = static_cast<int32_t>(control_.size() - 1);
   }
 
+  void PopControl() { control_.pop_back(); }
+
   int DecodeLoadMem(LocalType type, MachineType mem_type) {
-    MemoryAccessOperand operand(this, pc_);
+    MemoryAccessOperand operand(this, pc_,
+                                ElementSizeLog2Of(mem_type.representation()));
+
     Value index = Pop(0, kAstI32);
     TFNode* node = BUILD(LoadMem, type, mem_type, index.node, operand.offset,
                          operand.alignment, position());
@@ -1289,24 +1236,45 @@
   }
 
   int DecodeStoreMem(LocalType type, MachineType mem_type) {
-    MemoryAccessOperand operand(this, pc_);
+    MemoryAccessOperand operand(this, pc_,
+                                ElementSizeLog2Of(mem_type.representation()));
     Value val = Pop(1, type);
     Value index = Pop(0, kAstI32);
     BUILD(StoreMem, mem_type, index.node, operand.offset, operand.alignment,
           val.node, position());
-    Push(type, val.node);
     return 1 + operand.length;
   }
 
-  void DecodeSimdOpcode(WasmOpcode opcode) {
-    FunctionSig* sig = WasmOpcodes::Signature(opcode);
-    compiler::NodeVector inputs(sig->parameter_count(), zone_);
-    for (size_t i = sig->parameter_count(); i > 0; i--) {
-      Value val = Pop(static_cast<int>(i - 1), sig->GetParam(i - 1));
-      inputs[i - 1] = val.node;
+  unsigned DecodeSimdOpcode(WasmOpcode opcode) {
+    unsigned len = 0;
+    switch (opcode) {
+      case kExprI32x4ExtractLane: {
+        uint8_t lane = this->checked_read_u8(pc_, 2, "lane number");
+        if (lane < 0 || lane > 3) {
+          error(pc_, pc_ + 2, "invalid extract lane value");
+        }
+        TFNode* input = Pop(0, LocalType::kSimd128).node;
+        TFNode* node = BUILD(SimdExtractLane, opcode, lane, input);
+        Push(LocalType::kWord32, node);
+        len++;
+        break;
+      }
+      default: {
+        FunctionSig* sig = WasmOpcodes::Signature(opcode);
+        if (sig != nullptr) {
+          compiler::NodeVector inputs(sig->parameter_count(), zone_);
+          for (size_t i = sig->parameter_count(); i > 0; i--) {
+            Value val = Pop(static_cast<int>(i - 1), sig->GetParam(i - 1));
+            inputs[i - 1] = val.node;
+          }
+          TFNode* node = BUILD(SimdOp, opcode, inputs);
+          Push(GetReturnType(sig), node);
+        } else {
+          error("invalid simd opcode");
+        }
+      }
     }
-    TFNode* node = BUILD(SimdOp, opcode, inputs);
-    Push(GetReturnType(sig), node);
+    return len;
   }
 
   void DoReturn() {
@@ -1320,12 +1288,21 @@
       if (buffer) buffer[i] = val.node;
     }
 
-    Push(kAstEnd, BUILD(Return, count, buffer));
-    ssa_env_->Kill(SsaEnv::kControlEnd);
+    BUILD(Return, count, buffer);
+    EndControl();
   }
 
   void Push(LocalType type, TFNode* node) {
-    stack_.push_back({pc_, node, type});
+    if (type != kAstStmt && type != kAstEnd) {
+      stack_.push_back({pc_, node, type});
+    }
+  }
+
+  void PushReturns(FunctionSig* sig, TFNode** rets) {
+    for (size_t i = 0; i < sig->return_count(); i++) {
+      // When verifying only, then {rets} will be null, so push null.
+      Push(sig->GetReturn(i), rets ? rets[i] : nullptr);
+    }
   }
 
   const char* SafeOpcodeNameAt(const byte* pc) {
@@ -1334,6 +1311,10 @@
   }
 
   Value Pop(int index, LocalType expected) {
+    if (!ssa_env_->go()) {
+      // Unreachable code is essentially not typechecked.
+      return {pc_, nullptr, expected};
+    }
     Value val = Pop();
     if (val.type != expected) {
       if (val.type != kAstEnd) {
@@ -1346,6 +1327,10 @@
   }
 
   Value Pop() {
+    if (!ssa_env_->go()) {
+      // Unreachable code is essentially not typechecked.
+      return {pc_, nullptr, kAstEnd};
+    }
     size_t limit = control_.empty() ? 0 : control_.back().stack_depth;
     if (stack_.size() <= limit) {
       Value val = {pc_, nullptr, kAstStmt};
@@ -1358,6 +1343,10 @@
   }
 
   Value PopUpTo(int stack_depth) {
+    if (!ssa_env_->go()) {
+      // Unreachable code is essentially not typechecked.
+      return {pc_, nullptr, kAstEnd};
+    }
     if (stack_depth == stack_.size()) {
       Value val = {pc_, nullptr, kAstStmt};
       return val;
@@ -1375,34 +1364,82 @@
 
   int startrel(const byte* ptr) { return static_cast<int>(ptr - start_); }
 
-  void BreakTo(Control* block, Value& val) {
-    if (block->is_loop) {
+  void BreakTo(unsigned depth) {
+    if (!ssa_env_->go()) return;
+    Control* c = &control_[control_.size() - depth - 1];
+    if (c->is_loop()) {
       // This is the inner loop block, which does not have a value.
-      Goto(ssa_env_, block->end_env);
+      Goto(ssa_env_, c->end_env);
     } else {
-      // Merge the value into the production for the block.
-      MergeInto(block->end_env, &block->node, &block->type, val);
+      // Merge the value(s) into the end of the block.
+      if (static_cast<size_t>(c->stack_depth + c->merge.arity) >
+          stack_.size()) {
+        error(
+            pc_, pc_,
+            "expected at least %d values on the stack for br to @%d, found %d",
+            c->merge.arity, startrel(c->pc),
+            static_cast<int>(stack_.size() - c->stack_depth));
+        return;
+      }
+      MergeValuesInto(c);
     }
   }
 
-  void MergeInto(SsaEnv* target, TFNode** node, LocalType* type, Value& val) {
+  void FallThruTo(Control* c) {
     if (!ssa_env_->go()) return;
-    DCHECK_NE(kAstEnd, val.type);
+    // Merge the value(s) into the end of the block.
+    int arity = static_cast<int>(c->merge.arity);
+    if (c->stack_depth + arity != stack_.size()) {
+      error(pc_, pc_, "expected %d elements on the stack for fallthru to @%d",
+            arity, startrel(c->pc));
+      return;
+    }
+    MergeValuesInto(c);
+  }
 
+  inline Value& GetMergeValueFromStack(Control* c, int i) {
+    return stack_[stack_.size() - c->merge.arity + i];
+  }
+
+  void TypeCheckLoopFallThru(Control* c) {
+    if (!ssa_env_->go()) return;
+    // Fallthru must match arity exactly.
+    int arity = static_cast<int>(c->merge.arity);
+    if (c->stack_depth + arity != stack_.size()) {
+      error(pc_, pc_, "expected %d elements on the stack for fallthru to @%d",
+            arity, startrel(c->pc));
+      return;
+    }
+    // Typecheck the values left on the stack.
+    for (unsigned i = 0; i < c->merge.arity; i++) {
+      Value& val = GetMergeValueFromStack(c, i);
+      Value& old =
+          c->merge.arity == 1 ? c->merge.vals.first : c->merge.vals.array[i];
+      if (val.type != old.type) {
+        error(pc_, pc_, "type error in merge[%d] (expected %s, got %s)", i,
+              WasmOpcodes::TypeName(old.type), WasmOpcodes::TypeName(val.type));
+        return;
+      }
+    }
+  }
+
+  void MergeValuesInto(Control* c) {
+    SsaEnv* target = c->end_env;
     bool first = target->state == SsaEnv::kUnreachable;
     Goto(ssa_env_, target);
 
-    if (first) {
-      // first merge to this environment; set the type and the node.
-      *type = val.type;
-      *node = val.node;
-    } else if (val.type == *type && val.type != kAstStmt) {
-      // merge with the existing value for this block.
-      *node = CreateOrMergeIntoPhi(*type, target->control, *node, val.node);
-    } else {
-      // types don't match, or block is already a stmt.
-      *type = kAstStmt;
-      *node = nullptr;
+    for (unsigned i = 0; i < c->merge.arity; i++) {
+      Value& val = GetMergeValueFromStack(c, i);
+      Value& old =
+          c->merge.arity == 1 ? c->merge.vals.first : c->merge.vals.array[i];
+      if (val.type != old.type) {
+        error(pc_, pc_, "type error in merge[%d] (expected %s, got %s)", i,
+              WasmOpcodes::TypeName(old.type), WasmOpcodes::TypeName(val.type));
+        return;
+      }
+      old.node =
+          first ? val.node : CreateOrMergeIntoPhi(old.type, target->control,
+                                                  old.node, val.node);
     }
   }
 
@@ -1442,6 +1479,45 @@
     }
   }
 
+  TFNode* CheckForException(TFNode* node) {
+    if (node == nullptr) {
+      return nullptr;
+    }
+
+    const bool inside_try_scope = current_catch_ != kNullCatch;
+
+    if (!inside_try_scope) {
+      return node;
+    }
+
+    TFNode* if_success = nullptr;
+    TFNode* if_exception = nullptr;
+    if (!builder_->ThrowsException(node, &if_success, &if_exception)) {
+      return node;
+    }
+
+    SsaEnv* success_env = Steal(ssa_env_);
+    success_env->control = if_success;
+
+    SsaEnv* exception_env = Split(success_env);
+    exception_env->control = if_exception;
+    TryInfo* try_info = current_try_info();
+    Goto(exception_env, try_info->catch_env);
+    TFNode* exception = try_info->exception;
+    if (exception == nullptr) {
+      DCHECK_EQ(SsaEnv::kReached, try_info->catch_env->state);
+      try_info->exception = if_exception;
+    } else {
+      DCHECK_EQ(SsaEnv::kMerged, try_info->catch_env->state);
+      try_info->exception =
+          CreateOrMergeIntoPhi(kAstI32, try_info->catch_env->control,
+                               try_info->exception, if_exception);
+    }
+
+    SetEnv("if_success", success_env);
+    return node;
+  }
+
   void Goto(SsaEnv* from, SsaEnv* to) {
     DCHECK_NOT_NULL(to);
     if (!from->go()) return;
@@ -1630,16 +1706,15 @@
         case kExprLoop:
         case kExprIf:
         case kExprBlock:
-        case kExprTryCatch:
-        case kExprTryCatchFinally:
-        case kExprTryFinally:
+        case kExprTry:
+          length = OpcodeLength(pc);
           depth++;
-          DCHECK_EQ(1, OpcodeLength(pc));
           break;
-        case kExprSetLocal: {
+        case kExprSetLocal:  // fallthru
+        case kExprTeeLocal: {
           LocalIndexOperand operand(this, pc);
           if (assigned->length() > 0 &&
-              static_cast<int>(operand.index) < assigned->length()) {
+              operand.index < static_cast<uint32_t>(assigned->length())) {
             // Unverified code might have an out-of-bounds index.
             assigned->Add(operand.index);
           }
@@ -1664,11 +1739,33 @@
     DCHECK_EQ(pc_ - start_, offset);  // overflows cannot happen
     return offset;
   }
+
+  inline void BuildSimpleOperator(WasmOpcode opcode, FunctionSig* sig) {
+    TFNode* node;
+    switch (sig->parameter_count()) {
+      case 1: {
+        Value val = Pop(0, sig->GetParam(0));
+        node = BUILD(Unop, opcode, val.node, position());
+        break;
+      }
+      case 2: {
+        Value rval = Pop(1, sig->GetParam(1));
+        Value lval = Pop(0, sig->GetParam(0));
+        node = BUILD(Binop, opcode, lval.node, rval.node, position());
+        break;
+      }
+      default:
+        UNREACHABLE();
+        node = nullptr;
+        break;
+    }
+    Push(GetReturnType(sig), node);
+  }
 };
 
 bool DecodeLocalDecls(AstLocalDecls& decls, const byte* start,
                       const byte* end) {
-  base::AccountingAllocator allocator;
+  AccountingAllocator allocator;
   Zone tmp(&allocator);
   FunctionBody body = {nullptr, nullptr, nullptr, start, end};
   WasmFullDecoder decoder(&tmp, nullptr, body);
@@ -1686,7 +1783,7 @@
   }
 }
 
-DecodeResult VerifyWasmCode(base::AccountingAllocator* allocator,
+DecodeResult VerifyWasmCode(AccountingAllocator* allocator,
                             FunctionBody& body) {
   Zone zone(allocator);
   WasmFullDecoder decoder(&zone, nullptr, body);
@@ -1694,8 +1791,8 @@
   return decoder.toResult<DecodeStruct*>(nullptr);
 }
 
-DecodeResult BuildTFGraph(base::AccountingAllocator* allocator,
-                          TFBuilder* builder, FunctionBody& body) {
+DecodeResult BuildTFGraph(AccountingAllocator* allocator, TFBuilder* builder,
+                          FunctionBody& body) {
   Zone zone(allocator);
   WasmFullDecoder decoder(&zone, builder, body);
   decoder.Decode();
@@ -1707,18 +1804,13 @@
   return decoder.OpcodeLength(pc);
 }
 
-unsigned OpcodeArity(const byte* pc, const byte* end) {
-  WasmDecoder decoder(nullptr, nullptr, pc, end);
-  return decoder.OpcodeArity(pc);
-}
-
 void PrintAstForDebugging(const byte* start, const byte* end) {
-  base::AccountingAllocator allocator;
+  AccountingAllocator allocator;
   OFStream os(stdout);
   PrintAst(&allocator, FunctionBodyForTesting(start, end), os, nullptr);
 }
 
-bool PrintAst(base::AccountingAllocator* allocator, const FunctionBody& body,
+bool PrintAst(AccountingAllocator* allocator, const FunctionBody& body,
               std::ostream& os,
               std::vector<std::tuple<uint32_t, int, int>>* offset_table) {
   Zone zone(allocator);
@@ -1777,68 +1869,57 @@
     }
 
     switch (opcode) {
-      case kExprIf:
       case kExprElse:
-      case kExprLoop:
-      case kExprBlock:
-      case kExprTryCatch:
-      case kExprTryCatchFinally:
-      case kExprTryFinally:
         os << "   // @" << i.pc_offset();
         control_depth++;
         break;
+      case kExprLoop:
+      case kExprIf:
+      case kExprBlock:
+      case kExprTry: {
+        BlockTypeOperand operand(&i, i.pc());
+        os << "   // @" << i.pc_offset();
+        for (unsigned i = 0; i < operand.arity; i++) {
+          os << " " << WasmOpcodes::TypeName(operand.read_entry(i));
+        }
+        control_depth++;
+        break;
+      }
       case kExprEnd:
         os << "   // @" << i.pc_offset();
         control_depth--;
         break;
       case kExprBr: {
         BreakDepthOperand operand(&i, i.pc());
-        os << "   // arity=" << operand.arity << " depth=" << operand.depth;
+        os << "   // depth=" << operand.depth;
         break;
       }
       case kExprBrIf: {
         BreakDepthOperand operand(&i, i.pc());
-        os << "   // arity=" << operand.arity << " depth" << operand.depth;
+        os << "   // depth=" << operand.depth;
         break;
       }
       case kExprBrTable: {
         BranchTableOperand operand(&i, i.pc());
-        os << "   // arity=" << operand.arity
-           << " entries=" << operand.table_count;
+        os << " // entries=" << operand.table_count;
         break;
       }
       case kExprCallIndirect: {
         CallIndirectOperand operand(&i, i.pc());
+        os << "   // sig #" << operand.index;
         if (decoder.Complete(i.pc(), operand)) {
-          os << "   // sig #" << operand.index << ": " << *operand.sig;
-        } else {
-          os << " // arity=" << operand.arity << " sig #" << operand.index;
-        }
-        break;
-      }
-      case kExprCallImport: {
-        CallImportOperand operand(&i, i.pc());
-        if (decoder.Complete(i.pc(), operand)) {
-          os << "   // import #" << operand.index << ": " << *operand.sig;
-        } else {
-          os << " // arity=" << operand.arity << " import #" << operand.index;
+          os << ": " << *operand.sig;
         }
         break;
       }
       case kExprCallFunction: {
         CallFunctionOperand operand(&i, i.pc());
+        os << " // function #" << operand.index;
         if (decoder.Complete(i.pc(), operand)) {
-          os << "   // function #" << operand.index << ": " << *operand.sig;
-        } else {
-          os << " // arity=" << operand.arity << " function #" << operand.index;
+          os << ": " << *operand.sig;
         }
         break;
       }
-      case kExprReturn: {
-        ReturnArityOperand operand(&i, i.pc());
-        os << "   // arity=" << operand.arity;
-        break;
-      }
       default:
         break;
       }
diff --git a/src/wasm/ast-decoder.h b/src/wasm/ast-decoder.h
index c4f6c16..8c2c2c4 100644
--- a/src/wasm/ast-decoder.h
+++ b/src/wasm/ast-decoder.h
@@ -21,6 +21,9 @@
 
 namespace wasm {
 
+const uint32_t kMaxNumWasmLocals = 8000000;
+struct WasmGlobal;
+
 // Helpers for decoding different kinds of operands which follow bytecodes.
 struct LocalIndexOperand {
   uint32_t index;
@@ -79,39 +82,111 @@
 struct GlobalIndexOperand {
   uint32_t index;
   LocalType type;
+  const WasmGlobal* global;
   unsigned length;
 
   inline GlobalIndexOperand(Decoder* decoder, const byte* pc) {
     index = decoder->checked_read_u32v(pc, 1, &length, "global index");
+    global = nullptr;
     type = kAstStmt;
   }
 };
 
+struct BlockTypeOperand {
+  uint32_t arity;
+  const byte* types;  // pointer to encoded types for the block.
+  unsigned length;
+
+  inline BlockTypeOperand(Decoder* decoder, const byte* pc) {
+    uint8_t val = decoder->checked_read_u8(pc, 1, "block type");
+    LocalType type = kAstStmt;
+    length = 1;
+    arity = 0;
+    types = nullptr;
+    if (decode_local_type(val, &type)) {
+      arity = type == kAstStmt ? 0 : 1;
+      types = pc + 1;
+    } else {
+      // Handle multi-value blocks.
+      if (!FLAG_wasm_mv_prototype) {
+        decoder->error(pc, pc + 1, "invalid block arity > 1");
+        return;
+      }
+      if (val != kMultivalBlock) {
+        decoder->error(pc, pc + 1, "invalid block type");
+        return;
+      }
+      // Decode and check the types vector of the block.
+      unsigned len = 0;
+      uint32_t count = decoder->checked_read_u32v(pc, 2, &len, "block arity");
+      // {count} is encoded as {arity-2}, so that a {0} count here corresponds
+      // to a block with 2 values. This makes invalid/redundant encodings
+      // impossible.
+      arity = count + 2;
+      length = 1 + len + arity;
+      types = pc + 1 + 1 + len;
+
+      for (uint32_t i = 0; i < arity; i++) {
+        uint32_t offset = 1 + 1 + len + i;
+        val = decoder->checked_read_u8(pc, offset, "block type");
+        decode_local_type(val, &type);
+        if (type == kAstStmt) {
+          decoder->error(pc, pc + offset, "invalid block type");
+          return;
+        }
+      }
+    }
+  }
+  // Decode a byte representing a local type. Return {false} if the encoded
+  // byte was invalid or {kMultivalBlock}.
+  bool decode_local_type(uint8_t val, LocalType* result) {
+    switch (static_cast<LocalTypeCode>(val)) {
+      case kLocalVoid:
+        *result = kAstStmt;
+        return true;
+      case kLocalI32:
+        *result = kAstI32;
+        return true;
+      case kLocalI64:
+        *result = kAstI64;
+        return true;
+      case kLocalF32:
+        *result = kAstF32;
+        return true;
+      case kLocalF64:
+        *result = kAstF64;
+        return true;
+      default:
+        *result = kAstStmt;
+        return false;
+    }
+  }
+  LocalType read_entry(unsigned index) {
+    DCHECK_LT(index, arity);
+    LocalType result;
+    CHECK(decode_local_type(types[index], &result));
+    return result;
+  }
+};
+
 struct Control;
 struct BreakDepthOperand {
-  uint32_t arity;
   uint32_t depth;
   Control* target;
   unsigned length;
   inline BreakDepthOperand(Decoder* decoder, const byte* pc) {
-    unsigned len1 = 0;
-    unsigned len2 = 0;
-    arity = decoder->checked_read_u32v(pc, 1, &len1, "argument count");
-    depth = decoder->checked_read_u32v(pc, 1 + len1, &len2, "break depth");
-    length = len1 + len2;
+    depth = decoder->checked_read_u32v(pc, 1, &length, "break depth");
     target = nullptr;
   }
 };
 
 struct CallIndirectOperand {
-  uint32_t arity;
   uint32_t index;
   FunctionSig* sig;
   unsigned length;
   inline CallIndirectOperand(Decoder* decoder, const byte* pc) {
     unsigned len1 = 0;
     unsigned len2 = 0;
-    arity = decoder->checked_read_u32v(pc, 1, &len1, "argument count");
     index = decoder->checked_read_u32v(pc, 1 + len1, &len2, "signature index");
     length = len1 + len2;
     sig = nullptr;
@@ -119,59 +194,32 @@
 };
 
 struct CallFunctionOperand {
-  uint32_t arity;
   uint32_t index;
   FunctionSig* sig;
   unsigned length;
   inline CallFunctionOperand(Decoder* decoder, const byte* pc) {
     unsigned len1 = 0;
     unsigned len2 = 0;
-    arity = decoder->checked_read_u32v(pc, 1, &len1, "argument count");
     index = decoder->checked_read_u32v(pc, 1 + len1, &len2, "function index");
     length = len1 + len2;
     sig = nullptr;
   }
 };
 
-struct CallImportOperand {
-  uint32_t arity;
-  uint32_t index;
-  FunctionSig* sig;
-  unsigned length;
-  inline CallImportOperand(Decoder* decoder, const byte* pc) {
-    unsigned len1 = 0;
-    unsigned len2 = 0;
-    arity = decoder->checked_read_u32v(pc, 1, &len1, "argument count");
-    index = decoder->checked_read_u32v(pc, 1 + len1, &len2, "import index");
-    length = len1 + len2;
-    sig = nullptr;
-  }
-};
-
 struct BranchTableOperand {
-  uint32_t arity;
   uint32_t table_count;
+  const byte* start;
   const byte* table;
-  unsigned length;
   inline BranchTableOperand(Decoder* decoder, const byte* pc) {
+    DCHECK_EQ(kExprBrTable, decoder->checked_read_u8(pc, 0, "opcode"));
+    start = pc + 1;
     unsigned len1 = 0;
-    unsigned len2 = 0;
-    arity = decoder->checked_read_u32v(pc, 1, &len1, "argument count");
-    table_count =
-        decoder->checked_read_u32v(pc, 1 + len1, &len2, "table count");
+    table_count = decoder->checked_read_u32v(pc, 1, &len1, "table count");
     if (table_count > (UINT_MAX / sizeof(uint32_t)) - 1 ||
-        len1 + len2 > UINT_MAX - (table_count + 1) * sizeof(uint32_t)) {
+        len1 > UINT_MAX - (table_count + 1) * sizeof(uint32_t)) {
       decoder->error(pc, "branch table size overflow");
     }
-    length = len1 + len2 + (table_count + 1) * sizeof(uint32_t);
-
-    uint32_t table_start = 1 + len1 + len2;
-    if (decoder->check(pc, table_start, (table_count + 1) * sizeof(uint32_t),
-                       "expected <table entries>")) {
-      table = pc + table_start;
-    } else {
-      table = nullptr;
-    }
+    table = pc + 1 + len1;
   }
   inline uint32_t read_entry(Decoder* decoder, unsigned i) {
     DCHECK(i <= table_count);
@@ -179,14 +227,58 @@
   }
 };
 
+// A helper to iterate over a branch table.
+class BranchTableIterator {
+ public:
+  unsigned cur_index() { return index_; }
+  bool has_next() { return index_ <= table_count_; }
+  uint32_t next() {
+    DCHECK(has_next());
+    index_++;
+    unsigned length = 0;
+    uint32_t result =
+        decoder_->checked_read_u32v(pc_, 0, &length, "branch table entry");
+    pc_ += length;
+    return result;
+  }
+  // length, including the length of the {BranchTableOperand}, but not the
+  // opcode.
+  unsigned length() {
+    while (has_next()) next();
+    return static_cast<unsigned>(pc_ - start_);
+  }
+  const byte* pc() { return pc_; }
+
+  BranchTableIterator(Decoder* decoder, BranchTableOperand& operand)
+      : decoder_(decoder),
+        start_(operand.start),
+        pc_(operand.table),
+        index_(0),
+        table_count_(operand.table_count) {}
+
+ private:
+  Decoder* decoder_;
+  const byte* start_;
+  const byte* pc_;
+  uint32_t index_;        // the current index.
+  uint32_t table_count_;  // the count of entries, not including default.
+};
+
 struct MemoryAccessOperand {
   uint32_t alignment;
   uint32_t offset;
   unsigned length;
-  inline MemoryAccessOperand(Decoder* decoder, const byte* pc) {
+  inline MemoryAccessOperand(Decoder* decoder, const byte* pc,
+                             uint32_t max_alignment) {
     unsigned alignment_length;
     alignment =
         decoder->checked_read_u32v(pc, 1, &alignment_length, "alignment");
+    if (max_alignment < alignment) {
+      decoder->error(pc, pc + 1,
+                     "invalid alignment; expected maximum alignment is %u, "
+                     "actual alignment is %u",
+                     max_alignment, alignment);
+    }
     unsigned offset_length;
     offset = decoder->checked_read_u32v(pc, 1 + alignment_length,
                                         &offset_length, "offset");
@@ -194,15 +286,6 @@
   }
 };
 
-struct ReturnArityOperand {
-  uint32_t arity;
-  unsigned length;
-
-  inline ReturnArityOperand(Decoder* decoder, const byte* pc) {
-    arity = decoder->checked_read_u32v(pc, 1, &length, "return count");
-  }
-};
-
 typedef compiler::WasmGraphBuilder TFBuilder;
 struct ModuleEnv;  // forward declaration of module interface.
 
@@ -228,25 +311,25 @@
   return os;
 }
 
-DecodeResult VerifyWasmCode(base::AccountingAllocator* allocator,
-                            FunctionBody& body);
-DecodeResult BuildTFGraph(base::AccountingAllocator* allocator,
-                          TFBuilder* builder, FunctionBody& body);
-bool PrintAst(base::AccountingAllocator* allocator, const FunctionBody& body,
+V8_EXPORT_PRIVATE DecodeResult VerifyWasmCode(AccountingAllocator* allocator,
+                                              FunctionBody& body);
+DecodeResult BuildTFGraph(AccountingAllocator* allocator, TFBuilder* builder,
+                          FunctionBody& body);
+bool PrintAst(AccountingAllocator* allocator, const FunctionBody& body,
               std::ostream& os,
               std::vector<std::tuple<uint32_t, int, int>>* offset_table);
 
 // A simplified form of AST printing, e.g. from a debugger.
 void PrintAstForDebugging(const byte* start, const byte* end);
 
-inline DecodeResult VerifyWasmCode(base::AccountingAllocator* allocator,
+inline DecodeResult VerifyWasmCode(AccountingAllocator* allocator,
                                    ModuleEnv* module, FunctionSig* sig,
                                    const byte* start, const byte* end) {
   FunctionBody body = {module, sig, nullptr, start, end};
   return VerifyWasmCode(allocator, body);
 }
 
-inline DecodeResult BuildTFGraph(base::AccountingAllocator* allocator,
+inline DecodeResult BuildTFGraph(AccountingAllocator* allocator,
                                  TFBuilder* builder, ModuleEnv* module,
                                  FunctionSig* sig, const byte* start,
                                  const byte* end) {
@@ -276,9 +359,6 @@
 // Computes the length of the opcode at the given address.
 unsigned OpcodeLength(const byte* pc, const byte* end);
 
-// Computes the arity (number of sub-nodes) of the opcode at the given address.
-unsigned OpcodeArity(const byte* pc, const byte* end);
-
 // A simple forward iterator for bytecodes.
 class BytecodeIterator : public Decoder {
  public:
diff --git a/src/wasm/decoder.h b/src/wasm/decoder.h
index a6ede54..d5c9f43 100644
--- a/src/wasm/decoder.h
+++ b/src/wasm/decoder.h
@@ -12,7 +12,7 @@
 #include "src/signature.h"
 #include "src/utils.h"
 #include "src/wasm/wasm-result.h"
-#include "src/zone-containers.h"
+#include "src/zone/zone-containers.h"
 
 namespace v8 {
 namespace internal {
@@ -208,6 +208,19 @@
 
   // Consume {size} bytes and send them to the bit bucket, advancing {pc_}.
   void consume_bytes(int size) {
+    TRACE("  +%d  %-20s: %d bytes\n", static_cast<int>(pc_ - start_), "skip",
+          size);
+    if (checkAvailable(size)) {
+      pc_ += size;
+    } else {
+      pc_ = limit_;
+    }
+  }
+
+  // Consume {size} bytes and send them to the bit bucket, advancing {pc_}.
+  void consume_bytes(uint32_t size, const char* name = "skip") {
+    TRACE("  +%d  %-20s: %d bytes\n", static_cast<int>(pc_ - start_), name,
+          size);
     if (checkAvailable(size)) {
       pc_ += size;
     } else {
diff --git a/src/wasm/module-decoder.cc b/src/wasm/module-decoder.cc
index 542c47c..9006561 100644
--- a/src/wasm/module-decoder.cc
+++ b/src/wasm/module-decoder.cc
@@ -27,6 +27,141 @@
 
 namespace {
 
+const char* kNameString = "name";
+const size_t kNameStringLength = 4;
+
+LocalType TypeOf(const WasmModule* module, const WasmInitExpr& expr) {
+  switch (expr.kind) {
+    case WasmInitExpr::kNone:
+      return kAstStmt;
+    case WasmInitExpr::kGlobalIndex:
+      return expr.val.global_index < module->globals.size()
+                 ? module->globals[expr.val.global_index].type
+                 : kAstStmt;
+    case WasmInitExpr::kI32Const:
+      return kAstI32;
+    case WasmInitExpr::kI64Const:
+      return kAstI64;
+    case WasmInitExpr::kF32Const:
+      return kAstF32;
+    case WasmInitExpr::kF64Const:
+      return kAstF64;
+    default:
+      UNREACHABLE();
+      return kAstStmt;
+  }
+}
+
+// An iterator over the sections in a WASM binary module.
+// Automatically skips all unknown sections.
+class WasmSectionIterator {
+ public:
+  explicit WasmSectionIterator(Decoder& decoder)
+      : decoder_(decoder),
+        section_code_(kUnknownSectionCode),
+        section_start_(decoder.pc()),
+        section_end_(decoder.pc()) {
+    next();
+  }
+
+  inline bool more() const {
+    return section_code_ != kUnknownSectionCode && decoder_.more();
+  }
+
+  inline WasmSectionCode section_code() const { return section_code_; }
+
+  inline const byte* section_start() const { return section_start_; }
+
+  inline uint32_t section_length() const {
+    return static_cast<uint32_t>(section_end_ - section_start_);
+  }
+
+  inline const byte* section_end() const { return section_end_; }
+
+  // Advances to the next section, checking that decoding the current section
+  // stopped at {section_end_}.
+  void advance() {
+    if (decoder_.pc() != section_end_) {
+      const char* msg = decoder_.pc() < section_end_ ? "shorter" : "longer";
+      decoder_.error(decoder_.pc(), decoder_.pc(),
+                     "section was %s than expected size "
+                     "(%u bytes expected, %zu decoded)",
+                     msg, section_length(),
+                     static_cast<size_t>(decoder_.pc() - section_start_));
+    }
+    next();
+  }
+
+ private:
+  Decoder& decoder_;
+  WasmSectionCode section_code_;
+  const byte* section_start_;
+  const byte* section_end_;
+
+  // Reads the section code/name at the current position and sets up
+  // the internal fields.
+  void next() {
+    while (true) {
+      if (!decoder_.more()) {
+        section_code_ = kUnknownSectionCode;
+        return;
+      }
+      uint8_t section_code = decoder_.consume_u8("section code");
+      // Read and check the section size.
+      uint32_t section_length = decoder_.consume_u32v("section length");
+      section_start_ = decoder_.pc();
+      if (decoder_.checkAvailable(section_length)) {
+        // Get the limit of the section within the module.
+        section_end_ = section_start_ + section_length;
+      } else {
+        // The section would extend beyond the end of the module.
+        section_end_ = section_start_;
+      }
+
+      if (section_code == kUnknownSectionCode) {
+        // Check for the known "names" section.
+        uint32_t string_length = decoder_.consume_u32v("section name length");
+        const byte* section_name_start = decoder_.pc();
+        decoder_.consume_bytes(string_length, "section name");
+        if (decoder_.failed() || decoder_.pc() > section_end_) {
+          TRACE("Section name of length %u couldn't be read\n", string_length);
+          section_code_ = kUnknownSectionCode;
+          return;
+        }
+
+        TRACE("  +%d  section name        : \"%.*s\"\n",
+              static_cast<int>(section_name_start - decoder_.start()),
+              string_length < 20 ? string_length : 20, section_name_start);
+
+        if (string_length == kNameStringLength &&
+            strncmp(reinterpret_cast<const char*>(section_name_start),
+                    kNameString, kNameStringLength) == 0) {
+          section_code = kNameSectionCode;
+        } else {
+          section_code = kUnknownSectionCode;
+        }
+      } else if (!IsValidSectionCode(section_code)) {
+        decoder_.error(decoder_.pc(), decoder_.pc(),
+                       "unknown section code #0x%02x", section_code);
+        section_code = kUnknownSectionCode;
+      }
+      section_code_ = static_cast<WasmSectionCode>(section_code);
+
+      TRACE("Section: %s\n", SectionName(section_code_));
+      if (section_code_ == kUnknownSectionCode &&
+          section_end_ > decoder_.pc()) {
+        // skip to the end of the unknown section.
+        uint32_t remaining =
+            static_cast<uint32_t>(section_end_ - decoder_.pc());
+        decoder_.consume_bytes(remaining, "section payload");
+        // fall through and continue to the next section.
+      } else {
+        return;
+      }
+    }
+  }
+};
+
 // The main logic for decoding the bytes of a module.
 class ModuleDecoder : public Decoder {
  public:
@@ -77,11 +212,9 @@
     module->min_mem_pages = 0;
     module->max_mem_pages = 0;
     module->mem_export = false;
-    module->mem_external = false;
     module->origin = origin_;
 
     const byte* pos = pc_;
-    int current_order = 0;
     uint32_t magic_word = consume_u32("wasm magic");
 #define BYTES(x) (x & 0xff), (x >> 8) & 0xff, (x >> 16) & 0xff, (x >> 24) & 0xff
     if (magic_word != kWasmMagic) {
@@ -89,7 +222,6 @@
             "expected magic word %02x %02x %02x %02x, "
             "found %02x %02x %02x %02x",
             BYTES(kWasmMagic), BYTES(magic_word));
-      goto done;
     }
 
     pos = pc_;
@@ -100,302 +232,367 @@
               "expected version %02x %02x %02x %02x, "
               "found %02x %02x %02x %02x",
               BYTES(kWasmVersion), BYTES(magic_version));
-        goto done;
       }
     }
 
-    // Decode the module sections.
-    while (pc_ < limit_) {
-      TRACE("DecodeSection\n");
-      pos = pc_;
+    WasmSectionIterator section_iter(*this);
 
-      // Read the section name.
-      uint32_t string_length = consume_u32v("section name length");
-      const byte* section_name_start = pc_;
-      consume_bytes(string_length);
-      if (failed()) {
-        TRACE("Section name of length %u couldn't be read\n", string_length);
-        break;
+    // ===== Type section ====================================================
+    if (section_iter.section_code() == kTypeSectionCode) {
+      uint32_t signatures_count = consume_u32v("signatures count");
+      module->signatures.reserve(SafeReserve(signatures_count));
+      for (uint32_t i = 0; ok() && i < signatures_count; ++i) {
+        TRACE("DecodeSignature[%d] module+%d\n", i,
+              static_cast<int>(pc_ - start_));
+        FunctionSig* s = consume_sig();
+        module->signatures.push_back(s);
       }
+      section_iter.advance();
+    }
 
-      TRACE("  +%d  section name        : \"%.*s\"\n",
-            static_cast<int>(section_name_start - start_),
-            string_length < 20 ? string_length : 20, section_name_start);
+    // ===== Import section ==================================================
+    if (section_iter.section_code() == kImportSectionCode) {
+      uint32_t import_table_count = consume_u32v("import table count");
+      module->import_table.reserve(SafeReserve(import_table_count));
+      for (uint32_t i = 0; ok() && i < import_table_count; ++i) {
+        TRACE("DecodeImportTable[%d] module+%d\n", i,
+              static_cast<int>(pc_ - start_));
 
-      WasmSection::Code section =
-          WasmSection::lookup(section_name_start, string_length);
-
-      // Read and check the section size.
-      uint32_t section_length = consume_u32v("section length");
-      if (!checkAvailable(section_length)) {
-        // The section would extend beyond the end of the module.
-        break;
-      }
-      const byte* section_start = pc_;
-      const byte* expected_section_end = pc_ + section_length;
-
-      current_order = CheckSectionOrder(current_order, section);
-
-      switch (section) {
-        case WasmSection::Code::End:
-          // Terminate section decoding.
-          limit_ = pc_;
-          break;
-        case WasmSection::Code::Memory: {
-          module->min_mem_pages = consume_u32v("min memory");
-          module->max_mem_pages = consume_u32v("max memory");
-          module->mem_export = consume_u8("export memory") != 0;
-          break;
+        module->import_table.push_back({
+            0,                  // module_name_length
+            0,                  // module_name_offset
+            0,                  // field_name_offset
+            0,                  // field_name_length
+            kExternalFunction,  // kind
+            0                   // index
+        });
+        WasmImport* import = &module->import_table.back();
+        const byte* pos = pc_;
+        import->module_name_offset =
+            consume_string(&import->module_name_length, true);
+        if (import->module_name_length == 0) {
+          error(pos, "import module name cannot be NULL");
         }
-        case WasmSection::Code::Signatures: {
-          uint32_t signatures_count = consume_u32v("signatures count");
-          module->signatures.reserve(SafeReserve(signatures_count));
-          // Decode signatures.
-          for (uint32_t i = 0; i < signatures_count; ++i) {
-            if (failed()) break;
-            TRACE("DecodeSignature[%d] module+%d\n", i,
-                  static_cast<int>(pc_ - start_));
-            FunctionSig* s = consume_sig();
-            module->signatures.push_back(s);
-          }
-          break;
-        }
-        case WasmSection::Code::FunctionSignatures: {
-          uint32_t functions_count = consume_u32v("functions count");
-          module->functions.reserve(SafeReserve(functions_count));
-          for (uint32_t i = 0; i < functions_count; ++i) {
-            module->functions.push_back({nullptr,  // sig
-                                         i,        // func_index
-                                         0,        // sig_index
-                                         0,        // name_offset
-                                         0,        // name_length
-                                         0,        // code_start_offset
-                                         0});      // code_end_offset
+        import->field_name_offset =
+            consume_string(&import->field_name_length, true);
+
+        import->kind = static_cast<WasmExternalKind>(consume_u8("import kind"));
+        switch (import->kind) {
+          case kExternalFunction: {
+            // ===== Imported function =======================================
+            import->index = static_cast<uint32_t>(module->functions.size());
+            module->num_imported_functions++;
+            module->functions.push_back({nullptr,        // sig
+                                         import->index,  // func_index
+                                         0,              // sig_index
+                                         0,              // name_offset
+                                         0,              // name_length
+                                         0,              // code_start_offset
+                                         0,              // code_end_offset
+                                         true,           // imported
+                                         false});        // exported
             WasmFunction* function = &module->functions.back();
             function->sig_index = consume_sig_index(module, &function->sig);
-          }
-          break;
-        }
-        case WasmSection::Code::FunctionBodies: {
-          const byte* pos = pc_;
-          uint32_t functions_count = consume_u32v("functions count");
-          if (functions_count != module->functions.size()) {
-            error(pos, pos, "function body count %u mismatch (%u expected)",
-                  functions_count,
-                  static_cast<uint32_t>(module->functions.size()));
             break;
           }
-          for (uint32_t i = 0; i < functions_count; ++i) {
-            WasmFunction* function = &module->functions[i];
-            uint32_t size = consume_u32v("body size");
-            function->code_start_offset = pc_offset();
-            function->code_end_offset = pc_offset() + size;
-
-            TRACE("  +%d  %-20s: (%d bytes)\n", pc_offset(), "function body",
-                  size);
-            pc_ += size;
-            if (pc_ > limit_) {
-              error(pc_, "function body extends beyond end of file");
-            }
-          }
-          break;
-        }
-        case WasmSection::Code::Names: {
-          const byte* pos = pc_;
-          uint32_t functions_count = consume_u32v("functions count");
-          if (functions_count != module->functions.size()) {
-            error(pos, pos, "function name count %u mismatch (%u expected)",
-                  functions_count,
-                  static_cast<uint32_t>(module->functions.size()));
+          case kExternalTable: {
+            // ===== Imported table ==========================================
+            import->index =
+                static_cast<uint32_t>(module->function_tables.size());
+            module->function_tables.push_back(
+                {0, 0, std::vector<int32_t>(), true, false});
+            expect_u8("element type", 0x20);
+            WasmIndirectFunctionTable* table = &module->function_tables.back();
+            consume_resizable_limits("element count", "elements", kMaxUInt32,
+                                     &table->size, &table->max_size);
             break;
           }
-
-          for (uint32_t i = 0; i < functions_count; ++i) {
-            WasmFunction* function = &module->functions[i];
-            function->name_offset =
-                consume_string(&function->name_length, false);
-
-            uint32_t local_names_count = consume_u32v("local names count");
-            for (uint32_t j = 0; j < local_names_count; j++) {
-              uint32_t unused = 0;
-              uint32_t offset = consume_string(&unused, false);
-              USE(unused);
-              USE(offset);
-            }
+          case kExternalMemory: {
+            // ===== Imported memory =========================================
+            //            import->index =
+            //            static_cast<uint32_t>(module->memories.size());
+            // TODO(titzer): imported memories
+            break;
           }
-          break;
-        }
-        case WasmSection::Code::Globals: {
-          uint32_t globals_count = consume_u32v("globals count");
-          module->globals.reserve(SafeReserve(globals_count));
-          // Decode globals.
-          for (uint32_t i = 0; i < globals_count; ++i) {
-            if (failed()) break;
-            TRACE("DecodeGlobal[%d] module+%d\n", i,
-                  static_cast<int>(pc_ - start_));
-            // Add an uninitialized global and pass a pointer to it.
-            module->globals.push_back({0, 0, kAstStmt, 0, false});
+          case kExternalGlobal: {
+            // ===== Imported global =========================================
+            import->index = static_cast<uint32_t>(module->globals.size());
+            module->globals.push_back(
+                {kAstStmt, false, NO_INIT, 0, true, false});
             WasmGlobal* global = &module->globals.back();
-            DecodeGlobalInModule(global);
-          }
-          break;
-        }
-        case WasmSection::Code::DataSegments: {
-          uint32_t data_segments_count = consume_u32v("data segments count");
-          module->data_segments.reserve(SafeReserve(data_segments_count));
-          // Decode data segments.
-          for (uint32_t i = 0; i < data_segments_count; ++i) {
-            if (failed()) break;
-            TRACE("DecodeDataSegment[%d] module+%d\n", i,
-                  static_cast<int>(pc_ - start_));
-            module->data_segments.push_back({0,        // dest_addr
-                                             0,        // source_offset
-                                             0,        // source_size
-                                             false});  // init
-            WasmDataSegment* segment = &module->data_segments.back();
-            DecodeDataSegmentInModule(module, segment);
-          }
-          break;
-        }
-        case WasmSection::Code::FunctionTable: {
-          // An indirect function table requires functions first.
-          CheckForFunctions(module, section);
-          // Assume only one table for now.
-          static const uint32_t kSupportedTableCount = 1;
-          module->function_tables.reserve(SafeReserve(kSupportedTableCount));
-          // Decode function table.
-          for (uint32_t i = 0; i < kSupportedTableCount; ++i) {
-            if (failed()) break;
-            TRACE("DecodeFunctionTable[%d] module+%d\n", i,
-                  static_cast<int>(pc_ - start_));
-            module->function_tables.push_back({0, 0, std::vector<uint16_t>()});
-            DecodeFunctionTableInModule(module, &module->function_tables[i]);
-          }
-          break;
-        }
-        case WasmSection::Code::StartFunction: {
-          // Declares a start function for a module.
-          CheckForFunctions(module, section);
-          if (module->start_function_index >= 0) {
-            error("start function already declared");
+            global->type = consume_value_type();
+            global->mutability = consume_u8("mutability") != 0;
             break;
           }
-          WasmFunction* func;
-          const byte* pos = pc_;
-          module->start_function_index = consume_func_index(module, &func);
-          if (func && func->sig->parameter_count() > 0) {
-            error(pos, "invalid start function: non-zero parameter count");
+          default:
+            error(pos, pos, "unknown import kind 0x%02x", import->kind);
             break;
-          }
-          break;
         }
-        case WasmSection::Code::ImportTable: {
-          uint32_t import_table_count = consume_u32v("import table count");
-          module->import_table.reserve(SafeReserve(import_table_count));
-          // Decode import table.
-          for (uint32_t i = 0; i < import_table_count; ++i) {
-            if (failed()) break;
-            TRACE("DecodeImportTable[%d] module+%d\n", i,
-                  static_cast<int>(pc_ - start_));
-
-            module->import_table.push_back({nullptr,  // sig
-                                            0,        // sig_index
-                                            0,        // module_name_offset
-                                            0,        // module_name_length
-                                            0,        // function_name_offset
-                                            0});      // function_name_length
-            WasmImport* import = &module->import_table.back();
-
-            import->sig_index = consume_sig_index(module, &import->sig);
-            const byte* pos = pc_;
-            import->module_name_offset =
-                consume_string(&import->module_name_length, true);
-            if (import->module_name_length == 0) {
-              error(pos, "import module name cannot be NULL");
-            }
-            import->function_name_offset =
-                consume_string(&import->function_name_length, true);
-          }
-          break;
-        }
-        case WasmSection::Code::ExportTable: {
-          // Declares an export table.
-          CheckForFunctions(module, section);
-          uint32_t export_table_count = consume_u32v("export table count");
-          module->export_table.reserve(SafeReserve(export_table_count));
-          // Decode export table.
-          for (uint32_t i = 0; i < export_table_count; ++i) {
-            if (failed()) break;
-            TRACE("DecodeExportTable[%d] module+%d\n", i,
-                  static_cast<int>(pc_ - start_));
-
-            module->export_table.push_back({0,    // func_index
-                                            0,    // name_offset
-                                            0});  // name_length
-            WasmExport* exp = &module->export_table.back();
-
-            WasmFunction* func;
-            exp->func_index = consume_func_index(module, &func);
-            exp->name_offset = consume_string(&exp->name_length, true);
-          }
-          // Check for duplicate exports.
-          if (ok() && module->export_table.size() > 1) {
-            std::vector<WasmExport> sorted_exports(module->export_table);
-            const byte* base = start_;
-            auto cmp_less = [base](const WasmExport& a, const WasmExport& b) {
-              // Return true if a < b.
-              uint32_t len = a.name_length;
-              if (len != b.name_length) return len < b.name_length;
-              return memcmp(base + a.name_offset, base + b.name_offset, len) <
-                     0;
-            };
-            std::stable_sort(sorted_exports.begin(), sorted_exports.end(),
-                             cmp_less);
-            auto it = sorted_exports.begin();
-            WasmExport* last = &*it++;
-            for (auto end = sorted_exports.end(); it != end; last = &*it++) {
-              DCHECK(!cmp_less(*it, *last));  // Vector must be sorted.
-              if (!cmp_less(*last, *it)) {
-                const byte* pc = start_ + it->name_offset;
-                error(pc, pc,
-                      "Duplicate export name '%.*s' for functions %d and %d",
-                      it->name_length, pc, last->func_index, it->func_index);
-                break;
-              }
-            }
-          }
-          break;
-        }
-        case WasmSection::Code::Max:
-          // Skip unknown sections.
-          TRACE("Unknown section: '");
-          for (uint32_t i = 0; i != string_length; ++i) {
-            TRACE("%c", *(section_name_start + i));
-          }
-          TRACE("'\n");
-          consume_bytes(section_length);
-          break;
       }
-
-      if (pc_ != expected_section_end) {
-        const char* diff = pc_ < expected_section_end ? "shorter" : "longer";
-        size_t expected_length = static_cast<size_t>(section_length);
-        size_t actual_length = static_cast<size_t>(pc_ - section_start);
-        error(pc_, pc_,
-              "section \"%s\" %s (%zu bytes) than specified (%zu bytes)",
-              WasmSection::getName(section), diff, actual_length,
-              expected_length);
-        break;
-      }
+      section_iter.advance();
     }
 
-  done:
-    if (ok()) CalculateGlobalsOffsets(module);
+    // ===== Function section ================================================
+    if (section_iter.section_code() == kFunctionSectionCode) {
+      uint32_t functions_count = consume_u32v("functions count");
+      module->functions.reserve(SafeReserve(functions_count));
+      module->num_declared_functions = functions_count;
+      for (uint32_t i = 0; ok() && i < functions_count; ++i) {
+        uint32_t func_index = static_cast<uint32_t>(module->functions.size());
+        module->functions.push_back({nullptr,     // sig
+                                     func_index,  // func_index
+                                     0,           // sig_index
+                                     0,           // name_offset
+                                     0,           // name_length
+                                     0,           // code_start_offset
+                                     0,           // code_end_offset
+                                     false,       // imported
+                                     false});     // exported
+        WasmFunction* function = &module->functions.back();
+        function->sig_index = consume_sig_index(module, &function->sig);
+      }
+      section_iter.advance();
+    }
+
+    // ===== Table section ===================================================
+    if (section_iter.section_code() == kTableSectionCode) {
+      const byte* pos = pc_;
+      uint32_t table_count = consume_u32v("table count");
+      // Require at most one table for now.
+      if (table_count > 1) {
+        error(pos, pos, "invalid table count %d, maximum 1", table_count);
+      }
+
+      for (uint32_t i = 0; ok() && i < table_count; i++) {
+        module->function_tables.push_back(
+            {0, 0, std::vector<int32_t>(), false, false});
+        WasmIndirectFunctionTable* table = &module->function_tables.back();
+        expect_u8("table type", kWasmAnyFunctionTypeForm);
+        consume_resizable_limits("table elements", "elements", kMaxUInt32,
+                                 &table->size, &table->max_size);
+      }
+      section_iter.advance();
+    }
+
+    // ===== Memory section ==================================================
+    if (section_iter.section_code() == kMemorySectionCode) {
+      const byte* pos = pc_;
+      uint32_t memory_count = consume_u32v("memory count");
+      // Require at most one memory for now.
+      if (memory_count > 1) {
+        error(pos, pos, "invalid memory count %d, maximum 1", memory_count);
+      }
+
+      for (uint32_t i = 0; ok() && i < memory_count; i++) {
+        consume_resizable_limits("memory", "pages", WasmModule::kMaxLegalPages,
+                                 &module->min_mem_pages,
+                                 &module->max_mem_pages);
+      }
+      section_iter.advance();
+    }
+
+    // ===== Global section ==================================================
+    if (section_iter.section_code() == kGlobalSectionCode) {
+      uint32_t globals_count = consume_u32v("globals count");
+      module->globals.reserve(SafeReserve(globals_count));
+      for (uint32_t i = 0; ok() && i < globals_count; ++i) {
+        TRACE("DecodeGlobal[%d] module+%d\n", i,
+              static_cast<int>(pc_ - start_));
+        // Add an uninitialized global and pass a pointer to it.
+        module->globals.push_back({kAstStmt, false, NO_INIT, 0, false, false});
+        WasmGlobal* global = &module->globals.back();
+        DecodeGlobalInModule(module, i, global);
+      }
+      section_iter.advance();
+    }
+
+    // ===== Export section ==================================================
+    if (section_iter.section_code() == kExportSectionCode) {
+      uint32_t export_table_count = consume_u32v("export table count");
+      module->export_table.reserve(SafeReserve(export_table_count));
+      for (uint32_t i = 0; ok() && i < export_table_count; ++i) {
+        TRACE("DecodeExportTable[%d] module+%d\n", i,
+              static_cast<int>(pc_ - start_));
+
+        module->export_table.push_back({
+            0,                  // name_length
+            0,                  // name_offset
+            kExternalFunction,  // kind
+            0                   // index
+        });
+        WasmExport* exp = &module->export_table.back();
+
+        exp->name_offset = consume_string(&exp->name_length, true);
+        const byte* pos = pc();
+        exp->kind = static_cast<WasmExternalKind>(consume_u8("export kind"));
+        switch (exp->kind) {
+          case kExternalFunction: {
+            WasmFunction* func = nullptr;
+            exp->index = consume_func_index(module, &func);
+            module->num_exported_functions++;
+            if (func) func->exported = true;
+            break;
+          }
+          case kExternalTable: {
+            WasmIndirectFunctionTable* table = nullptr;
+            exp->index = consume_table_index(module, &table);
+            if (table) table->exported = true;
+            break;
+          }
+          case kExternalMemory: {
+            uint32_t index = consume_u32v("memory index");
+            if (index != 0) error("invalid memory index != 0");
+            module->mem_export = true;
+            break;
+          }
+          case kExternalGlobal: {
+            WasmGlobal* global = nullptr;
+            exp->index = consume_global_index(module, &global);
+            if (global) global->exported = true;
+            break;
+          }
+          default:
+            error(pos, pos, "invalid export kind 0x%02x", exp->kind);
+            break;
+        }
+      }
+      // Check for duplicate exports.
+      if (ok() && module->export_table.size() > 1) {
+        std::vector<WasmExport> sorted_exports(module->export_table);
+        const byte* base = start_;
+        auto cmp_less = [base](const WasmExport& a, const WasmExport& b) {
+          // Return true if a < b.
+          if (a.name_length != b.name_length) {
+            return a.name_length < b.name_length;
+          }
+          return memcmp(base + a.name_offset, base + b.name_offset,
+                        a.name_length) < 0;
+        };
+        std::stable_sort(sorted_exports.begin(), sorted_exports.end(),
+                         cmp_less);
+        auto it = sorted_exports.begin();
+        WasmExport* last = &*it++;
+        for (auto end = sorted_exports.end(); it != end; last = &*it++) {
+          DCHECK(!cmp_less(*it, *last));  // Vector must be sorted.
+          if (!cmp_less(*last, *it)) {
+            const byte* pc = start_ + it->name_offset;
+            error(pc, pc,
+                  "Duplicate export name '%.*s' for functions %d and %d",
+                  it->name_length, pc, last->index, it->index);
+            break;
+          }
+        }
+      }
+      section_iter.advance();
+    }
+
+    // ===== Start section ===================================================
+    if (section_iter.section_code() == kStartSectionCode) {
+      WasmFunction* func;
+      const byte* pos = pc_;
+      module->start_function_index = consume_func_index(module, &func);
+      if (func && func->sig->parameter_count() > 0) {
+        error(pos, "invalid start function: non-zero parameter count");
+      }
+      section_iter.advance();
+    }
+
+    // ===== Elements section ================================================
+    if (section_iter.section_code() == kElementSectionCode) {
+      uint32_t element_count = consume_u32v("element count");
+      for (uint32_t i = 0; ok() && i < element_count; ++i) {
+        uint32_t table_index = consume_u32v("table index");
+        if (table_index != 0) error("illegal table index != 0");
+        WasmInitExpr offset = consume_init_expr(module, kAstI32);
+        uint32_t num_elem = consume_u32v("number of elements");
+        std::vector<uint32_t> vector;
+        module->table_inits.push_back({table_index, offset, vector});
+        WasmTableInit* init = &module->table_inits.back();
+        init->entries.reserve(SafeReserve(num_elem));
+        for (uint32_t j = 0; ok() && j < num_elem; j++) {
+          WasmFunction* func = nullptr;
+          init->entries.push_back(consume_func_index(module, &func));
+        }
+      }
+
+      section_iter.advance();
+    }
+
+    // ===== Code section ====================================================
+    if (section_iter.section_code() == kCodeSectionCode) {
+      const byte* pos = pc_;
+      uint32_t functions_count = consume_u32v("functions count");
+      if (functions_count != module->num_declared_functions) {
+        error(pos, pos, "function body count %u mismatch (%u expected)",
+              functions_count, module->num_declared_functions);
+      }
+      for (uint32_t i = 0; ok() && i < functions_count; ++i) {
+        WasmFunction* function =
+            &module->functions[i + module->num_imported_functions];
+        uint32_t size = consume_u32v("body size");
+        function->code_start_offset = pc_offset();
+        function->code_end_offset = pc_offset() + size;
+        consume_bytes(size, "function body");
+      }
+      section_iter.advance();
+    }
+
+    // ===== Data section ====================================================
+    if (section_iter.section_code() == kDataSectionCode) {
+      uint32_t data_segments_count = consume_u32v("data segments count");
+      module->data_segments.reserve(SafeReserve(data_segments_count));
+      for (uint32_t i = 0; ok() && i < data_segments_count; ++i) {
+        TRACE("DecodeDataSegment[%d] module+%d\n", i,
+              static_cast<int>(pc_ - start_));
+        module->data_segments.push_back({
+            NO_INIT,  // dest_addr
+            0,        // source_offset
+            0         // source_size
+        });
+        WasmDataSegment* segment = &module->data_segments.back();
+        DecodeDataSegmentInModule(module, segment);
+      }
+      section_iter.advance();
+    }
+
+    // ===== Name section ====================================================
+    if (section_iter.section_code() == kNameSectionCode) {
+      const byte* pos = pc_;
+      uint32_t functions_count = consume_u32v("functions count");
+      if (functions_count != module->num_declared_functions) {
+        error(pos, pos, "function name count %u mismatch (%u expected)",
+              functions_count, module->num_declared_functions);
+      }
+
+      for (uint32_t i = 0; ok() && i < functions_count; ++i) {
+        WasmFunction* function =
+            &module->functions[i + module->num_imported_functions];
+        function->name_offset = consume_string(&function->name_length, false);
+
+        uint32_t local_names_count = consume_u32v("local names count");
+        for (uint32_t j = 0; ok() && j < local_names_count; j++) {
+          uint32_t unused = 0;
+          uint32_t offset = consume_string(&unused, false);
+          USE(unused);
+          USE(offset);
+        }
+      }
+      section_iter.advance();
+    }
+
+    // ===== Remaining sections ==============================================
+    if (section_iter.more() && ok()) {
+      error(pc(), pc(), "unexpected section: %s",
+            SectionName(section_iter.section_code()));
+    }
+
+    if (ok()) {
+      CalculateGlobalOffsets(module);
+      PreinitializeIndirectFunctionTables(module);
+    }
     const WasmModule* finished_module = module;
     ModuleResult result = toResult(finished_module);
-    if (FLAG_dump_wasm_module) {
-      DumpModule(module, result);
-    }
+    if (FLAG_dump_wasm_module) DumpModule(module, result);
     return result;
   }
 
@@ -405,27 +602,6 @@
     return count < kMaxReserve ? count : kMaxReserve;
   }
 
-  void CheckForFunctions(WasmModule* module, WasmSection::Code section) {
-    if (module->functions.size() == 0) {
-      error(pc_ - 1, nullptr, "functions must appear before section %s",
-            WasmSection::getName(section));
-    }
-  }
-
-  int CheckSectionOrder(int current_order, WasmSection::Code section) {
-    int next_order = WasmSection::getOrder(section);
-    if (next_order == 0) return current_order;
-    if (next_order == current_order) {
-      error(pc_, pc_, "section \"%s\" already defined",
-            WasmSection::getName(section));
-    }
-    if (next_order < current_order) {
-      error(pc_, pc_, "section \"%s\" out of order",
-            WasmSection::getName(section));
-    }
-    return next_order;
-  }
-
   // Decodes a single anonymous function starting at {start_}.
   FunctionResult DecodeSingleFunction(ModuleEnv* module_env,
                                       WasmFunction* function) {
@@ -451,6 +627,11 @@
     return ok() ? result : nullptr;
   }
 
+  WasmInitExpr DecodeInitExpr(const byte* start) {
+    pc_ = start;
+    return consume_init_expr(nullptr, kAstStmt);
+  }
+
  private:
   Zone* module_zone;
   ModuleResult result_;
@@ -459,15 +640,28 @@
   uint32_t off(const byte* ptr) { return static_cast<uint32_t>(ptr - start_); }
 
   // Decodes a single global entry inside a module starting at {pc_}.
-  void DecodeGlobalInModule(WasmGlobal* global) {
-    global->name_offset = consume_string(&global->name_length, false);
-    if (!unibrow::Utf8::Validate(start_ + global->name_offset,
-                                 global->name_length)) {
-      error("global name is not valid utf8");
+  void DecodeGlobalInModule(WasmModule* module, uint32_t index,
+                            WasmGlobal* global) {
+    global->type = consume_value_type();
+    global->mutability = consume_u8("mutability") != 0;
+    const byte* pos = pc();
+    global->init = consume_init_expr(module, kAstStmt);
+    switch (global->init.kind) {
+      case WasmInitExpr::kGlobalIndex:
+        if (global->init.val.global_index >= index) {
+          error("invalid global index in init expression");
+        } else if (module->globals[index].type != global->type) {
+          error("type mismatch in global initialization");
+        }
+        break;
+      default:
+        if (global->type != TypeOf(module, global->init)) {
+          error(pos, pos,
+                "type error in global initialization, expected %s, got %s",
+                WasmOpcodes::TypeName(global->type),
+                WasmOpcodes::TypeName(TypeOf(module, global->init)));
+        }
     }
-    global->type = consume_local_type();
-    global->offset = 0;
-    global->exported = consume_u8("exported") != 0;
   }
 
   bool IsWithinLimit(uint32_t limit, uint32_t offset, uint32_t size) {
@@ -479,10 +673,10 @@
   // Decodes a single data segment entry inside a module starting at {pc_}.
   void DecodeDataSegmentInModule(WasmModule* module, WasmDataSegment* segment) {
     const byte* start = pc_;
-    segment->dest_addr = consume_u32v("destination");
+    expect_u8("linear memory index", 0);
+    segment->dest_addr = consume_init_expr(module, kAstI32);
     segment->source_size = consume_u32v("source size");
     segment->source_offset = static_cast<uint32_t>(pc_ - start_);
-    segment->init = true;
 
     // Validate the data is in the module.
     uint32_t module_limit = static_cast<uint32_t>(limit_ - start_);
@@ -491,40 +685,11 @@
       error(start, "segment out of bounds of module");
     }
 
-    // Validate that the segment will fit into the (minimum) memory.
-    uint32_t memory_limit =
-        WasmModule::kPageSize * (module ? module->min_mem_pages
-                                        : WasmModule::kMaxMemPages);
-    if (!IsWithinLimit(memory_limit, segment->dest_addr,
-                       segment->source_size)) {
-      error(start, "segment out of bounds of memory");
-    }
-
-    consume_bytes(segment->source_size);
-  }
-
-  // Decodes a single function table inside a module starting at {pc_}.
-  void DecodeFunctionTableInModule(WasmModule* module,
-                                   WasmIndirectFunctionTable* table) {
-    table->size = consume_u32v("function table entry count");
-    table->max_size = table->size;
-
-    if (table->max_size != table->size) {
-      error("invalid table maximum size");
-    }
-
-    for (uint32_t i = 0; i < table->size; ++i) {
-      uint16_t index = consume_u32v();
-      if (index >= module->functions.size()) {
-        error(pc_ - sizeof(index), "invalid function index");
-        break;
-      }
-      table->values.push_back(index);
-    }
+    consume_bytes(segment->source_size, "segment data");
   }
 
   // Calculate individual global offsets and total size of globals table.
-  void CalculateGlobalsOffsets(WasmModule* module) {
+  void CalculateGlobalOffsets(WasmModule* module) {
     uint32_t offset = 0;
     if (module->globals.size() == 0) {
       module->globals_size = 0;
@@ -540,6 +705,30 @@
     module->globals_size = offset;
   }
 
+  // TODO(titzer): this only works without overlapping initializations from
+  // global bases for entries
+  void PreinitializeIndirectFunctionTables(WasmModule* module) {
+    // Fill all tables with invalid entries first.
+    for (WasmIndirectFunctionTable& table : module->function_tables) {
+      table.values.resize(table.size);
+      for (size_t i = 0; i < table.size; i++) {
+        table.values[i] = kInvalidFunctionIndex;
+      }
+    }
+    for (WasmTableInit& init : module->table_inits) {
+      if (init.offset.kind != WasmInitExpr::kI32Const) continue;
+      if (init.table_index >= module->function_tables.size()) continue;
+      WasmIndirectFunctionTable& table =
+          module->function_tables[init.table_index];
+      for (size_t i = 0; i < init.entries.size(); i++) {
+        size_t index = i + init.offset.val.i32_const;
+        if (index < table.values.size()) {
+          table.values[index] = init.entries[i];
+        }
+      }
+    }
+  }
+
   // Verifies the body (code) of a given function.
   void VerifyFunctionBody(uint32_t func_num, ModuleEnv* menv,
                           WasmFunction* function) {
@@ -570,26 +759,18 @@
     }
   }
 
-  // Reads a single 32-bit unsigned integer interpreted as an offset, checking
-  // the offset is within bounds and advances.
-  uint32_t consume_offset(const char* name = nullptr) {
-    uint32_t offset = consume_u32(name ? name : "offset");
-    if (offset > static_cast<uint32_t>(limit_ - start_)) {
-      error(pc_ - sizeof(uint32_t), "offset out of bounds of module");
-    }
-    return offset;
-  }
-
   // Reads a length-prefixed string, checking that it is within bounds. Returns
   // the offset of the string, and the length as an out parameter.
   uint32_t consume_string(uint32_t* length, bool validate_utf8) {
     *length = consume_u32v("string length");
     uint32_t offset = pc_offset();
-    TRACE("  +%u  %-20s: (%u bytes)\n", offset, "string", *length);
-    if (validate_utf8 && !unibrow::Utf8::Validate(pc_, *length)) {
-      error(pc_, "no valid UTF-8 string");
+    const byte* string_start = pc_;
+    // Consume bytes before validation to guarantee that the string is not oob.
+    consume_bytes(*length, "string");
+    if (ok() && validate_utf8 &&
+        !unibrow::Utf8::Validate(string_start, *length)) {
+      error(string_start, "no valid UTF-8 string");
     }
-    consume_bytes(*length);
     return offset;
   }
 
@@ -607,25 +788,134 @@
   }
 
   uint32_t consume_func_index(WasmModule* module, WasmFunction** func) {
+    return consume_index("function index", module->functions, func);
+  }
+
+  uint32_t consume_global_index(WasmModule* module, WasmGlobal** global) {
+    return consume_index("global index", module->globals, global);
+  }
+
+  uint32_t consume_table_index(WasmModule* module,
+                               WasmIndirectFunctionTable** table) {
+    return consume_index("table index", module->function_tables, table);
+  }
+
+  template <typename T>
+  uint32_t consume_index(const char* name, std::vector<T>& vector, T** ptr) {
     const byte* pos = pc_;
-    uint32_t func_index = consume_u32v("function index");
-    if (func_index >= module->functions.size()) {
-      error(pos, pos, "function index %u out of bounds (%d functions)",
-            func_index, static_cast<int>(module->functions.size()));
-      *func = nullptr;
+    uint32_t index = consume_u32v(name);
+    if (index >= vector.size()) {
+      error(pos, pos, "%s %u out of bounds (%d entries)", name, index,
+            static_cast<int>(vector.size()));
+      *ptr = nullptr;
       return 0;
     }
-    *func = &module->functions[func_index];
-    return func_index;
+    *ptr = &vector[index];
+    return index;
+  }
+
+  void consume_resizable_limits(const char* name, const char* units,
+                                uint32_t max_value, uint32_t* initial,
+                                uint32_t* maximum) {
+    uint32_t flags = consume_u32v("resizable limits flags");
+    const byte* pos = pc();
+    *initial = consume_u32v("initial size");
+    if (*initial > max_value) {
+      error(pos, pos,
+            "initial %s size (%u %s) is larger than maximum allowable (%u)",
+            name, *initial, units, max_value);
+    }
+    if (flags & 1) {
+      pos = pc();
+      *maximum = consume_u32v("maximum size");
+      if (*maximum > max_value) {
+        error(pos, pos,
+              "maximum %s size (%u %s) is larger than maximum allowable (%u)",
+              name, *maximum, units, max_value);
+      }
+      if (*maximum < *initial) {
+        error(pos, pos, "maximum %s size (%u %s) is less than initial (%u %s)",
+              name, *maximum, units, *initial, units);
+      }
+    } else {
+      *maximum = 0;
+    }
+  }
+
+  bool expect_u8(const char* name, uint8_t expected) {
+    const byte* pos = pc();
+    uint8_t value = consume_u8(name);
+    if (value != expected) {
+      error(pos, pos, "expected %s 0x%02x, got 0x%02x", name, expected, value);
+      return false;
+    }
+    return true;
+  }
+
+  WasmInitExpr consume_init_expr(WasmModule* module, LocalType expected) {
+    const byte* pos = pc();
+    uint8_t opcode = consume_u8("opcode");
+    WasmInitExpr expr;
+    unsigned len = 0;
+    switch (opcode) {
+      case kExprGetGlobal: {
+        GlobalIndexOperand operand(this, pc() - 1);
+        expr.kind = WasmInitExpr::kGlobalIndex;
+        expr.val.global_index = operand.index;
+        len = operand.length;
+        break;
+      }
+      case kExprI32Const: {
+        ImmI32Operand operand(this, pc() - 1);
+        expr.kind = WasmInitExpr::kI32Const;
+        expr.val.i32_const = operand.value;
+        len = operand.length;
+        break;
+      }
+      case kExprF32Const: {
+        ImmF32Operand operand(this, pc() - 1);
+        expr.kind = WasmInitExpr::kF32Const;
+        expr.val.f32_const = operand.value;
+        len = operand.length;
+        break;
+      }
+      case kExprI64Const: {
+        ImmI64Operand operand(this, pc() - 1);
+        expr.kind = WasmInitExpr::kI64Const;
+        expr.val.i64_const = operand.value;
+        len = operand.length;
+        break;
+      }
+      case kExprF64Const: {
+        ImmF64Operand operand(this, pc() - 1);
+        expr.kind = WasmInitExpr::kF64Const;
+        expr.val.f64_const = operand.value;
+        len = operand.length;
+        break;
+      }
+      default: {
+        error("invalid opcode in initialization expression");
+        expr.kind = WasmInitExpr::kNone;
+        expr.val.i32_const = 0;
+      }
+    }
+    consume_bytes(len, "init code");
+    if (!expect_u8("end opcode", kExprEnd)) {
+      expr.kind = WasmInitExpr::kNone;
+    }
+    if (expected != kAstStmt && TypeOf(module, expr) != kAstI32) {
+      error(pos, pos, "type error in init expression, expected %s, got %s",
+            WasmOpcodes::TypeName(expected),
+            WasmOpcodes::TypeName(TypeOf(module, expr)));
+    }
+    return expr;
   }
 
   // Reads a single 8-bit integer, interpreting it as a local type.
-  LocalType consume_local_type() {
-    byte val = consume_u8("local type");
+  LocalType consume_value_type() {
+    byte val = consume_u8("value type");
     LocalTypeCode t = static_cast<LocalTypeCode>(val);
     switch (t) {
-      case kLocalVoid:
-        return kAstStmt;
       case kLocalI32:
         return kAstI32;
       case kLocalI64:
@@ -634,6 +924,8 @@
         return kAstF32;
       case kLocalF64:
         return kAstF64;
+      case kLocalS128:
+        return kAstS128;
       default:
         error(pc_ - 1, "invalid local type");
         return kAstStmt;
@@ -642,19 +934,12 @@
 
   // Parses a type entry, which is currently limited to functions only.
   FunctionSig* consume_sig() {
-    const byte* pos = pc_;
-    byte form = consume_u8("type form");
-    if (form != kWasmFunctionTypeForm) {
-      error(pos, pos, "expected function type form (0x%02x), got: 0x%02x",
-            kWasmFunctionTypeForm, form);
-      return nullptr;
-    }
+    if (!expect_u8("type form", kWasmFunctionTypeForm)) return nullptr;
     // parse parameter types
     uint32_t param_count = consume_u32v("param count");
     std::vector<LocalType> params;
-    for (uint32_t i = 0; i < param_count; ++i) {
-      LocalType param = consume_local_type();
-      if (param == kAstStmt) error(pc_ - 1, "invalid void parameter type");
+    for (uint32_t i = 0; ok() && i < param_count; ++i) {
+      LocalType param = consume_value_type();
       params.push_back(param);
     }
 
@@ -667,12 +952,16 @@
       return nullptr;
     }
     std::vector<LocalType> returns;
-    for (uint32_t i = 0; i < return_count; ++i) {
-      LocalType ret = consume_local_type();
-      if (ret == kAstStmt) error(pc_ - 1, "invalid void return type");
+    for (uint32_t i = 0; ok() && i < return_count; ++i) {
+      LocalType ret = consume_value_type();
       returns.push_back(ret);
     }
 
+    if (failed()) {
+      // Decoding failed, return void -> void
+      return new (module_zone) FunctionSig(0, 0, nullptr);
+    }
+
     // FunctionSig stores the return types first.
     LocalType* buffer =
         module_zone->NewArray<LocalType>(param_count + return_count);
@@ -711,7 +1000,7 @@
 };
 
 Vector<const byte> FindSection(const byte* module_start, const byte* module_end,
-                               WasmSection::Code code) {
+                               WasmSectionCode code) {
   Decoder decoder(module_start, module_end);
 
   uint32_t magic_word = decoder.consume_u32("wasm magic");
@@ -720,24 +1009,14 @@
   uint32_t magic_version = decoder.consume_u32("wasm version");
   if (magic_version != kWasmVersion) decoder.error("wrong wasm version");
 
-  while (decoder.more() && decoder.ok()) {
-    // Read the section name.
-    uint32_t string_length = decoder.consume_u32v("section name length");
-    const byte* section_name_start = decoder.pc();
-    decoder.consume_bytes(string_length);
-    if (decoder.failed()) break;
-
-    WasmSection::Code section =
-        WasmSection::lookup(section_name_start, string_length);
-
-    // Read and check the section size.
-    uint32_t section_length = decoder.consume_u32v("section length");
-
-    const byte* section_start = decoder.pc();
-    decoder.consume_bytes(section_length);
-    if (section == code && decoder.ok()) {
-      return Vector<const uint8_t>(section_start, section_length);
+  WasmSectionIterator section_iter(decoder);
+  while (section_iter.more()) {
+    if (section_iter.section_code() == code) {
+      return Vector<const uint8_t>(section_iter.section_start(),
+                                   section_iter.section_length());
     }
+    decoder.consume_bytes(section_iter.section_length(), "section payload");
+    section_iter.advance();
   }
 
   return Vector<const uint8_t>();
@@ -772,6 +1051,13 @@
   return decoder.DecodeFunctionSignature(start);
 }
 
+WasmInitExpr DecodeWasmInitExprForTesting(const byte* start, const byte* end) {
+  AccountingAllocator allocator;
+  Zone zone(&allocator);
+  ModuleDecoder decoder(&zone, start, end, kWasmOrigin);
+  return decoder.DecodeInitExpr(start);
+}
+
 FunctionResult DecodeWasmFunction(Isolate* isolate, Zone* zone,
                                   ModuleEnv* module_env,
                                   const byte* function_start,
@@ -789,15 +1075,26 @@
   return decoder.DecodeSingleFunction(module_env, function);
 }
 
-FunctionOffsetsResult DecodeWasmFunctionOffsets(const byte* module_start,
-                                                const byte* module_end) {
+FunctionOffsetsResult DecodeWasmFunctionOffsets(
+    const byte* module_start, const byte* module_end,
+    uint32_t num_imported_functions) {
+  // Find and decode the code section.
   Vector<const byte> code_section =
-      FindSection(module_start, module_end, WasmSection::Code::FunctionBodies);
+      FindSection(module_start, module_end, kCodeSectionCode);
   Decoder decoder(code_section.start(), code_section.end());
-  if (!code_section.start()) decoder.error("no code section");
+  FunctionOffsets table;
+  if (!code_section.start()) {
+    decoder.error("no code section");
+    return decoder.toResult(std::move(table));
+  }
+
+  // Reserve entries for the imported functions.
+  table.reserve(num_imported_functions);
+  for (uint32_t i = 0; i < num_imported_functions; i++) {
+    table.push_back(std::make_pair(0, 0));
+  }
 
   uint32_t functions_count = decoder.consume_u32v("functions count");
-  FunctionOffsets table;
   // Take care of invalid input here.
   if (functions_count < static_cast<unsigned>(code_section.length()) / 2)
     table.reserve(functions_count);
diff --git a/src/wasm/module-decoder.h b/src/wasm/module-decoder.h
index dd6bd3b..22a313c 100644
--- a/src/wasm/module-decoder.h
+++ b/src/wasm/module-decoder.h
@@ -12,9 +12,11 @@
 namespace internal {
 namespace wasm {
 // Decodes the bytes of a WASM module between {module_start} and {module_end}.
-ModuleResult DecodeWasmModule(Isolate* isolate, Zone* zone,
-                              const byte* module_start, const byte* module_end,
-                              bool verify_functions, ModuleOrigin origin);
+V8_EXPORT_PRIVATE ModuleResult DecodeWasmModule(Isolate* isolate, Zone* zone,
+                                                const byte* module_start,
+                                                const byte* module_end,
+                                                bool verify_functions,
+                                                ModuleOrigin origin);
 
 // Exposed for testing. Decodes a single function signature, allocating it
 // in the given zone. Returns {nullptr} upon failure.
@@ -30,8 +32,11 @@
 // Extracts the function offset table from the wasm module bytes.
 // Returns a vector with <offset, length> entries, or failure if the wasm bytes
 // are detected as invalid. Note that this validation is not complete.
-FunctionOffsetsResult DecodeWasmFunctionOffsets(const byte* module_start,
-                                                const byte* module_end);
+FunctionOffsetsResult DecodeWasmFunctionOffsets(
+    const byte* module_start, const byte* module_end,
+    uint32_t num_imported_functions);
+
+WasmInitExpr DecodeWasmInitExprForTesting(const byte* start, const byte* end);
 
 }  // namespace wasm
 }  // namespace internal
diff --git a/src/wasm/switch-logic.h b/src/wasm/switch-logic.h
index 8cef08b..160e0d6 100644
--- a/src/wasm/switch-logic.h
+++ b/src/wasm/switch-logic.h
@@ -5,8 +5,8 @@
 #ifndef V8_WASM_SWITCH_LOGIC_H
 #define V8_WASM_SWITCH_LOGIC_H
 
-#include "src/zone-containers.h"
-#include "src/zone.h"
+#include "src/zone/zone-containers.h"
+#include "src/zone/zone.h"
 
 namespace v8 {
 namespace internal {
diff --git a/src/wasm/wasm-debug.cc b/src/wasm/wasm-debug.cc
index 54e7100..42a8e5f 100644
--- a/src/wasm/wasm-debug.cc
+++ b/src/wasm/wasm-debug.cc
@@ -32,11 +32,15 @@
   FunctionOffsetsResult function_offsets;
   {
     DisallowHeapAllocation no_gc;
+    Handle<JSObject> wasm_object(debug_info->wasm_object(), isolate);
+    uint32_t num_imported_functions =
+        wasm::GetNumImportedFunctions(wasm_object);
     SeqOneByteString *wasm_bytes =
         wasm::GetWasmBytes(debug_info->wasm_object());
     const byte *bytes_start = wasm_bytes->GetChars();
     const byte *bytes_end = bytes_start + wasm_bytes->length();
-    function_offsets = wasm::DecodeWasmFunctionOffsets(bytes_start, bytes_end);
+    function_offsets = wasm::DecodeWasmFunctionOffsets(bytes_start, bytes_end,
+                                                       num_imported_functions);
   }
   DCHECK(function_offsets.ok());
   size_t array_size = 2 * kIntSize * function_offsets.val.size();
@@ -179,7 +183,7 @@
     Vector<const uint8_t> bytes_vec = GetFunctionBytes(debug_info, func_index);
     DisallowHeapAllocation no_gc;
 
-    base::AccountingAllocator allocator;
+    AccountingAllocator allocator;
     bool ok = PrintAst(
         &allocator, FunctionBodyForTesting(bytes_vec.start(), bytes_vec.end()),
         disassembly_os, nullptr);
@@ -208,7 +212,7 @@
     Vector<const uint8_t> bytes_vec = GetFunctionBytes(debug_info, func_index);
     DisallowHeapAllocation no_gc;
 
-    v8::base::AccountingAllocator allocator;
+    AccountingAllocator allocator;
     bool ok = PrintAst(
         &allocator, FunctionBodyForTesting(bytes_vec.start(), bytes_vec.end()),
         null_stream, &offset_table_vec);
diff --git a/src/wasm/wasm-external-refs.cc b/src/wasm/wasm-external-refs.cc
index 09294c2..4c4c91b 100644
--- a/src/wasm/wasm-external-refs.cc
+++ b/src/wasm/wasm-external-refs.cc
@@ -206,9 +206,6 @@
 void float64_pow_wrapper(double* param0, double* param1) {
   double x = ReadDoubleValue(param0);
   double y = ReadDoubleValue(param1);
-  if (std::isnan(y) || ((x == 1 || x == -1) && std::isinf(y))) {
-    WriteDoubleValue(param0, std::numeric_limits<double>::quiet_NaN());
-  }
   WriteDoubleValue(param0, Pow(x, y));
 }
 }  // namespace wasm
diff --git a/src/wasm/wasm-interpreter.cc b/src/wasm/wasm-interpreter.cc
index 7e3127d..2ac681e 100644
--- a/src/wasm/wasm-interpreter.cc
+++ b/src/wasm/wasm-interpreter.cc
@@ -10,8 +10,8 @@
 #include "src/wasm/wasm-external-refs.h"
 #include "src/wasm/wasm-module.h"
 
-#include "src/base/accounting-allocator.h"
-#include "src/zone-containers.h"
+#include "src/zone/accounting-allocator.h"
+#include "src/zone/zone-containers.h"
 
 namespace v8 {
 namespace internal {
@@ -654,6 +654,48 @@
   return bit_cast<int64_t>(a);
 }
 
+static inline int32_t ExecuteGrowMemory(uint32_t delta_pages,
+                                        WasmModuleInstance* instance) {
+  // TODO(ahaas): Move memory allocation to wasm-module.cc for better
+  // encapsulation.
+  if (delta_pages > wasm::WasmModule::kMaxMemPages) {
+    return -1;
+  }
+  uint32_t old_size = instance->mem_size;
+  uint32_t new_size;
+  byte* new_mem_start;
+  if (instance->mem_size == 0) {
+    if (delta_pages > wasm::WasmModule::kMaxMemPages) {
+      return -1;
+    }
+    // TODO(gdeepti): Fix bounds check to take into account size of memtype.
+    new_size = delta_pages * wasm::WasmModule::kPageSize;
+    new_mem_start = static_cast<byte*>(calloc(new_size, sizeof(byte)));
+    if (!new_mem_start) {
+      return -1;
+    }
+  } else {
+    DCHECK_NOT_NULL(instance->mem_start);
+    new_size = old_size + delta_pages * wasm::WasmModule::kPageSize;
+    if (new_size >
+        wasm::WasmModule::kMaxMemPages * wasm::WasmModule::kPageSize) {
+      return -1;
+    }
+    new_mem_start = static_cast<byte*>(realloc(instance->mem_start, new_size));
+    if (!new_mem_start) {
+      return -1;
+    }
+    // Zero initializing uninitialized memory from realloc
+    memset(new_mem_start + old_size, 0, new_size - old_size);
+  }
+  instance->mem_start = new_mem_start;
+  instance->mem_size = new_size;
+  // realloc
+  // update mem_start
+  // update mem_size
+  return static_cast<int32_t>(old_size / WasmModule::kPageSize);
+}
+
 enum InternalOpcode {
 #define DECL_INTERNAL_ENUM(name, value) kInternal##name = value,
   FOREACH_INTERNAL_OPCODE(DECL_INTERNAL_ENUM)
@@ -680,54 +722,38 @@
  public:
   ControlTransferMap map_;
 
-  ControlTransfers(Zone* zone, size_t locals_encoded_size, const byte* start,
-                   const byte* end)
+  ControlTransfers(Zone* zone, ModuleEnv* env, AstLocalDecls* locals,
+                   const byte* start, const byte* end)
       : map_(zone) {
-    // A control reference including from PC, from value depth, and whether
-    // a value is explicitly passed (e.g. br/br_if/br_table with value).
-    struct CRef {
-      const byte* pc;
-      sp_t value_depth;
-      bool explicit_value;
-    };
-
     // Represents a control flow label.
     struct CLabel : public ZoneObject {
       const byte* target;
-      size_t value_depth;
-      ZoneVector<CRef> refs;
+      ZoneVector<const byte*> refs;
 
-      CLabel(Zone* zone, size_t v)
-          : target(nullptr), value_depth(v), refs(zone) {}
+      explicit CLabel(Zone* zone) : target(nullptr), refs(zone) {}
 
       // Bind this label to the given PC.
-      void Bind(ControlTransferMap* map, const byte* start, const byte* pc,
-                bool expect_value) {
+      void Bind(ControlTransferMap* map, const byte* start, const byte* pc) {
         DCHECK_NULL(target);
         target = pc;
-        for (auto from : refs) {
-          auto pcdiff = static_cast<pcdiff_t>(target - from.pc);
-          auto spdiff = static_cast<spdiff_t>(from.value_depth - value_depth);
-          ControlTransfer::StackAction action = ControlTransfer::kNoAction;
-          if (expect_value && !from.explicit_value) {
-            action = spdiff == 0 ? ControlTransfer::kPushVoid
-                                 : ControlTransfer::kPopAndRepush;
-          }
-          pc_t offset = static_cast<size_t>(from.pc - start);
-          (*map)[offset] = {pcdiff, spdiff, action};
+        for (auto from_pc : refs) {
+          auto pcdiff = static_cast<pcdiff_t>(target - from_pc);
+          size_t offset = static_cast<size_t>(from_pc - start);
+          (*map)[offset] = pcdiff;
         }
       }
 
       // Reference this label from the given location.
-      void Ref(ControlTransferMap* map, const byte* start, CRef from) {
-        DCHECK_GE(from.value_depth, value_depth);
+      void Ref(ControlTransferMap* map, const byte* start,
+               const byte* from_pc) {
         if (target) {
-          auto pcdiff = static_cast<pcdiff_t>(target - from.pc);
-          auto spdiff = static_cast<spdiff_t>(from.value_depth - value_depth);
-          pc_t offset = static_cast<size_t>(from.pc - start);
-          (*map)[offset] = {pcdiff, spdiff, ControlTransfer::kNoAction};
+          // Target being bound before a reference means this is a loop.
+          DCHECK_EQ(kExprLoop, *target);
+          auto pcdiff = static_cast<pcdiff_t>(target - from_pc);
+          size_t offset = static_cast<size_t>(from_pc - start);
+          (*map)[offset] = pcdiff;
         } else {
-          refs.push_back(from);
+          refs.push_back(from_pc);
         }
       }
     };
@@ -738,122 +764,104 @@
       CLabel* end_label;
       CLabel* else_label;
 
-      void Ref(ControlTransferMap* map, const byte* start, const byte* from_pc,
-               size_t from_value_depth, bool explicit_value) {
-        end_label->Ref(map, start, {from_pc, from_value_depth, explicit_value});
+      void Ref(ControlTransferMap* map, const byte* start,
+               const byte* from_pc) {
+        end_label->Ref(map, start, from_pc);
       }
     };
 
     // Compute the ControlTransfer map.
-    // This works by maintaining a stack of control constructs similar to the
+    // This algorithm maintains a stack of control constructs similar to the
     // AST decoder. The {control_stack} allows matching {br,br_if,br_table}
     // bytecodes with their target, as well as determining whether the current
     // bytecodes are within the true or false block of an else.
-    // The value stack depth is tracked as {value_depth} and is needed to
-    // determine how many values to pop off the stack for explicit and
-    // implicit control flow.
-
     std::vector<Control> control_stack;
-    size_t value_depth = 0;
-    for (BytecodeIterator i(start + locals_encoded_size, end); i.has_next();
-         i.next()) {
+    CLabel* func_label = new (zone) CLabel(zone);
+    control_stack.push_back({start, func_label, nullptr});
+    for (BytecodeIterator i(start, end, locals); i.has_next(); i.next()) {
       WasmOpcode opcode = i.current();
-      TRACE("@%u: control %s (depth = %zu)\n", i.pc_offset(),
-            WasmOpcodes::OpcodeName(opcode), value_depth);
+      TRACE("@%u: control %s\n", i.pc_offset(),
+            WasmOpcodes::OpcodeName(opcode));
       switch (opcode) {
         case kExprBlock: {
-          TRACE("control @%u $%zu: Block\n", i.pc_offset(), value_depth);
-          CLabel* label = new (zone) CLabel(zone, value_depth);
+          TRACE("control @%u: Block\n", i.pc_offset());
+          CLabel* label = new (zone) CLabel(zone);
           control_stack.push_back({i.pc(), label, nullptr});
           break;
         }
         case kExprLoop: {
-          TRACE("control @%u $%zu: Loop\n", i.pc_offset(), value_depth);
-          CLabel* label1 = new (zone) CLabel(zone, value_depth);
-          CLabel* label2 = new (zone) CLabel(zone, value_depth);
-          control_stack.push_back({i.pc(), label1, nullptr});
-          control_stack.push_back({i.pc(), label2, nullptr});
-          label2->Bind(&map_, start, i.pc(), false);
+          TRACE("control @%u: Loop\n", i.pc_offset());
+          CLabel* label = new (zone) CLabel(zone);
+          control_stack.push_back({i.pc(), label, nullptr});
+          label->Bind(&map_, start, i.pc());
           break;
         }
         case kExprIf: {
-          TRACE("control @%u $%zu: If\n", i.pc_offset(), value_depth);
-          value_depth--;
-          CLabel* end_label = new (zone) CLabel(zone, value_depth);
-          CLabel* else_label = new (zone) CLabel(zone, value_depth);
+          TRACE("control @%u: If\n", i.pc_offset());
+          CLabel* end_label = new (zone) CLabel(zone);
+          CLabel* else_label = new (zone) CLabel(zone);
           control_stack.push_back({i.pc(), end_label, else_label});
-          else_label->Ref(&map_, start, {i.pc(), value_depth, false});
+          else_label->Ref(&map_, start, i.pc());
           break;
         }
         case kExprElse: {
           Control* c = &control_stack.back();
-          TRACE("control @%u $%zu: Else\n", i.pc_offset(), value_depth);
-          c->end_label->Ref(&map_, start, {i.pc(), value_depth, false});
-          value_depth = c->end_label->value_depth;
+          TRACE("control @%u: Else\n", i.pc_offset());
+          c->end_label->Ref(&map_, start, i.pc());
           DCHECK_NOT_NULL(c->else_label);
-          c->else_label->Bind(&map_, start, i.pc() + 1, false);
+          c->else_label->Bind(&map_, start, i.pc() + 1);
           c->else_label = nullptr;
           break;
         }
         case kExprEnd: {
           Control* c = &control_stack.back();
-          TRACE("control @%u $%zu: End\n", i.pc_offset(), value_depth);
+          TRACE("control @%u: End\n", i.pc_offset());
           if (c->end_label->target) {
             // only loops have bound labels.
             DCHECK_EQ(kExprLoop, *c->pc);
-            control_stack.pop_back();
-            c = &control_stack.back();
+          } else {
+            if (c->else_label) c->else_label->Bind(&map_, start, i.pc());
+            c->end_label->Bind(&map_, start, i.pc() + 1);
           }
-          if (c->else_label)
-            c->else_label->Bind(&map_, start, i.pc() + 1, true);
-          c->end_label->Ref(&map_, start, {i.pc(), value_depth, false});
-          c->end_label->Bind(&map_, start, i.pc() + 1, true);
-          value_depth = c->end_label->value_depth + 1;
           control_stack.pop_back();
           break;
         }
         case kExprBr: {
           BreakDepthOperand operand(&i, i.pc());
-          TRACE("control @%u $%zu: Br[arity=%u, depth=%u]\n", i.pc_offset(),
-                value_depth, operand.arity, operand.depth);
-          value_depth -= operand.arity;
-          control_stack[control_stack.size() - operand.depth - 1].Ref(
-              &map_, start, i.pc(), value_depth, operand.arity > 0);
-          value_depth++;
+          TRACE("control @%u: Br[depth=%u]\n", i.pc_offset(), operand.depth);
+          Control* c = &control_stack[control_stack.size() - operand.depth - 1];
+          c->Ref(&map_, start, i.pc());
           break;
         }
         case kExprBrIf: {
           BreakDepthOperand operand(&i, i.pc());
-          TRACE("control @%u $%zu: BrIf[arity=%u, depth=%u]\n", i.pc_offset(),
-                value_depth, operand.arity, operand.depth);
-          value_depth -= (operand.arity + 1);
-          control_stack[control_stack.size() - operand.depth - 1].Ref(
-              &map_, start, i.pc(), value_depth, operand.arity > 0);
-          value_depth++;
+          TRACE("control @%u: BrIf[depth=%u]\n", i.pc_offset(), operand.depth);
+          Control* c = &control_stack[control_stack.size() - operand.depth - 1];
+          c->Ref(&map_, start, i.pc());
           break;
         }
         case kExprBrTable: {
           BranchTableOperand operand(&i, i.pc());
-          TRACE("control @%u $%zu: BrTable[arity=%u count=%u]\n", i.pc_offset(),
-                value_depth, operand.arity, operand.table_count);
-          value_depth -= (operand.arity + 1);
-          for (uint32_t j = 0; j < operand.table_count + 1; ++j) {
-            uint32_t target = operand.read_entry(&i, j);
-            control_stack[control_stack.size() - target - 1].Ref(
-                &map_, start, i.pc() + j, value_depth, operand.arity > 0);
+          BranchTableIterator iterator(&i, operand);
+          TRACE("control @%u: BrTable[count=%u]\n", i.pc_offset(),
+                operand.table_count);
+          while (iterator.has_next()) {
+            uint32_t j = iterator.cur_index();
+            uint32_t target = iterator.next();
+            Control* c = &control_stack[control_stack.size() - target - 1];
+            c->Ref(&map_, start, i.pc() + j);
           }
-          value_depth++;
           break;
         }
         default: {
-          value_depth = value_depth - OpcodeArity(i.pc(), end) + 1;
           break;
         }
       }
     }
+    if (!func_label->target) func_label->Bind(&map_, start, end);
   }
 
-  ControlTransfer Lookup(pc_t from) {
+  pcdiff_t Lookup(pc_t from) {
     auto result = map_.find(from);
     if (result == map_.end()) {
       V8_Fatal(__FILE__, __LINE__, "no control target for pc %zu", from);
@@ -899,7 +907,7 @@
     if (function->func_index < interpreter_code_.size()) {
       InterpreterCode* code = &interpreter_code_[function->func_index];
       DCHECK_EQ(function, code->function);
-      return code;
+      return Preprocess(code);
     }
     return nullptr;
   }
@@ -923,9 +931,9 @@
     if (code->targets == nullptr && code->start) {
       // Compute the control targets map and the local declarations.
       CHECK(DecodeLocalDecls(code->locals, code->start, code->end));
-      code->targets =
-          new (zone_) ControlTransfers(zone_, code->locals.decls_encoded_size,
-                                       code->orig_start, code->orig_end);
+      ModuleEnv env = {module_, nullptr, kWasmOrigin};
+      code->targets = new (zone_) ControlTransfers(
+          zone_, &env, &code->locals, code->orig_start, code->orig_end);
     }
     return code;
   }
@@ -964,6 +972,7 @@
         instance_(instance),
         stack_(zone),
         frames_(zone),
+        blocks_(zone),
         state_(WasmInterpreter::STOPPED),
         break_pc_(kInvalidPc),
         trap_reason_(kTrapCount) {}
@@ -984,6 +993,9 @@
       stack_.push_back(args[i]);
     }
     frames_.back().ret_pc = InitLocals(code);
+    blocks_.push_back(
+        {0, stack_.size(), frames_.size(),
+         static_cast<uint32_t>(code->function->sig->return_count())});
     TRACE("  => PushFrame(#%u @%zu)\n", code->function->func_index,
           frames_.back().ret_pc);
   }
@@ -1032,11 +1044,11 @@
     return nullptr;
   }
 
-  virtual WasmVal GetReturnValue() {
+  virtual WasmVal GetReturnValue(int index) {
     if (state_ == WasmInterpreter::TRAPPED) return WasmVal(0xdeadbeef);
     CHECK_EQ(WasmInterpreter::FINISHED, state_);
-    CHECK_EQ(1, stack_.size());
-    return stack_[0];
+    CHECK_LT(static_cast<size_t>(index), stack_.size());
+    return stack_[index];
   }
 
   virtual pc_t GetBreakpointPc() { return break_pc_; }
@@ -1060,10 +1072,18 @@
     sp_t llimit() { return plimit() + code->locals.total_local_count; }
   };
 
+  struct Block {
+    pc_t pc;
+    sp_t sp;
+    size_t fp;
+    unsigned arity;
+  };
+
   CodeMap* codemap_;
   WasmModuleInstance* instance_;
   ZoneVector<WasmVal> stack_;
   ZoneVector<Frame> frames_;
+  ZoneVector<Block> blocks_;
   WasmInterpreter::State state_;
   pc_t break_pc_;
   TrapReason trap_reason_;
@@ -1088,6 +1108,9 @@
     DCHECK_GE(stack_.size(), arity);
     // The parameters will overlap the arguments already on the stack.
     frames_.push_back({code, 0, 0, stack_.size() - arity});
+    blocks_.push_back(
+        {0, stack_.size(), frames_.size(),
+         static_cast<uint32_t>(code->function->sig->return_count())});
     frames_.back().ret_pc = InitLocals(code);
     TRACE("  => push func#%u @%zu\n", code->function->func_index,
           frames_.back().ret_pc);
@@ -1126,21 +1149,38 @@
 
   bool SkipBreakpoint(InterpreterCode* code, pc_t pc) {
     if (pc == break_pc_) {
+      // Skip the previously hit breakpoint when resuming.
       break_pc_ = kInvalidPc;
       return true;
     }
     return false;
   }
 
-  bool DoReturn(InterpreterCode** code, pc_t* pc, pc_t* limit, WasmVal val) {
+  int LookupTarget(InterpreterCode* code, pc_t pc) {
+    return static_cast<int>(code->targets->Lookup(pc));
+  }
+
+  int DoBreak(InterpreterCode* code, pc_t pc, size_t depth) {
+    size_t bp = blocks_.size() - depth - 1;
+    Block* target = &blocks_[bp];
+    DoStackTransfer(target->sp, target->arity);
+    blocks_.resize(bp);
+    return LookupTarget(code, pc);
+  }
+
+  bool DoReturn(InterpreterCode** code, pc_t* pc, pc_t* limit, size_t arity) {
     DCHECK_GT(frames_.size(), 0u);
-    stack_.resize(frames_.back().sp);
+    // Pop all blocks for this frame.
+    while (!blocks_.empty() && blocks_.back().fp == frames_.size()) {
+      blocks_.pop_back();
+    }
+
+    sp_t dest = frames_.back().sp;
     frames_.pop_back();
     if (frames_.size() == 0) {
-      // A return from the top frame terminates the execution.
+      // A return from the last frame terminates the execution.
       state_ = WasmInterpreter::FINISHED;
-      stack_.clear();
-      stack_.push_back(val);
+      DoStackTransfer(0, arity);
       TRACE("  => finish\n");
       return false;
     } else {
@@ -1149,16 +1189,8 @@
       *code = top->code;
       *pc = top->ret_pc;
       *limit = top->code->end - top->code->start;
-      if (top->code->start[top->call_pc] == kExprCallIndirect ||
-          (top->code->orig_start &&
-           top->code->orig_start[top->call_pc] == kExprCallIndirect)) {
-        // UGLY: An indirect call has the additional function index on the
-        // stack.
-        stack_.pop_back();
-      }
       TRACE("  => pop func#%u @%zu\n", (*code)->function->func_index, *pc);
-
-      stack_.push_back(val);
+      DoStackTransfer(dest, arity);
       return true;
     }
   }
@@ -1169,31 +1201,21 @@
     *limit = target->end - target->start;
   }
 
-  // Adjust the program counter {pc} and the stack contents according to the
-  // code's precomputed control transfer map. Returns the different between
-  // the new pc and the old pc.
-  int DoControlTransfer(InterpreterCode* code, pc_t pc) {
-    auto target = code->targets->Lookup(pc);
-    switch (target.action) {
-      case ControlTransfer::kNoAction:
-        TRACE("  action [sp-%u]\n", target.spdiff);
-        PopN(target.spdiff);
-        break;
-      case ControlTransfer::kPopAndRepush: {
-        WasmVal val = Pop();
-        TRACE("  action [pop x, sp-%u, push x]\n", target.spdiff - 1);
-        DCHECK_GE(target.spdiff, 1u);
-        PopN(target.spdiff - 1);
-        Push(pc, val);
-        break;
-      }
-      case ControlTransfer::kPushVoid:
-        TRACE("  action [sp-%u, push void]\n", target.spdiff);
-        PopN(target.spdiff);
-        Push(pc, WasmVal());
-        break;
+  // Copies {arity} values on the top of the stack down the stack to {dest},
+  // dropping the values in-between.
+  void DoStackTransfer(sp_t dest, size_t arity) {
+    // before: |---------------| pop_count | arity |
+    //         ^ 0             ^ dest              ^ stack_.size()
+    //
+    // after:  |---------------| arity |
+    //         ^ 0                     ^ stack_.size()
+    DCHECK_LE(dest, stack_.size());
+    DCHECK_LE(dest + arity, stack_.size());
+    size_t pop_count = stack_.size() - dest - arity;
+    for (size_t i = 0; i < arity; i++) {
+      stack_[dest + i] = stack_[dest + pop_count + i];
     }
-    return target.pcdiff;
+    stack_.resize(stack_.size() - pop_count);
   }
 
   void Execute(InterpreterCode* code, pc_t pc, int max) {
@@ -1209,8 +1231,8 @@
       if (pc >= limit) {
         // Fell off end of code; do an implicit return.
         TRACE("@%-3zu: ImplicitReturn\n", pc);
-        WasmVal val = PopArity(code->function->sig->return_count());
-        if (!DoReturn(&code, &pc, &limit, val)) return;
+        if (!DoReturn(&code, &pc, &limit, code->function->sig->return_count()))
+          return;
         decoder.Reset(code->start, code->end);
         continue;
       }
@@ -1243,27 +1265,37 @@
 
       switch (orig) {
         case kExprNop:
-          Push(pc, WasmVal());
           break;
-        case kExprBlock:
+        case kExprBlock: {
+          BlockTypeOperand operand(&decoder, code->at(pc));
+          blocks_.push_back({pc, stack_.size(), frames_.size(), operand.arity});
+          len = 1 + operand.length;
+          break;
+        }
         case kExprLoop: {
-          // Do nothing.
+          BlockTypeOperand operand(&decoder, code->at(pc));
+          blocks_.push_back({pc, stack_.size(), frames_.size(), 0});
+          len = 1 + operand.length;
           break;
         }
         case kExprIf: {
+          BlockTypeOperand operand(&decoder, code->at(pc));
           WasmVal cond = Pop();
           bool is_true = cond.to<uint32_t>() != 0;
+          blocks_.push_back({pc, stack_.size(), frames_.size(), operand.arity});
           if (is_true) {
             // fall through to the true block.
+            len = 1 + operand.length;
             TRACE("  true => fallthrough\n");
           } else {
-            len = DoControlTransfer(code, pc);
+            len = LookupTarget(code, pc);
             TRACE("  false => @%zu\n", pc + len);
           }
           break;
         }
         case kExprElse: {
-          len = DoControlTransfer(code, pc);
+          blocks_.pop_back();
+          len = LookupTarget(code, pc);
           TRACE("  end => @%zu\n", pc + len);
           break;
         }
@@ -1276,42 +1308,34 @@
         }
         case kExprBr: {
           BreakDepthOperand operand(&decoder, code->at(pc));
-          WasmVal val = PopArity(operand.arity);
-          len = DoControlTransfer(code, pc);
+          len = DoBreak(code, pc, operand.depth);
           TRACE("  br => @%zu\n", pc + len);
-          if (operand.arity > 0) Push(pc, val);
           break;
         }
         case kExprBrIf: {
           BreakDepthOperand operand(&decoder, code->at(pc));
           WasmVal cond = Pop();
-          WasmVal val = PopArity(operand.arity);
           bool is_true = cond.to<uint32_t>() != 0;
           if (is_true) {
-            len = DoControlTransfer(code, pc);
+            len = DoBreak(code, pc, operand.depth);
             TRACE("  br_if => @%zu\n", pc + len);
-            if (operand.arity > 0) Push(pc, val);
           } else {
             TRACE("  false => fallthrough\n");
             len = 1 + operand.length;
-            Push(pc, WasmVal());
           }
           break;
         }
         case kExprBrTable: {
           BranchTableOperand operand(&decoder, code->at(pc));
           uint32_t key = Pop().to<uint32_t>();
-          WasmVal val = PopArity(operand.arity);
           if (key >= operand.table_count) key = operand.table_count;
-          len = DoControlTransfer(code, pc + key) + key;
-          TRACE("  br[%u] => @%zu\n", key, pc + len);
-          if (operand.arity > 0) Push(pc, val);
+          len = key + DoBreak(code, pc + key, operand.table[key]);
+          TRACE("  br[%u] => @%zu\n", key, pc + key + len);
           break;
         }
         case kExprReturn: {
-          ReturnArityOperand operand(&decoder, code->at(pc));
-          WasmVal val = PopArity(operand.arity);
-          if (!DoReturn(&code, &pc, &limit, val)) return;
+          size_t arity = code->function->sig->return_count();
+          if (!DoReturn(&code, &pc, &limit, arity)) return;
           decoder.Reset(code->start, code->end);
           continue;
         }
@@ -1320,8 +1344,7 @@
           return CommitPc(pc);
         }
         case kExprEnd: {
-          len = DoControlTransfer(code, pc);
-          DCHECK_EQ(1, len);
+          blocks_.pop_back();
           break;
         }
         case kExprI8Const: {
@@ -1364,10 +1387,21 @@
           LocalIndexOperand operand(&decoder, code->at(pc));
           WasmVal val = Pop();
           stack_[frames_.back().sp + operand.index] = val;
+          len = 1 + operand.length;
+          break;
+        }
+        case kExprTeeLocal: {
+          LocalIndexOperand operand(&decoder, code->at(pc));
+          WasmVal val = Pop();
+          stack_[frames_.back().sp + operand.index] = val;
           Push(pc, val);
           len = 1 + operand.length;
           break;
         }
+        case kExprDrop: {
+          Pop();
+          break;
+        }
         case kExprCallFunction: {
           CallFunctionOperand operand(&decoder, code->at(pc));
           InterpreterCode* target = codemap()->GetCode(operand.index);
@@ -1378,9 +1412,7 @@
         }
         case kExprCallIndirect: {
           CallIndirectOperand operand(&decoder, code->at(pc));
-          size_t index = stack_.size() - operand.arity - 1;
-          DCHECK_LT(index, stack_.size());
-          uint32_t entry_index = stack_[index].to<uint32_t>();
+          uint32_t entry_index = Pop().to<uint32_t>();
           // Assume only one table for now.
           DCHECK_LE(module()->function_tables.size(), 1u);
           InterpreterCode* target = codemap()->GetIndirectCode(0, entry_index);
@@ -1395,10 +1427,6 @@
           decoder.Reset(code->start, code->end);
           continue;
         }
-        case kExprCallImport: {
-          UNIMPLEMENTED();
-          break;
-        }
         case kExprGetGlobal: {
           GlobalIndexOperand operand(&decoder, code->at(pc));
           const WasmGlobal* global = &module()->globals[operand.index];
@@ -1437,14 +1465,13 @@
           } else {
             UNREACHABLE();
           }
-          Push(pc, val);
           len = 1 + operand.length;
           break;
         }
 
 #define LOAD_CASE(name, ctype, mtype)                                       \
   case kExpr##name: {                                                       \
-    MemoryAccessOperand operand(&decoder, code->at(pc));                    \
+    MemoryAccessOperand operand(&decoder, code->at(pc), sizeof(ctype));     \
     uint32_t index = Pop().to<uint32_t>();                                  \
     size_t effective_mem_size = instance()->mem_size - sizeof(mtype);       \
     if (operand.offset > effective_mem_size ||                              \
@@ -1476,7 +1503,7 @@
 
 #define STORE_CASE(name, ctype, mtype)                                        \
   case kExpr##name: {                                                         \
-    MemoryAccessOperand operand(&decoder, code->at(pc));                      \
+    MemoryAccessOperand operand(&decoder, code->at(pc), sizeof(ctype));       \
     WasmVal val = Pop();                                                      \
     uint32_t index = Pop().to<uint32_t>();                                    \
     size_t effective_mem_size = instance()->mem_size - sizeof(mtype);         \
@@ -1486,7 +1513,6 @@
     }                                                                         \
     byte* addr = instance()->mem_start + operand.offset + index;              \
     WriteLittleEndianValue<mtype>(addr, static_cast<mtype>(val.to<ctype>())); \
-    Push(pc, val);                                                            \
     len = 1 + operand.length;                                                 \
     break;                                                                    \
   }
@@ -1546,9 +1572,14 @@
           ASMJS_STORE_CASE(F32AsmjsStoreMem, float, float);
           ASMJS_STORE_CASE(F64AsmjsStoreMem, double, double);
 #undef ASMJS_STORE_CASE
-
+        case kExprGrowMemory: {
+          uint32_t delta_pages = Pop().to<uint32_t>();
+          Push(pc, WasmVal(ExecuteGrowMemory(delta_pages, instance())));
+          break;
+        }
         case kExprMemorySize: {
-          Push(pc, WasmVal(static_cast<uint32_t>(instance()->mem_size)));
+          Push(pc, WasmVal(static_cast<uint32_t>(instance()->mem_size /
+                                                 WasmModule::kPageSize)));
           break;
         }
 #define EXECUTE_SIMPLE_BINOP(name, ctype, op)             \
@@ -1623,7 +1654,7 @@
 
   void Push(pc_t pc, WasmVal val) {
     // TODO(titzer): store PC as well?
-    stack_.push_back(val);
+    if (val.type != kAstStmt) stack_.push_back(val);
   }
 
   void TraceStack(const char* phase, pc_t pc) {
@@ -1700,7 +1731,7 @@
 // Implementation of the public interface of the interpreter.
 //============================================================================
 WasmInterpreter::WasmInterpreter(WasmModuleInstance* instance,
-                                 base::AccountingAllocator* allocator)
+                                 AccountingAllocator* allocator)
     : zone_(allocator),
       internals_(new (&zone_) WasmInterpreterInternals(&zone_, instance)) {}
 
@@ -1804,7 +1835,7 @@
 
 ControlTransferMap WasmInterpreter::ComputeControlTransfersForTesting(
     Zone* zone, const byte* start, const byte* end) {
-  ControlTransfers targets(zone, 0, start, end);
+  ControlTransfers targets(zone, nullptr, nullptr, start, end);
   return targets.map_;
 }
 
diff --git a/src/wasm/wasm-interpreter.h b/src/wasm/wasm-interpreter.h
index b106a20..b61e092 100644
--- a/src/wasm/wasm-interpreter.h
+++ b/src/wasm/wasm-interpreter.h
@@ -6,7 +6,7 @@
 #define V8_WASM_INTERPRETER_H_
 
 #include "src/wasm/wasm-opcodes.h"
-#include "src/zone-containers.h"
+#include "src/zone/zone-containers.h"
 
 namespace v8 {
 namespace base {
@@ -28,15 +28,7 @@
 
 const pc_t kInvalidPc = 0x80000000;
 
-// Visible for testing. A {ControlTransfer} helps the interpreter figure out
-// the target program counter and stack manipulations for a branch.
-struct ControlTransfer {
-  enum StackAction { kNoAction, kPopAndRepush, kPushVoid };
-  pcdiff_t pcdiff;  // adjustment to the program counter (positive or negative).
-  spdiff_t spdiff;  // number of elements to pop off the stack.
-  StackAction action;  // action to perform on the stack.
-};
-typedef ZoneMap<pc_t, ControlTransfer> ControlTransferMap;
+typedef ZoneMap<pc_t, pcdiff_t> ControlTransferMap;
 
 // Macro for defining union members.
 #define FOREACH_UNION_MEMBER(V) \
@@ -102,7 +94,7 @@
 };
 
 // An interpreter capable of executing WASM.
-class WasmInterpreter {
+class V8_EXPORT_PRIVATE WasmInterpreter {
  public:
   // State machine for a Thread:
   //                       +---------------Run()-----------+
@@ -132,15 +124,14 @@
     virtual int GetFrameCount() = 0;
     virtual const WasmFrame* GetFrame(int index) = 0;
     virtual WasmFrame* GetMutableFrame(int index) = 0;
-    virtual WasmVal GetReturnValue() = 0;
+    virtual WasmVal GetReturnValue(int index = 0) = 0;
 
     // Thread-specific breakpoints.
     bool SetBreakpoint(const WasmFunction* function, int pc, bool enabled);
     bool GetBreakpoint(const WasmFunction* function, int pc);
   };
 
-  WasmInterpreter(WasmModuleInstance* instance,
-                  base::AccountingAllocator* allocator);
+  WasmInterpreter(WasmModuleInstance* instance, AccountingAllocator* allocator);
   ~WasmInterpreter();
 
   //==========================================================================
@@ -190,9 +181,8 @@
   bool SetFunctionCodeForTesting(const WasmFunction* function,
                                  const byte* start, const byte* end);
 
-  // Computes the control targets for the given bytecode as {pc offset, sp
-  // offset}
-  // pairs. Used internally in the interpreter, but exposed for testing.
+  // Computes the control transfers for the given bytecode. Used internally in
+  // the interpreter, but exposed for testing.
   static ControlTransferMap ComputeControlTransfersForTesting(Zone* zone,
                                                               const byte* start,
                                                               const byte* end);
diff --git a/src/wasm/wasm-js.cc b/src/wasm/wasm-js.cc
index 10ae43c..254fd70 100644
--- a/src/wasm/wasm-js.cc
+++ b/src/wasm/wasm-js.cc
@@ -9,8 +9,6 @@
 #include "src/asmjs/asm-wasm-builder.h"
 #include "src/assert-scope.h"
 #include "src/ast/ast.h"
-#include "src/ast/scopes.h"
-#include "src/compiler.h"
 #include "src/execution.h"
 #include "src/factory.h"
 #include "src/handles.h"
@@ -18,7 +16,6 @@
 #include "src/objects.h"
 #include "src/parsing/parse-info.h"
 
-#include "src/wasm/encoder.h"
 #include "src/wasm/module-decoder.h"
 #include "src/wasm/wasm-js.h"
 #include "src/wasm/wasm-module.h"
@@ -31,6 +28,13 @@
 namespace v8 {
 
 namespace {
+i::Handle<i::String> v8_str(i::Isolate* isolate, const char* str) {
+  return isolate->factory()->NewStringFromAsciiChecked(str);
+}
+Local<String> v8_str(Isolate* isolate, const char* str) {
+  return Utils::ToLocal(v8_str(reinterpret_cast<i::Isolate*>(isolate), str));
+}
+
 struct RawBuffer {
   const byte* start;
   const byte* end;
@@ -80,7 +84,7 @@
   ErrorThrower thrower(isolate, "Wasm.verifyModule()");
 
   if (args.Length() < 1) {
-    thrower.Error("Argument 0 must be a buffer source");
+    thrower.TypeError("Argument 0 must be a buffer source");
     return;
   }
   RawBuffer buffer = GetRawBufferSource(args[0], &thrower);
@@ -104,7 +108,7 @@
   ErrorThrower thrower(isolate, "Wasm.verifyFunction()");
 
   if (args.Length() < 1) {
-    thrower.Error("Argument 0 must be a buffer source");
+    thrower.TypeError("Argument 0 must be a buffer source");
     return;
   }
   RawBuffer buffer = GetRawBufferSource(args[0], &thrower);
@@ -135,13 +139,11 @@
   // Decode but avoid a redundant pass over function bodies for verification.
   // Verification will happen during compilation.
   i::Zone zone(isolate->allocator());
-  internal::wasm::ModuleResult result = internal::wasm::DecodeWasmModule(
-      isolate, &zone, start, end, false, origin);
-
+  i::MaybeHandle<i::JSObject> module_object =
+      i::wasm::CreateModuleObjectFromBytes(isolate, start, end, thrower,
+                                           origin);
   i::MaybeHandle<i::JSObject> object;
-  if (result.failed()) {
-    thrower->Failed("", result);
-  } else {
+  if (!module_object.is_null()) {
     // Success. Instantiate the module and return the object.
     i::Handle<i::JSObject> ffi = i::Handle<i::JSObject>::null();
     if (args.Length() > 1 && args[1]->IsObject()) {
@@ -156,19 +158,12 @@
       memory = i::Handle<i::JSArrayBuffer>(i::JSArrayBuffer::cast(*mem_obj));
     }
 
-    i::MaybeHandle<i::FixedArray> compiled_module =
-        result.val->CompileFunctions(isolate, thrower);
-    if (!thrower->error()) {
-      DCHECK(!compiled_module.is_null());
-      object = i::wasm::WasmModule::Instantiate(
-          isolate, compiled_module.ToHandleChecked(), ffi, memory);
-      if (!object.is_null()) {
-        args.GetReturnValue().Set(v8::Utils::ToLocal(object.ToHandleChecked()));
-      }
+    object = i::wasm::WasmModule::Instantiate(
+        isolate, thrower, module_object.ToHandleChecked(), ffi, memory);
+    if (!object.is_null()) {
+      args.GetReturnValue().Set(v8::Utils::ToLocal(object.ToHandleChecked()));
     }
   }
-
-  if (result.val) delete result.val;
   return object;
 }
 
@@ -178,7 +173,7 @@
   ErrorThrower thrower(isolate, "Wasm.instantiateModule()");
 
   if (args.Length() < 1) {
-    thrower.Error("Argument 0 must be a buffer source");
+    thrower.TypeError("Argument 0 must be a buffer source");
     return;
   }
   RawBuffer buffer = GetRawBufferSource(args[0], &thrower);
@@ -197,20 +192,37 @@
   if (buffer.start == nullptr) return i::MaybeHandle<i::JSObject>();
 
   DCHECK(source->IsArrayBuffer() || source->IsTypedArray());
-  i::Zone zone(i_isolate->allocator());
-  i::wasm::ModuleResult result = i::wasm::DecodeWasmModule(
-      i_isolate, &zone, buffer.start, buffer.end, false, i::wasm::kWasmOrigin);
-  std::unique_ptr<const i::wasm::WasmModule> decoded_module(result.val);
-  if (result.failed()) {
-    thrower->Failed("", result);
-    return nothing;
-  }
-  i::MaybeHandle<i::FixedArray> compiled_module =
-      decoded_module->CompileFunctions(i_isolate, thrower);
-  if (compiled_module.is_null()) return nothing;
+  return i::wasm::CreateModuleObjectFromBytes(
+      i_isolate, buffer.start, buffer.end, thrower,
+      i::wasm::ModuleOrigin::kWasmOrigin);
+}
 
-  return i::wasm::CreateCompiledModuleObject(i_isolate,
-                                             compiled_module.ToHandleChecked());
+static bool ValidateModule(v8::Isolate* isolate,
+                           const v8::Local<v8::Value> source,
+                           ErrorThrower* thrower) {
+  i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
+  i::MaybeHandle<i::JSObject> nothing;
+
+  RawBuffer buffer = GetRawBufferSource(source, thrower);
+  if (buffer.start == nullptr) return false;
+
+  DCHECK(source->IsArrayBuffer() || source->IsTypedArray());
+  return i::wasm::ValidateModuleBytes(i_isolate, buffer.start, buffer.end,
+                                      thrower,
+                                      i::wasm::ModuleOrigin::kWasmOrigin);
+}
+
+bool BrandCheck(Isolate* isolate, i::Handle<i::Object> value,
+                i::Handle<i::Symbol> sym, const char* msg) {
+  if (value->IsJSObject()) {
+    i::Handle<i::JSObject> object = i::Handle<i::JSObject>::cast(value);
+    Maybe<bool> has_brand = i::JSObject::HasOwnProperty(object, sym);
+    if (has_brand.IsNothing()) return false;
+    if (has_brand.ToChecked()) return true;
+  }
+  v8::Local<v8::Value> e = v8::Exception::TypeError(v8_str(isolate, msg));
+  isolate->ThrowException(e);
+  return false;
 }
 
 void WebAssemblyCompile(const v8::FunctionCallbackInfo<v8::Value>& args) {
@@ -220,7 +232,7 @@
                        "WebAssembly.compile()");
 
   if (args.Length() < 1) {
-    thrower.Error("Argument 0 must be a buffer source");
+    thrower.TypeError("Argument 0 must be a buffer source");
     return;
   }
   i::MaybeHandle<i::JSObject> module_obj =
@@ -238,6 +250,25 @@
   return_value.Set(resolver->GetPromise());
 }
 
+void WebAssemblyValidate(const v8::FunctionCallbackInfo<v8::Value>& args) {
+  v8::Isolate* isolate = args.GetIsolate();
+  HandleScope scope(isolate);
+  ErrorThrower thrower(reinterpret_cast<i::Isolate*>(isolate),
+                       "WebAssembly.validate()");
+
+  if (args.Length() < 1) {
+    thrower.TypeError("Argument 0 must be a buffer source");
+    return;
+  }
+
+  v8::ReturnValue<v8::Value> return_value = args.GetReturnValue();
+  if (ValidateModule(isolate, args[0], &thrower)) {
+    return_value.Set(v8::True(isolate));
+  } else {
+    return_value.Set(v8::False(isolate));
+  }
+}
+
 void WebAssemblyModule(const v8::FunctionCallbackInfo<v8::Value>& args) {
   v8::Isolate* isolate = args.GetIsolate();
   HandleScope scope(isolate);
@@ -245,7 +276,7 @@
                        "WebAssembly.Module()");
 
   if (args.Length() < 1) {
-    thrower.Error("Argument 0 must be a buffer source");
+    thrower.TypeError("Argument 0 must be a buffer source");
     return;
   }
   i::MaybeHandle<i::JSObject> module_obj =
@@ -264,18 +295,15 @@
   ErrorThrower thrower(i_isolate, "WebAssembly.Instance()");
 
   if (args.Length() < 1) {
-    thrower.Error(
-        "Argument 0 must be provided, and must be a WebAssembly.Module object");
+    thrower.TypeError("Argument 0 must be a WebAssembly.Module");
     return;
   }
 
   Local<Context> context = isolate->GetCurrentContext();
   i::Handle<i::Context> i_context = Utils::OpenHandle(*context);
-  i::Handle<i::Symbol> module_sym(i_context->wasm_module_sym());
-  i::MaybeHandle<i::Object> source =
-      i::Object::GetProperty(Utils::OpenHandle(*args[0]), module_sym);
-  if (source.is_null() || source.ToHandleChecked()->IsUndefined(i_isolate)) {
-    thrower.Error("Argument 0 must be a WebAssembly.Module");
+  if (!BrandCheck(isolate, Utils::OpenHandle(*args[0]),
+                  i::Handle<i::Symbol>(i_context->wasm_module_sym()),
+                  "Argument 0 must be a WebAssembly.Module")) {
     return;
   }
 
@@ -285,13 +313,10 @@
       i::Handle<i::JSObject>::cast(v8::Utils::OpenHandle(*obj));
   if (module_obj->GetInternalFieldCount() < 1 ||
       !module_obj->GetInternalField(0)->IsFixedArray()) {
-    thrower.Error("Argument 0 is an invalid WebAssembly.Module");
+    thrower.TypeError("Argument 0 is an invalid WebAssembly.Module");
     return;
   }
 
-  i::Handle<i::FixedArray> compiled_code = i::Handle<i::FixedArray>(
-      i::FixedArray::cast(module_obj->GetInternalField(0)));
-
   i::Handle<i::JSReceiver> ffi = i::Handle<i::JSObject>::null();
   if (args.Length() > 1 && args[1]->IsObject()) {
     Local<Object> obj = Local<Object>::Cast(args[1]);
@@ -304,17 +329,211 @@
     i::Handle<i::Object> mem_obj = v8::Utils::OpenHandle(*obj);
     memory = i::Handle<i::JSArrayBuffer>(i::JSArrayBuffer::cast(*mem_obj));
   }
-  i::MaybeHandle<i::JSObject> instance =
-      i::wasm::WasmModule::Instantiate(i_isolate, compiled_code, ffi, memory);
+  i::MaybeHandle<i::JSObject> instance = i::wasm::WasmModule::Instantiate(
+      i_isolate, &thrower, module_obj, ffi, memory);
   if (instance.is_null()) {
-    thrower.Error("Could not instantiate module");
+    if (!thrower.error()) thrower.Error("Could not instantiate module");
     return;
   }
   v8::ReturnValue<v8::Value> return_value = args.GetReturnValue();
   return_value.Set(Utils::ToLocal(instance.ToHandleChecked()));
 }
+
+bool GetIntegerProperty(v8::Isolate* isolate, ErrorThrower* thrower,
+                        Local<Context> context, Local<v8::Object> object,
+                        Local<String> property, int* result, int lower_bound,
+                        int upper_bound) {
+  v8::MaybeLocal<v8::Value> maybe = object->Get(context, property);
+  v8::Local<v8::Value> value;
+  if (maybe.ToLocal(&value)) {
+    int64_t number;
+    if (!value->IntegerValue(context).To(&number)) return false;
+    if (number < static_cast<int64_t>(lower_bound)) {
+      thrower->RangeError("Property value %" PRId64
+                          " is below the lower bound %d",
+                          number, lower_bound);
+      return false;
+    }
+    if (number > static_cast<int64_t>(upper_bound)) {
+      thrower->RangeError("Property value %" PRId64
+                          " is above the upper bound %d",
+                          number, upper_bound);
+      return false;
+    }
+    *result = static_cast<int>(number);
+    return true;
+  }
+  return false;
+}
+
+void WebAssemblyTable(const v8::FunctionCallbackInfo<v8::Value>& args) {
+  v8::Isolate* isolate = args.GetIsolate();
+  HandleScope scope(isolate);
+  ErrorThrower thrower(reinterpret_cast<i::Isolate*>(isolate),
+                       "WebAssembly.Module()");
+  if (args.Length() < 1 || !args[0]->IsObject()) {
+    thrower.TypeError("Argument 0 must be a table descriptor");
+    return;
+  }
+  Local<Context> context = isolate->GetCurrentContext();
+  Local<v8::Object> descriptor = args[0]->ToObject(context).ToLocalChecked();
+  // The descriptor's 'element'.
+  {
+    v8::MaybeLocal<v8::Value> maybe =
+        descriptor->Get(context, v8_str(isolate, "element"));
+    v8::Local<v8::Value> value;
+    if (!maybe.ToLocal(&value)) return;
+    v8::Local<v8::String> string;
+    if (!value->ToString(context).ToLocal(&string)) return;
+    bool equal;
+    if (!string->Equals(context, v8_str(isolate, "anyfunc")).To(&equal)) return;
+    if (!equal) {
+      thrower.TypeError("Descriptor property 'element' must be 'anyfunc'");
+      return;
+    }
+  }
+  const int max_table_size = 1 << 26;
+  // The descriptor's 'initial'.
+  int initial;
+  if (!GetIntegerProperty(isolate, &thrower, context, descriptor,
+                          v8_str(isolate, "initial"), &initial, 0,
+                          max_table_size)) {
+    return;
+  }
+  // The descriptor's 'maximum'.
+  int maximum = 0;
+  Local<String> maximum_key = v8_str(isolate, "maximum");
+  Maybe<bool> has_maximum = descriptor->Has(context, maximum_key);
+
+  if (has_maximum.IsNothing()) {
+    // There has been an exception, just return.
+    return;
+  }
+  if (has_maximum.FromJust()) {
+    if (!GetIntegerProperty(isolate, &thrower, context, descriptor, maximum_key,
+                            &maximum, initial, max_table_size)) {
+      return;
+    }
+  }
+
+  i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
+  i::Handle<i::JSFunction> table_ctor(
+      i_isolate->native_context()->wasm_table_constructor());
+  i::Handle<i::JSObject> table_obj =
+      i_isolate->factory()->NewJSObject(table_ctor);
+  i::Handle<i::FixedArray> fixed_array =
+      i_isolate->factory()->NewFixedArray(initial);
+  i::Object* null = i_isolate->heap()->null_value();
+  for (int i = 0; i < initial; ++i) fixed_array->set(i, null);
+  table_obj->SetInternalField(0, *fixed_array);
+  table_obj->SetInternalField(
+      1, has_maximum.FromJust()
+             ? static_cast<i::Object*>(i::Smi::FromInt(maximum))
+             : static_cast<i::Object*>(i_isolate->heap()->undefined_value()));
+  i::Handle<i::Symbol> table_sym(i_isolate->native_context()->wasm_table_sym());
+  i::Object::SetProperty(table_obj, table_sym, table_obj, i::STRICT).Check();
+  v8::ReturnValue<v8::Value> return_value = args.GetReturnValue();
+  return_value.Set(Utils::ToLocal(table_obj));
+}
+
+void WebAssemblyMemory(const v8::FunctionCallbackInfo<v8::Value>& args) {
+  v8::Isolate* isolate = args.GetIsolate();
+  HandleScope scope(isolate);
+  ErrorThrower thrower(reinterpret_cast<i::Isolate*>(isolate),
+                       "WebAssembly.Memory()");
+  if (args.Length() < 1 || !args[0]->IsObject()) {
+    thrower.TypeError("Argument 0 must be a memory descriptor");
+    return;
+  }
+  Local<Context> context = isolate->GetCurrentContext();
+  Local<v8::Object> descriptor = args[0]->ToObject(context).ToLocalChecked();
+  // The descriptor's 'initial'.
+  int initial;
+  if (!GetIntegerProperty(isolate, &thrower, context, descriptor,
+                          v8_str(isolate, "initial"), &initial, 0, 65536)) {
+    return;
+  }
+  // The descriptor's 'maximum'.
+  int maximum = 0;
+  Local<String> maximum_key = v8_str(isolate, "maximum");
+  Maybe<bool> has_maximum = descriptor->Has(context, maximum_key);
+
+  if (has_maximum.IsNothing()) {
+    // There has been an exception, just return.
+    return;
+  }
+  if (has_maximum.FromJust()) {
+    if (!GetIntegerProperty(isolate, &thrower, context, descriptor, maximum_key,
+                            &maximum, initial, 65536)) {
+      return;
+    }
+  }
+  i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
+  i::Handle<i::JSArrayBuffer> buffer =
+      i_isolate->factory()->NewJSArrayBuffer(i::SharedFlag::kNotShared);
+  size_t size = static_cast<size_t>(i::wasm::WasmModule::kPageSize) *
+                static_cast<size_t>(initial);
+  i::JSArrayBuffer::SetupAllocatingData(buffer, i_isolate, size);
+
+  i::Handle<i::JSObject> memory_obj = i::WasmJs::CreateWasmMemoryObject(
+      i_isolate, buffer, has_maximum.FromJust(), maximum);
+  v8::ReturnValue<v8::Value> return_value = args.GetReturnValue();
+  return_value.Set(Utils::ToLocal(memory_obj));
+}
+void WebAssemblyTableGetLength(
+    const v8::FunctionCallbackInfo<v8::Value>& args) {
+  // TODO(rossberg)
+}
+void WebAssemblyTableGrow(const v8::FunctionCallbackInfo<v8::Value>& args) {
+  // TODO(rossberg)
+}
+void WebAssemblyTableGet(const v8::FunctionCallbackInfo<v8::Value>& args) {
+  // TODO(rossberg)
+}
+void WebAssemblyTableSet(const v8::FunctionCallbackInfo<v8::Value>& args) {
+  // TODO(rossberg)
+}
+void WebAssemblyMemoryGrow(const v8::FunctionCallbackInfo<v8::Value>& args) {
+  // TODO(rossberg)
+}
+void WebAssemblyMemoryGetBuffer(
+    const v8::FunctionCallbackInfo<v8::Value>& args) {
+  v8::Isolate* isolate = args.GetIsolate();
+  Local<Context> context = isolate->GetCurrentContext();
+  i::Handle<i::Context> i_context = Utils::OpenHandle(*context);
+  if (!BrandCheck(isolate, Utils::OpenHandle(*args.This()),
+                  i::Handle<i::Symbol>(i_context->wasm_memory_sym()),
+                  "Receiver is not a WebAssembly.Memory")) {
+    return;
+  }
+  i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
+  i::Handle<i::JSObject> receiver =
+      i::Handle<i::JSObject>::cast(Utils::OpenHandle(*args.This()));
+  i::Handle<i::Object> buffer(receiver->GetInternalField(0), i_isolate);
+  DCHECK(buffer->IsJSArrayBuffer());
+  v8::ReturnValue<v8::Value> return_value = args.GetReturnValue();
+  return_value.Set(Utils::ToLocal(buffer));
+}
 }  // namespace
 
+i::Handle<i::JSObject> i::WasmJs::CreateWasmMemoryObject(
+    i::Isolate* i_isolate, i::Handle<i::JSArrayBuffer> buffer, bool has_maximum,
+    int maximum) {
+  i::Handle<i::JSFunction> memory_ctor(
+      i_isolate->native_context()->wasm_memory_constructor());
+  i::Handle<i::JSObject> memory_obj =
+      i_isolate->factory()->NewJSObject(memory_ctor);
+  memory_obj->SetInternalField(0, *buffer);
+  memory_obj->SetInternalField(
+      1, has_maximum
+             ? static_cast<i::Object*>(i::Smi::FromInt(maximum))
+             : static_cast<i::Object*>(i_isolate->heap()->undefined_value()));
+  i::Handle<i::Symbol> memory_sym(
+      i_isolate->native_context()->wasm_memory_sym());
+  i::Object::SetProperty(memory_obj, memory_sym, memory_obj, i::STRICT).Check();
+  return memory_obj;
+}
+
 // TODO(titzer): we use the API to create the function template because the
 // internal guts are too ugly to replicate here.
 static i::Handle<i::FunctionTemplateInfo> NewTemplate(i::Isolate* i_isolate,
@@ -325,12 +544,9 @@
 }
 
 namespace internal {
-static Handle<String> v8_str(Isolate* isolate, const char* str) {
-  return isolate->factory()->NewStringFromAsciiChecked(str);
-}
 
-static Handle<JSFunction> InstallFunc(Isolate* isolate, Handle<JSObject> object,
-                                      const char* str, FunctionCallback func) {
+Handle<JSFunction> InstallFunc(Isolate* isolate, Handle<JSObject> object,
+                               const char* str, FunctionCallback func) {
   Handle<String> name = v8_str(isolate, str);
   Handle<FunctionTemplateInfo> temp = NewTemplate(isolate, func);
   Handle<JSFunction> function =
@@ -341,6 +557,112 @@
   return function;
 }
 
+Handle<JSFunction> InstallGetter(Isolate* isolate, Handle<JSObject> object,
+                                 const char* str, FunctionCallback func) {
+  Handle<String> name = v8_str(isolate, str);
+  Handle<FunctionTemplateInfo> temp = NewTemplate(isolate, func);
+  Handle<JSFunction> function =
+      ApiNatives::InstantiateFunction(temp).ToHandleChecked();
+  v8::PropertyAttribute attributes =
+      static_cast<v8::PropertyAttribute>(v8::DontDelete | v8::ReadOnly);
+  Utils::ToLocal(object)->SetAccessorProperty(Utils::ToLocal(name),
+                                              Utils::ToLocal(function),
+                                              Local<Function>(), attributes);
+  return function;
+}
+
+void WasmJs::InstallWasmModuleSymbolIfNeeded(Isolate* isolate,
+                                             Handle<JSGlobalObject> global,
+                                             Handle<Context> context) {
+  if (!context->get(Context::WASM_MODULE_SYM_INDEX)->IsSymbol() ||
+      !context->get(Context::WASM_INSTANCE_SYM_INDEX)->IsSymbol()) {
+    InstallWasmMapsIfNeeded(isolate, isolate->native_context());
+    InstallWasmConstructors(isolate, isolate->global_object(),
+                            isolate->native_context());
+  }
+}
+
+void WasmJs::InstallWasmConstructors(Isolate* isolate,
+                                     Handle<JSGlobalObject> global,
+                                     Handle<Context> context) {
+  Factory* factory = isolate->factory();
+  // Create private symbols.
+  Handle<Symbol> module_sym = factory->NewPrivateSymbol();
+  context->set_wasm_module_sym(*module_sym);
+
+  Handle<Symbol> instance_sym = factory->NewPrivateSymbol();
+  context->set_wasm_instance_sym(*instance_sym);
+
+  Handle<Symbol> table_sym = factory->NewPrivateSymbol();
+  context->set_wasm_table_sym(*table_sym);
+
+  Handle<Symbol> memory_sym = factory->NewPrivateSymbol();
+  context->set_wasm_memory_sym(*memory_sym);
+
+  // Bind the WebAssembly object.
+  Handle<String> name = v8_str(isolate, "WebAssembly");
+  Handle<JSFunction> cons = factory->NewFunction(name);
+  JSFunction::SetInstancePrototype(
+      cons, Handle<Object>(context->initial_object_prototype(), isolate));
+  cons->shared()->set_instance_class_name(*name);
+  Handle<JSObject> wasm_object = factory->NewJSObject(cons, TENURED);
+  PropertyAttributes attributes = static_cast<PropertyAttributes>(DONT_ENUM);
+  JSObject::AddProperty(global, name, wasm_object, attributes);
+
+  // Setup compile
+  InstallFunc(isolate, wasm_object, "compile", WebAssemblyCompile);
+
+  // Setup validate
+  InstallFunc(isolate, wasm_object, "validate", WebAssemblyValidate);
+
+  // Setup Module
+  Handle<JSFunction> module_constructor =
+      InstallFunc(isolate, wasm_object, "Module", WebAssemblyModule);
+  context->set_wasm_module_constructor(*module_constructor);
+  Handle<JSObject> module_proto =
+      factory->NewJSObject(module_constructor, TENURED);
+  i::Handle<i::Map> map = isolate->factory()->NewMap(
+      i::JS_OBJECT_TYPE, i::JSObject::kHeaderSize + i::kPointerSize);
+  JSFunction::SetInitialMap(module_constructor, map, module_proto);
+  JSObject::AddProperty(module_proto, isolate->factory()->constructor_string(),
+                        module_constructor, DONT_ENUM);
+
+  // Setup Instance
+  Handle<JSFunction> instance_constructor =
+      InstallFunc(isolate, wasm_object, "Instance", WebAssemblyInstance);
+  context->set_wasm_instance_constructor(*instance_constructor);
+
+  // Setup Table
+  Handle<JSFunction> table_constructor =
+      InstallFunc(isolate, wasm_object, "Table", WebAssemblyTable);
+  context->set_wasm_table_constructor(*table_constructor);
+  Handle<JSObject> table_proto =
+      factory->NewJSObject(table_constructor, TENURED);
+  map = isolate->factory()->NewMap(
+      i::JS_OBJECT_TYPE, i::JSObject::kHeaderSize + 2 * i::kPointerSize);
+  JSFunction::SetInitialMap(table_constructor, map, table_proto);
+  JSObject::AddProperty(table_proto, isolate->factory()->constructor_string(),
+                        table_constructor, DONT_ENUM);
+  InstallGetter(isolate, table_proto, "length", WebAssemblyTableGetLength);
+  InstallFunc(isolate, table_proto, "grow", WebAssemblyTableGrow);
+  InstallFunc(isolate, table_proto, "get", WebAssemblyTableGet);
+  InstallFunc(isolate, table_proto, "set", WebAssemblyTableSet);
+
+  // Setup Memory
+  Handle<JSFunction> memory_constructor =
+      InstallFunc(isolate, wasm_object, "Memory", WebAssemblyMemory);
+  context->set_wasm_memory_constructor(*memory_constructor);
+  Handle<JSObject> memory_proto =
+      factory->NewJSObject(memory_constructor, TENURED);
+  map = isolate->factory()->NewMap(
+      i::JS_OBJECT_TYPE, i::JSObject::kHeaderSize + 2 * i::kPointerSize);
+  JSFunction::SetInitialMap(memory_constructor, map, memory_proto);
+  JSObject::AddProperty(memory_proto, isolate->factory()->constructor_string(),
+                        memory_constructor, DONT_ENUM);
+  InstallFunc(isolate, memory_proto, "grow", WebAssemblyMemoryGrow);
+  InstallGetter(isolate, memory_proto, "buffer", WebAssemblyMemoryGetBuffer);
+}
+
 void WasmJs::Install(Isolate* isolate, Handle<JSGlobalObject> global) {
   if (!FLAG_expose_wasm && !FLAG_validate_asm) {
     return;
@@ -350,7 +672,7 @@
 
   // Setup wasm function map.
   Handle<Context> context(global->native_context(), isolate);
-  InstallWasmFunctionMap(isolate, context);
+  InstallWasmMapsIfNeeded(isolate, context);
 
   if (!FLAG_expose_wasm) {
     return;
@@ -383,39 +705,11 @@
       JSObject::AddProperty(wasm_object, name, value, attributes);
     }
   }
-
-  // Create private symbols.
-  Handle<Symbol> module_sym = isolate->factory()->NewPrivateSymbol();
-  Handle<Symbol> instance_sym = isolate->factory()->NewPrivateSymbol();
-  context->set_wasm_module_sym(*module_sym);
-  context->set_wasm_instance_sym(*instance_sym);
-
-  // Bind the WebAssembly object.
-  Handle<String> name = v8_str(isolate, "WebAssembly");
-  Handle<JSFunction> cons = factory->NewFunction(name);
-  JSFunction::SetInstancePrototype(
-      cons, Handle<Object>(context->initial_object_prototype(), isolate));
-  cons->shared()->set_instance_class_name(*name);
-  Handle<JSObject> wasm_object = factory->NewJSObject(cons, TENURED);
-  PropertyAttributes attributes = static_cast<PropertyAttributes>(DONT_ENUM);
-  JSObject::AddProperty(global, name, wasm_object, attributes);
-
-  // Install static methods on WebAssembly object.
-  InstallFunc(isolate, wasm_object, "compile", WebAssemblyCompile);
-  Handle<JSFunction> module_constructor =
-      InstallFunc(isolate, wasm_object, "Module", WebAssemblyModule);
-  Handle<JSFunction> instance_constructor =
-      InstallFunc(isolate, wasm_object, "Instance", WebAssemblyInstance);
-  i::Handle<i::Map> map = isolate->factory()->NewMap(
-      i::JS_OBJECT_TYPE, i::JSObject::kHeaderSize + i::kPointerSize);
-  module_constructor->set_prototype_or_initial_map(*map);
-  map->SetConstructor(*module_constructor);
-
-  context->set_wasm_module_constructor(*module_constructor);
-  context->set_wasm_instance_constructor(*instance_constructor);
+  InstallWasmConstructors(isolate, global, context);
 }
 
-void WasmJs::InstallWasmFunctionMap(Isolate* isolate, Handle<Context> context) {
+void WasmJs::InstallWasmMapsIfNeeded(Isolate* isolate,
+                                     Handle<Context> context) {
   if (!context->get(Context::WASM_FUNCTION_MAP_INDEX)->IsMap()) {
     // TODO(titzer): Move this to bootstrapper.cc??
     // TODO(titzer): Also make one for strict mode functions?
diff --git a/src/wasm/wasm-js.h b/src/wasm/wasm-js.h
index ded9a1a..4f26494 100644
--- a/src/wasm/wasm-js.h
+++ b/src/wasm/wasm-js.h
@@ -5,13 +5,8 @@
 #ifndef V8_WASM_JS_H_
 #define V8_WASM_JS_H_
 
-#ifndef V8_SHARED
 #include "src/allocation.h"
 #include "src/base/hashmap.h"
-#else
-#include "include/v8.h"
-#include "src/base/compiler-specific.h"
-#endif  // !V8_SHARED
 
 namespace v8 {
 namespace internal {
@@ -19,7 +14,19 @@
 class WasmJs {
  public:
   static void Install(Isolate* isolate, Handle<JSGlobalObject> global_object);
-  static void InstallWasmFunctionMap(Isolate* isolate, Handle<Context> context);
+
+  V8_EXPORT_PRIVATE static void InstallWasmModuleSymbolIfNeeded(
+      Isolate* isolate, Handle<JSGlobalObject> global, Handle<Context> context);
+
+  V8_EXPORT_PRIVATE static void InstallWasmMapsIfNeeded(
+      Isolate* isolate, Handle<Context> context);
+  static void InstallWasmConstructors(Isolate* isolate,
+                                      Handle<JSGlobalObject> global,
+                                      Handle<Context> context);
+
+  static Handle<JSObject> CreateWasmMemoryObject(Isolate* isolate,
+                                                 Handle<JSArrayBuffer> buffer,
+                                                 bool has_maximum, int maximum);
 };
 
 }  // namespace internal
diff --git a/src/wasm/wasm-macro-gen.h b/src/wasm/wasm-macro-gen.h
index abd57d5..fd10a39 100644
--- a/src/wasm/wasm-macro-gen.h
+++ b/src/wasm/wasm-macro-gen.h
@@ -7,7 +7,7 @@
 
 #include "src/wasm/wasm-opcodes.h"
 
-#include "src/zone-containers.h"
+#include "src/zone/zone-containers.h"
 
 #define U32_LE(v)                                    \
   static_cast<byte>(v), static_cast<byte>((v) >> 8), \
@@ -17,17 +17,17 @@
 
 #define WASM_MODULE_HEADER U32_LE(kWasmMagic), U32_LE(kWasmVersion)
 
-#define SIG_INDEX(v) U16_LE(v)
-// TODO(binji): make SIG_INDEX match this.
 #define IMPORT_SIG_INDEX(v) U32V_1(v)
 #define FUNC_INDEX(v) U32V_1(v)
+#define TABLE_INDEX(v) U32V_1(v)
 #define NO_NAME U32V_1(0)
 #define NAME_LENGTH(v) U32V_1(v)
+#define ENTRY_COUNT(v) U32V_1(v)
 
 #define ZERO_ALIGNMENT 0
 #define ZERO_OFFSET 0
 
-#define BR_TARGET(v) U32_LE(v)
+#define BR_TARGET(v) U32V_1(v)
 
 #define MASK_7 ((1 << 7) - 1)
 #define MASK_14 ((1 << 14) - 1)
@@ -62,36 +62,76 @@
 
 #define ARITY_0 0
 #define ARITY_1 1
+#define ARITY_2 2
 #define DEPTH_0 0
 #define DEPTH_1 1
+#define DEPTH_2 2
+#define ARITY_2 2
 
-#define WASM_BLOCK(...) kExprBlock, __VA_ARGS__, kExprEnd
-#define WASM_INFINITE_LOOP kExprLoop, kExprBr, ARITY_0, DEPTH_0, kExprEnd
-#define WASM_LOOP(...) kExprLoop, __VA_ARGS__, kExprEnd
-#define WASM_IF(cond, tstmt) cond, kExprIf, tstmt, kExprEnd
+#define WASM_BLOCK(...) kExprBlock, kLocalVoid, __VA_ARGS__, kExprEnd
+
+#define WASM_BLOCK_T(t, ...)                                       \
+  kExprBlock, static_cast<byte>(WasmOpcodes::LocalTypeCodeFor(t)), \
+      __VA_ARGS__, kExprEnd
+
+#define WASM_BLOCK_TT(t1, t2, ...)                                       \
+  kExprBlock, kMultivalBlock, 0,                                         \
+      static_cast<byte>(WasmOpcodes::LocalTypeCodeFor(t1)),              \
+      static_cast<byte>(WasmOpcodes::LocalTypeCodeFor(t2)), __VA_ARGS__, \
+      kExprEnd
+
+#define WASM_BLOCK_I(...) kExprBlock, kLocalI32, __VA_ARGS__, kExprEnd
+#define WASM_BLOCK_L(...) kExprBlock, kLocalI64, __VA_ARGS__, kExprEnd
+#define WASM_BLOCK_F(...) kExprBlock, kLocalF32, __VA_ARGS__, kExprEnd
+#define WASM_BLOCK_D(...) kExprBlock, kLocalF64, __VA_ARGS__, kExprEnd
+
+#define WASM_INFINITE_LOOP kExprLoop, kLocalVoid, kExprBr, DEPTH_0, kExprEnd
+
+#define WASM_LOOP(...) kExprLoop, kLocalVoid, __VA_ARGS__, kExprEnd
+#define WASM_LOOP_I(...) kExprLoop, kLocalI32, __VA_ARGS__, kExprEnd
+#define WASM_LOOP_L(...) kExprLoop, kLocalI64, __VA_ARGS__, kExprEnd
+#define WASM_LOOP_F(...) kExprLoop, kLocalF32, __VA_ARGS__, kExprEnd
+#define WASM_LOOP_D(...) kExprLoop, kLocalF64, __VA_ARGS__, kExprEnd
+
+#define WASM_IF(cond, tstmt) cond, kExprIf, kLocalVoid, tstmt, kExprEnd
+
 #define WASM_IF_ELSE(cond, tstmt, fstmt) \
-  cond, kExprIf, tstmt, kExprElse, fstmt, kExprEnd
+  cond, kExprIf, kLocalVoid, tstmt, kExprElse, fstmt, kExprEnd
+
+#define WASM_IF_ELSE_T(t, cond, tstmt, fstmt)                                \
+  cond, kExprIf, static_cast<byte>(WasmOpcodes::LocalTypeCodeFor(t)), tstmt, \
+      kExprElse, fstmt, kExprEnd
+
+#define WASM_IF_ELSE_TT(t1, t2, cond, tstmt, fstmt)                           \
+  cond, kExprIf, kMultivalBlock, 0,                                           \
+      static_cast<byte>(WasmOpcodes::LocalTypeCodeFor(t1)),                   \
+      static_cast<byte>(WasmOpcodes::LocalTypeCodeFor(t2)), tstmt, kExprElse, \
+      fstmt, kExprEnd
+
+#define WASM_IF_ELSE_I(cond, tstmt, fstmt) \
+  cond, kExprIf, kLocalI32, tstmt, kExprElse, fstmt, kExprEnd
+#define WASM_IF_ELSE_L(cond, tstmt, fstmt) \
+  cond, kExprIf, kLocalI64, tstmt, kExprElse, fstmt, kExprEnd
+#define WASM_IF_ELSE_F(cond, tstmt, fstmt) \
+  cond, kExprIf, kLocalF32, tstmt, kExprElse, fstmt, kExprEnd
+#define WASM_IF_ELSE_D(cond, tstmt, fstmt) \
+  cond, kExprIf, kLocalF64, tstmt, kExprElse, fstmt, kExprEnd
+
 #define WASM_SELECT(tval, fval, cond) tval, fval, cond, kExprSelect
-#define WASM_BR(depth) kExprBr, ARITY_0, static_cast<byte>(depth)
-#define WASM_BR_IF(depth, cond) \
-  cond, kExprBrIf, ARITY_0, static_cast<byte>(depth)
-#define WASM_BRV(depth, val) val, kExprBr, ARITY_1, static_cast<byte>(depth)
-#define WASM_BRV_IF(depth, val, cond) \
-  val, cond, kExprBrIf, ARITY_1, static_cast<byte>(depth)
-#define WASM_BREAK(depth) kExprBr, ARITY_0, static_cast<byte>(depth + 1)
-#define WASM_CONTINUE(depth) kExprBr, ARITY_0, static_cast<byte>(depth)
-#define WASM_BREAKV(depth, val) \
-  val, kExprBr, ARITY_1, static_cast<byte>(depth + 1)
-#define WASM_RETURN0 kExprReturn, ARITY_0
-#define WASM_RETURN1(val) val, kExprReturn, ARITY_1
-#define WASM_RETURNN(count, ...) __VA_ARGS__, kExprReturn, count
+
+#define WASM_RETURN0 kExprReturn
+#define WASM_RETURN1(val) val, kExprReturn
+#define WASM_RETURNN(count, ...) __VA_ARGS__, kExprReturn
+
+#define WASM_BR(depth) kExprBr, static_cast<byte>(depth)
+#define WASM_BR_IF(depth, cond) cond, kExprBrIf, static_cast<byte>(depth)
+#define WASM_BR_IFD(depth, val, cond) \
+  val, cond, kExprBrIf, static_cast<byte>(depth), kExprDrop
+#define WASM_CONTINUE(depth) kExprBr, static_cast<byte>(depth)
 #define WASM_UNREACHABLE kExprUnreachable
 
 #define WASM_BR_TABLE(key, count, ...) \
-  key, kExprBrTable, ARITY_0, U32V_1(count), __VA_ARGS__
-
-#define WASM_BR_TABLEV(val, key, count, ...) \
-  val, key, kExprBrTable, ARITY_1, U32V_1(count), __VA_ARGS__
+  key, kExprBrTable, U32V_1(count), __VA_ARGS__
 
 #define WASM_CASE(x) static_cast<byte>(x), static_cast<byte>(x >> 8)
 #define WASM_CASE_BR(x) static_cast<byte>(x), static_cast<byte>(0x80 | (x) >> 8)
@@ -343,6 +383,8 @@
       static_cast<byte>(bit_cast<uint64_t>(val) >> 56)
 #define WASM_GET_LOCAL(index) kExprGetLocal, static_cast<byte>(index)
 #define WASM_SET_LOCAL(index, val) val, kExprSetLocal, static_cast<byte>(index)
+#define WASM_TEE_LOCAL(index, val) val, kExprTeeLocal, static_cast<byte>(index)
+#define WASM_DROP kExprDrop
 #define WASM_GET_GLOBAL(index) kExprGetGlobal, static_cast<byte>(index)
 #define WASM_SET_GLOBAL(index, val) \
   val, kExprSetGlobal, static_cast<byte>(index)
@@ -374,49 +416,25 @@
           v8::internal::wasm::WasmOpcodes::LoadStoreOpcodeOf(type, true)), \
       alignment, ZERO_OFFSET
 
-#define WASM_CALL_FUNCTION0(index) \
-  kExprCallFunction, 0, static_cast<byte>(index)
-#define WASM_CALL_FUNCTION1(index, a) \
-  a, kExprCallFunction, 1, static_cast<byte>(index)
-#define WASM_CALL_FUNCTION2(index, a, b) \
-  a, b, kExprCallFunction, 2, static_cast<byte>(index)
-#define WASM_CALL_FUNCTION3(index, a, b, c) \
-  a, b, c, kExprCallFunction, 3, static_cast<byte>(index)
-#define WASM_CALL_FUNCTION4(index, a, b, c, d) \
-  a, b, c, d, kExprCallFunction, 4, static_cast<byte>(index)
-#define WASM_CALL_FUNCTION5(index, a, b, c, d, e) \
-  kExprCallFunction, 5, static_cast<byte>(index)
-#define WASM_CALL_FUNCTIONN(arity, index, ...) \
-  __VA_ARGS__, kExprCallFunction, arity, static_cast<byte>(index)
+#define WASM_CALL_FUNCTION0(index) kExprCallFunction, static_cast<byte>(index)
+#define WASM_CALL_FUNCTION(index, ...) \
+  __VA_ARGS__, kExprCallFunction, static_cast<byte>(index)
 
-#define WASM_CALL_IMPORT0(index) kExprCallImport, 0, static_cast<byte>(index)
-#define WASM_CALL_IMPORT1(index, a) \
-  a, kExprCallImport, 1, static_cast<byte>(index)
-#define WASM_CALL_IMPORT2(index, a, b) \
-  a, b, kExprCallImport, 2, static_cast<byte>(index)
-#define WASM_CALL_IMPORT3(index, a, b, c) \
-  a, b, c, kExprCallImport, 3, static_cast<byte>(index)
-#define WASM_CALL_IMPORT4(index, a, b, c, d) \
-  a, b, c, d, kExprCallImport, 4, static_cast<byte>(index)
-#define WASM_CALL_IMPORT5(index, a, b, c, d, e) \
-  a, b, c, d, e, kExprCallImport, 5, static_cast<byte>(index)
-#define WASM_CALL_IMPORTN(arity, index, ...) \
-  __VA_ARGS__, kExprCallImport, U32V_1(arity), static_cast<byte>(index),
-
+// TODO(titzer): change usages of these macros to put func last.
 #define WASM_CALL_INDIRECT0(index, func) \
-  func, kExprCallIndirect, 0, static_cast<byte>(index)
+  func, kExprCallIndirect, static_cast<byte>(index)
 #define WASM_CALL_INDIRECT1(index, func, a) \
-  func, a, kExprCallIndirect, 1, static_cast<byte>(index)
+  a, func, kExprCallIndirect, static_cast<byte>(index)
 #define WASM_CALL_INDIRECT2(index, func, a, b) \
-  func, a, b, kExprCallIndirect, 2, static_cast<byte>(index)
+  a, b, func, kExprCallIndirect, static_cast<byte>(index)
 #define WASM_CALL_INDIRECT3(index, func, a, b, c) \
-  func, a, b, c, kExprCallIndirect, 3, static_cast<byte>(index)
+  a, b, c, func, kExprCallIndirect, static_cast<byte>(index)
 #define WASM_CALL_INDIRECT4(index, func, a, b, c, d) \
-  func, a, b, c, d, kExprCallIndirect, 4, static_cast<byte>(index)
+  a, b, c, d, func, kExprCallIndirect, static_cast<byte>(index)
 #define WASM_CALL_INDIRECT5(index, func, a, b, c, d, e) \
-  func, a, b, c, d, e, kExprCallIndirect, 5, static_cast<byte>(index)
+  a, b, c, d, e, func, kExprCallIndirect, static_cast<byte>(index)
 #define WASM_CALL_INDIRECTN(arity, index, func, ...) \
-  func, __VA_ARGS__, kExprCallIndirect, U32V_1(arity), static_cast<byte>(index)
+  __VA_ARGS__, func, kExprCallIndirect, static_cast<byte>(index)
 
 #define WASM_NOT(x) x, kExprI32Eqz
 #define WASM_SEQ(...) __VA_ARGS__
@@ -424,11 +442,16 @@
 //------------------------------------------------------------------------------
 // Constructs that are composed of multiple bytecodes.
 //------------------------------------------------------------------------------
-#define WASM_WHILE(x, y) \
-  kExprLoop, x, kExprIf, y, kExprBr, ARITY_1, DEPTH_1, kExprEnd, kExprEnd
+#define WASM_WHILE(x, y)                                              \
+  kExprLoop, kLocalVoid, x, kExprIf, kLocalVoid, y, kExprBr, DEPTH_1, \
+      kExprEnd, kExprEnd
 #define WASM_INC_LOCAL(index)                                            \
   kExprGetLocal, static_cast<byte>(index), kExprI8Const, 1, kExprI32Add, \
-      kExprSetLocal, static_cast<byte>(index)
+      kExprTeeLocal, static_cast<byte>(index)
+#define WASM_INC_LOCAL_BYV(index, count)                    \
+  kExprGetLocal, static_cast<byte>(index), kExprI8Const,    \
+      static_cast<byte>(count), kExprI32Add, kExprTeeLocal, \
+      static_cast<byte>(index)
 #define WASM_INC_LOCAL_BY(index, count)                     \
   kExprGetLocal, static_cast<byte>(index), kExprI8Const,    \
       static_cast<byte>(count), kExprI32Add, kExprSetLocal, \
@@ -580,11 +603,17 @@
 #define WASM_I64_REINTERPRET_F64(x) x, kExprI64ReinterpretF64
 
 //------------------------------------------------------------------------------
+// Memory Operations.
+//------------------------------------------------------------------------------
+#define WASM_GROW_MEMORY(x) x, kExprGrowMemory
+#define WASM_MEMORY_SIZE kExprMemorySize
+
+//------------------------------------------------------------------------------
 // Simd Operations.
 //------------------------------------------------------------------------------
 #define WASM_SIMD_I32x4_SPLAT(x) x, kSimdPrefix, kExprI32x4Splat & 0xff
-#define WASM_SIMD_I32x4_EXTRACT_LANE(x, y) \
-  x, y, kSimdPrefix, kExprI32x4ExtractLane & 0xff
+#define WASM_SIMD_I32x4_EXTRACT_LANE(lane, x) \
+  x, kSimdPrefix, kExprI32x4ExtractLane & 0xff, static_cast<byte>(lane)
 
 #define SIG_ENTRY_v_v kWasmFunctionTypeForm, 0, 0
 #define SIZEOF_SIG_ENTRY_v_v 3
@@ -605,4 +634,13 @@
 #define SIZEOF_SIG_ENTRY_x_xx 6
 #define SIZEOF_SIG_ENTRY_x_xxx 7
 
+#define WASM_BRV(depth, val) val, kExprBr, static_cast<byte>(depth)
+#define WASM_BRV_IF(depth, val, cond) \
+  val, cond, kExprBrIf, static_cast<byte>(depth)
+#define WASM_BRV_IFD(depth, val, cond) \
+  val, cond, kExprBrIf, static_cast<byte>(depth), kExprDrop
+#define WASM_IFB(cond, ...) cond, kExprIf, kLocalVoid, __VA_ARGS__, kExprEnd
+#define WASM_BR_TABLEV(val, key, count, ...) \
+  val, key, kExprBrTable, U32V_1(count), __VA_ARGS__
+
 #endif  // V8_WASM_MACRO_GEN_H_
diff --git a/src/wasm/encoder.cc b/src/wasm/wasm-module-builder.cc
similarity index 61%
rename from src/wasm/encoder.cc
rename to src/wasm/wasm-module-builder.cc
index ef0bddc..084f5a0 100644
--- a/src/wasm/encoder.cc
+++ b/src/wasm/wasm-module-builder.cc
@@ -6,12 +6,12 @@
 
 #include "src/handles.h"
 #include "src/v8.h"
-#include "src/zone-containers.h"
+#include "src/zone/zone-containers.h"
 
 #include "src/wasm/ast-decoder.h"
-#include "src/wasm/encoder.h"
 #include "src/wasm/leb-helper.h"
 #include "src/wasm/wasm-macro-gen.h"
+#include "src/wasm/wasm-module-builder.h"
 #include "src/wasm/wasm-module.h"
 #include "src/wasm/wasm-opcodes.h"
 
@@ -30,15 +30,11 @@
 namespace internal {
 namespace wasm {
 
-// Emit a section name and the size as a padded varint that can be patched
+// Emit a section code and the size as a padded varint that can be patched
 // later.
-size_t EmitSection(WasmSection::Code code, ZoneBuffer& buffer) {
-  // Emit the section name.
-  const char* name = WasmSection::getName(code);
-  TRACE("emit section: %s\n", name);
-  size_t length = WasmSection::getNameLength(code);
-  buffer.write_size(length);  // Section name string size.
-  buffer.write(reinterpret_cast<const byte*>(name), length);
+size_t EmitSection(WasmSectionCode code, ZoneBuffer& buffer) {
+  // Emit the section code.
+  buffer.write_u8(code);
 
   // Emit a placeholder for the length.
   return buffer.reserve_u32v();
@@ -55,8 +51,14 @@
       locals_(builder->zone()),
       signature_index_(0),
       exported_(0),
+      func_index_(static_cast<uint32_t>(builder->functions_.size())),
       body_(builder->zone()),
-      name_(builder->zone()) {}
+      name_(builder->zone()),
+      i32_temps_(builder->zone()),
+      i64_temps_(builder->zone()),
+      f32_temps_(builder->zone()),
+      f64_temps_(builder->zone()),
+      direct_calls_(builder->zone()) {}
 
 void WasmFunctionBuilder::EmitVarInt(uint32_t val) {
   byte buffer[8];
@@ -86,6 +88,10 @@
   EmitWithVarInt(kExprSetLocal, local_index);
 }
 
+void WasmFunctionBuilder::EmitTeeLocal(uint32_t local_index) {
+  EmitWithVarInt(kExprTeeLocal, local_index);
+}
+
 void WasmFunctionBuilder::EmitCode(const byte* code, uint32_t code_size) {
   for (size_t i = 0; i < code_size; ++i) {
     body_.push_back(code[i]);
@@ -124,6 +130,15 @@
   }
 }
 
+void WasmFunctionBuilder::EmitDirectCallIndex(uint32_t index) {
+  DirectCallIndex call;
+  call.offset = body_.size();
+  call.direct_index = index;
+  direct_calls_.push_back(call);
+  byte code[] = {U32V_5(0)};
+  EmitCode(code, sizeof(code));
+}
+
 void WasmFunctionBuilder::SetExported() { exported_ = true; }
 
 void WasmFunctionBuilder::SetName(const char* name, int name_length) {
@@ -139,14 +154,15 @@
   buffer.write_u32v(signature_index_);
 }
 
-void WasmFunctionBuilder::WriteExport(ZoneBuffer& buffer,
-                                      uint32_t func_index) const {
+void WasmFunctionBuilder::WriteExport(ZoneBuffer& buffer) const {
   if (exported_) {
-    buffer.write_u32v(func_index);
     buffer.write_size(name_.size());
     if (name_.size() > 0) {
       buffer.write(reinterpret_cast<const byte*>(&name_[0]), name_.size());
     }
+    buffer.write_u8(kExternalFunction);
+    buffer.write_u32v(func_index_ +
+                      static_cast<uint32_t>(builder_->imports_.size()));
   }
 }
 
@@ -158,24 +174,16 @@
   locals_.Emit(*ptr);
   (*ptr) += locals_size;  // UGLY: manual bump of position pointer
   if (body_.size() > 0) {
+    size_t base = buffer.offset();
     buffer.write(&body_[0], body_.size());
+    for (DirectCallIndex call : direct_calls_) {
+      buffer.patch_u32v(
+          base + call.offset,
+          call.direct_index + static_cast<uint32_t>(builder_->imports_.size()));
+    }
   }
 }
 
-WasmDataSegmentEncoder::WasmDataSegmentEncoder(Zone* zone, const byte* data,
-                                               uint32_t size, uint32_t dest)
-    : data_(zone), dest_(dest) {
-  for (size_t i = 0; i < size; ++i) {
-    data_.push_back(data[i]);
-  }
-}
-
-void WasmDataSegmentEncoder::Write(ZoneBuffer& buffer) const {
-  buffer.write_u32v(dest_);
-  buffer.write_u32v(static_cast<uint32_t>(data_.size()));
-  buffer.write(&data_[0], data_.size());
-}
-
 WasmModuleBuilder::WasmModuleBuilder(Zone* zone)
     : zone_(zone),
       signatures_(zone),
@@ -187,23 +195,22 @@
       signature_map_(zone),
       start_function_index_(-1) {}
 
-uint32_t WasmModuleBuilder::AddFunction() {
+WasmFunctionBuilder* WasmModuleBuilder::AddFunction(FunctionSig* sig) {
   functions_.push_back(new (zone_) WasmFunctionBuilder(this));
-  return static_cast<uint32_t>(functions_.size() - 1);
+  // Add the signature if one was provided here.
+  if (sig) functions_.back()->SetSignature(sig);
+  return functions_.back();
 }
 
-WasmFunctionBuilder* WasmModuleBuilder::FunctionAt(size_t index) {
-  if (functions_.size() > index) {
-    return functions_.at(index);
-  } else {
-    return nullptr;
+void WasmModuleBuilder::AddDataSegment(const byte* data, uint32_t size,
+                                       uint32_t dest) {
+  data_segments_.push_back({ZoneVector<byte>(zone()), dest});
+  ZoneVector<byte>& vec = data_segments_.back().data;
+  for (uint32_t i = 0; i < size; i++) {
+    vec.push_back(data[i]);
   }
 }
 
-void WasmModuleBuilder::AddDataSegment(WasmDataSegmentEncoder* data) {
-  data_segments_.push_back(data);
-}
-
 bool WasmModuleBuilder::CompareFunctionSigs::operator()(FunctionSig* a,
                                                         FunctionSig* b) const {
   if (a->return_count() < b->return_count()) return true;
@@ -243,12 +250,13 @@
   return static_cast<uint32_t>(imports_.size() - 1);
 }
 
-void WasmModuleBuilder::MarkStartFunction(uint32_t index) {
-  start_function_index_ = index;
+void WasmModuleBuilder::MarkStartFunction(WasmFunctionBuilder* function) {
+  start_function_index_ = function->func_index();
 }
 
-uint32_t WasmModuleBuilder::AddGlobal(LocalType type, bool exported) {
-  globals_.push_back(std::make_pair(type, exported));
+uint32_t WasmModuleBuilder::AddGlobal(LocalType type, bool exported,
+                                      bool mutability) {
+  globals_.push_back({type, exported, mutability});
   return static_cast<uint32_t>(globals_.size() - 1);
 }
 
@@ -262,7 +270,7 @@
 
   // == Emit signatures ========================================================
   if (signatures_.size() > 0) {
-    size_t start = EmitSection(WasmSection::Code::Signatures, buffer);
+    size_t start = EmitSection(kTypeSectionCode, buffer);
     buffer.write_size(signatures_.size());
 
     for (FunctionSig* sig : signatures_) {
@@ -279,86 +287,128 @@
     FixupSection(buffer, start);
   }
 
-  // == Emit globals ===========================================================
-  if (globals_.size() > 0) {
-    size_t start = EmitSection(WasmSection::Code::Globals, buffer);
-    buffer.write_size(globals_.size());
-
-    for (auto global : globals_) {
-      buffer.write_u32v(0);  // Length of the global name.
-      buffer.write_u8(WasmOpcodes::LocalTypeCodeFor(global.first));
-      buffer.write_u8(global.second);
-    }
-    FixupSection(buffer, start);
-  }
-
   // == Emit imports ===========================================================
   if (imports_.size() > 0) {
-    size_t start = EmitSection(WasmSection::Code::ImportTable, buffer);
+    size_t start = EmitSection(kImportSectionCode, buffer);
     buffer.write_size(imports_.size());
     for (auto import : imports_) {
-      buffer.write_u32v(import.sig_index);
-      buffer.write_u32v(import.name_length);
-      buffer.write(reinterpret_cast<const byte*>(import.name),
+      buffer.write_u32v(import.name_length);  // module name length
+      buffer.write(reinterpret_cast<const byte*>(import.name),  // module name
                    import.name_length);
-      buffer.write_u32v(0);
+      buffer.write_u32v(0);  // field name length
+      buffer.write_u8(kExternalFunction);
+      buffer.write_u32v(import.sig_index);
     }
     FixupSection(buffer, start);
   }
 
   // == Emit function signatures ===============================================
+  bool has_names = false;
   if (functions_.size() > 0) {
-    size_t start = EmitSection(WasmSection::Code::FunctionSignatures, buffer);
+    size_t start = EmitSection(kFunctionSectionCode, buffer);
     buffer.write_size(functions_.size());
     for (auto function : functions_) {
       function->WriteSignature(buffer);
       if (function->exported()) exports++;
+      if (function->name_.size() > 0) has_names = true;
     }
     FixupSection(buffer, start);
   }
 
   // == emit function table ====================================================
   if (indirect_functions_.size() > 0) {
-    size_t start = EmitSection(WasmSection::Code::FunctionTable, buffer);
+    size_t start = EmitSection(kTableSectionCode, buffer);
+    buffer.write_u8(1);  // table count
+    buffer.write_u8(kWasmAnyFunctionTypeForm);
+    buffer.write_u8(kResizableMaximumFlag);
     buffer.write_size(indirect_functions_.size());
-
-    for (auto index : indirect_functions_) {
-      buffer.write_u32v(index);
-    }
+    buffer.write_size(indirect_functions_.size());
     FixupSection(buffer, start);
   }
 
   // == emit memory declaration ================================================
   {
-    size_t start = EmitSection(WasmSection::Code::Memory, buffer);
+    size_t start = EmitSection(kMemorySectionCode, buffer);
+    buffer.write_u8(1);  // memory count
+    buffer.write_u32v(kResizableMaximumFlag);
     buffer.write_u32v(16);  // min memory size
     buffer.write_u32v(16);  // max memory size
-    buffer.write_u8(0);     // memory export
-    static_assert(kDeclMemorySize == 3, "memory size must match emit above");
+    FixupSection(buffer, start);
+  }
+
+  // == Emit globals ===========================================================
+  if (globals_.size() > 0) {
+    size_t start = EmitSection(kGlobalSectionCode, buffer);
+    buffer.write_size(globals_.size());
+
+    for (auto global : globals_) {
+      buffer.write_u8(WasmOpcodes::LocalTypeCodeFor(global.type));
+      buffer.write_u8(global.mutability ? 1 : 0);
+      switch (global.type) {
+        case kAstI32: {
+          static const byte code[] = {WASM_I32V_1(0)};
+          buffer.write(code, sizeof(code));
+          break;
+        }
+        case kAstF32: {
+          static const byte code[] = {WASM_F32(0)};
+          buffer.write(code, sizeof(code));
+          break;
+        }
+        case kAstI64: {
+          static const byte code[] = {WASM_I64V_1(0)};
+          buffer.write(code, sizeof(code));
+          break;
+        }
+        case kAstF64: {
+          static const byte code[] = {WASM_F64(0.0)};
+          buffer.write(code, sizeof(code));
+          break;
+        }
+        default:
+          UNREACHABLE();
+      }
+      buffer.write_u8(kExprEnd);
+    }
     FixupSection(buffer, start);
   }
 
   // == emit exports ===========================================================
   if (exports > 0) {
-    size_t start = EmitSection(WasmSection::Code::ExportTable, buffer);
+    size_t start = EmitSection(kExportSectionCode, buffer);
     buffer.write_u32v(exports);
-    uint32_t index = 0;
-    for (auto function : functions_) {
-      function->WriteExport(buffer, index++);
-    }
+    for (auto function : functions_) function->WriteExport(buffer);
     FixupSection(buffer, start);
   }
 
   // == emit start function index ==============================================
   if (start_function_index_ >= 0) {
-    size_t start = EmitSection(WasmSection::Code::StartFunction, buffer);
-    buffer.write_u32v(start_function_index_);
+    size_t start = EmitSection(kStartSectionCode, buffer);
+    buffer.write_u32v(start_function_index_ +
+                      static_cast<uint32_t>(imports_.size()));
+    FixupSection(buffer, start);
+  }
+
+  // == emit function table elements ===========================================
+  if (indirect_functions_.size() > 0) {
+    size_t start = EmitSection(kElementSectionCode, buffer);
+    buffer.write_u8(1);              // count of entries
+    buffer.write_u8(0);              // table index
+    buffer.write_u8(kExprI32Const);  // offset
+    buffer.write_u32v(0);
+    buffer.write_u8(kExprEnd);
+    buffer.write_size(indirect_functions_.size());  // element count
+
+    for (auto index : indirect_functions_) {
+      buffer.write_u32v(index + static_cast<uint32_t>(imports_.size()));
+    }
+
     FixupSection(buffer, start);
   }
 
   // == emit code ==============================================================
   if (functions_.size() > 0) {
-    size_t start = EmitSection(WasmSection::Code::FunctionBodies, buffer);
+    size_t start = EmitSection(kCodeSectionCode, buffer);
     buffer.write_size(functions_.size());
     for (auto function : functions_) {
       function->WriteBody(buffer);
@@ -368,11 +418,38 @@
 
   // == emit data segments =====================================================
   if (data_segments_.size() > 0) {
-    size_t start = EmitSection(WasmSection::Code::DataSegments, buffer);
+    size_t start = EmitSection(kDataSectionCode, buffer);
     buffer.write_size(data_segments_.size());
 
     for (auto segment : data_segments_) {
-      segment->Write(buffer);
+      buffer.write_u8(0);              // linear memory segment
+      buffer.write_u8(kExprI32Const);  // initializer expression for dest
+      buffer.write_u32v(segment.dest);
+      buffer.write_u8(kExprEnd);
+      buffer.write_u32v(static_cast<uint32_t>(segment.data.size()));
+      buffer.write(&segment.data[0], segment.data.size());
+    }
+    FixupSection(buffer, start);
+  }
+
+  // == Emit names =============================================================
+  if (has_names) {
+    // Emit the section code.
+    buffer.write_u8(kUnknownSectionCode);
+    // Emit a placeholder for the length.
+    size_t start = buffer.reserve_u32v();
+    // Emit the section string.
+    buffer.write_size(4);
+    buffer.write(reinterpret_cast<const byte*>("name"), 4);
+    // Emit the names.
+    buffer.write_size(functions_.size());
+    for (auto function : functions_) {
+      buffer.write_size(function->name_.size());
+      if (function->name_.size() > 0) {
+        buffer.write(reinterpret_cast<const byte*>(&function->name_[0]),
+                     function->name_.size());
+      }
+      buffer.write_u8(0);
     }
     FixupSection(buffer, start);
   }
diff --git a/src/wasm/encoder.h b/src/wasm/wasm-module-builder.h
similarity index 61%
rename from src/wasm/encoder.h
rename to src/wasm/wasm-module-builder.h
index eb8aa64..dcaf6c8 100644
--- a/src/wasm/encoder.h
+++ b/src/wasm/wasm-module-builder.h
@@ -2,11 +2,11 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
-#ifndef V8_WASM_ENCODER_H_
-#define V8_WASM_ENCODER_H_
+#ifndef V8_WASM_WASM_MODULE_BUILDER_H_
+#define V8_WASM_WASM_MODULE_BUILDER_H_
 
 #include "src/signature.h"
-#include "src/zone-containers.h"
+#include "src/zone/zone-containers.h"
 
 #include "src/wasm/leb-helper.h"
 #include "src/wasm/wasm-macro-gen.h"
@@ -90,13 +90,14 @@
 
   void EnsureSpace(size_t size) {
     if ((pos_ + size) > end_) {
-      size_t new_size = 4096 + (end_ - buffer_) * 3;
+      size_t new_size = 4096 + size + (end_ - buffer_) * 3;
       byte* new_buffer = reinterpret_cast<byte*>(zone_->New(new_size));
       memcpy(new_buffer, buffer_, (pos_ - buffer_));
       pos_ = new_buffer + (pos_ - buffer_);
       buffer_ = new_buffer;
       end_ = new_buffer + new_size;
     }
+    DCHECK(pos_ + size <= end_);
   }
 
   byte** pos_ptr() { return &pos_; }
@@ -110,7 +111,7 @@
 
 class WasmModuleBuilder;
 
-class WasmFunctionBuilder : public ZoneObject {
+class V8_EXPORT_PRIVATE WasmFunctionBuilder : public ZoneObject {
  public:
   // Building methods.
   void SetSignature(FunctionSig* sig);
@@ -120,61 +121,102 @@
   void Emit(WasmOpcode opcode);
   void EmitGetLocal(uint32_t index);
   void EmitSetLocal(uint32_t index);
+  void EmitTeeLocal(uint32_t index);
   void EmitI32Const(int32_t val);
   void EmitWithU8(WasmOpcode opcode, const byte immediate);
   void EmitWithU8U8(WasmOpcode opcode, const byte imm1, const byte imm2);
   void EmitWithVarInt(WasmOpcode opcode, uint32_t immediate);
+  void EmitDirectCallIndex(uint32_t index);
   void SetExported();
   void SetName(const char* name, int name_length);
-  bool exported() { return exported_; }
 
-  // Writing methods.
   void WriteSignature(ZoneBuffer& buffer) const;
-  void WriteExport(ZoneBuffer& buffer, uint32_t func_index) const;
+  void WriteExport(ZoneBuffer& buffer) const;
   void WriteBody(ZoneBuffer& buffer) const;
 
+  bool exported() { return exported_; }
+  uint32_t func_index() { return func_index_; }
+  FunctionSig* signature();
+
  private:
   explicit WasmFunctionBuilder(WasmModuleBuilder* builder);
   friend class WasmModuleBuilder;
+  friend class WasmTemporary;
+
+  struct DirectCallIndex {
+    size_t offset;
+    uint32_t direct_index;
+  };
+
   WasmModuleBuilder* builder_;
   LocalDeclEncoder locals_;
   uint32_t signature_index_;
   bool exported_;
+  uint32_t func_index_;
   ZoneVector<uint8_t> body_;
   ZoneVector<char> name_;
+  ZoneVector<uint32_t> i32_temps_;
+  ZoneVector<uint32_t> i64_temps_;
+  ZoneVector<uint32_t> f32_temps_;
+  ZoneVector<uint32_t> f64_temps_;
+  ZoneVector<DirectCallIndex> direct_calls_;
 };
 
-// TODO(titzer): kill!
-class WasmDataSegmentEncoder : public ZoneObject {
+class WasmTemporary {
  public:
-  WasmDataSegmentEncoder(Zone* zone, const byte* data, uint32_t size,
-                         uint32_t dest);
-  void Write(ZoneBuffer& buffer) const;
+  WasmTemporary(WasmFunctionBuilder* builder, LocalType type) {
+    switch (type) {
+      case kAstI32:
+        temporary_ = &builder->i32_temps_;
+        break;
+      case kAstI64:
+        temporary_ = &builder->i64_temps_;
+        break;
+      case kAstF32:
+        temporary_ = &builder->f32_temps_;
+        break;
+      case kAstF64:
+        temporary_ = &builder->f64_temps_;
+        break;
+      default:
+        UNREACHABLE();
+        temporary_ = nullptr;
+    }
+    if (temporary_->size() == 0) {
+      // Allocate a new temporary.
+      index_ = builder->AddLocal(type);
+    } else {
+      // Reuse a previous temporary.
+      index_ = temporary_->back();
+      temporary_->pop_back();
+    }
+  }
+  ~WasmTemporary() {
+    temporary_->push_back(index_);  // return the temporary to the list.
+  }
+  uint32_t index() { return index_; }
 
  private:
-  ZoneVector<byte> data_;
-  uint32_t dest_;
+  ZoneVector<uint32_t>* temporary_;
+  uint32_t index_;
 };
 
-struct WasmFunctionImport {
-  uint32_t sig_index;
-  const char* name;
-  int name_length;
-};
-
-class WasmModuleBuilder : public ZoneObject {
+class V8_EXPORT_PRIVATE WasmModuleBuilder : public ZoneObject {
  public:
   explicit WasmModuleBuilder(Zone* zone);
 
   // Building methods.
-  uint32_t AddFunction();
-  uint32_t AddGlobal(LocalType type, bool exported);
-  WasmFunctionBuilder* FunctionAt(size_t index);
-  void AddDataSegment(WasmDataSegmentEncoder* data);
+  uint32_t AddImport(const char* name, int name_length, FunctionSig* sig);
+  void SetImportName(uint32_t index, const char* name, int name_length) {
+    imports_[index].name = name;
+    imports_[index].name_length = name_length;
+  }
+  WasmFunctionBuilder* AddFunction(FunctionSig* sig = nullptr);
+  uint32_t AddGlobal(LocalType type, bool exported, bool mutability = true);
+  void AddDataSegment(const byte* data, uint32_t size, uint32_t dest);
   uint32_t AddSignature(FunctionSig* sig);
   void AddIndirectFunction(uint32_t index);
-  void MarkStartFunction(uint32_t index);
-  uint32_t AddImport(const char* name, int name_length, FunctionSig* sig);
+  void MarkStartFunction(WasmFunctionBuilder* builder);
 
   // Writing methods.
   void WriteTo(ZoneBuffer& buffer) const;
@@ -186,20 +228,44 @@
 
   Zone* zone() { return zone_; }
 
+  FunctionSig* GetSignature(uint32_t index) { return signatures_[index]; }
+
  private:
+  struct WasmFunctionImport {
+    uint32_t sig_index;
+    const char* name;
+    int name_length;
+  };
+
+  struct WasmGlobal {
+    LocalType type;
+    bool exported;
+    bool mutability;
+  };
+
+  struct WasmDataSegment {
+    ZoneVector<byte> data;
+    uint32_t dest;
+  };
+
+  friend class WasmFunctionBuilder;
   Zone* zone_;
   ZoneVector<FunctionSig*> signatures_;
   ZoneVector<WasmFunctionImport> imports_;
   ZoneVector<WasmFunctionBuilder*> functions_;
-  ZoneVector<WasmDataSegmentEncoder*> data_segments_;
+  ZoneVector<WasmDataSegment> data_segments_;
   ZoneVector<uint32_t> indirect_functions_;
-  ZoneVector<std::pair<LocalType, bool>> globals_;
+  ZoneVector<WasmGlobal> globals_;
   SignatureMap signature_map_;
   int start_function_index_;
 };
 
+inline FunctionSig* WasmFunctionBuilder::signature() {
+  return builder_->signatures_[signature_index_];
+}
+
 }  // namespace wasm
 }  // namespace internal
 }  // namespace v8
 
-#endif  // V8_WASM_ENCODER_H_
+#endif  // V8_WASM_WASM_MODULE_BUILDER_H_
diff --git a/src/wasm/wasm-module.cc b/src/wasm/wasm-module.cc
index 94bf998..f4cf505 100644
--- a/src/wasm/wasm-module.cc
+++ b/src/wasm/wasm-module.cc
@@ -18,6 +18,7 @@
 #include "src/wasm/module-decoder.h"
 #include "src/wasm/wasm-debug.h"
 #include "src/wasm/wasm-function-name-table.h"
+#include "src/wasm/wasm-js.h"
 #include "src/wasm/wasm-module.h"
 #include "src/wasm/wasm-result.h"
 
@@ -27,179 +28,56 @@
 namespace internal {
 namespace wasm {
 
+#define TRACE(...)                                      \
+  do {                                                  \
+    if (FLAG_trace_wasm_instances) PrintF(__VA_ARGS__); \
+  } while (false)
+
+#define TRACE_CHAIN(instance)        \
+  do {                               \
+    instance->PrintInstancesChain(); \
+  } while (false)
+
+namespace {
+
+static const int kPlaceholderMarker = 1000000000;
+
 enum JSFunctionExportInternalField {
   kInternalModuleInstance,
   kInternalArity,
   kInternalSignature
 };
 
-static const int kPlaceholderMarker = 1000000000;
-
-static const char* wasmSections[] = {
-#define F(enumerator, order, string) string,
-    FOR_EACH_WASM_SECTION_TYPE(F)
-#undef F
-        "<unknown>"  // entry for "Max"
-};
-
-static uint8_t wasmSectionsLengths[]{
-#define F(enumerator, order, string) sizeof(string) - 1,
-    FOR_EACH_WASM_SECTION_TYPE(F)
-#undef F
-        9  // entry for "Max"
-};
-
-static uint8_t wasmSectionsOrders[]{
-#define F(enumerator, order, string) order,
-    FOR_EACH_WASM_SECTION_TYPE(F)
-#undef F
-        0  // entry for "Max"
-};
-
-static_assert(sizeof(wasmSections) / sizeof(wasmSections[0]) ==
-                  (size_t)WasmSection::Code::Max + 1,
-              "expected enum WasmSection::Code to be monotonic from 0");
-
-WasmSection::Code WasmSection::begin() { return (WasmSection::Code)0; }
-WasmSection::Code WasmSection::end() { return WasmSection::Code::Max; }
-WasmSection::Code WasmSection::next(WasmSection::Code code) {
-  return (WasmSection::Code)(1 + (uint32_t)code);
-}
-
-const char* WasmSection::getName(WasmSection::Code code) {
-  return wasmSections[(size_t)code];
-}
-
-size_t WasmSection::getNameLength(WasmSection::Code code) {
-  return wasmSectionsLengths[(size_t)code];
-}
-
-int WasmSection::getOrder(WasmSection::Code code) {
-  return wasmSectionsOrders[(size_t)code];
-}
-
-WasmSection::Code WasmSection::lookup(const byte* string, uint32_t length) {
-  // TODO(jfb) Linear search, it may be better to do a common-prefix search.
-  for (Code i = begin(); i != end(); i = next(i)) {
-    if (getNameLength(i) == length && 0 == memcmp(getName(i), string, length)) {
-      return i;
-    }
-  }
-  return Code::Max;
-}
-
-std::ostream& operator<<(std::ostream& os, const WasmModule& module) {
-  os << "WASM module with ";
-  os << (module.min_mem_pages * module.kPageSize) << " min mem";
-  os << (module.max_mem_pages * module.kPageSize) << " max mem";
-  os << module.functions.size() << " functions";
-  os << module.functions.size() << " globals";
-  os << module.functions.size() << " data segments";
-  return os;
-}
-
-std::ostream& operator<<(std::ostream& os, const WasmFunction& function) {
-  os << "WASM function with signature " << *function.sig;
-
-  os << " code bytes: "
-     << (function.code_end_offset - function.code_start_offset);
-  return os;
-}
-
-std::ostream& operator<<(std::ostream& os, const WasmFunctionName& pair) {
-  os << "#" << pair.function_->func_index << ":";
-  if (pair.function_->name_offset > 0) {
-    if (pair.module_) {
-      WasmName name = pair.module_->GetName(pair.function_->name_offset,
-                                            pair.function_->name_length);
-      os.write(name.start(), name.length());
-    } else {
-      os << "+" << pair.function_->func_index;
-    }
-  } else {
-    os << "?";
-  }
-  return os;
-}
-
-Handle<JSFunction> WrapExportCodeAsJSFunction(
-    Isolate* isolate, Handle<Code> export_code, Handle<String> name, int arity,
-    MaybeHandle<ByteArray> maybe_signature, Handle<JSObject> module_instance) {
-  Handle<SharedFunctionInfo> shared =
-      isolate->factory()->NewSharedFunctionInfo(name, export_code, false);
-  shared->set_length(arity);
-  shared->set_internal_formal_parameter_count(arity);
-  Handle<JSFunction> function = isolate->factory()->NewFunction(
-      isolate->wasm_function_map(), name, export_code);
-  function->set_shared(*shared);
-
-  function->SetInternalField(kInternalModuleInstance, *module_instance);
-  // add another Internal Field as the function arity
-  function->SetInternalField(kInternalArity, Smi::FromInt(arity));
-  // add another Internal Field as the signature of the foreign function
-  Handle<ByteArray> signature;
-  if (maybe_signature.ToHandle(&signature)) {
-    function->SetInternalField(kInternalSignature, *signature);
-  }
-  return function;
-}
-
-namespace {
 // Internal constants for the layout of the module object.
-const int kWasmModuleFunctionTable = 0;
-const int kWasmModuleCodeTable = 1;
-const int kWasmMemArrayBuffer = 2;
-const int kWasmGlobalsArrayBuffer = 3;
-// TODO(clemensh): Remove function name array, extract names from module bytes.
-const int kWasmFunctionNamesArray = 4;
-const int kWasmModuleBytesString = 5;
-const int kWasmDebugInfo = 6;
-const int kWasmModuleInternalFieldCount = 7;
-
-// TODO(mtrofin): Unnecessary once we stop using JS Heap for wasm code.
-// For now, each field is expected to have the type commented by its side.
-// The elements typed as "maybe" are optional. The others are mandatory. Since
-// the compiled module is either obtained from the current v8 instance, or from
-// a snapshot produced by a compatible (==identical) v8 instance, we simply
-// fail at instantiation time, in the face of invalid data.
-enum CompiledWasmObjectFields {
-  kFunctions,        // FixedArray of Code
-  kImportData,       // maybe FixedArray of FixedArray respecting the
-                     // WasmImportMetadata structure.
-  kExports,          // maybe FixedArray of FixedArray of WasmExportMetadata
-                     // structure
-  kStartupFunction,  // maybe FixedArray of WasmExportMetadata structure
-  kTableOfIndirectFunctionTables,  // maybe FixedArray of FixedArray of
-                                   // WasmIndirectFunctionTableMetadata
-  kModuleBytes,                    // maybe String
-  kFunctionNameTable,              // maybe ByteArray
-  kMinRequiredMemory,              // Smi. an uint32_t
-  // The following 2 are either together present or absent:
-  kDataSegmentsInfo,  // maybe FixedArray of FixedArray respecting the
-                      // WasmSegmentInfo structure
-  kDataSegments,      // maybe ByteArray.
-
-  kGlobalsSize,                 // Smi. an uint32_t
-  kExportMem,                   // Smi. bool
-  kOrigin,                      // Smi. ModuleOrigin
-  kCompiledWasmObjectTableSize  // Sentinel value.
+enum WasmInstanceObjectFields {
+  kWasmCompiledModule = 0,
+  kWasmModuleFunctionTable,
+  kWasmModuleCodeTable,
+  kWasmMemArrayBuffer,
+  kWasmGlobalsArrayBuffer,
+  // TODO(clemensh): Remove function name array, extract names from module
+  // bytes.
+  kWasmFunctionNamesArray,
+  kWasmModuleBytesString,
+  kWasmDebugInfo,
+  kWasmNumImportedFunctions,
+  kWasmModuleInternalFieldCount
 };
 
-enum WasmImportMetadata {
-  kModuleName,              // String
-  kFunctionName,            // maybe String
-  kOutputCount,             // Smi. an uint32_t
-  kSignature,               // ByteArray. A copy of the data in FunctionSig
-  kWasmImportDataTableSize  // Sentinel value.
+enum WasmImportData {
+  kModuleName,         // String
+  kFunctionName,       // maybe String
+  kOutputCount,        // Smi. an uint32_t
+  kSignature,          // ByteArray. A copy of the data in FunctionSig
+  kWasmImportDataSize  // Sentinel value.
 };
 
-enum WasmExportMetadata {
-  kExportCode,                  // Code
-  kExportName,                  // String
-  kExportArity,                 // Smi, an int
-  kExportedFunctionIndex,       // Smi, an uint32_t
-  kExportedSignature,           // ByteArray. A copy of the data in FunctionSig
-  kWasmExportMetadataTableSize  // Sentinel value.
+enum WasmExportData {
+  kExportName,             // String
+  kExportArity,            // Smi, an int
+  kExportedFunctionIndex,  // Smi, an uint32_t
+  kExportedSignature,      // ByteArray. A copy of the data in FunctionSig
+  kWasmExportDataSize      // Sentinel value.
 };
 
 enum WasmSegmentInfo {
@@ -208,31 +86,26 @@
   kWasmSegmentInfoSize  // Sentinel value.
 };
 
-enum WasmIndirectFunctionTableMetadata {
-  kSize,   // Smi. an uint32_t
-  kTable,  // FixedArray of indirect function table
-  kWasmIndirectFunctionTableMetadataSize  // Sentinel value.
+enum WasmIndirectFunctionTableData {
+  kSize,                              // Smi. an uint32_t
+  kTable,                             // FixedArray of indirect function table
+  kWasmIndirectFunctionTableDataSize  // Sentinel value.
 };
 
 uint32_t GetMinModuleMemSize(const WasmModule* module) {
   return WasmModule::kPageSize * module->min_mem_pages;
 }
 
-void LoadDataSegments(Handle<FixedArray> compiled_module, Address mem_addr,
-                      size_t mem_size) {
-  Isolate* isolate = compiled_module->GetIsolate();
-  MaybeHandle<ByteArray> maybe_data =
-      compiled_module->GetValue<ByteArray>(isolate, kDataSegments);
-  MaybeHandle<FixedArray> maybe_segments =
-      compiled_module->GetValue<FixedArray>(isolate, kDataSegmentsInfo);
+void LoadDataSegments(Handle<WasmCompiledModule> compiled_module,
+                      Address mem_addr, size_t mem_size) {
+  CHECK(compiled_module->has_data_segments() ==
+        compiled_module->has_data_segments_info());
 
-  // We either have both or neither.
-  CHECK(maybe_data.is_null() == maybe_segments.is_null());
   // If we have neither, we're done.
-  if (maybe_data.is_null()) return;
+  if (!compiled_module->has_data_segments()) return;
 
-  Handle<ByteArray> data = maybe_data.ToHandleChecked();
-  Handle<FixedArray> segments = maybe_segments.ToHandleChecked();
+  Handle<ByteArray> data = compiled_module->data_segments();
+  Handle<FixedArray> segments = compiled_module->data_segments_info();
 
   uint32_t last_extraction_pos = 0;
   for (int i = 0; i < segments->length(); ++i) {
@@ -250,12 +123,11 @@
 }
 
 void SaveDataSegmentInfo(Factory* factory, const WasmModule* module,
-                         Handle<FixedArray> compiled_module) {
+                         Handle<WasmCompiledModule> compiled_module) {
   Handle<FixedArray> segments = factory->NewFixedArray(
       static_cast<int>(module->data_segments.size()), TENURED);
   uint32_t data_size = 0;
   for (const WasmDataSegment& segment : module->data_segments) {
-    if (!segment.init) continue;
     if (segment.source_size == 0) continue;
     data_size += segment.source_size;
   }
@@ -264,11 +136,12 @@
   uint32_t last_insertion_pos = 0;
   for (uint32_t i = 0; i < module->data_segments.size(); ++i) {
     const WasmDataSegment& segment = module->data_segments[i];
-    if (!segment.init) continue;
     if (segment.source_size == 0) continue;
     Handle<ByteArray> js_segment =
         factory->NewByteArray(kWasmSegmentInfoSize * sizeof(uint32_t), TENURED);
-    js_segment->set_int(kDestAddr, segment.dest_addr);
+    // TODO(titzer): add support for global offsets for dest_addr
+    CHECK_EQ(WasmInitExpr::kI32Const, segment.dest_addr.kind);
+    js_segment->set_int(kDestAddr, segment.dest_addr.val.i32_const);
     js_segment->set_int(kSourceSize, segment.source_size);
     segments->set(i, *js_segment);
     data->copy_in(last_insertion_pos,
@@ -276,8 +149,8 @@
                   segment.source_size);
     last_insertion_pos += segment.source_size;
   }
-  compiled_module->set(kDataSegmentsInfo, *segments);
-  compiled_module->set(kDataSegments, *data);
+  compiled_module->set_data_segments_info(segments);
+  compiled_module->set_data_segments(data);
 }
 
 void PatchFunctionTable(Handle<Code> code,
@@ -315,8 +188,9 @@
   return buffer;
 }
 
-void RelocateInstanceCode(Handle<JSObject> instance, Address start,
-                          uint32_t prev_size, uint32_t new_size) {
+void RelocateInstanceCode(Handle<JSObject> instance, Address old_start,
+                          Address start, uint32_t prev_size,
+                          uint32_t new_size) {
   Handle<FixedArray> functions = Handle<FixedArray>(
       FixedArray::cast(instance->GetInternalField(kWasmModuleCodeTable)));
   for (int i = 0; i < functions->length(); ++i) {
@@ -325,7 +199,7 @@
     int mask = (1 << RelocInfo::WASM_MEMORY_REFERENCE) |
                (1 << RelocInfo::WASM_MEMORY_SIZE_REFERENCE);
     for (RelocIterator it(*function, mask); !it.done(); it.next()) {
-      it.rinfo()->update_wasm_memory_reference(nullptr, start, prev_size,
+      it.rinfo()->update_wasm_memory_reference(old_start, start, prev_size,
                                                new_size);
     }
   }
@@ -347,7 +221,8 @@
   return mem_buffer;
 }
 
-void RelocateGlobals(Handle<JSObject> instance, Address globals_start) {
+void RelocateGlobals(Handle<JSObject> instance, Address old_start,
+                     Address globals_start) {
   Handle<FixedArray> functions = Handle<FixedArray>(
       FixedArray::cast(instance->GetInternalField(kWasmModuleCodeTable)));
   uint32_t function_count = static_cast<uint32_t>(functions->length());
@@ -356,7 +231,7 @@
     AllowDeferredHandleDereference embedding_raw_address;
     int mask = 1 << RelocInfo::WASM_GLOBAL_REFERENCE;
     for (RelocIterator it(*function, mask); !it.done(); it.next()) {
-      it.rinfo()->update_wasm_global_reference(nullptr, globals_start);
+      it.rinfo()->update_wasm_global_reference(old_start, globals_start);
     }
   }
 }
@@ -375,64 +250,41 @@
   return code;
 }
 
-// TODO(mtrofin): remove when we stop relying on placeholders.
-void InitializePlaceholders(Factory* factory,
-                            std::vector<Handle<Code>>* placeholders,
-                            size_t size) {
-  DCHECK(placeholders->empty());
-  placeholders->reserve(size);
-
-  for (uint32_t i = 0; i < size; ++i) {
-    placeholders->push_back(CreatePlaceholder(factory, i, Code::WASM_FUNCTION));
-  }
-}
-
 bool LinkFunction(Handle<Code> unlinked,
-                  const std::vector<Handle<Code>>& code_targets,
-                  Code::Kind kind) {
+                  std::vector<Handle<Code>>& code_table) {
   bool modified = false;
-  int mode_mask = RelocInfo::kCodeTargetMask;
+  int mode_mask = RelocInfo::ModeMask(RelocInfo::CODE_TARGET);
   AllowDeferredHandleDereference embedding_raw_address;
   for (RelocIterator it(*unlinked, mode_mask); !it.done(); it.next()) {
     RelocInfo::Mode mode = it.rinfo()->rmode();
     if (RelocInfo::IsCodeTarget(mode)) {
       Code* target =
           Code::GetCodeFromTargetAddress(it.rinfo()->target_address());
-      if (target->kind() == kind &&
-          target->constant_pool_offset() >= kPlaceholderMarker) {
-        // Patch direct calls to placeholder code objects.
-        uint32_t index = target->constant_pool_offset() - kPlaceholderMarker;
-        CHECK(index < code_targets.size());
-        Handle<Code> new_target = code_targets[index];
-        if (target != *new_target) {
-          it.rinfo()->set_target_address(new_target->instruction_start(),
-                                         UPDATE_WRITE_BARRIER,
-                                         SKIP_ICACHE_FLUSH);
-          modified = true;
+      if (target->constant_pool_offset() < kPlaceholderMarker) continue;
+      switch (target->kind()) {
+        case Code::WASM_FUNCTION:        // fall through
+        case Code::WASM_TO_JS_FUNCTION:  // fall through
+        case Code::JS_TO_WASM_FUNCTION: {
+          // Patch direct calls to placeholder code objects.
+          uint32_t index = target->constant_pool_offset() - kPlaceholderMarker;
+          Handle<Code> new_target = code_table[index];
+          if (target != *new_target) {
+            it.rinfo()->set_target_address(new_target->instruction_start(),
+                                           UPDATE_WRITE_BARRIER,
+                                           SKIP_ICACHE_FLUSH);
+            modified = true;
+          }
+          break;
         }
+        default:
+          break;
       }
     }
   }
   return modified;
 }
 
-void LinkModuleFunctions(Isolate* isolate,
-                         std::vector<Handle<Code>>& functions) {
-  for (size_t i = 0; i < functions.size(); ++i) {
-    Handle<Code> code = functions[i];
-    LinkFunction(code, functions, Code::WASM_FUNCTION);
-  }
-}
-
-void LinkImports(Isolate* isolate, std::vector<Handle<Code>>& functions,
-                 const std::vector<Handle<Code>>& imports) {
-  for (uint32_t i = 0; i < functions.size(); ++i) {
-    Handle<Code> code = functions[i];
-    LinkFunction(code, imports, Code::WASM_TO_JS_FUNCTION);
-  }
-}
-
-void FlushAssemblyCache(Isolate* isolate, Handle<FixedArray> functions) {
+void FlushICache(Isolate* isolate, Handle<FixedArray> functions) {
   for (int i = 0; i < functions->length(); ++i) {
     Handle<Code> code = functions->GetValueChecked<Code>(isolate, i);
     Assembler::FlushICache(isolate, code->instruction_start(),
@@ -440,39 +292,146 @@
   }
 }
 
-}  // namespace
+// Fetches the compilation unit of a wasm function and executes its parallel
+// phase.
+bool FetchAndExecuteCompilationUnit(
+    Isolate* isolate,
+    std::vector<compiler::WasmCompilationUnit*>* compilation_units,
+    std::queue<compiler::WasmCompilationUnit*>* executed_units,
+    base::Mutex* result_mutex, base::AtomicNumber<size_t>* next_unit) {
+  DisallowHeapAllocation no_allocation;
+  DisallowHandleAllocation no_handles;
+  DisallowHandleDereference no_deref;
+  DisallowCodeDependencyChange no_dependency_change;
 
-WasmModule::WasmModule(byte* module_start)
-    : module_start(module_start),
-      module_end(nullptr),
-      min_mem_pages(0),
-      max_mem_pages(0),
-      mem_export(false),
-      mem_external(false),
-      start_function_index(-1),
-      origin(kWasmOrigin),
-      globals_size(0),
-      pending_tasks(new base::Semaphore(0)) {}
+  // - 1 because AtomicIncrement returns the value after the atomic increment.
+  size_t index = next_unit->Increment(1) - 1;
+  if (index >= compilation_units->size()) {
+    return false;
+  }
+
+  compiler::WasmCompilationUnit* unit = compilation_units->at(index);
+  if (unit != nullptr) {
+    unit->ExecuteCompilation();
+    base::LockGuard<base::Mutex> guard(result_mutex);
+    executed_units->push(unit);
+  }
+  return true;
+}
+
+class WasmCompilationTask : public CancelableTask {
+ public:
+  WasmCompilationTask(
+      Isolate* isolate,
+      std::vector<compiler::WasmCompilationUnit*>* compilation_units,
+      std::queue<compiler::WasmCompilationUnit*>* executed_units,
+      base::Semaphore* on_finished, base::Mutex* result_mutex,
+      base::AtomicNumber<size_t>* next_unit)
+      : CancelableTask(isolate),
+        isolate_(isolate),
+        compilation_units_(compilation_units),
+        executed_units_(executed_units),
+        on_finished_(on_finished),
+        result_mutex_(result_mutex),
+        next_unit_(next_unit) {}
+
+  void RunInternal() override {
+    while (FetchAndExecuteCompilationUnit(isolate_, compilation_units_,
+                                          executed_units_, result_mutex_,
+                                          next_unit_)) {
+    }
+    on_finished_->Signal();
+  }
+
+  Isolate* isolate_;
+  std::vector<compiler::WasmCompilationUnit*>* compilation_units_;
+  std::queue<compiler::WasmCompilationUnit*>* executed_units_;
+  base::Semaphore* on_finished_;
+  base::Mutex* result_mutex_;
+  base::AtomicNumber<size_t>* next_unit_;
+};
+
+static void RecordStats(Isolate* isolate, Code* code) {
+  isolate->counters()->wasm_generated_code_size()->Increment(code->body_size());
+  isolate->counters()->wasm_reloc_size()->Increment(
+      code->relocation_info()->length());
+}
+
+static void RecordStats(Isolate* isolate, Handle<FixedArray> functions) {
+  DisallowHeapAllocation no_gc;
+  for (int i = 0; i < functions->length(); ++i) {
+    RecordStats(isolate, Code::cast(functions->get(i)));
+  }
+}
+
+Address GetGlobalStartAddressFromCodeTemplate(Object* undefined,
+                                              JSObject* owner) {
+  Address old_address = nullptr;
+  Object* stored_value = owner->GetInternalField(kWasmGlobalsArrayBuffer);
+  if (stored_value != undefined) {
+    old_address = static_cast<Address>(
+        JSArrayBuffer::cast(stored_value)->backing_store());
+  }
+  return old_address;
+}
+
+Handle<FixedArray> GetImportsData(Factory* factory, const WasmModule* module) {
+  Handle<FixedArray> ret = factory->NewFixedArray(
+      static_cast<int>(module->import_table.size()), TENURED);
+  for (size_t i = 0; i < module->import_table.size(); ++i) {
+    const WasmImport& import = module->import_table[i];
+    if (import.kind != kExternalFunction) continue;
+    WasmName module_name = module->GetNameOrNull(import.module_name_offset,
+                                                 import.module_name_length);
+    WasmName function_name = module->GetNameOrNull(import.field_name_offset,
+                                                   import.field_name_length);
+
+    Handle<String> module_name_string =
+        factory->InternalizeUtf8String(module_name);
+    Handle<String> function_name_string =
+        function_name.is_empty()
+            ? Handle<String>::null()
+            : factory->InternalizeUtf8String(function_name);
+    FunctionSig* fsig = module->functions[import.index].sig;
+    Handle<ByteArray> sig = factory->NewByteArray(
+        static_cast<int>(fsig->parameter_count() + fsig->return_count()),
+        TENURED);
+    sig->copy_in(0, reinterpret_cast<const byte*>(fsig->raw_data()),
+                 sig->length());
+    Handle<FixedArray> encoded_import =
+        factory->NewFixedArray(kWasmImportDataSize, TENURED);
+    encoded_import->set(kModuleName, *module_name_string);
+    if (!function_name_string.is_null()) {
+      encoded_import->set(kFunctionName, *function_name_string);
+    }
+    encoded_import->set(kOutputCount,
+                        Smi::FromInt(static_cast<int>(fsig->return_count())));
+    encoded_import->set(kSignature, *sig);
+    ret->set(static_cast<int>(i), *encoded_import);
+  }
+  return ret;
+}
 
 static MaybeHandle<JSFunction> ReportFFIError(
-    ErrorThrower& thrower, const char* error, uint32_t index,
+    ErrorThrower* thrower, const char* error, uint32_t index,
     Handle<String> module_name, MaybeHandle<String> function_name) {
   Handle<String> function_name_handle;
   if (function_name.ToHandle(&function_name_handle)) {
-    thrower.Error("Import #%d module=\"%.*s\" function=\"%.*s\" error: %s",
-                  index, module_name->length(), module_name->ToCString().get(),
-                  function_name_handle->length(),
-                  function_name_handle->ToCString().get(), error);
+    thrower->Error("Import #%d module=\"%.*s\" function=\"%.*s\" error: %s",
+                   index, module_name->length(), module_name->ToCString().get(),
+                   function_name_handle->length(),
+                   function_name_handle->ToCString().get(), error);
   } else {
-    thrower.Error("Import #%d module=\"%.*s\" error: %s", index,
-                  module_name->length(), module_name->ToCString().get(), error);
+    thrower->Error("Import #%d module=\"%.*s\" error: %s", index,
+                   module_name->length(), module_name->ToCString().get(),
+                   error);
   }
-  thrower.Error("Import ");
+  thrower->Error("Import ");
   return MaybeHandle<JSFunction>();
 }
 
 static MaybeHandle<JSReceiver> LookupFunction(
-    ErrorThrower& thrower, Factory* factory, Handle<JSReceiver> ffi,
+    ErrorThrower* thrower, Factory* factory, Handle<JSReceiver> ffi,
     uint32_t index, Handle<String> module_name,
     MaybeHandle<String> function_name) {
   if (ffi.is_null()) {
@@ -517,213 +476,88 @@
   return Handle<JSReceiver>::cast(function);
 }
 
-namespace {
-// Fetches the compilation unit of a wasm function and executes its parallel
-// phase.
-bool FetchAndExecuteCompilationUnit(
-    Isolate* isolate,
-    std::vector<compiler::WasmCompilationUnit*>* compilation_units,
-    std::queue<compiler::WasmCompilationUnit*>* executed_units,
-    base::Mutex* result_mutex, base::AtomicNumber<size_t>* next_unit) {
-  DisallowHeapAllocation no_allocation;
-  DisallowHandleAllocation no_handles;
-  DisallowHandleDereference no_deref;
-  DisallowCodeDependencyChange no_dependency_change;
+Handle<Code> CompileImportWrapper(Isolate* isolate,
+                                  const Handle<JSReceiver> ffi, int index,
+                                  Handle<FixedArray> import_data,
+                                  ErrorThrower* thrower) {
+  Handle<FixedArray> data =
+      import_data->GetValueChecked<FixedArray>(isolate, index);
+  Handle<String> module_name =
+      data->GetValueChecked<String>(isolate, kModuleName);
+  MaybeHandle<String> function_name =
+      data->GetValue<String>(isolate, kFunctionName);
 
-  // - 1 because AtomicIntrement returns the value after the atomic increment.
-  size_t index = next_unit->Increment(1) - 1;
-  if (index >= compilation_units->size()) {
-    return false;
-  }
+  // TODO(mtrofin): this is a uint32_t, actually. We should rationalize
+  // it when we rationalize signed/unsigned stuff.
+  int ret_count = Smi::cast(data->get(kOutputCount))->value();
+  CHECK_GE(ret_count, 0);
+  Handle<ByteArray> sig_data =
+      data->GetValueChecked<ByteArray>(isolate, kSignature);
+  int sig_data_size = sig_data->length();
+  int param_count = sig_data_size - ret_count;
+  CHECK(param_count >= 0);
 
-  compiler::WasmCompilationUnit* unit = compilation_units->at(index);
-  if (unit != nullptr) {
-    unit->ExecuteCompilation();
-    {
-      base::LockGuard<base::Mutex> guard(result_mutex);
-      executed_units->push(unit);
-    }
-  }
-  return true;
-}
-
-class WasmCompilationTask : public CancelableTask {
- public:
-  WasmCompilationTask(
-      Isolate* isolate,
-      std::vector<compiler::WasmCompilationUnit*>* compilation_units,
-      std::queue<compiler::WasmCompilationUnit*>* executed_units,
-      base::Semaphore* on_finished, base::Mutex* result_mutex,
-      base::AtomicNumber<size_t>* next_unit)
-      : CancelableTask(isolate),
-        isolate_(isolate),
-        compilation_units_(compilation_units),
-        executed_units_(executed_units),
-        on_finished_(on_finished),
-        result_mutex_(result_mutex),
-        next_unit_(next_unit) {}
-
-  void RunInternal() override {
-    while (FetchAndExecuteCompilationUnit(isolate_, compilation_units_,
-                                          executed_units_, result_mutex_,
-                                          next_unit_)) {
-    }
-    on_finished_->Signal();
-  }
-
-  Isolate* isolate_;
-  std::vector<compiler::WasmCompilationUnit*>* compilation_units_;
-  std::queue<compiler::WasmCompilationUnit*>* executed_units_;
-  base::Semaphore* on_finished_;
-  base::Mutex* result_mutex_;
-  base::AtomicNumber<size_t>* next_unit_;
-};
-
-static void RecordStats(Isolate* isolate, Code* code) {
-  isolate->counters()->wasm_generated_code_size()->Increment(code->body_size());
-  isolate->counters()->wasm_reloc_size()->Increment(
-      code->relocation_info()->length());
-}
-
-static void RecordStats(Isolate* isolate,
-                        const std::vector<Handle<Code>>& functions) {
-  for (Handle<Code> c : functions) RecordStats(isolate, *c);
-}
-
-static void RecordStats(Isolate* isolate, Handle<FixedArray> functions) {
-  DisallowHeapAllocation no_gc;
-  for (int i = 0; i < functions->length(); ++i) {
-    RecordStats(isolate, Code::cast(functions->get(i)));
-  }
-}
-
-Handle<FixedArray> GetImportsMetadata(Factory* factory,
-                                      const WasmModule* module) {
-  Handle<FixedArray> ret = factory->NewFixedArray(
-      static_cast<int>(module->import_table.size()), TENURED);
-  for (size_t i = 0; i < module->import_table.size(); ++i) {
-    const WasmImport& import = module->import_table[i];
-    WasmName module_name = module->GetNameOrNull(import.module_name_offset,
-                                                 import.module_name_length);
-    WasmName function_name = module->GetNameOrNull(import.function_name_offset,
-                                                   import.function_name_length);
-
-    Handle<String> module_name_string =
-        factory->InternalizeUtf8String(module_name);
-    Handle<String> function_name_string =
-        function_name.is_empty()
-            ? Handle<String>::null()
-            : factory->InternalizeUtf8String(function_name);
-    Handle<ByteArray> sig =
-        factory->NewByteArray(static_cast<int>(import.sig->parameter_count() +
-                                               import.sig->return_count()),
-                              TENURED);
-    sig->copy_in(0, reinterpret_cast<const byte*>(import.sig->raw_data()),
-                 sig->length());
-    Handle<FixedArray> encoded_import =
-        factory->NewFixedArray(kWasmImportDataTableSize, TENURED);
-    encoded_import->set(kModuleName, *module_name_string);
-    if (!function_name_string.is_null()) {
-      encoded_import->set(kFunctionName, *function_name_string);
-    }
-    encoded_import->set(
-        kOutputCount,
-        Smi::FromInt(static_cast<int>(import.sig->return_count())));
-    encoded_import->set(kSignature, *sig);
-    ret->set(static_cast<int>(i), *encoded_import);
-  }
-  return ret;
-}
-
-bool CompileWrappersToImportedFunctions(Isolate* isolate,
-                                        const Handle<JSReceiver> ffi,
-                                        std::vector<Handle<Code>>& imports,
-                                        Handle<FixedArray> import_data,
-                                        ErrorThrower* thrower) {
-  uint32_t import_count = static_cast<uint32_t>(import_data->length());
-  if (import_count > 0) {
-    imports.reserve(import_count);
-    for (uint32_t index = 0; index < import_count; ++index) {
-      Handle<FixedArray> data =
-          import_data->GetValueChecked<FixedArray>(isolate, index);
-      Handle<String> module_name =
-          data->GetValueChecked<String>(isolate, kModuleName);
-      MaybeHandle<String> function_name =
-          data->GetValue<String>(isolate, kFunctionName);
-
-      // TODO(mtrofin): this is an uint32_t, actually. We should rationalize
-      // it when we rationalize signed/unsigned stuff.
-      int ret_count = Smi::cast(data->get(kOutputCount))->value();
-      CHECK(ret_count >= 0);
-      Handle<ByteArray> sig_data =
-          data->GetValueChecked<ByteArray>(isolate, kSignature);
-      int sig_data_size = sig_data->length();
-      int param_count = sig_data_size - ret_count;
-      CHECK(param_count >= 0);
-
-      MaybeHandle<JSReceiver> function = LookupFunction(
-          *thrower, isolate->factory(), ffi, index, module_name, function_name);
-      if (function.is_null()) return false;
-      Handle<Code> code;
-      Handle<JSReceiver> target = function.ToHandleChecked();
-      bool isMatch = false;
-      Handle<Code> export_wrapper_code;
-      if (target->IsJSFunction()) {
-        Handle<JSFunction> func = Handle<JSFunction>::cast(target);
-        export_wrapper_code = handle(func->code());
-        if (export_wrapper_code->kind() == Code::JS_TO_WASM_FUNCTION) {
-          int exported_param_count =
-              Smi::cast(func->GetInternalField(kInternalArity))->value();
-          Handle<ByteArray> exportedSig = Handle<ByteArray>(
-              ByteArray::cast(func->GetInternalField(kInternalSignature)));
-          if (exported_param_count == param_count &&
-              exportedSig->length() == sig_data->length() &&
-              memcmp(exportedSig->data(), sig_data->data(),
-                     exportedSig->length()) == 0) {
-            isMatch = true;
-          }
-        }
+  MaybeHandle<JSReceiver> function = LookupFunction(
+      thrower, isolate->factory(), ffi, index, module_name, function_name);
+  if (function.is_null()) return Handle<Code>::null();
+  Handle<Code> code;
+  Handle<JSReceiver> target = function.ToHandleChecked();
+  bool isMatch = false;
+  Handle<Code> export_wrapper_code;
+  if (target->IsJSFunction()) {
+    Handle<JSFunction> func = Handle<JSFunction>::cast(target);
+    export_wrapper_code = handle(func->code());
+    if (export_wrapper_code->kind() == Code::JS_TO_WASM_FUNCTION) {
+      int exported_param_count =
+          Smi::cast(func->GetInternalField(kInternalArity))->value();
+      Handle<ByteArray> exportedSig = Handle<ByteArray>(
+          ByteArray::cast(func->GetInternalField(kInternalSignature)));
+      if (exported_param_count == param_count &&
+          exportedSig->length() == sig_data->length() &&
+          memcmp(exportedSig->data(), sig_data->data(),
+                 exportedSig->length()) == 0) {
+        isMatch = true;
       }
-      if (isMatch) {
-        int wasm_count = 0;
-        int const mask = RelocInfo::ModeMask(RelocInfo::CODE_TARGET);
-        for (RelocIterator it(*export_wrapper_code, mask); !it.done();
-             it.next()) {
-          RelocInfo* rinfo = it.rinfo();
-          Address target_address = rinfo->target_address();
-          Code* target = Code::GetCodeFromTargetAddress(target_address);
-          if (target->kind() == Code::WASM_FUNCTION) {
-            ++wasm_count;
-            code = handle(target);
-          }
-        }
-        DCHECK(wasm_count == 1);
-      } else {
-        // Copy the signature to avoid a raw pointer into a heap object when
-        // GC can happen.
-        Zone zone(isolate->allocator());
-        MachineRepresentation* reps =
-            zone.NewArray<MachineRepresentation>(sig_data_size);
-        memcpy(reps, sig_data->data(),
-               sizeof(MachineRepresentation) * sig_data_size);
-        FunctionSig sig(ret_count, param_count, reps);
-
-        code = compiler::CompileWasmToJSWrapper(isolate, target, &sig, index,
-                                                module_name, function_name);
-      }
-      imports.push_back(code);
     }
   }
-  return true;
+  if (isMatch) {
+    int wasm_count = 0;
+    int const mask = RelocInfo::ModeMask(RelocInfo::CODE_TARGET);
+    for (RelocIterator it(*export_wrapper_code, mask); !it.done(); it.next()) {
+      RelocInfo* rinfo = it.rinfo();
+      Address target_address = rinfo->target_address();
+      Code* target = Code::GetCodeFromTargetAddress(target_address);
+      if (target->kind() == Code::WASM_FUNCTION) {
+        ++wasm_count;
+        code = handle(target);
+      }
+    }
+    DCHECK(wasm_count == 1);
+    return code;
+  } else {
+    // Copy the signature to avoid a raw pointer into a heap object when
+    // GC can happen.
+    Zone zone(isolate->allocator());
+    MachineRepresentation* reps =
+        zone.NewArray<MachineRepresentation>(sig_data_size);
+    memcpy(reps, sig_data->data(),
+           sizeof(MachineRepresentation) * sig_data_size);
+    FunctionSig sig(ret_count, param_count, reps);
+
+    return compiler::CompileWasmToJSWrapper(isolate, target, &sig, index,
+                                            module_name, function_name);
+  }
 }
 
 void InitializeParallelCompilation(
     Isolate* isolate, const std::vector<WasmFunction>& functions,
     std::vector<compiler::WasmCompilationUnit*>& compilation_units,
-    ModuleEnv& module_env, ErrorThrower& thrower) {
+    ModuleEnv& module_env, ErrorThrower* thrower) {
   for (uint32_t i = FLAG_skip_compiling_wasm_funcs; i < functions.size(); ++i) {
-    compilation_units[i] = new compiler::WasmCompilationUnit(
-        &thrower, isolate, &module_env, &functions[i], i);
+    const WasmFunction* func = &functions[i];
+    compilation_units[i] =
+        func->imported ? nullptr : new compiler::WasmCompilationUnit(
+                                       thrower, isolate, &module_env, func, i);
   }
 }
 
@@ -812,7 +646,7 @@
   // 1) The main thread allocates a compilation unit for each wasm function
   //    and stores them in the vector {compilation_units}.
   InitializeParallelCompilation(isolate, module->functions, compilation_units,
-                                *module_env, *thrower);
+                                *module_env, thrower);
 
   // Objects for the synchronization with the background threads.
   base::Mutex result_mutex;
@@ -853,8 +687,8 @@
   for (uint32_t i = FLAG_skip_compiling_wasm_funcs;
        i < module->functions.size(); ++i) {
     const WasmFunction& func = module->functions[i];
+    if (func.imported) continue;  // Imports are compiled at instantiation time.
 
-    DCHECK_EQ(i, func.func_index);
     WasmName str = module->GetName(func.name_offset, func.name_length);
     Handle<Code> code = Handle<Code>::null();
     // Compile the function.
@@ -870,361 +704,161 @@
   }
 }
 
-void SetDebugSupport(Factory* factory, Handle<FixedArray> compiled_module,
-                     Handle<JSObject> js_object) {
-  Isolate* isolate = compiled_module->GetIsolate();
-  MaybeHandle<String> module_bytes_string =
-      compiled_module->GetValue<String>(isolate, kModuleBytes);
-  if (!module_bytes_string.is_null()) {
-    js_object->SetInternalField(kWasmModuleBytesString,
-                                *module_bytes_string.ToHandleChecked());
+void PatchDirectCalls(Handle<FixedArray> old_functions,
+                      Handle<FixedArray> new_functions, int start) {
+  DCHECK_EQ(new_functions->length(), old_functions->length());
+
+  DisallowHeapAllocation no_gc;
+  std::map<Code*, Code*> old_to_new_code;
+  for (int i = 0; i < new_functions->length(); ++i) {
+    old_to_new_code.insert(std::make_pair(Code::cast(old_functions->get(i)),
+                                          Code::cast(new_functions->get(i))));
   }
-  Handle<FixedArray> functions = Handle<FixedArray>(
-      FixedArray::cast(js_object->GetInternalField(kWasmModuleCodeTable)));
-
-  for (int i = FLAG_skip_compiling_wasm_funcs; i < functions->length(); ++i) {
-    Handle<Code> code = functions->GetValueChecked<Code>(isolate, i);
-    DCHECK(code->deoptimization_data() == nullptr ||
-           code->deoptimization_data()->length() == 0);
-    Handle<FixedArray> deopt_data = factory->NewFixedArray(2, TENURED);
-    if (!js_object.is_null()) {
-      deopt_data->set(0, *js_object);
-    }
-    deopt_data->set(1, Smi::FromInt(static_cast<int>(i)));
-    deopt_data->set_length(2);
-    code->set_deoptimization_data(*deopt_data);
-  }
-
-  MaybeHandle<ByteArray> function_name_table =
-      compiled_module->GetValue<ByteArray>(isolate, kFunctionNameTable);
-  if (!function_name_table.is_null()) {
-    js_object->SetInternalField(kWasmFunctionNamesArray,
-                                *function_name_table.ToHandleChecked());
-  }
-}
-
-bool SetupGlobals(Isolate* isolate, Handle<FixedArray> compiled_module,
-                  Handle<JSObject> instance, ErrorThrower* thrower) {
-  uint32_t globals_size = static_cast<uint32_t>(
-      Smi::cast(compiled_module->get(kGlobalsSize))->value());
-  if (globals_size > 0) {
-    Handle<JSArrayBuffer> globals_buffer =
-        NewArrayBuffer(isolate, globals_size);
-    if (globals_buffer.is_null()) {
-      thrower->Error("Out of memory: wasm globals");
-      return false;
-    }
-    RelocateGlobals(instance,
-                    static_cast<Address>(globals_buffer->backing_store()));
-    instance->SetInternalField(kWasmGlobalsArrayBuffer, *globals_buffer);
-  }
-  return true;
-}
-
-bool SetupInstanceHeap(Isolate* isolate, Handle<FixedArray> compiled_module,
-                       Handle<JSObject> instance, Handle<JSArrayBuffer> memory,
-                       ErrorThrower* thrower) {
-  uint32_t min_mem_pages = static_cast<uint32_t>(
-      Smi::cast(compiled_module->get(kMinRequiredMemory))->value());
-  isolate->counters()->wasm_min_mem_pages_count()->AddSample(min_mem_pages);
-  // TODO(wasm): re-enable counter for max_mem_pages when we use that field.
-
-  if (memory.is_null() && min_mem_pages > 0) {
-    memory = AllocateMemory(thrower, isolate, min_mem_pages);
-    if (memory.is_null()) {
-      return false;
-    }
-  }
-
-  if (!memory.is_null()) {
-    instance->SetInternalField(kWasmMemArrayBuffer, *memory);
-    Address mem_start = static_cast<Address>(memory->backing_store());
-    uint32_t mem_size = static_cast<uint32_t>(memory->byte_length()->Number());
-    RelocateInstanceCode(instance, mem_start,
-                         WasmModule::kPageSize * min_mem_pages, mem_size);
-    LoadDataSegments(compiled_module, mem_start, mem_size);
-  }
-  return true;
-}
-
-bool SetupImports(Isolate* isolate, Handle<FixedArray> compiled_module,
-                  Handle<JSObject> instance, ErrorThrower* thrower,
-                  Handle<JSReceiver> ffi) {
-  //-------------------------------------------------------------------------
-  // Compile wrappers to imported functions.
-  //-------------------------------------------------------------------------
-  std::vector<Handle<Code>> import_code;
-  MaybeHandle<FixedArray> maybe_import_data =
-      compiled_module->GetValue<FixedArray>(isolate, kImportData);
-  Handle<FixedArray> import_data;
-  if (maybe_import_data.ToHandle(&import_data)) {
-    if (!CompileWrappersToImportedFunctions(isolate, ffi, import_code,
-                                            import_data, thrower)) {
-      return false;
-    }
-  }
-
-  RecordStats(isolate, import_code);
-
-  Handle<FixedArray> code_table = Handle<FixedArray>(
-      FixedArray::cast(instance->GetInternalField(kWasmModuleCodeTable)));
-  // TODO(mtrofin): get the code off std::vector and on FixedArray, for
-  // consistency.
-  std::vector<Handle<Code>> function_code(code_table->length());
-  for (int i = 0; i < code_table->length(); ++i) {
-    Handle<Code> code = Handle<Code>(Code::cast(code_table->get(i)));
-    function_code[i] = code;
-  }
-
-  LinkImports(isolate, function_code, import_code);
-  return true;
-}
-
-bool SetupExportsObject(Handle<FixedArray> compiled_module, Isolate* isolate,
-                        Handle<JSObject> instance, ErrorThrower* thrower) {
-  Factory* factory = isolate->factory();
-  bool mem_export =
-      static_cast<bool>(Smi::cast(compiled_module->get(kExportMem))->value());
-  ModuleOrigin origin = static_cast<ModuleOrigin>(
-      Smi::cast(compiled_module->get(kOrigin))->value());
-
-  MaybeHandle<FixedArray> maybe_exports =
-      compiled_module->GetValue<FixedArray>(isolate, kExports);
-  if (!maybe_exports.is_null() || mem_export) {
-    PropertyDescriptor desc;
-    desc.set_writable(false);
-
-    Handle<JSObject> exports_object = instance;
-    if (origin == kWasmOrigin) {
-      // Create the "exports" object.
-      Handle<JSFunction> object_function = Handle<JSFunction>(
-          isolate->native_context()->object_function(), isolate);
-      exports_object = factory->NewJSObject(object_function, TENURED);
-      Handle<String> exports_name = factory->InternalizeUtf8String("exports");
-      JSObject::AddProperty(instance, exports_name, exports_object, READ_ONLY);
-    }
-    Handle<FixedArray> exports;
-    if (maybe_exports.ToHandle(&exports)) {
-      int exports_size = exports->length();
-      for (int i = 0; i < exports_size; ++i) {
-        if (thrower->error()) return false;
-        Handle<FixedArray> export_metadata =
-            exports->GetValueChecked<FixedArray>(isolate, i);
-        Handle<Code> export_code =
-            export_metadata->GetValueChecked<Code>(isolate, kExportCode);
-        RecordStats(isolate, *export_code);
-        Handle<String> name =
-            export_metadata->GetValueChecked<String>(isolate, kExportName);
-        int arity = Smi::cast(export_metadata->get(kExportArity))->value();
-        MaybeHandle<ByteArray> signature =
-            export_metadata->GetValue<ByteArray>(isolate, kExportedSignature);
-        Handle<JSFunction> function = WrapExportCodeAsJSFunction(
-            isolate, export_code, name, arity, signature, instance);
-        desc.set_value(function);
-        Maybe<bool> status = JSReceiver::DefineOwnProperty(
-            isolate, exports_object, name, &desc, Object::THROW_ON_ERROR);
-        if (!status.IsJust()) {
-          thrower->Error("export of %.*s failed.", name->length(),
-                         name->ToCString().get());
-          return false;
+  int mode_mask = RelocInfo::ModeMask(RelocInfo::CODE_TARGET);
+  AllowDeferredHandleDereference embedding_raw_address;
+  for (int i = start; i < new_functions->length(); ++i) {
+    Code* wasm_function = Code::cast(new_functions->get(i));
+    for (RelocIterator it(wasm_function, mode_mask); !it.done(); it.next()) {
+      Code* old_code =
+          Code::GetCodeFromTargetAddress(it.rinfo()->target_address());
+      if (old_code->kind() == Code::WASM_TO_JS_FUNCTION ||
+          old_code->kind() == Code::WASM_FUNCTION) {
+        auto found = old_to_new_code.find(old_code);
+        DCHECK(found != old_to_new_code.end());
+        Code* new_code = found->second;
+        if (new_code != old_code) {
+          it.rinfo()->set_target_address(new_code->instruction_start(),
+                                         UPDATE_WRITE_BARRIER,
+                                         SKIP_ICACHE_FLUSH);
         }
       }
     }
-    if (mem_export) {
-      // Export the memory as a named property.
-      Handle<String> name = factory->InternalizeUtf8String("memory");
-      Handle<JSArrayBuffer> memory = Handle<JSArrayBuffer>(
-          JSArrayBuffer::cast(instance->GetInternalField(kWasmMemArrayBuffer)));
-      JSObject::AddProperty(exports_object, name, memory, READ_ONLY);
-    }
   }
-  return true;
 }
 
-}  // namespace
+static void ResetCompiledModule(Isolate* isolate, JSObject* owner,
+                                WasmCompiledModule* compiled_module) {
+  TRACE("Resetting %d\n", compiled_module->instance_id());
+  Object* undefined = *isolate->factory()->undefined_value();
+  uint32_t old_mem_size = compiled_module->has_heap()
+                              ? compiled_module->mem_size()
+                              : compiled_module->default_mem_size();
+  uint32_t default_mem_size = compiled_module->default_mem_size();
+  Object* mem_start = compiled_module->ptr_to_heap();
+  Address old_mem_address = nullptr;
+  Address globals_start =
+      GetGlobalStartAddressFromCodeTemplate(undefined, owner);
 
-MaybeHandle<FixedArray> WasmModule::CompileFunctions(
-    Isolate* isolate, ErrorThrower* thrower) const {
-  Factory* factory = isolate->factory();
-
-  MaybeHandle<FixedArray> nothing;
-
-  WasmModuleInstance temp_instance_for_compilation(this);
-  temp_instance_for_compilation.context = isolate->native_context();
-  temp_instance_for_compilation.mem_size = GetMinModuleMemSize(this);
-  temp_instance_for_compilation.mem_start = nullptr;
-  temp_instance_for_compilation.globals_start = nullptr;
-
-  MaybeHandle<FixedArray> indirect_table =
-      function_tables.size()
-          ? factory->NewFixedArray(static_cast<int>(function_tables.size()),
-                                   TENURED)
-          : MaybeHandle<FixedArray>();
-  for (uint32_t i = 0; i < function_tables.size(); ++i) {
-    Handle<FixedArray> values = wasm::BuildFunctionTable(isolate, i, this);
-    temp_instance_for_compilation.function_tables[i] = values;
-
-    Handle<FixedArray> metadata = isolate->factory()->NewFixedArray(
-        kWasmIndirectFunctionTableMetadataSize, TENURED);
-    metadata->set(kSize, Smi::FromInt(function_tables[i].size));
-    metadata->set(kTable, *values);
-    indirect_table.ToHandleChecked()->set(i, *metadata);
+  if (old_mem_size > 0) {
+    CHECK_NE(mem_start, undefined);
+    old_mem_address =
+        static_cast<Address>(JSArrayBuffer::cast(mem_start)->backing_store());
   }
+  int mode_mask = RelocInfo::ModeMask(RelocInfo::WASM_MEMORY_REFERENCE) |
+                  RelocInfo::ModeMask(RelocInfo::WASM_MEMORY_SIZE_REFERENCE) |
+                  RelocInfo::ModeMask(RelocInfo::WASM_GLOBAL_REFERENCE);
 
-  HistogramTimerScope wasm_compile_module_time_scope(
-      isolate->counters()->wasm_compile_module_time());
-
-  ModuleEnv module_env;
-  module_env.module = this;
-  module_env.instance = &temp_instance_for_compilation;
-  module_env.origin = origin;
-  InitializePlaceholders(factory, &module_env.placeholders, functions.size());
-
-  Handle<FixedArray> compiled_functions =
-      factory->NewFixedArray(static_cast<int>(functions.size()), TENURED);
-
-  temp_instance_for_compilation.import_code.resize(import_table.size());
-  for (uint32_t i = 0; i < import_table.size(); ++i) {
-    temp_instance_for_compilation.import_code[i] =
-        CreatePlaceholder(factory, i, Code::WASM_TO_JS_FUNCTION);
-  }
-  isolate->counters()->wasm_functions_per_module()->AddSample(
-      static_cast<int>(functions.size()));
-  if (FLAG_wasm_num_compilation_tasks != 0) {
-    CompileInParallel(isolate, this,
-                      temp_instance_for_compilation.function_code, thrower,
-                      &module_env);
-  } else {
-    CompileSequentially(isolate, this,
-                        temp_instance_for_compilation.function_code, thrower,
-                        &module_env);
-  }
-  if (thrower->error()) return nothing;
-
-  // At this point, compilation has completed. Update the code table.
-  for (size_t i = FLAG_skip_compiling_wasm_funcs;
-       i < temp_instance_for_compilation.function_code.size(); ++i) {
-    Code* code = *temp_instance_for_compilation.function_code[i];
-    compiled_functions->set(static_cast<int>(i), code);
-  }
-
-  // Create the compiled module object, and populate with compiled functions
-  // and information needed at instantiation time. This object needs to be
-  // serializable. Instantiation may occur off a deserialized version of this
-  // object.
-  Handle<FixedArray> ret =
-      factory->NewFixedArray(kCompiledWasmObjectTableSize, TENURED);
-  ret->set(kFunctions, *compiled_functions);
-  if (!indirect_table.is_null()) {
-    ret->set(kTableOfIndirectFunctionTables, *indirect_table.ToHandleChecked());
-  }
-  Handle<FixedArray> import_data = GetImportsMetadata(factory, this);
-  ret->set(kImportData, *import_data);
-
-  // Compile export functions.
-  int export_size = static_cast<int>(export_table.size());
-  Handle<Code> startup_fct;
-  if (export_size > 0) {
-    Handle<FixedArray> exports = factory->NewFixedArray(export_size, TENURED);
-    for (int i = 0; i < export_size; ++i) {
-      Handle<FixedArray> export_metadata =
-          factory->NewFixedArray(kWasmExportMetadataTableSize, TENURED);
-      const WasmExport& exp = export_table[i];
-      FunctionSig* funcSig = functions[exp.func_index].sig;
-      Handle<ByteArray> exportedSig =
-          factory->NewByteArray(static_cast<int>(funcSig->parameter_count() +
-                                                 funcSig->return_count()),
-                                TENURED);
-      exportedSig->copy_in(0,
-                           reinterpret_cast<const byte*>(funcSig->raw_data()),
-                           exportedSig->length());
-      export_metadata->set(kExportedSignature, *exportedSig);
-      WasmName str = GetName(exp.name_offset, exp.name_length);
-      Handle<String> name = factory->InternalizeUtf8String(str);
-      Handle<Code> code =
-          temp_instance_for_compilation.function_code[exp.func_index];
-      Handle<Code> export_code = compiler::CompileJSToWasmWrapper(
-          isolate, &module_env, code, exp.func_index);
-      if (thrower->error()) return nothing;
-      export_metadata->set(kExportCode, *export_code);
-      export_metadata->set(kExportName, *name);
-      export_metadata->set(
-          kExportArity, Smi::FromInt(static_cast<int>(
-                            functions[exp.func_index].sig->parameter_count())));
-      export_metadata->set(kExportedFunctionIndex,
-                           Smi::FromInt(static_cast<int>(exp.func_index)));
-      exports->set(i, *export_metadata);
-      if (exp.func_index == start_function_index) {
-        startup_fct = export_code;
+  Object* fct_obj = compiled_module->ptr_to_code_table();
+  if (fct_obj != nullptr && fct_obj != undefined &&
+      (old_mem_size > 0 || globals_start != nullptr)) {
+    FixedArray* functions = FixedArray::cast(fct_obj);
+    for (int i = 0; i < functions->length(); ++i) {
+      Code* code = Code::cast(functions->get(i));
+      bool changed = false;
+      for (RelocIterator it(code, mode_mask); !it.done(); it.next()) {
+        RelocInfo::Mode mode = it.rinfo()->rmode();
+        if (RelocInfo::IsWasmMemoryReference(mode) ||
+            RelocInfo::IsWasmMemorySizeReference(mode)) {
+          it.rinfo()->update_wasm_memory_reference(
+              old_mem_address, nullptr, old_mem_size, default_mem_size);
+          changed = true;
+        } else {
+          CHECK(RelocInfo::IsWasmGlobalReference(mode));
+          it.rinfo()->update_wasm_global_reference(globals_start, nullptr);
+          changed = true;
+        }
+      }
+      if (changed) {
+        Assembler::FlushICache(isolate, code->instruction_start(),
+                               code->instruction_size());
       }
     }
-    ret->set(kExports, *exports);
   }
-
-  // Compile startup function, if we haven't already.
-  if (start_function_index >= 0) {
-    uint32_t index = static_cast<uint32_t>(start_function_index);
-    HandleScope scope(isolate);
-    if (startup_fct.is_null()) {
-      Handle<Code> code = temp_instance_for_compilation.function_code[index];
-      DCHECK_EQ(0, functions[index].sig->parameter_count());
-      startup_fct =
-          compiler::CompileJSToWasmWrapper(isolate, &module_env, code, index);
-    }
-    Handle<FixedArray> metadata =
-        factory->NewFixedArray(kWasmExportMetadataTableSize, TENURED);
-    metadata->set(kExportCode, *startup_fct);
-    metadata->set(kExportArity, Smi::FromInt(0));
-    metadata->set(kExportedFunctionIndex, Smi::FromInt(start_function_index));
-    ret->set(kStartupFunction, *metadata);
-  }
-
-  // TODO(wasm): saving the module bytes for debugging is wasteful. We should
-  // consider downloading this on-demand.
-  {
-    size_t module_bytes_len = module_end - module_start;
-    DCHECK_LE(module_bytes_len, static_cast<size_t>(kMaxInt));
-    Vector<const uint8_t> module_bytes_vec(module_start,
-                                           static_cast<int>(module_bytes_len));
-    Handle<String> module_bytes_string =
-        factory->NewStringFromOneByte(module_bytes_vec, TENURED)
-            .ToHandleChecked();
-    ret->set(kModuleBytes, *module_bytes_string);
-  }
-
-  Handle<ByteArray> function_name_table =
-      BuildFunctionNamesTable(isolate, module_env.module);
-  ret->set(kFunctionNameTable, *function_name_table);
-  ret->set(kMinRequiredMemory, Smi::FromInt(min_mem_pages));
-  if (data_segments.size() > 0) SaveDataSegmentInfo(factory, this, ret);
-  ret->set(kGlobalsSize, Smi::FromInt(globals_size));
-  ret->set(kExportMem, Smi::FromInt(mem_export));
-  ret->set(kOrigin, Smi::FromInt(origin));
-  return ret;
+  compiled_module->reset_heap();
 }
 
-void PatchJSWrapper(Isolate* isolate, Handle<Code> wrapper,
-                    Handle<Code> new_target) {
-  AllowDeferredHandleDereference embedding_raw_address;
-  bool seen = false;
-  for (RelocIterator it(*wrapper, 1 << RelocInfo::CODE_TARGET); !it.done();
-       it.next()) {
-    Code* target = Code::GetCodeFromTargetAddress(it.rinfo()->target_address());
-    if (target->kind() == Code::WASM_FUNCTION) {
-      DCHECK(!seen);
-      seen = true;
-      it.rinfo()->set_target_address(new_target->instruction_start(),
-                                     UPDATE_WRITE_BARRIER, SKIP_ICACHE_FLUSH);
+static void InstanceFinalizer(const v8::WeakCallbackInfo<void>& data) {
+  JSObject** p = reinterpret_cast<JSObject**>(data.GetParameter());
+  JSObject* owner = *p;
+  WasmCompiledModule* compiled_module =
+      WasmCompiledModule::cast(owner->GetInternalField(kWasmCompiledModule));
+  TRACE("Finalizing %d {\n", compiled_module->instance_id());
+  Isolate* isolate = reinterpret_cast<Isolate*>(data.GetIsolate());
+  DCHECK(compiled_module->has_weak_module_object());
+  WeakCell* weak_module_obj = compiled_module->ptr_to_weak_module_object();
+
+  // weak_module_obj may have been cleared, meaning the module object
+  // was GC-ed. In that case, there won't be any new instances created,
+  // and we don't need to maintain the links between instances.
+  if (!weak_module_obj->cleared()) {
+    JSObject* module_obj = JSObject::cast(weak_module_obj->value());
+    WasmCompiledModule* current_template =
+        WasmCompiledModule::cast(module_obj->GetInternalField(0));
+
+    TRACE("chain before {\n");
+    TRACE_CHAIN(current_template);
+    TRACE("}\n");
+
+    DCHECK(!current_template->has_weak_prev_instance());
+    WeakCell* next = compiled_module->ptr_to_weak_next_instance();
+    WeakCell* prev = compiled_module->ptr_to_weak_prev_instance();
+
+    if (current_template == compiled_module) {
+      if (next == nullptr) {
+        ResetCompiledModule(isolate, owner, compiled_module);
+      } else {
+        DCHECK(next->value()->IsFixedArray());
+        module_obj->SetInternalField(0, next->value());
+        DCHECK_NULL(prev);
+        WasmCompiledModule::cast(next->value())->reset_weak_prev_instance();
+      }
+    } else {
+      DCHECK(!(prev == nullptr && next == nullptr));
+      // the only reason prev or next would be cleared is if the
+      // respective objects got collected, but if that happened,
+      // we would have relinked the list.
+      if (prev != nullptr) {
+        DCHECK(!prev->cleared());
+        if (next == nullptr) {
+          WasmCompiledModule::cast(prev->value())->reset_weak_next_instance();
+        } else {
+          WasmCompiledModule::cast(prev->value())
+              ->set_ptr_to_weak_next_instance(next);
+        }
+      }
+      if (next != nullptr) {
+        DCHECK(!next->cleared());
+        if (prev == nullptr) {
+          WasmCompiledModule::cast(next->value())->reset_weak_prev_instance();
+        } else {
+          WasmCompiledModule::cast(next->value())
+              ->set_ptr_to_weak_prev_instance(prev);
+        }
+      }
     }
+    TRACE("chain after {\n");
+    TRACE_CHAIN(WasmCompiledModule::cast(module_obj->GetInternalField(0)));
+    TRACE("}\n");
   }
-  CHECK(seen);
-  Assembler::FlushICache(isolate, wrapper->instruction_start(),
-                         wrapper->instruction_size());
+  compiled_module->reset_weak_owning_instance();
+  GlobalHandles::Destroy(reinterpret_cast<Object**>(p));
+  TRACE("}\n");
 }
 
 Handle<FixedArray> SetupIndirectFunctionTable(
     Isolate* isolate, Handle<FixedArray> wasm_functions,
-    Handle<FixedArray> indirect_table_template) {
+    Handle<FixedArray> indirect_table_template,
+    Handle<FixedArray> tables_to_replace) {
   Factory* factory = isolate->factory();
   Handle<FixedArray> cloned_indirect_tables =
       factory->CopyFixedArray(indirect_table_template);
@@ -1239,163 +873,633 @@
     Handle<FixedArray> cloned_table = factory->CopyFixedArray(orig_table);
     cloned_metadata->set(kTable, *cloned_table);
     // Patch the cloned code to refer to the cloned kTable.
-    for (int i = 0; i < wasm_functions->length(); ++i) {
+    Handle<FixedArray> table_to_replace =
+        tables_to_replace->GetValueChecked<FixedArray>(isolate, i)
+            ->GetValueChecked<FixedArray>(isolate, kTable);
+    for (int fct_index = 0; fct_index < wasm_functions->length(); ++fct_index) {
       Handle<Code> wasm_function =
-          wasm_functions->GetValueChecked<Code>(isolate, i);
-      PatchFunctionTable(wasm_function, orig_table, cloned_table);
+          wasm_functions->GetValueChecked<Code>(isolate, fct_index);
+      PatchFunctionTable(wasm_function, table_to_replace, cloned_table);
     }
   }
   return cloned_indirect_tables;
 }
 
-Handle<FixedArray> CloneModuleForInstance(Isolate* isolate,
-                                          Handle<FixedArray> original) {
-  Factory* factory = isolate->factory();
-  Handle<FixedArray> clone = factory->CopyFixedArray(original);
+}  // namespace
 
-  // Clone each wasm code object.
-  Handle<FixedArray> orig_wasm_functions =
-      original->GetValueChecked<FixedArray>(isolate, kFunctions);
-  Handle<FixedArray> clone_wasm_functions =
-      factory->CopyFixedArray(orig_wasm_functions);
-  clone->set(kFunctions, *clone_wasm_functions);
-  for (int i = 0; i < clone_wasm_functions->length(); ++i) {
-    Handle<Code> orig_code =
-        clone_wasm_functions->GetValueChecked<Code>(isolate, i);
-    Handle<Code> cloned_code = factory->CopyCode(orig_code);
-    clone_wasm_functions->set(i, *cloned_code);
+const char* SectionName(WasmSectionCode code) {
+  switch (code) {
+    case kUnknownSectionCode:
+      return "Unknown";
+    case kTypeSectionCode:
+      return "Type";
+    case kImportSectionCode:
+      return "Import";
+    case kFunctionSectionCode:
+      return "Function";
+    case kTableSectionCode:
+      return "Table";
+    case kMemorySectionCode:
+      return "Memory";
+    case kGlobalSectionCode:
+      return "Global";
+    case kExportSectionCode:
+      return "Export";
+    case kStartSectionCode:
+      return "Start";
+    case kCodeSectionCode:
+      return "Code";
+    case kElementSectionCode:
+      return "Element";
+    case kDataSectionCode:
+      return "Data";
+    case kNameSectionCode:
+      return "Name";
+    default:
+      return "<unknown>";
+  }
+}
+
+std::ostream& operator<<(std::ostream& os, const WasmModule& module) {
+  os << "WASM module with ";
+  os << (module.min_mem_pages * module.kPageSize) << " min mem";
+  os << (module.max_mem_pages * module.kPageSize) << " max mem";
+  os << module.functions.size() << " functions";
+  os << module.globals.size() << " globals";
+  os << module.data_segments.size() << " data segments";
+  return os;
+}
+
+std::ostream& operator<<(std::ostream& os, const WasmFunction& function) {
+  os << "WASM function with signature " << *function.sig;
+
+  os << " code bytes: "
+     << (function.code_end_offset - function.code_start_offset);
+  return os;
+}
+
+std::ostream& operator<<(std::ostream& os, const WasmFunctionName& pair) {
+  os << "#" << pair.function_->func_index << ":";
+  if (pair.function_->name_offset > 0) {
+    if (pair.module_) {
+      WasmName name = pair.module_->GetName(pair.function_->name_offset,
+                                            pair.function_->name_length);
+      os.write(name.start(), name.length());
+    } else {
+      os << "+" << pair.function_->func_index;
+    }
+  } else {
+    os << "?";
+  }
+  return os;
+}
+
+Handle<JSFunction> WrapExportCodeAsJSFunction(
+    Isolate* isolate, Handle<Code> export_code, Handle<String> name, int arity,
+    MaybeHandle<ByteArray> maybe_signature, Handle<JSObject> module_instance) {
+  Handle<SharedFunctionInfo> shared =
+      isolate->factory()->NewSharedFunctionInfo(name, export_code, false);
+  shared->set_length(arity);
+  shared->set_internal_formal_parameter_count(arity);
+  Handle<JSFunction> function = isolate->factory()->NewFunction(
+      isolate->wasm_function_map(), name, export_code);
+  function->set_shared(*shared);
+
+  function->SetInternalField(kInternalModuleInstance, *module_instance);
+  // add another Internal Field as the function arity
+  function->SetInternalField(kInternalArity, Smi::FromInt(arity));
+  // add another Internal Field as the signature of the foreign function
+  Handle<ByteArray> signature;
+  if (maybe_signature.ToHandle(&signature)) {
+    function->SetInternalField(kInternalSignature, *signature);
+  }
+  return function;
+}
+
+Object* GetOwningWasmInstance(Code* code) {
+  DCHECK(code->kind() == Code::WASM_FUNCTION);
+  DisallowHeapAllocation no_gc;
+  FixedArray* deopt_data = code->deoptimization_data();
+  DCHECK_NOT_NULL(deopt_data);
+  DCHECK(deopt_data->length() == 2);
+  Object* weak_link = deopt_data->get(0);
+  if (!weak_link->IsWeakCell()) return nullptr;
+  WeakCell* cell = WeakCell::cast(weak_link);
+  return cell->value();
+}
+
+uint32_t GetNumImportedFunctions(Handle<JSObject> wasm_object) {
+  return static_cast<uint32_t>(
+      Smi::cast(wasm_object->GetInternalField(kWasmNumImportedFunctions))
+          ->value());
+}
+
+WasmModule::WasmModule(byte* module_start)
+    : module_start(module_start),
+      module_end(nullptr),
+      min_mem_pages(0),
+      max_mem_pages(0),
+      mem_export(false),
+      start_function_index(-1),
+      origin(kWasmOrigin),
+      globals_size(0),
+      num_imported_functions(0),
+      num_declared_functions(0),
+      num_exported_functions(0),
+      pending_tasks(new base::Semaphore(0)) {}
+
+MaybeHandle<WasmCompiledModule> WasmModule::CompileFunctions(
+    Isolate* isolate, ErrorThrower* thrower) const {
+  Factory* factory = isolate->factory();
+
+  MaybeHandle<WasmCompiledModule> nothing;
+
+  WasmModuleInstance temp_instance(this);
+  temp_instance.context = isolate->native_context();
+  temp_instance.mem_size = GetMinModuleMemSize(this);
+  temp_instance.mem_start = nullptr;
+  temp_instance.globals_start = nullptr;
+
+  MaybeHandle<FixedArray> indirect_table =
+      function_tables.size()
+          ? factory->NewFixedArray(static_cast<int>(function_tables.size()),
+                                   TENURED)
+          : MaybeHandle<FixedArray>();
+  for (uint32_t i = 0; i < function_tables.size(); ++i) {
+    Handle<FixedArray> values = wasm::BuildFunctionTable(isolate, i, this);
+    temp_instance.function_tables[i] = values;
+
+    Handle<FixedArray> metadata = isolate->factory()->NewFixedArray(
+        kWasmIndirectFunctionTableDataSize, TENURED);
+    metadata->set(kSize, Smi::FromInt(function_tables[i].size));
+    metadata->set(kTable, *values);
+    indirect_table.ToHandleChecked()->set(i, *metadata);
   }
 
-  MaybeHandle<FixedArray> maybe_orig_exports =
-      original->GetValue<FixedArray>(isolate, kExports);
-  Handle<FixedArray> orig_exports;
-  if (maybe_orig_exports.ToHandle(&orig_exports)) {
-    Handle<FixedArray> cloned_exports = factory->CopyFixedArray(orig_exports);
-    clone->set(kExports, *cloned_exports);
-    for (int i = 0; i < orig_exports->length(); ++i) {
-      Handle<FixedArray> export_metadata =
-          orig_exports->GetValueChecked<FixedArray>(isolate, i);
-      Handle<FixedArray> clone_metadata =
-          factory->CopyFixedArray(export_metadata);
-      cloned_exports->set(i, *clone_metadata);
-      Handle<Code> orig_code =
-          export_metadata->GetValueChecked<Code>(isolate, kExportCode);
-      Handle<Code> cloned_code = factory->CopyCode(orig_code);
-      clone_metadata->set(kExportCode, *cloned_code);
-      // TODO(wasm): This is actually a uint32_t, but since FixedArray indexes
-      // in int, we are taking the risk of invalid values.
-      int exported_fct_index =
-          Smi::cast(export_metadata->get(kExportedFunctionIndex))->value();
-      CHECK_GE(exported_fct_index, 0);
-      CHECK_LT(exported_fct_index, clone_wasm_functions->length());
-      Handle<Code> new_target = clone_wasm_functions->GetValueChecked<Code>(
-          isolate, exported_fct_index);
-      PatchJSWrapper(isolate, cloned_code, new_target);
+  HistogramTimerScope wasm_compile_module_time_scope(
+      isolate->counters()->wasm_compile_module_time());
+
+  ModuleEnv module_env;
+  module_env.module = this;
+  module_env.instance = &temp_instance;
+  module_env.origin = origin;
+
+  // The {code_table} array contains import wrappers and functions (which
+  // are both included in {functions.size()}), and export wrappers.
+  int code_table_size =
+      static_cast<int>(functions.size() + num_exported_functions);
+  Handle<FixedArray> code_table =
+      factory->NewFixedArray(static_cast<int>(code_table_size), TENURED);
+
+  // Initialize the code table with placeholders.
+  for (uint32_t i = 0; i < functions.size(); i++) {
+    Code::Kind kind = Code::WASM_FUNCTION;
+    if (i < num_imported_functions) kind = Code::WASM_TO_JS_FUNCTION;
+    Handle<Code> placeholder = CreatePlaceholder(factory, i, kind);
+    code_table->set(static_cast<int>(i), *placeholder);
+    temp_instance.function_code[i] = placeholder;
+  }
+
+  isolate->counters()->wasm_functions_per_module()->AddSample(
+      static_cast<int>(functions.size()));
+  if (!FLAG_trace_wasm_decoder && FLAG_wasm_num_compilation_tasks != 0) {
+    // Avoid a race condition by collecting results into a second vector.
+    std::vector<Handle<Code>> results;
+    results.reserve(temp_instance.function_code.size());
+    for (size_t i = 0; i < temp_instance.function_code.size(); i++) {
+      results.push_back(temp_instance.function_code[i]);
+    }
+    CompileInParallel(isolate, this, results, thrower, &module_env);
+
+    for (size_t i = 0; i < results.size(); i++) {
+      temp_instance.function_code[i] = results[i];
+    }
+  } else {
+    CompileSequentially(isolate, this, temp_instance.function_code, thrower,
+                        &module_env);
+  }
+  if (thrower->error()) return nothing;
+
+  // At this point, compilation has completed. Update the code table.
+  for (size_t i = FLAG_skip_compiling_wasm_funcs;
+       i < temp_instance.function_code.size(); ++i) {
+    Code* code = *temp_instance.function_code[i];
+    code_table->set(static_cast<int>(i), code);
+  }
+
+  // Link the functions in the module.
+  for (size_t i = FLAG_skip_compiling_wasm_funcs;
+       i < temp_instance.function_code.size(); ++i) {
+    Handle<Code> code = temp_instance.function_code[i];
+    bool modified = LinkFunction(code, temp_instance.function_code);
+    if (modified) {
+      // TODO(mtrofin): do we need to flush the cache here?
+      Assembler::FlushICache(isolate, code->instruction_start(),
+                             code->instruction_size());
     }
   }
 
-  MaybeHandle<FixedArray> maybe_startup =
-      original->GetValue<FixedArray>(isolate, kStartupFunction);
-  if (!maybe_startup.is_null()) {
-    Handle<FixedArray> startup_metadata =
-        factory->CopyFixedArray(maybe_startup.ToHandleChecked());
-    Handle<Code> startup_fct_clone = factory->CopyCode(
-        startup_metadata->GetValueChecked<Code>(isolate, kExportCode));
-    startup_metadata->set(kExportCode, *startup_fct_clone);
-    clone->set(kStartupFunction, *startup_metadata);
-    // TODO(wasm): see todo above about int vs size_t indexing in FixedArray.
-    int startup_fct_index =
-        Smi::cast(startup_metadata->get(kExportedFunctionIndex))->value();
-    CHECK_GE(startup_fct_index, 0);
-    CHECK_LT(startup_fct_index, clone_wasm_functions->length());
-    Handle<Code> new_target =
-        clone_wasm_functions->GetValueChecked<Code>(isolate, startup_fct_index);
-    PatchJSWrapper(isolate, startup_fct_clone, new_target);
+  // Create the compiled module object, and populate with compiled functions
+  // and information needed at instantiation time. This object needs to be
+  // serializable. Instantiation may occur off a deserialized version of this
+  // object.
+  Handle<WasmCompiledModule> ret = WasmCompiledModule::New(
+      isolate, min_mem_pages, globals_size, mem_export, origin);
+  ret->set_code_table(code_table);
+  if (!indirect_table.is_null()) {
+    ret->set_indirect_function_tables(indirect_table.ToHandleChecked());
   }
-  return clone;
+  Handle<FixedArray> import_data = GetImportsData(factory, this);
+  ret->set_import_data(import_data);
+
+  // Compile exported function wrappers.
+  int export_size = static_cast<int>(num_exported_functions);
+  if (export_size > 0) {
+    Handle<FixedArray> exports = factory->NewFixedArray(export_size, TENURED);
+    int index = -1;
+
+    for (const WasmExport& exp : export_table) {
+      if (exp.kind != kExternalFunction)
+        continue;  // skip non-function exports.
+      index++;
+      Handle<FixedArray> export_data =
+          factory->NewFixedArray(kWasmExportDataSize, TENURED);
+      FunctionSig* funcSig = functions[exp.index].sig;
+      Handle<ByteArray> exportedSig =
+          factory->NewByteArray(static_cast<int>(funcSig->parameter_count() +
+                                                 funcSig->return_count()),
+                                TENURED);
+      exportedSig->copy_in(0,
+                           reinterpret_cast<const byte*>(funcSig->raw_data()),
+                           exportedSig->length());
+      export_data->set(kExportedSignature, *exportedSig);
+      WasmName str = GetName(exp.name_offset, exp.name_length);
+      Handle<String> name = factory->InternalizeUtf8String(str);
+      Handle<Code> code = code_table->GetValueChecked<Code>(isolate, exp.index);
+      Handle<Code> export_code = compiler::CompileJSToWasmWrapper(
+          isolate, &module_env, code, exp.index);
+      if (thrower->error()) return nothing;
+      export_data->set(kExportName, *name);
+      export_data->set(kExportArity,
+                       Smi::FromInt(static_cast<int>(
+                           functions[exp.index].sig->parameter_count())));
+      export_data->set(kExportedFunctionIndex,
+                       Smi::FromInt(static_cast<int>(exp.index)));
+      exports->set(index, *export_data);
+      code_table->set(static_cast<int>(functions.size() + index), *export_code);
+    }
+    ret->set_exports(exports);
+  }
+
+  // Record data for startup function.
+  if (start_function_index >= 0) {
+    HandleScope scope(isolate);
+    Handle<FixedArray> startup_data =
+        factory->NewFixedArray(kWasmExportDataSize, TENURED);
+    startup_data->set(kExportArity, Smi::FromInt(0));
+    startup_data->set(kExportedFunctionIndex,
+                      Smi::FromInt(start_function_index));
+    ret->set_startup_function(startup_data);
+  }
+
+  // TODO(wasm): saving the module bytes for debugging is wasteful. We should
+  // consider downloading this on-demand.
+  {
+    size_t module_bytes_len = module_end - module_start;
+    DCHECK_LE(module_bytes_len, static_cast<size_t>(kMaxInt));
+    Vector<const uint8_t> module_bytes_vec(module_start,
+                                           static_cast<int>(module_bytes_len));
+    Handle<String> module_bytes_string =
+        factory->NewStringFromOneByte(module_bytes_vec, TENURED)
+            .ToHandleChecked();
+    ret->set_module_bytes(module_bytes_string);
+  }
+
+  Handle<ByteArray> function_name_table =
+      BuildFunctionNamesTable(isolate, module_env.module);
+  ret->set_function_names(function_name_table);
+  if (data_segments.size() > 0) SaveDataSegmentInfo(factory, this, ret);
+  DCHECK_EQ(ret->default_mem_size(), temp_instance.mem_size);
+  return ret;
 }
 
-// Instantiates a wasm module as a JSObject.
-//  * allocates a backing store of {mem_size} bytes.
-//  * installs a named property "memory" for that buffer if exported
-//  * installs named properties on the object for exported functions
-//  * compiles wasm code to machine code
-MaybeHandle<JSObject> WasmModule::Instantiate(
-    Isolate* isolate, Handle<FixedArray> compiled_module,
-    Handle<JSReceiver> ffi, Handle<JSArrayBuffer> memory) {
+// Instantiates a WASM module, creating a WebAssembly.Instance from a
+// WebAssembly.Module.
+MaybeHandle<JSObject> WasmModule::Instantiate(Isolate* isolate,
+                                              ErrorThrower* thrower,
+                                              Handle<JSObject> module_object,
+                                              Handle<JSReceiver> ffi,
+                                              Handle<JSArrayBuffer> memory) {
+  MaybeHandle<JSObject> nothing;
   HistogramTimerScope wasm_instantiate_module_time_scope(
       isolate->counters()->wasm_instantiate_module_time());
-  ErrorThrower thrower(isolate, "WasmModule::Instantiate()");
   Factory* factory = isolate->factory();
 
-  compiled_module = CloneModuleForInstance(isolate, compiled_module);
+  //--------------------------------------------------------------------------
+  // Reuse the compiled module (if no owner), otherwise clone.
+  //--------------------------------------------------------------------------
+  Handle<WasmCompiledModule> compiled_module;
+  Handle<FixedArray> code_table;
+  Handle<FixedArray> old_code_table;
+  Handle<JSObject> owner;
+  // If we don't clone, this will be null(). Otherwise, this will
+  // be a weak link to the original. If we lose the original to GC,
+  // this will be cleared. We'll link the instances chain last.
+  MaybeHandle<WeakCell> link_to_original;
 
-  // These fields are compulsory.
-  Handle<FixedArray> code_table =
-      compiled_module->GetValueChecked<FixedArray>(isolate, kFunctions);
+  TRACE("Starting new module instantiation\n");
+  {
+    Handle<WasmCompiledModule> original(
+        WasmCompiledModule::cast(module_object->GetInternalField(0)), isolate);
+    // Always make a new copy of the code_table, since the old_code_table
+    // may still have placeholders for imports.
+    old_code_table = original->code_table();
+    code_table = factory->CopyFixedArray(old_code_table);
 
-  std::vector<Handle<Code>> functions(
-      static_cast<size_t>(code_table->length()));
-  for (int i = 0; i < code_table->length(); ++i) {
-    functions[static_cast<size_t>(i)] =
-        code_table->GetValueChecked<Code>(isolate, i);
+    if (original->has_weak_owning_instance()) {
+      WeakCell* tmp = original->ptr_to_weak_owning_instance();
+      DCHECK(!tmp->cleared());
+      // There is already an owner, clone everything.
+      owner = Handle<JSObject>(JSObject::cast(tmp->value()), isolate);
+      // Insert the latest clone in front.
+      TRACE("Cloning from %d\n", original->instance_id());
+      compiled_module = WasmCompiledModule::Clone(isolate, original);
+      // Replace the strong reference to point to the new instance here.
+      // This allows any of the other instances, including the original,
+      // to be collected.
+      module_object->SetInternalField(0, *compiled_module);
+      compiled_module->set_weak_module_object(original->weak_module_object());
+      link_to_original = factory->NewWeakCell(original);
+      // Don't link to original here. We remember the original
+      // as a weak link. If that link isn't clear by the time we finish
+      // instantiating this instance, then we link it at that time.
+      compiled_module->reset_weak_next_instance();
+
+      // Clone the code for WASM functions and exports.
+      for (int i = 0; i < code_table->length(); ++i) {
+        Handle<Code> orig_code = code_table->GetValueChecked<Code>(isolate, i);
+        switch (orig_code->kind()) {
+          case Code::WASM_TO_JS_FUNCTION:
+            // Imports will be overwritten with newly compiled wrappers.
+            break;
+          case Code::JS_TO_WASM_FUNCTION:
+          case Code::WASM_FUNCTION: {
+            Handle<Code> code = factory->CopyCode(orig_code);
+            code_table->set(i, *code);
+            break;
+          }
+          default:
+            UNREACHABLE();
+        }
+      }
+      RecordStats(isolate, code_table);
+    } else {
+      // There was no owner, so we can reuse the original.
+      compiled_module = original;
+      TRACE("Reusing existing instance %d\n", compiled_module->instance_id());
+    }
+    compiled_module->set_code_table(code_table);
   }
-  LinkModuleFunctions(isolate, functions);
 
-  RecordStats(isolate, code_table);
-
-  MaybeHandle<JSObject> nothing;
-
+  //--------------------------------------------------------------------------
+  // Allocate the instance object.
+  //--------------------------------------------------------------------------
   Handle<Map> map = factory->NewMap(
       JS_OBJECT_TYPE,
       JSObject::kHeaderSize + kWasmModuleInternalFieldCount * kPointerSize);
-  Handle<JSObject> js_object = factory->NewJSObjectFromMap(map, TENURED);
-  js_object->SetInternalField(kWasmModuleCodeTable, *code_table);
+  Handle<JSObject> instance = factory->NewJSObjectFromMap(map, TENURED);
+  instance->SetInternalField(kWasmModuleCodeTable, *code_table);
 
-  if (!(SetupInstanceHeap(isolate, compiled_module, js_object, memory,
-                          &thrower) &&
-        SetupGlobals(isolate, compiled_module, js_object, &thrower) &&
-        SetupImports(isolate, compiled_module, js_object, &thrower, ffi) &&
-        SetupExportsObject(compiled_module, isolate, js_object, &thrower))) {
-    return nothing;
+  //--------------------------------------------------------------------------
+  // Set up the memory for the new instance.
+  //--------------------------------------------------------------------------
+  MaybeHandle<JSArrayBuffer> old_memory;
+  // TODO(titzer): handle imported memory properly.
+
+  uint32_t min_mem_pages = compiled_module->min_memory_pages();
+  isolate->counters()->wasm_min_mem_pages_count()->AddSample(min_mem_pages);
+  // TODO(wasm): re-enable counter for max_mem_pages when we use that field.
+
+  if (memory.is_null() && min_mem_pages > 0) {
+    memory = AllocateMemory(thrower, isolate, min_mem_pages);
+    if (memory.is_null()) return nothing;  // failed to allocate memory
   }
 
-  SetDebugSupport(factory, compiled_module, js_object);
+  if (!memory.is_null()) {
+    instance->SetInternalField(kWasmMemArrayBuffer, *memory);
+    Address mem_start = static_cast<Address>(memory->backing_store());
+    uint32_t mem_size = static_cast<uint32_t>(memory->byte_length()->Number());
+    LoadDataSegments(compiled_module, mem_start, mem_size);
 
-  FlushAssemblyCache(isolate, code_table);
+    uint32_t old_mem_size = compiled_module->has_heap()
+                                ? compiled_module->mem_size()
+                                : compiled_module->default_mem_size();
+    Address old_mem_start =
+        compiled_module->has_heap()
+            ? static_cast<Address>(compiled_module->heap()->backing_store())
+            : nullptr;
+    RelocateInstanceCode(instance, old_mem_start, mem_start, old_mem_size,
+                         mem_size);
+    compiled_module->set_heap(memory);
+  }
 
-  MaybeHandle<FixedArray> maybe_indirect_tables =
-      compiled_module->GetValue<FixedArray>(isolate,
-                                            kTableOfIndirectFunctionTables);
-  Handle<FixedArray> indirect_tables_template;
-  if (maybe_indirect_tables.ToHandle(&indirect_tables_template)) {
-    Handle<FixedArray> indirect_tables = SetupIndirectFunctionTable(
-        isolate, code_table, indirect_tables_template);
-    for (int i = 0; i < indirect_tables->length(); ++i) {
-      Handle<FixedArray> metadata =
-          indirect_tables->GetValueChecked<FixedArray>(isolate, i);
-      uint32_t size = Smi::cast(metadata->get(kSize))->value();
-      Handle<FixedArray> table =
-          metadata->GetValueChecked<FixedArray>(isolate, kTable);
-      wasm::PopulateFunctionTable(table, size, &functions);
+  //--------------------------------------------------------------------------
+  // Set up the globals for the new instance.
+  //--------------------------------------------------------------------------
+  MaybeHandle<JSArrayBuffer> old_globals;
+  MaybeHandle<JSArrayBuffer> globals;
+  uint32_t globals_size = compiled_module->globals_size();
+  if (globals_size > 0) {
+    Handle<JSArrayBuffer> global_buffer = NewArrayBuffer(isolate, globals_size);
+    globals = global_buffer;
+    if (globals.is_null()) {
+      thrower->Error("Out of memory: wasm globals");
+      return nothing;
     }
-    js_object->SetInternalField(kWasmModuleFunctionTable, *indirect_tables);
+    Address old_address =
+        owner.is_null() ? nullptr : GetGlobalStartAddressFromCodeTemplate(
+                                        *isolate->factory()->undefined_value(),
+                                        JSObject::cast(*owner));
+    RelocateGlobals(instance, old_address,
+                    static_cast<Address>(global_buffer->backing_store()));
+    instance->SetInternalField(kWasmGlobalsArrayBuffer, *global_buffer);
   }
 
+  //--------------------------------------------------------------------------
+  // Compile the import wrappers for the new instance.
+  //--------------------------------------------------------------------------
+  // TODO(titzer): handle imported globals and function tables.
+  int num_imported_functions = 0;
+  if (compiled_module->has_import_data()) {
+    Handle<FixedArray> import_data = compiled_module->import_data();
+    num_imported_functions = import_data->length();
+    for (int index = 0; index < num_imported_functions; index++) {
+      Handle<Code> import_wrapper =
+          CompileImportWrapper(isolate, ffi, index, import_data, thrower);
+      if (thrower->error()) return nothing;
+      code_table->set(index, *import_wrapper);
+      RecordStats(isolate, *import_wrapper);
+    }
+  }
+
+  //--------------------------------------------------------------------------
+  // Set up the debug support for the new instance.
+  //--------------------------------------------------------------------------
+  // TODO(wasm): avoid referencing this stuff from the instance, use it off
+  // the compiled module instead. See the following 3 assignments:
+  if (compiled_module->has_module_bytes()) {
+    instance->SetInternalField(kWasmModuleBytesString,
+                               compiled_module->ptr_to_module_bytes());
+  }
+
+  if (compiled_module->has_function_names()) {
+    instance->SetInternalField(kWasmFunctionNamesArray,
+                               compiled_module->ptr_to_function_names());
+  }
+
+  {
+    Handle<Object> handle = factory->NewNumber(num_imported_functions);
+    instance->SetInternalField(kWasmNumImportedFunctions, *handle);
+  }
+
+  //--------------------------------------------------------------------------
+  // Set up the runtime support for the new instance.
+  //--------------------------------------------------------------------------
+  Handle<WeakCell> weak_link = isolate->factory()->NewWeakCell(instance);
+
+  for (int i = num_imported_functions + FLAG_skip_compiling_wasm_funcs;
+       i < code_table->length(); ++i) {
+    Handle<Code> code = code_table->GetValueChecked<Code>(isolate, i);
+    if (code->kind() == Code::WASM_FUNCTION) {
+      Handle<FixedArray> deopt_data =
+          isolate->factory()->NewFixedArray(2, TENURED);
+      deopt_data->set(0, *weak_link);
+      deopt_data->set(1, Smi::FromInt(static_cast<int>(i)));
+      deopt_data->set_length(2);
+      code->set_deoptimization_data(*deopt_data);
+    }
+  }
+
+  //--------------------------------------------------------------------------
+  // Set up the indirect function tables for the new instance.
+  //--------------------------------------------------------------------------
+  {
+    std::vector<Handle<Code>> functions(
+        static_cast<size_t>(code_table->length()));
+    for (int i = 0; i < code_table->length(); ++i) {
+      functions[i] = code_table->GetValueChecked<Code>(isolate, i);
+    }
+
+    if (compiled_module->has_indirect_function_tables()) {
+      Handle<FixedArray> indirect_tables_template =
+          compiled_module->indirect_function_tables();
+      Handle<FixedArray> to_replace =
+          owner.is_null() ? indirect_tables_template
+                          : handle(FixedArray::cast(owner->GetInternalField(
+                                kWasmModuleFunctionTable)));
+      Handle<FixedArray> indirect_tables = SetupIndirectFunctionTable(
+          isolate, code_table, indirect_tables_template, to_replace);
+      for (int i = 0; i < indirect_tables->length(); ++i) {
+        Handle<FixedArray> metadata =
+            indirect_tables->GetValueChecked<FixedArray>(isolate, i);
+        uint32_t size = Smi::cast(metadata->get(kSize))->value();
+        Handle<FixedArray> table =
+            metadata->GetValueChecked<FixedArray>(isolate, kTable);
+        PopulateFunctionTable(table, size, &functions);
+      }
+      instance->SetInternalField(kWasmModuleFunctionTable, *indirect_tables);
+    }
+  }
+
+  //--------------------------------------------------------------------------
+  // Set up the exports object for the new instance.
+  //--------------------------------------------------------------------------
+  bool mem_export = compiled_module->export_memory();
+  ModuleOrigin origin = compiled_module->origin();
+
+  if (compiled_module->has_exports() || mem_export) {
+    PropertyDescriptor desc;
+    desc.set_writable(false);
+
+    Handle<JSObject> exports_object = instance;
+    if (origin == kWasmOrigin) {
+      // Create the "exports" object.
+      Handle<JSFunction> object_function = Handle<JSFunction>(
+          isolate->native_context()->object_function(), isolate);
+      exports_object = factory->NewJSObject(object_function, TENURED);
+      Handle<String> exports_name = factory->InternalizeUtf8String("exports");
+      JSObject::AddProperty(instance, exports_name, exports_object, READ_ONLY);
+    }
+    int first_export = -1;
+    // TODO(wasm): another iteration over the code objects.
+    for (int i = 0; i < code_table->length(); i++) {
+      Handle<Code> code = code_table->GetValueChecked<Code>(isolate, i);
+      if (code->kind() == Code::JS_TO_WASM_FUNCTION) {
+        first_export = i;
+        break;
+      }
+    }
+    if (compiled_module->has_exports()) {
+      Handle<FixedArray> exports = compiled_module->exports();
+      int export_size = exports->length();
+      for (int i = 0; i < export_size; ++i) {
+        Handle<FixedArray> export_data =
+            exports->GetValueChecked<FixedArray>(isolate, i);
+        Handle<String> name =
+            export_data->GetValueChecked<String>(isolate, kExportName);
+        int arity = Smi::cast(export_data->get(kExportArity))->value();
+        MaybeHandle<ByteArray> signature =
+            export_data->GetValue<ByteArray>(isolate, kExportedSignature);
+        Handle<Code> export_code =
+            code_table->GetValueChecked<Code>(isolate, first_export + i);
+        Handle<JSFunction> function = WrapExportCodeAsJSFunction(
+            isolate, export_code, name, arity, signature, instance);
+        desc.set_value(function);
+        Maybe<bool> status = JSReceiver::DefineOwnProperty(
+            isolate, exports_object, name, &desc, Object::THROW_ON_ERROR);
+        if (!status.IsJust()) {
+          thrower->Error("export of %.*s failed.", name->length(),
+                         name->ToCString().get());
+          return nothing;
+        }
+      }
+    }
+    if (mem_export) {
+      // Export the memory as a named property.
+      Handle<JSArrayBuffer> buffer = Handle<JSArrayBuffer>(
+          JSArrayBuffer::cast(instance->GetInternalField(kWasmMemArrayBuffer)));
+      Handle<Object> memory_object =
+          WasmJs::CreateWasmMemoryObject(isolate, buffer, false, 0);
+      // TODO(titzer): export the memory with the correct name.
+      Handle<String> name = factory->InternalizeUtf8String("memory");
+      JSObject::AddProperty(exports_object, name, memory_object, READ_ONLY);
+    }
+  }
+
+  if (num_imported_functions > 0 || !owner.is_null()) {
+    // If the code was cloned, or new imports were compiled, patch.
+    PatchDirectCalls(old_code_table, code_table, num_imported_functions);
+  }
+
+  FlushICache(isolate, code_table);
+
+  //--------------------------------------------------------------------------
   // Run the start function if one was specified.
-  MaybeHandle<FixedArray> maybe_startup_fct =
-      compiled_module->GetValue<FixedArray>(isolate, kStartupFunction);
-  Handle<FixedArray> metadata;
-  if (maybe_startup_fct.ToHandle(&metadata)) {
+  //--------------------------------------------------------------------------
+  if (compiled_module->has_startup_function()) {
+    Handle<FixedArray> startup_data = compiled_module->startup_function();
     HandleScope scope(isolate);
+    int32_t start_index =
+        startup_data->GetValueChecked<Smi>(isolate, kExportedFunctionIndex)
+            ->value();
     Handle<Code> startup_code =
-        metadata->GetValueChecked<Code>(isolate, kExportCode);
-    int arity = Smi::cast(metadata->get(kExportArity))->value();
+        code_table->GetValueChecked<Code>(isolate, start_index);
+    int arity = Smi::cast(startup_data->get(kExportArity))->value();
     MaybeHandle<ByteArray> startup_signature =
-        metadata->GetValue<ByteArray>(isolate, kExportedSignature);
+        startup_data->GetValue<ByteArray>(isolate, kExportedSignature);
     Handle<JSFunction> startup_fct = WrapExportCodeAsJSFunction(
         isolate, startup_code, factory->InternalizeUtf8String("start"), arity,
-        startup_signature, js_object);
+        startup_signature, instance);
     RecordStats(isolate, *startup_code);
     // Call the JS function.
     Handle<Object> undefined = isolate->factory()->undefined_value();
@@ -1403,35 +1507,86 @@
         Execution::Call(isolate, startup_fct, undefined, 0, nullptr);
 
     if (retval.is_null()) {
-      thrower.Error("WASM.instantiateModule(): start function failed");
+      thrower->Error("WASM.instantiateModule(): start function failed");
       return nothing;
     }
   }
 
-  DCHECK(wasm::IsWasmObject(*js_object));
-  return js_object;
+  DCHECK(wasm::IsWasmObject(*instance));
+
+  {
+    Handle<WeakCell> link_to_owner = factory->NewWeakCell(instance);
+
+    Handle<Object> global_handle = isolate->global_handles()->Create(*instance);
+    Handle<WeakCell> link_to_clone = factory->NewWeakCell(compiled_module);
+    {
+      DisallowHeapAllocation no_gc;
+      compiled_module->set_weak_owning_instance(link_to_owner);
+      Handle<WeakCell> next;
+      if (link_to_original.ToHandle(&next) && !next->cleared()) {
+        WasmCompiledModule* original = WasmCompiledModule::cast(next->value());
+        DCHECK(original->has_weak_owning_instance());
+        DCHECK(!original->weak_owning_instance()->cleared());
+        compiled_module->set_weak_next_instance(next);
+        original->set_weak_prev_instance(link_to_clone);
+      }
+
+      compiled_module->set_weak_owning_instance(link_to_owner);
+      instance->SetInternalField(kWasmCompiledModule, *compiled_module);
+      GlobalHandles::MakeWeak(global_handle.location(),
+                              global_handle.location(), &InstanceFinalizer,
+                              v8::WeakCallbackType::kFinalizer);
+    }
+  }
+  TRACE("Finishing instance %d\n", compiled_module->instance_id());
+  TRACE_CHAIN(WasmCompiledModule::cast(module_object->GetInternalField(0)));
+  return instance;
 }
 
-// TODO(mtrofin): remove this once we move to WASM_DIRECT_CALL
-Handle<Code> ModuleEnv::GetCodeOrPlaceholder(uint32_t index) const {
-  DCHECK(IsValidFunction(index));
-  if (!placeholders.empty()) return placeholders[index];
-  DCHECK_NOT_NULL(instance);
-  return instance->function_code[index];
+#if DEBUG
+uint32_t WasmCompiledModule::instance_id_counter_ = 0;
+#endif
+
+Handle<WasmCompiledModule> WasmCompiledModule::New(Isolate* isolate,
+                                                   uint32_t min_memory_pages,
+                                                   uint32_t globals_size,
+                                                   bool export_memory,
+                                                   ModuleOrigin origin) {
+  Handle<FixedArray> ret =
+      isolate->factory()->NewFixedArray(PropertyIndices::Count, TENURED);
+  // Globals size is expected to fit into an int without overflow. This is not
+  // supported by the spec at the moment, however, we don't support array
+  // buffer sizes over 1g, so, for now, we avoid allocating a HeapNumber for
+  // the globals size. The CHECK guards this assumption.
+  CHECK_GE(static_cast<int>(globals_size), 0);
+  ret->set(kID_min_memory_pages,
+           Smi::FromInt(static_cast<int>(min_memory_pages)));
+  ret->set(kID_globals_size, Smi::FromInt(static_cast<int>(globals_size)));
+  ret->set(kID_export_memory, Smi::FromInt(static_cast<int>(export_memory)));
+  ret->set(kID_origin, Smi::FromInt(static_cast<int>(origin)));
+  WasmCompiledModule::cast(*ret)->Init();
+  return handle(WasmCompiledModule::cast(*ret));
 }
 
-Handle<Code> ModuleEnv::GetImportCode(uint32_t index) {
-  DCHECK(IsValidImport(index));
-  return instance ? instance->import_code[index] : Handle<Code>::null();
+void WasmCompiledModule::Init() {
+#if DEBUG
+  set(kID_instance_id, Smi::FromInt(instance_id_counter_++));
+  TRACE("New compiled module id: %d\n", instance_id());
+#endif
 }
 
-compiler::CallDescriptor* ModuleEnv::GetCallDescriptor(Zone* zone,
-                                                       uint32_t index) {
-  DCHECK(IsValidFunction(index));
-  // Always make a direct call to whatever is in the table at that location.
-  // A wrapper will be generated for FFI calls.
-  const WasmFunction* function = &module->functions[index];
-  return GetWasmCallDescriptor(zone, function->sig);
+void WasmCompiledModule::PrintInstancesChain() {
+#if DEBUG
+  if (!FLAG_trace_wasm_instances) return;
+  for (WasmCompiledModule* current = this; current != nullptr;) {
+    PrintF("->%d", current->instance_id());
+    if (current->ptr_to_weak_next_instance() == nullptr) break;
+    CHECK(!current->ptr_to_weak_next_instance()->cleared());
+    current =
+        WasmCompiledModule::cast(current->ptr_to_weak_next_instance()->value());
+  }
+  PrintF("\n");
+#endif
 }
 
 Handle<Object> GetWasmFunctionNameOrNull(Isolate* isolate, Handle<Object> wasm,
@@ -1577,93 +1732,188 @@
   return ByteArray::cast(func_names_obj)->get_int(0);
 }
 
-Handle<JSObject> CreateCompiledModuleObject(
-    Isolate* isolate, Handle<FixedArray> compiled_module) {
-  Handle<JSFunction> module_cons(
-      isolate->native_context()->wasm_module_constructor());
-  Handle<JSObject> module_obj = isolate->factory()->NewJSObject(module_cons);
+Handle<JSObject> CreateCompiledModuleObject(Isolate* isolate,
+                                            Handle<FixedArray> compiled_module,
+                                            ModuleOrigin origin) {
+  Handle<JSObject> module_obj;
+  if (origin == ModuleOrigin::kWasmOrigin) {
+    Handle<JSFunction> module_cons(
+        isolate->native_context()->wasm_module_constructor());
+    module_obj = isolate->factory()->NewJSObject(module_cons);
+  } else {
+    DCHECK(origin == ModuleOrigin::kAsmJsOrigin);
+    Handle<Map> map = isolate->factory()->NewMap(
+        JS_OBJECT_TYPE, JSObject::kHeaderSize + kPointerSize);
+    module_obj = isolate->factory()->NewJSObjectFromMap(map, TENURED);
+  }
   module_obj->SetInternalField(0, *compiled_module);
-  Handle<Symbol> module_sym(isolate->native_context()->wasm_module_sym());
-  Object::SetProperty(module_obj, module_sym, module_obj, STRICT).Check();
+  if (origin == ModuleOrigin::kWasmOrigin) {
+    Handle<Symbol> module_sym(isolate->native_context()->wasm_module_sym());
+    Object::SetProperty(module_obj, module_sym, module_obj, STRICT).Check();
+  }
+  Handle<WeakCell> link_to_module = isolate->factory()->NewWeakCell(module_obj);
+  WasmCompiledModule::cast(*compiled_module)
+      ->set_weak_module_object(link_to_module);
   return module_obj;
 }
 
+MaybeHandle<JSObject> CreateModuleObjectFromBytes(Isolate* isolate,
+                                                  const byte* start,
+                                                  const byte* end,
+                                                  ErrorThrower* thrower,
+                                                  ModuleOrigin origin) {
+  MaybeHandle<JSObject> nothing;
+  Zone zone(isolate->allocator());
+  ModuleResult result =
+      DecodeWasmModule(isolate, &zone, start, end, false, origin);
+  std::unique_ptr<const WasmModule> decoded_module(result.val);
+  if (result.failed()) {
+    thrower->Failed("Wasm decoding failed", result);
+    return nothing;
+  }
+  MaybeHandle<FixedArray> compiled_module =
+      decoded_module->CompileFunctions(isolate, thrower);
+  if (compiled_module.is_null()) return nothing;
+
+  return CreateCompiledModuleObject(isolate, compiled_module.ToHandleChecked(),
+                                    origin);
+}
+
+bool ValidateModuleBytes(Isolate* isolate, const byte* start, const byte* end,
+                         ErrorThrower* thrower, ModuleOrigin origin) {
+  Zone zone(isolate->allocator());
+  ModuleResult result =
+      DecodeWasmModule(isolate, &zone, start, end, false, origin);
+  if (result.ok()) {
+    DCHECK_NOT_NULL(result.val);
+    delete result.val;
+    return true;
+  }
+  return false;
+}
+
+MaybeHandle<JSArrayBuffer> GetInstanceMemory(Isolate* isolate,
+                                             Handle<JSObject> instance) {
+  Object* mem = instance->GetInternalField(kWasmMemArrayBuffer);
+  DCHECK(IsWasmObject(*instance));
+  if (mem->IsUndefined(isolate)) return MaybeHandle<JSArrayBuffer>();
+  return Handle<JSArrayBuffer>(JSArrayBuffer::cast(mem));
+}
+
+void SetInstanceMemory(Handle<JSObject> instance, JSArrayBuffer* buffer) {
+  DisallowHeapAllocation no_gc;
+  DCHECK(IsWasmObject(*instance));
+  instance->SetInternalField(kWasmMemArrayBuffer, buffer);
+  WasmCompiledModule* module =
+      WasmCompiledModule::cast(instance->GetInternalField(kWasmCompiledModule));
+  module->set_ptr_to_heap(buffer);
+}
+
+int32_t GetInstanceMemorySize(Isolate* isolate, Handle<JSObject> instance) {
+  MaybeHandle<JSArrayBuffer> maybe_mem_buffer =
+      GetInstanceMemory(isolate, instance);
+  Handle<JSArrayBuffer> buffer;
+  if (!maybe_mem_buffer.ToHandle(&buffer)) {
+    return 0;
+  } else {
+    return buffer->byte_length()->Number() / WasmModule::kPageSize;
+  }
+}
+
+int32_t GrowInstanceMemory(Isolate* isolate, Handle<JSObject> instance,
+                           uint32_t pages) {
+  Address old_mem_start = nullptr;
+  uint32_t old_size = 0, new_size = 0;
+
+  MaybeHandle<JSArrayBuffer> maybe_mem_buffer =
+      GetInstanceMemory(isolate, instance);
+  Handle<JSArrayBuffer> old_buffer;
+  if (!maybe_mem_buffer.ToHandle(&old_buffer)) {
+    // If the module object does not have linear memory associated with it,
+    // allocate a new array buffer of the given size.
+    // TODO(gdeepti): Fix bounds check to take into account size of memtype.
+    new_size = pages * WasmModule::kPageSize;
+    // The code generated in the wasm compiler guarantees this precondition.
+    DCHECK(pages <= WasmModule::kMaxMemPages);
+  } else {
+    old_mem_start = static_cast<Address>(old_buffer->backing_store());
+    old_size = old_buffer->byte_length()->Number();
+    // If the old memory was zero-sized, we should have been in the
+    // "undefined" case above.
+    DCHECK_NOT_NULL(old_mem_start);
+    DCHECK_NE(0, old_size);
+    DCHECK(old_size + pages * WasmModule::kPageSize <=
+           std::numeric_limits<uint32_t>::max());
+    new_size = old_size + pages * WasmModule::kPageSize;
+  }
+
+  if (new_size <= old_size ||
+      WasmModule::kMaxMemPages * WasmModule::kPageSize <= new_size) {
+    return -1;
+  }
+  Handle<JSArrayBuffer> buffer = NewArrayBuffer(isolate, new_size);
+  if (buffer.is_null()) return -1;
+  Address new_mem_start = static_cast<Address>(buffer->backing_store());
+  if (old_size != 0) {
+    memcpy(new_mem_start, old_mem_start, old_size);
+  }
+  SetInstanceMemory(instance, *buffer);
+  if (!UpdateWasmModuleMemory(instance, old_mem_start, new_mem_start, old_size,
+                              new_size)) {
+    return -1;
+  }
+  DCHECK(old_size % WasmModule::kPageSize == 0);
+  return (old_size / WasmModule::kPageSize);
+}
+
 namespace testing {
 
-int32_t CompileAndRunWasmModule(Isolate* isolate, const byte* module_start,
-                                const byte* module_end, bool asm_js) {
-  HandleScope scope(isolate);
-  Zone zone(isolate->allocator());
-  ErrorThrower thrower(isolate, "CompileAndRunWasmModule");
-
-  // Decode the module, but don't verify function bodies, since we'll
-  // be compiling them anyway.
-  ModuleResult decoding_result =
-      DecodeWasmModule(isolate, &zone, module_start, module_end, false,
-                       asm_js ? kAsmJsOrigin : kWasmOrigin);
-
-  std::unique_ptr<const WasmModule> module(decoding_result.val);
-  if (decoding_result.failed()) {
-    // Module verification failed. throw.
-    thrower.Error("WASM.compileRun() failed: %s",
-                  decoding_result.error_msg.get());
-    return -1;
+void ValidateInstancesChain(Isolate* isolate, Handle<JSObject> module_obj,
+                            int instance_count) {
+  CHECK_GE(instance_count, 0);
+  DisallowHeapAllocation no_gc;
+  WasmCompiledModule* compiled_module =
+      WasmCompiledModule::cast(module_obj->GetInternalField(0));
+  CHECK_EQ(
+      JSObject::cast(compiled_module->ptr_to_weak_module_object()->value()),
+      *module_obj);
+  Object* prev = nullptr;
+  int found_instances = compiled_module->has_weak_owning_instance() ? 1 : 0;
+  WasmCompiledModule* current_instance = compiled_module;
+  while (current_instance->has_weak_next_instance()) {
+    CHECK((prev == nullptr && !current_instance->has_weak_prev_instance()) ||
+          current_instance->ptr_to_weak_prev_instance()->value() == prev);
+    CHECK_EQ(current_instance->ptr_to_weak_module_object()->value(),
+             *module_obj);
+    CHECK(
+        IsWasmObject(current_instance->ptr_to_weak_owning_instance()->value()));
+    prev = current_instance;
+    current_instance = WasmCompiledModule::cast(
+        current_instance->ptr_to_weak_next_instance()->value());
+    ++found_instances;
+    CHECK_LE(found_instances, instance_count);
   }
-
-  if (module->import_table.size() > 0) {
-    thrower.Error("Not supported: module has imports.");
-  }
-  if (module->export_table.size() == 0) {
-    thrower.Error("Not supported: module has no exports.");
-  }
-
-  if (thrower.error()) return -1;
-  MaybeHandle<FixedArray> compiled_module =
-      module->CompileFunctions(isolate, &thrower);
-
-  if (compiled_module.is_null()) return -1;
-  Handle<JSObject> instance =
-      WasmModule::Instantiate(isolate, compiled_module.ToHandleChecked(),
-                              Handle<JSReceiver>::null(),
-                              Handle<JSArrayBuffer>::null())
-          .ToHandleChecked();
-
-  return CallFunction(isolate, instance, &thrower, "main", 0, nullptr);
+  CHECK_EQ(found_instances, instance_count);
 }
 
-int32_t CallFunction(Isolate* isolate, Handle<JSObject> instance,
-                     ErrorThrower* thrower, const char* name, int argc,
-                     Handle<Object> argv[]) {
-  Handle<Name> exports = isolate->factory()->InternalizeUtf8String("exports");
-  Handle<JSObject> exports_object = Handle<JSObject>::cast(
-      JSObject::GetProperty(instance, exports).ToHandleChecked());
-  Handle<Name> main_name = isolate->factory()->NewStringFromAsciiChecked(name);
-  PropertyDescriptor desc;
-  Maybe<bool> property_found = JSReceiver::GetOwnPropertyDescriptor(
-      isolate, exports_object, main_name, &desc);
-  if (!property_found.FromMaybe(false)) return -1;
+void ValidateModuleState(Isolate* isolate, Handle<JSObject> module_obj) {
+  DisallowHeapAllocation no_gc;
+  WasmCompiledModule* compiled_module =
+      WasmCompiledModule::cast(module_obj->GetInternalField(0));
+  CHECK(compiled_module->has_weak_module_object());
+  CHECK_EQ(compiled_module->ptr_to_weak_module_object()->value(), *module_obj);
+  CHECK(!compiled_module->has_weak_prev_instance());
+  CHECK(!compiled_module->has_weak_next_instance());
+  CHECK(!compiled_module->has_weak_owning_instance());
+}
 
-  Handle<JSFunction> main_export = Handle<JSFunction>::cast(desc.value());
-
-  // Call the JS function.
-  Handle<Object> undefined = isolate->factory()->undefined_value();
-  MaybeHandle<Object> retval =
-      Execution::Call(isolate, main_export, undefined, argc, argv);
-
-  // The result should be a number.
-  if (retval.is_null()) {
-    thrower->Error("WASM.compileRun() failed: Invocation was null");
-    return -1;
-  }
-  Handle<Object> result = retval.ToHandleChecked();
-  if (result->IsSmi()) {
-    return Smi::cast(*result)->value();
-  }
-  if (result->IsHeapNumber()) {
-    return static_cast<int32_t>(HeapNumber::cast(*result)->value());
-  }
-  thrower->Error("WASM.compileRun() failed: Return value should be number");
-  return -1;
+void ValidateOrphanedInstance(Isolate* isolate, Handle<JSObject> instance) {
+  DisallowHeapAllocation no_gc;
+  CHECK(IsWasmObject(*instance));
+  WasmCompiledModule* compiled_module =
+      WasmCompiledModule::cast(instance->GetInternalField(kWasmCompiledModule));
+  CHECK(compiled_module->has_weak_module_object());
+  CHECK(compiled_module->ptr_to_weak_module_object()->cleared());
 }
 
 }  // namespace testing
diff --git a/src/wasm/wasm-module.h b/src/wasm/wasm-module.h
index 0c3df51..ac75042 100644
--- a/src/wasm/wasm-module.h
+++ b/src/wasm/wasm-module.h
@@ -27,84 +27,71 @@
 const size_t kMaxFunctionSize = 128 * 1024;
 const size_t kMaxStringSize = 256;
 const uint32_t kWasmMagic = 0x6d736100;
-const uint32_t kWasmVersion = 0x0b;
+const uint32_t kWasmVersion = 0x0c;
+
 const uint8_t kWasmFunctionTypeForm = 0x40;
+const uint8_t kWasmAnyFunctionTypeForm = 0x20;
 
-// WebAssembly sections are named as strings in the binary format, but
-// internally V8 uses an enum to handle them.
-//
-// Entries have the form F(enumerator, string).
-#define FOR_EACH_WASM_SECTION_TYPE(F)  \
-  F(Signatures, 1, "type")             \
-  F(ImportTable, 2, "import")          \
-  F(FunctionSignatures, 3, "function") \
-  F(FunctionTable, 4, "table")         \
-  F(Memory, 5, "memory")               \
-  F(ExportTable, 6, "export")          \
-  F(StartFunction, 7, "start")         \
-  F(FunctionBodies, 8, "code")         \
-  F(DataSegments, 9, "data")           \
-  F(Names, 10, "name")                 \
-  F(Globals, 0, "global")              \
-  F(End, 0, "end")
+enum WasmSectionCode {
+  kUnknownSectionCode = 0,   // code for unknown sections
+  kTypeSectionCode = 1,      // Function signature declarations
+  kImportSectionCode = 2,    // Import declarations
+  kFunctionSectionCode = 3,  // Function declarations
+  kTableSectionCode = 4,     // Indirect function table and other tables
+  kMemorySectionCode = 5,    // Memory attributes
+  kGlobalSectionCode = 6,    // Global declarations
+  kExportSectionCode = 7,    // Exports
+  kStartSectionCode = 8,     // Start function declaration
+  kElementSectionCode = 9,   // Elements section
+  kCodeSectionCode = 10,     // Function code
+  kDataSectionCode = 11,     // Data segments
+  kNameSectionCode = 12,     // Name section (encoded as a string)
+};
 
-// Contants for the above section types: {LEB128 length, characters...}.
-#define WASM_SECTION_MEMORY 6, 'm', 'e', 'm', 'o', 'r', 'y'
-#define WASM_SECTION_SIGNATURES 4, 't', 'y', 'p', 'e'
-#define WASM_SECTION_GLOBALS 6, 'g', 'l', 'o', 'b', 'a', 'l'
-#define WASM_SECTION_DATA_SEGMENTS 4, 'd', 'a', 't', 'a'
-#define WASM_SECTION_FUNCTION_TABLE 5, 't', 'a', 'b', 'l', 'e'
-#define WASM_SECTION_END 3, 'e', 'n', 'd'
-#define WASM_SECTION_START_FUNCTION 5, 's', 't', 'a', 'r', 't'
-#define WASM_SECTION_IMPORT_TABLE 6, 'i', 'm', 'p', 'o', 'r', 't'
-#define WASM_SECTION_EXPORT_TABLE 6, 'e', 'x', 'p', 'o', 'r', 't'
-#define WASM_SECTION_FUNCTION_SIGNATURES \
-  8, 'f', 'u', 'n', 'c', 't', 'i', 'o', 'n'
-#define WASM_SECTION_FUNCTION_BODIES 4, 'c', 'o', 'd', 'e'
-#define WASM_SECTION_NAMES 4, 'n', 'a', 'm', 'e'
+inline bool IsValidSectionCode(uint8_t byte) {
+  return kTypeSectionCode <= byte && byte <= kDataSectionCode;
+}
 
-// Constants for the above section headers' size (LEB128 + characters).
-#define WASM_SECTION_MEMORY_SIZE ((size_t)7)
-#define WASM_SECTION_SIGNATURES_SIZE ((size_t)5)
-#define WASM_SECTION_GLOBALS_SIZE ((size_t)7)
-#define WASM_SECTION_DATA_SEGMENTS_SIZE ((size_t)5)
-#define WASM_SECTION_FUNCTION_TABLE_SIZE ((size_t)6)
-#define WASM_SECTION_END_SIZE ((size_t)4)
-#define WASM_SECTION_START_FUNCTION_SIZE ((size_t)6)
-#define WASM_SECTION_IMPORT_TABLE_SIZE ((size_t)7)
-#define WASM_SECTION_EXPORT_TABLE_SIZE ((size_t)7)
-#define WASM_SECTION_FUNCTION_SIGNATURES_SIZE ((size_t)9)
-#define WASM_SECTION_FUNCTION_BODIES_SIZE ((size_t)5)
-#define WASM_SECTION_NAMES_SIZE ((size_t)5)
+const char* SectionName(WasmSectionCode code);
 
 class WasmDebugInfo;
 
-struct WasmSection {
-  enum class Code : uint32_t {
-#define F(enumerator, order, string) enumerator,
-    FOR_EACH_WASM_SECTION_TYPE(F)
-#undef F
-        Max
-  };
-  static WasmSection::Code begin();
-  static WasmSection::Code end();
-  static WasmSection::Code next(WasmSection::Code code);
-  static const char* getName(Code code);
-  static int getOrder(Code code);
-  static size_t getNameLength(Code code);
-  static WasmSection::Code lookup(const byte* string, uint32_t length);
-};
-
-enum WasmFunctionDeclBit {
-  kDeclFunctionName = 0x01,
-  kDeclFunctionExport = 0x08
-};
-
 // Constants for fixed-size elements within a module.
-static const size_t kDeclMemorySize = 3;
-static const size_t kDeclDataSegmentSize = 13;
-
 static const uint32_t kMaxReturnCount = 1;
+static const uint8_t kResizableMaximumFlag = 1;
+static const int32_t kInvalidFunctionIndex = -1;
+
+enum WasmExternalKind {
+  kExternalFunction = 0,
+  kExternalTable = 1,
+  kExternalMemory = 2,
+  kExternalGlobal = 3
+};
+
+// Representation of an initializer expression.
+struct WasmInitExpr {
+  enum WasmInitKind {
+    kNone,
+    kGlobalIndex,
+    kI32Const,
+    kI64Const,
+    kF32Const,
+    kF64Const
+  } kind;
+
+  union {
+    int32_t i32_const;
+    int64_t i64_const;
+    float f32_const;
+    double f64_const;
+    uint32_t global_index;
+  } val;
+};
+
+#define NO_INIT                 \
+  {                             \
+    WasmInitExpr::kNone, { 0u } \
+  }
 
 // Static representation of a WASM function.
 struct WasmFunction {
@@ -115,54 +102,69 @@
   uint32_t name_length;  // length in bytes of the name.
   uint32_t code_start_offset;    // offset in the module bytes of code start.
   uint32_t code_end_offset;      // offset in the module bytes of code end.
-};
-
-// Static representation of an imported WASM function.
-struct WasmImport {
-  FunctionSig* sig;               // signature of the function.
-  uint32_t sig_index;             // index into the signature table.
-  uint32_t module_name_offset;    // offset in module bytes of the module name.
-  uint32_t module_name_length;    // length in bytes of the module name.
-  uint32_t function_name_offset;  // offset in module bytes of the import name.
-  uint32_t function_name_length;  // length in bytes of the import name.
-};
-
-// Static representation of an exported WASM function.
-struct WasmExport {
-  uint32_t func_index;   // index into the function table.
-  uint32_t name_offset;  // offset in module bytes of the name to export.
-  uint32_t name_length;  // length in bytes of the exported name.
+  bool imported;
+  bool exported;
 };
 
 // Static representation of a wasm global variable.
 struct WasmGlobal {
-  uint32_t name_offset;  // offset in the module bytes of the name, if any.
-  uint32_t name_length;  // length in bytes of the global name.
   LocalType type;        // type of the global.
-  uint32_t offset;       // offset from beginning of globals area.
-  bool exported;         // true if this global is exported.
+  bool mutability;       // {true} if mutable.
+  WasmInitExpr init;     // the initialization expression of the global.
+  uint32_t offset;       // offset into global memory.
+  bool imported;         // true if imported.
+  bool exported;         // true if exported.
 };
 
 // Static representation of a wasm data segment.
 struct WasmDataSegment {
-  uint32_t dest_addr;      // destination memory address of the data.
+  WasmInitExpr dest_addr;  // destination memory address of the data.
   uint32_t source_offset;  // start offset in the module bytes.
   uint32_t source_size;    // end offset in the module bytes.
-  bool init;               // true if loaded upon instantiation.
 };
 
 // Static representation of a wasm indirect call table.
 struct WasmIndirectFunctionTable {
-  uint32_t size;                 // initial table size.
-  uint32_t max_size;             // maximum table size.
-  std::vector<uint16_t> values;  // function table.
+  uint32_t size;                // initial table size.
+  uint32_t max_size;            // maximum table size.
+  std::vector<int32_t> values;  // function table, -1 indicating invalid.
+  bool imported;                // true if imported.
+  bool exported;                // true if exported.
+};
+
+// Static representation of how to initialize a table.
+struct WasmTableInit {
+  uint32_t table_index;
+  WasmInitExpr offset;
+  std::vector<uint32_t> entries;
+};
+
+// Static representation of a WASM import.
+struct WasmImport {
+  uint32_t module_name_length;  // length in bytes of the module name.
+  uint32_t module_name_offset;  // offset in module bytes of the module name.
+  uint32_t field_name_length;   // length in bytes of the import name.
+  uint32_t field_name_offset;   // offset in module bytes of the import name.
+  WasmExternalKind kind;        // kind of the import.
+  uint32_t index;               // index into the respective space.
+};
+
+// Static representation of a WASM export.
+struct WasmExport {
+  uint32_t name_length;   // length in bytes of the exported name.
+  uint32_t name_offset;   // offset in module bytes of the name to export.
+  WasmExternalKind kind;  // kind of the export.
+  uint32_t index;         // index into the respective space.
 };
 
 enum ModuleOrigin { kWasmOrigin, kAsmJsOrigin };
 
+class WasmCompiledModule;
+
 // Static representation of a module.
 struct WasmModule {
   static const uint32_t kPageSize = 0x10000;    // Page size, 64kb.
+  static const uint32_t kMaxLegalPages = 65536;  // Maximum legal pages
   static const uint32_t kMinMemPages = 1;       // Minimum memory size = 64kb
   static const uint32_t kMaxMemPages = 16384;   // Maximum memory size =  1gb
 
@@ -171,7 +173,6 @@
   uint32_t min_mem_pages;     // minimum size of the memory in 64k pages.
   uint32_t max_mem_pages;     // maximum size of the memory in 64k pages.
   bool mem_export;            // true if the memory is exported.
-  bool mem_external;          // true if the memory is external.
   // TODO(wasm): reconcile start function index being an int with
   // the fact that we index on uint32_t, so we may technically not be
   // able to represent some start_function_index -es.
@@ -180,12 +181,16 @@
 
   std::vector<WasmGlobal> globals;             // globals in this module.
   uint32_t globals_size;                       // size of globals table.
+  uint32_t num_imported_functions;             // number of imported functions.
+  uint32_t num_declared_functions;             // number of declared functions.
+  uint32_t num_exported_functions;             // number of exported functions.
   std::vector<FunctionSig*> signatures;        // signatures in this module.
   std::vector<WasmFunction> functions;         // functions in this module.
   std::vector<WasmDataSegment> data_segments;  // data segments in this module.
   std::vector<WasmIndirectFunctionTable> function_tables;  // function tables.
   std::vector<WasmImport> import_table;        // import table.
   std::vector<WasmExport> export_table;        // export table.
+  std::vector<WasmTableInit> table_inits;      // initializations of tables
   // We store the semaphore here to extend its lifetime. In <libc-2.21, which we
   // use on the try bots, semaphore::Wait() can return while some compilation
   // tasks are still executing semaphore::Signal(). If the semaphore is cleaned
@@ -233,13 +238,12 @@
   }
 
   // Creates a new instantiation of the module in the given isolate.
-  static MaybeHandle<JSObject> Instantiate(Isolate* isolate,
-                                           Handle<FixedArray> compiled_module,
-                                           Handle<JSReceiver> ffi,
-                                           Handle<JSArrayBuffer> memory);
+  V8_EXPORT_PRIVATE static MaybeHandle<JSObject> Instantiate(
+      Isolate* isolate, ErrorThrower* thrower, Handle<JSObject> module_object,
+      Handle<JSReceiver> ffi, Handle<JSArrayBuffer> memory);
 
-  MaybeHandle<FixedArray> CompileFunctions(Isolate* isolate,
-                                           ErrorThrower* thrower) const;
+  MaybeHandle<WasmCompiledModule> CompileFunctions(Isolate* isolate,
+                                                   ErrorThrower* thrower) const;
 
  private:
   DISALLOW_COPY_AND_ASSIGN(WasmModule);
@@ -255,7 +259,6 @@
   Handle<JSArrayBuffer> globals_buffer;  // Handle to array buffer of globals.
   std::vector<Handle<FixedArray>> function_tables;  // indirect function tables.
   std::vector<Handle<Code>> function_code;  // code objects for each function.
-  std::vector<Handle<Code>> import_code;    // code objects for each import.
   // -- raw memory ------------------------------------------------------------
   byte* mem_start;  // start of linear memory.
   uint32_t mem_size;  // size of the linear memory.
@@ -266,7 +269,6 @@
       : module(m),
         function_tables(m->function_tables.size()),
         function_code(m->functions.size()),
-        import_code(m->import_table.size()),
         mem_start(nullptr),
         mem_size(0),
         globals_start(nullptr) {}
@@ -278,9 +280,6 @@
   const WasmModule* module;
   WasmModuleInstance* instance;
   ModuleOrigin origin;
-  // TODO(mtrofin): remove this once we introduce WASM_DIRECT_CALL
-  // reloc infos.
-  std::vector<Handle<Code>> placeholders;
 
   bool IsValidGlobal(uint32_t index) const {
     return module && index < module->globals.size();
@@ -291,9 +290,6 @@
   bool IsValidSignature(uint32_t index) const {
     return module && index < module->signatures.size();
   }
-  bool IsValidImport(uint32_t index) const {
-    return module && index < module->import_table.size();
-  }
   bool IsValidTable(uint32_t index) const {
     return module && index < module->function_tables.size();
   }
@@ -305,10 +301,6 @@
     DCHECK(IsValidFunction(index));
     return module->functions[index].sig;
   }
-  FunctionSig* GetImportSignature(uint32_t index) {
-    DCHECK(IsValidImport(index));
-    return module->import_table[index].sig;
-  }
   FunctionSig* GetSignature(uint32_t index) {
     DCHECK(IsValidSignature(index));
     return module->signatures[index];
@@ -320,14 +312,15 @@
 
   bool asm_js() { return origin == kAsmJsOrigin; }
 
-  Handle<Code> GetCodeOrPlaceholder(uint32_t index) const;
-  Handle<Code> GetImportCode(uint32_t index);
+  Handle<Code> GetFunctionCode(uint32_t index) {
+    DCHECK_NOT_NULL(instance);
+    return instance->function_code[index];
+  }
 
   static compiler::CallDescriptor* GetWasmCallDescriptor(Zone* zone,
                                                          FunctionSig* sig);
   static compiler::CallDescriptor* GetI32WasmCallDescriptor(
       Zone* zone, compiler::CallDescriptor* descriptor);
-  compiler::CallDescriptor* GetCallDescriptor(Zone* zone, uint32_t index);
 };
 
 // A helper for printing out the names of functions.
@@ -347,6 +340,128 @@
 typedef std::vector<std::pair<int, int>> FunctionOffsets;
 typedef Result<FunctionOffsets> FunctionOffsetsResult;
 
+class WasmCompiledModule : public FixedArray {
+ public:
+  static WasmCompiledModule* cast(Object* fixed_array) {
+    return reinterpret_cast<WasmCompiledModule*>(fixed_array);
+  }
+
+#define WCM_OBJECT_OR_WEAK(TYPE, NAME, ID)                           \
+  Handle<TYPE> NAME() const { return handle(ptr_to_##NAME()); }      \
+                                                                     \
+  MaybeHandle<TYPE> maybe_##NAME() const {                           \
+    if (has_##NAME()) return NAME();                                 \
+    return MaybeHandle<TYPE>();                                      \
+  }                                                                  \
+                                                                     \
+  TYPE* ptr_to_##NAME() const {                                      \
+    Object* obj = get(ID);                                           \
+    if (!obj->Is##TYPE()) return nullptr;                            \
+    return TYPE::cast(obj);                                          \
+  }                                                                  \
+                                                                     \
+  void set_##NAME(Handle<TYPE> value) { set_ptr_to_##NAME(*value); } \
+                                                                     \
+  void set_ptr_to_##NAME(TYPE* value) { set(ID, value); }            \
+                                                                     \
+  bool has_##NAME() const { return get(ID)->Is##TYPE(); }            \
+                                                                     \
+  void reset_##NAME() { set_undefined(ID); }
+
+#define WCM_OBJECT(TYPE, NAME) WCM_OBJECT_OR_WEAK(TYPE, NAME, kID_##NAME)
+
+#define WCM_SMALL_NUMBER(TYPE, NAME)                               \
+  TYPE NAME() const {                                              \
+    return static_cast<TYPE>(Smi::cast(get(kID_##NAME))->value()); \
+  }
+
+#define WCM_WEAK_LINK(TYPE, NAME)                        \
+  WCM_OBJECT_OR_WEAK(WeakCell, weak_##NAME, kID_##NAME); \
+                                                         \
+  Handle<TYPE> NAME() const {                            \
+    return handle(TYPE::cast(weak_##NAME()->value()));   \
+  }
+
+#define CORE_WCM_PROPERTY_TABLE(MACRO)                \
+  MACRO(OBJECT, FixedArray, code_table)               \
+  MACRO(OBJECT, FixedArray, import_data)              \
+  MACRO(OBJECT, FixedArray, exports)                  \
+  MACRO(OBJECT, FixedArray, startup_function)         \
+  MACRO(OBJECT, FixedArray, indirect_function_tables) \
+  MACRO(OBJECT, String, module_bytes)                 \
+  MACRO(OBJECT, ByteArray, function_names)            \
+  MACRO(SMALL_NUMBER, uint32_t, min_memory_pages)     \
+  MACRO(OBJECT, FixedArray, data_segments_info)       \
+  MACRO(OBJECT, ByteArray, data_segments)             \
+  MACRO(SMALL_NUMBER, uint32_t, globals_size)         \
+  MACRO(OBJECT, JSArrayBuffer, heap)                  \
+  MACRO(SMALL_NUMBER, bool, export_memory)            \
+  MACRO(SMALL_NUMBER, ModuleOrigin, origin)           \
+  MACRO(WEAK_LINK, WasmCompiledModule, next_instance) \
+  MACRO(WEAK_LINK, WasmCompiledModule, prev_instance) \
+  MACRO(WEAK_LINK, JSObject, owning_instance)         \
+  MACRO(WEAK_LINK, JSObject, module_object)
+
+#if DEBUG
+#define DEBUG_ONLY_TABLE(MACRO) MACRO(SMALL_NUMBER, uint32_t, instance_id)
+#else
+#define DEBUG_ONLY_TABLE(IGNORE)
+  uint32_t instance_id() const { return -1; }
+#endif
+
+#define WCM_PROPERTY_TABLE(MACRO) \
+  CORE_WCM_PROPERTY_TABLE(MACRO)  \
+  DEBUG_ONLY_TABLE(MACRO)
+
+ private:
+  enum PropertyIndices {
+#define INDICES(IGNORE1, IGNORE2, NAME) kID_##NAME,
+    WCM_PROPERTY_TABLE(INDICES) Count
+#undef INDICES
+  };
+
+ public:
+  static Handle<WasmCompiledModule> New(Isolate* isolate,
+                                        uint32_t min_memory_pages,
+                                        uint32_t globals_size,
+                                        bool export_memory,
+                                        ModuleOrigin origin);
+
+  static Handle<WasmCompiledModule> Clone(Isolate* isolate,
+                                          Handle<WasmCompiledModule> module) {
+    Handle<WasmCompiledModule> ret = Handle<WasmCompiledModule>::cast(
+        isolate->factory()->CopyFixedArray(module));
+    ret->Init();
+    ret->reset_weak_owning_instance();
+    ret->reset_weak_next_instance();
+    ret->reset_weak_prev_instance();
+    return ret;
+  }
+
+  uint32_t mem_size() const {
+    DCHECK(has_heap());
+    return heap()->byte_length()->Number();
+  }
+
+  uint32_t default_mem_size() const {
+    return min_memory_pages() * WasmModule::kPageSize;
+  }
+
+#define DECLARATION(KIND, TYPE, NAME) WCM_##KIND(TYPE, NAME)
+  WCM_PROPERTY_TABLE(DECLARATION)
+#undef DECLARATION
+
+  void PrintInstancesChain();
+
+ private:
+#if DEBUG
+  static uint32_t instance_id_counter_;
+#endif
+  void Init();
+
+  DISALLOW_IMPLICIT_CONSTRUCTORS(WasmCompiledModule);
+};
+
 // Extract a function name from the given wasm object.
 // Returns "<WASM UNNAMED>" if the function is unnamed or the name is not a
 // valid UTF-8 string.
@@ -399,18 +514,38 @@
                            const std::vector<Handle<Code>>* code_table);
 
 Handle<JSObject> CreateCompiledModuleObject(Isolate* isolate,
-                                            Handle<FixedArray> compiled_module);
+                                            Handle<FixedArray> compiled_module,
+                                            ModuleOrigin origin);
+
+V8_EXPORT_PRIVATE MaybeHandle<JSObject> CreateModuleObjectFromBytes(
+    Isolate* isolate, const byte* start, const byte* end, ErrorThrower* thrower,
+    ModuleOrigin origin);
+
+V8_EXPORT_PRIVATE bool ValidateModuleBytes(Isolate* isolate, const byte* start,
+                                           const byte* end,
+                                           ErrorThrower* thrower,
+                                           ModuleOrigin origin);
+
+// Get the number of imported functions for a WASM instance.
+uint32_t GetNumImportedFunctions(Handle<JSObject> wasm_object);
+
+// Assumed to be called with a code object associated to a wasm module instance.
+// Intended to be called from runtime functions.
+// Returns nullptr on failing to get owning instance.
+Object* GetOwningWasmInstance(Code* code);
+
+int32_t GetInstanceMemorySize(Isolate* isolate, Handle<JSObject> instance);
+
+int32_t GrowInstanceMemory(Isolate* isolate, Handle<JSObject> instance,
+                           uint32_t pages);
 
 namespace testing {
 
-// Decode, verify, and run the function labeled "main" in the
-// given encoded module. The module should have no imports.
-int32_t CompileAndRunWasmModule(Isolate* isolate, const byte* module_start,
-                                const byte* module_end, bool asm_js = false);
+void ValidateInstancesChain(Isolate* isolate, Handle<JSObject> module_obj,
+                            int instance_count);
+void ValidateModuleState(Isolate* isolate, Handle<JSObject> module_obj);
+void ValidateOrphanedInstance(Isolate* isolate, Handle<JSObject> instance);
 
-int32_t CallFunction(Isolate* isolate, Handle<JSObject> instance,
-                     ErrorThrower* thrower, const char* name, int argc,
-                     Handle<Object> argv[]);
 }  // namespace testing
 }  // namespace wasm
 }  // namespace internal
diff --git a/src/wasm/wasm-opcodes.cc b/src/wasm/wasm-opcodes.cc
index 8f54207..cd2dde4 100644
--- a/src/wasm/wasm-opcodes.cc
+++ b/src/wasm/wasm-opcodes.cc
@@ -38,6 +38,18 @@
   return "Unknown";
 }
 
+bool WasmOpcodes::IsPrefixOpcode(WasmOpcode opcode) {
+  switch (opcode) {
+#define CHECK_PREFIX(name, opcode) \
+  case k##name##Prefix:            \
+    return true;
+    FOREACH_PREFIX(CHECK_PREFIX)
+#undef CHECK_PREFIX
+    default:
+      return false;
+  }
+}
+
 std::ostream& operator<<(std::ostream& os, const FunctionSig& sig) {
   if (sig.return_count() == 0) os << "v";
   for (size_t i = 0; i < sig.return_count(); ++i) {
@@ -74,6 +86,7 @@
     nullptr, FOREACH_SIMD_SIGNATURE(DECLARE_SIMD_SIG_ENTRY)};
 
 static byte kSimpleExprSigTable[256];
+static byte kSimpleAsmjsExprSigTable[256];
 static byte kSimdExprSigTable[256];
 
 // Initialize the signature table.
@@ -81,14 +94,16 @@
 #define SET_SIG_TABLE(name, opcode, sig) \
   kSimpleExprSigTable[opcode] = static_cast<int>(kSigEnum_##sig) + 1;
   FOREACH_SIMPLE_OPCODE(SET_SIG_TABLE);
-  FOREACH_SIMPLE_MEM_OPCODE(SET_SIG_TABLE);
-  FOREACH_ASMJS_COMPAT_OPCODE(SET_SIG_TABLE);
 #undef SET_SIG_TABLE
+#define SET_ASMJS_SIG_TABLE(name, opcode, sig) \
+  kSimpleAsmjsExprSigTable[opcode] = static_cast<int>(kSigEnum_##sig) + 1;
+  FOREACH_ASMJS_COMPAT_OPCODE(SET_ASMJS_SIG_TABLE);
+#undef SET_ASMJS_SIG_TABLE
   byte simd_index;
 #define SET_SIG_TABLE(name, opcode, sig) \
   simd_index = opcode & 0xff;            \
   kSimdExprSigTable[simd_index] = static_cast<int>(kSigEnum_##sig) + 1;
-  FOREACH_SIMD_OPCODE(SET_SIG_TABLE)
+  FOREACH_SIMD_0_OPERAND_OPCODE(SET_SIG_TABLE)
 #undef SET_SIG_TABLE
 }
 
@@ -102,6 +117,10 @@
     return const_cast<FunctionSig*>(
         kSimpleExprSigs[kSimpleExprSigTable[static_cast<byte>(opcode)]]);
   }
+  FunctionSig* AsmjsSignature(WasmOpcode opcode) const {
+    return const_cast<FunctionSig*>(
+        kSimpleExprSigs[kSimpleAsmjsExprSigTable[static_cast<byte>(opcode)]]);
+  }
   FunctionSig* SimdSignature(WasmOpcode opcode) const {
     return const_cast<FunctionSig*>(
         kSimdExprSigs[kSimdExprSigTable[static_cast<byte>(opcode & 0xff)]]);
@@ -118,6 +137,10 @@
   }
 }
 
+FunctionSig* WasmOpcodes::AsmjsSignature(WasmOpcode opcode) {
+  return sig_table.Get().AsmjsSignature(opcode);
+}
+
 // TODO(titzer): pull WASM_64 up to a common header.
 #if !V8_TARGET_ARCH_32_BIT || V8_TARGET_ARCH_X64
 #define WASM_64 1
diff --git a/src/wasm/wasm-opcodes.h b/src/wasm/wasm-opcodes.h
index 4d66e56..03827b2 100644
--- a/src/wasm/wasm-opcodes.h
+++ b/src/wasm/wasm-opcodes.h
@@ -22,6 +22,9 @@
   kLocalS128 = 5
 };
 
+// Type code for multi-value block types.
+static const uint8_t kMultivalBlock = 0x41;
+
 // We reuse the internal machine type to represent WebAssembly AST types.
 // A typedef improves readability without adding a whole new type system.
 typedef MachineRepresentation LocalType;
@@ -44,7 +47,7 @@
 
 // Control expressions and blocks.
 #define FOREACH_CONTROL_OPCODE(V) \
-  V(Nop, 0x00, _)                 \
+  V(Unreachable, 0x00, _)         \
   V(Block, 0x01, _)               \
   V(Loop, 0x02, _)                \
   V(If, 0x03, _)                  \
@@ -54,13 +57,10 @@
   V(BrIf, 0x07, _)                \
   V(BrTable, 0x08, _)             \
   V(Return, 0x09, _)              \
-  V(Unreachable, 0x0a, _)         \
+  V(Nop, 0x0a, _)                 \
   V(Throw, 0xfa, _)               \
-  V(TryCatch, 0xfb, _)            \
-  V(TryCatchFinally, 0xfc, _)     \
-  V(TryFinally, 0xfd, _)          \
+  V(Try, 0xfb, _)                 \
   V(Catch, 0xfe, _)               \
-  V(Finally, 0xff, _)             \
   V(End, 0x0F, _)
 
 // Constants, locals, globals, and calls.
@@ -71,9 +71,10 @@
   V(F32Const, 0x13, _)         \
   V(GetLocal, 0x14, _)         \
   V(SetLocal, 0x15, _)         \
+  V(TeeLocal, 0x19, _)         \
+  V(Drop, 0x0b, _)             \
   V(CallFunction, 0x16, _)     \
   V(CallIndirect, 0x17, _)     \
-  V(CallImport, 0x18, _)       \
   V(I8Const, 0xcb, _)          \
   V(GetGlobal, 0xbb, _)        \
   V(SetGlobal, 0xbc, _)
@@ -273,141 +274,144 @@
   V(I32AsmjsSConvertF64, 0xe2, i_d)    \
   V(I32AsmjsUConvertF64, 0xe3, i_d)
 
-#define FOREACH_SIMD_OPCODE(V)         \
-  V(F32x4Splat, 0xe500, s_f)           \
-  V(F32x4ExtractLane, 0xe501, f_si)    \
-  V(F32x4ReplaceLane, 0xe502, s_sif)   \
-  V(F32x4Abs, 0xe503, s_s)             \
-  V(F32x4Neg, 0xe504, s_s)             \
-  V(F32x4Sqrt, 0xe505, s_s)            \
-  V(F32x4RecipApprox, 0xe506, s_s)     \
-  V(F32x4SqrtApprox, 0xe507, s_s)      \
-  V(F32x4Add, 0xe508, s_ss)            \
-  V(F32x4Sub, 0xe509, s_ss)            \
-  V(F32x4Mul, 0xe50a, s_ss)            \
-  V(F32x4Div, 0xe50b, s_ss)            \
-  V(F32x4Min, 0xe50c, s_ss)            \
-  V(F32x4Max, 0xe50d, s_ss)            \
-  V(F32x4MinNum, 0xe50e, s_ss)         \
-  V(F32x4MaxNum, 0xe50f, s_ss)         \
-  V(F32x4Eq, 0xe510, s_ss)             \
-  V(F32x4Ne, 0xe511, s_ss)             \
-  V(F32x4Lt, 0xe512, s_ss)             \
-  V(F32x4Le, 0xe513, s_ss)             \
-  V(F32x4Gt, 0xe514, s_ss)             \
-  V(F32x4Ge, 0xe515, s_ss)             \
-  V(F32x4Select, 0xe516, s_sss)        \
-  V(F32x4Swizzle, 0xe517, s_s)         \
-  V(F32x4Shuffle, 0xe518, s_ss)        \
-  V(F32x4FromInt32x4, 0xe519, s_s)     \
-  V(F32x4FromUint32x4, 0xe51a, s_s)    \
-  V(I32x4Splat, 0xe51b, s_i)           \
-  V(I32x4ExtractLane, 0xe51c, i_si)    \
-  V(I32x4ReplaceLane, 0xe51d, s_sii)   \
-  V(I32x4Neg, 0xe51e, s_s)             \
-  V(I32x4Add, 0xe51f, s_ss)            \
-  V(I32x4Sub, 0xe520, s_ss)            \
-  V(I32x4Mul, 0xe521, s_ss)            \
-  V(I32x4Min_s, 0xe522, s_ss)          \
-  V(I32x4Max_s, 0xe523, s_ss)          \
-  V(I32x4Shl, 0xe524, s_si)            \
-  V(I32x4Shr_s, 0xe525, s_si)          \
-  V(I32x4Eq, 0xe526, s_ss)             \
-  V(I32x4Ne, 0xe527, s_ss)             \
-  V(I32x4Lt_s, 0xe528, s_ss)           \
-  V(I32x4Le_s, 0xe529, s_ss)           \
-  V(I32x4Gt_s, 0xe52a, s_ss)           \
-  V(I32x4Ge_s, 0xe52b, s_ss)           \
-  V(I32x4Select, 0xe52c, s_sss)        \
-  V(I32x4Swizzle, 0xe52d, s_s)         \
-  V(I32x4Shuffle, 0xe52e, s_ss)        \
-  V(I32x4FromFloat32x4, 0xe52f, s_s)   \
-  V(I32x4Min_u, 0xe530, s_ss)          \
-  V(I32x4Max_u, 0xe531, s_ss)          \
-  V(I32x4Shr_u, 0xe532, s_ss)          \
-  V(I32x4Lt_u, 0xe533, s_ss)           \
-  V(I32x4Le_u, 0xe534, s_ss)           \
-  V(I32x4Gt_u, 0xe535, s_ss)           \
-  V(I32x4Ge_u, 0xe536, s_ss)           \
-  V(Ui32x4FromFloat32x4, 0xe537, s_s)  \
-  V(I16x8Splat, 0xe538, s_i)           \
-  V(I16x8ExtractLane, 0xe539, i_si)    \
-  V(I16x8ReplaceLane, 0xe53a, s_sii)   \
-  V(I16x8Neg, 0xe53b, s_s)             \
-  V(I16x8Add, 0xe53c, s_ss)            \
-  V(I16x8AddSaturate_s, 0xe53d, s_ss)  \
-  V(I16x8Sub, 0xe53e, s_ss)            \
-  V(I16x8SubSaturate_s, 0xe53f, s_ss)  \
-  V(I16x8Mul, 0xe540, s_ss)            \
-  V(I16x8Min_s, 0xe541, s_ss)          \
-  V(I16x8Max_s, 0xe542, s_ss)          \
-  V(I16x8Shl, 0xe543, s_si)            \
-  V(I16x8Shr_s, 0xe544, s_si)          \
-  V(I16x8Eq, 0xe545, s_ss)             \
-  V(I16x8Ne, 0xe546, s_ss)             \
-  V(I16x8Lt_s, 0xe547, s_ss)           \
-  V(I16x8Le_s, 0xe548, s_ss)           \
-  V(I16x8Gt_s, 0xe549, s_ss)           \
-  V(I16x8Ge_s, 0xe54a, s_ss)           \
-  V(I16x8Select, 0xe54b, s_sss)        \
-  V(I16x8Swizzle, 0xe54c, s_s)         \
-  V(I16x8Shuffle, 0xe54d, s_ss)        \
-  V(I16x8AddSaturate_u, 0xe54e, s_ss)  \
-  V(I16x8SubSaturate_u, 0xe54f, s_ss)  \
-  V(I16x8Min_u, 0xe550, s_ss)          \
-  V(I16x8Max_u, 0xe551, s_ss)          \
-  V(I16x8Shr_u, 0xe552, s_si)          \
-  V(I16x8Lt_u, 0xe553, s_ss)           \
-  V(I16x8Le_u, 0xe554, s_ss)           \
-  V(I16x8Gt_u, 0xe555, s_ss)           \
-  V(I16x8Ge_u, 0xe556, s_ss)           \
-  V(I8x16Splat, 0xe557, s_i)           \
-  V(I8x16ExtractLane, 0xe558, i_si)    \
-  V(I8x16ReplaceLane, 0xe559, s_sii)   \
-  V(I8x16Neg, 0xe55a, s_s)             \
-  V(I8x16Add, 0xe55b, s_ss)            \
-  V(I8x16AddSaturate_s, 0xe55c, s_ss)  \
-  V(I8x16Sub, 0xe55d, s_ss)            \
-  V(I8x16SubSaturate_s, 0xe55e, s_ss)  \
-  V(I8x16Mul, 0xe55f, s_ss)            \
-  V(I8x16Min_s, 0xe560, s_ss)          \
-  V(I8x16Max_s, 0xe561, s_ss)          \
-  V(I8x16Shl, 0xe562, s_si)            \
-  V(I8x16Shr_s, 0xe563, s_si)          \
-  V(I8x16Eq, 0xe564, s_ss)             \
-  V(I8x16Neq, 0xe565, s_ss)            \
-  V(I8x16Lt_s, 0xe566, s_ss)           \
-  V(I8x16Le_s, 0xe567, s_ss)           \
-  V(I8x16Gt_s, 0xe568, s_ss)           \
-  V(I8x16Ge_s, 0xe569, s_ss)           \
-  V(I8x16Select, 0xe56a, s_sss)        \
-  V(I8x16Swizzle, 0xe56b, s_s)         \
-  V(I8x16Shuffle, 0xe56c, s_ss)        \
-  V(I8x16AddSaturate_u, 0xe56d, s_ss)  \
-  V(I8x16Sub_saturate_u, 0xe56e, s_ss) \
-  V(I8x16Min_u, 0xe56f, s_ss)          \
-  V(I8x16Max_u, 0xe570, s_ss)          \
-  V(I8x16Shr_u, 0xe571, s_ss)          \
-  V(I8x16Lt_u, 0xe572, s_ss)           \
-  V(I8x16Le_u, 0xe573, s_ss)           \
-  V(I8x16Gt_u, 0xe574, s_ss)           \
-  V(I8x16Ge_u, 0xe575, s_ss)           \
-  V(S128And, 0xe576, s_ss)             \
-  V(S128Ior, 0xe577, s_ss)             \
-  V(S128Xor, 0xe578, s_ss)             \
+#define FOREACH_SIMD_0_OPERAND_OPCODE(V) \
+  V(F32x4Splat, 0xe500, s_f)             \
+  V(F32x4ReplaceLane, 0xe502, s_sif)     \
+  V(F32x4Abs, 0xe503, s_s)               \
+  V(F32x4Neg, 0xe504, s_s)               \
+  V(F32x4Sqrt, 0xe505, s_s)              \
+  V(F32x4RecipApprox, 0xe506, s_s)       \
+  V(F32x4SqrtApprox, 0xe507, s_s)        \
+  V(F32x4Add, 0xe508, s_ss)              \
+  V(F32x4Sub, 0xe509, s_ss)              \
+  V(F32x4Mul, 0xe50a, s_ss)              \
+  V(F32x4Div, 0xe50b, s_ss)              \
+  V(F32x4Min, 0xe50c, s_ss)              \
+  V(F32x4Max, 0xe50d, s_ss)              \
+  V(F32x4MinNum, 0xe50e, s_ss)           \
+  V(F32x4MaxNum, 0xe50f, s_ss)           \
+  V(F32x4Eq, 0xe510, s_ss)               \
+  V(F32x4Ne, 0xe511, s_ss)               \
+  V(F32x4Lt, 0xe512, s_ss)               \
+  V(F32x4Le, 0xe513, s_ss)               \
+  V(F32x4Gt, 0xe514, s_ss)               \
+  V(F32x4Ge, 0xe515, s_ss)               \
+  V(F32x4Select, 0xe516, s_sss)          \
+  V(F32x4Swizzle, 0xe517, s_s)           \
+  V(F32x4Shuffle, 0xe518, s_ss)          \
+  V(F32x4FromInt32x4, 0xe519, s_s)       \
+  V(F32x4FromUint32x4, 0xe51a, s_s)      \
+  V(I32x4Splat, 0xe51b, s_i)             \
+  V(I32x4ReplaceLane, 0xe51d, s_sii)     \
+  V(I32x4Neg, 0xe51e, s_s)               \
+  V(I32x4Add, 0xe51f, s_ss)              \
+  V(I32x4Sub, 0xe520, s_ss)              \
+  V(I32x4Mul, 0xe521, s_ss)              \
+  V(I32x4Min_s, 0xe522, s_ss)            \
+  V(I32x4Max_s, 0xe523, s_ss)            \
+  V(I32x4Shl, 0xe524, s_si)              \
+  V(I32x4Shr_s, 0xe525, s_si)            \
+  V(I32x4Eq, 0xe526, s_ss)               \
+  V(I32x4Ne, 0xe527, s_ss)               \
+  V(I32x4Lt_s, 0xe528, s_ss)             \
+  V(I32x4Le_s, 0xe529, s_ss)             \
+  V(I32x4Gt_s, 0xe52a, s_ss)             \
+  V(I32x4Ge_s, 0xe52b, s_ss)             \
+  V(I32x4Select, 0xe52c, s_sss)          \
+  V(I32x4Swizzle, 0xe52d, s_s)           \
+  V(I32x4Shuffle, 0xe52e, s_ss)          \
+  V(I32x4FromFloat32x4, 0xe52f, s_s)     \
+  V(I32x4Min_u, 0xe530, s_ss)            \
+  V(I32x4Max_u, 0xe531, s_ss)            \
+  V(I32x4Shr_u, 0xe532, s_ss)            \
+  V(I32x4Lt_u, 0xe533, s_ss)             \
+  V(I32x4Le_u, 0xe534, s_ss)             \
+  V(I32x4Gt_u, 0xe535, s_ss)             \
+  V(I32x4Ge_u, 0xe536, s_ss)             \
+  V(Ui32x4FromFloat32x4, 0xe537, s_s)    \
+  V(I16x8Splat, 0xe538, s_i)             \
+  V(I16x8ReplaceLane, 0xe53a, s_sii)     \
+  V(I16x8Neg, 0xe53b, s_s)               \
+  V(I16x8Add, 0xe53c, s_ss)              \
+  V(I16x8AddSaturate_s, 0xe53d, s_ss)    \
+  V(I16x8Sub, 0xe53e, s_ss)              \
+  V(I16x8SubSaturate_s, 0xe53f, s_ss)    \
+  V(I16x8Mul, 0xe540, s_ss)              \
+  V(I16x8Min_s, 0xe541, s_ss)            \
+  V(I16x8Max_s, 0xe542, s_ss)            \
+  V(I16x8Shl, 0xe543, s_si)              \
+  V(I16x8Shr_s, 0xe544, s_si)            \
+  V(I16x8Eq, 0xe545, s_ss)               \
+  V(I16x8Ne, 0xe546, s_ss)               \
+  V(I16x8Lt_s, 0xe547, s_ss)             \
+  V(I16x8Le_s, 0xe548, s_ss)             \
+  V(I16x8Gt_s, 0xe549, s_ss)             \
+  V(I16x8Ge_s, 0xe54a, s_ss)             \
+  V(I16x8Select, 0xe54b, s_sss)          \
+  V(I16x8Swizzle, 0xe54c, s_s)           \
+  V(I16x8Shuffle, 0xe54d, s_ss)          \
+  V(I16x8AddSaturate_u, 0xe54e, s_ss)    \
+  V(I16x8SubSaturate_u, 0xe54f, s_ss)    \
+  V(I16x8Min_u, 0xe550, s_ss)            \
+  V(I16x8Max_u, 0xe551, s_ss)            \
+  V(I16x8Shr_u, 0xe552, s_si)            \
+  V(I16x8Lt_u, 0xe553, s_ss)             \
+  V(I16x8Le_u, 0xe554, s_ss)             \
+  V(I16x8Gt_u, 0xe555, s_ss)             \
+  V(I16x8Ge_u, 0xe556, s_ss)             \
+  V(I8x16Splat, 0xe557, s_i)             \
+  V(I8x16ReplaceLane, 0xe559, s_sii)     \
+  V(I8x16Neg, 0xe55a, s_s)               \
+  V(I8x16Add, 0xe55b, s_ss)              \
+  V(I8x16AddSaturate_s, 0xe55c, s_ss)    \
+  V(I8x16Sub, 0xe55d, s_ss)              \
+  V(I8x16SubSaturate_s, 0xe55e, s_ss)    \
+  V(I8x16Mul, 0xe55f, s_ss)              \
+  V(I8x16Min_s, 0xe560, s_ss)            \
+  V(I8x16Max_s, 0xe561, s_ss)            \
+  V(I8x16Shl, 0xe562, s_si)              \
+  V(I8x16Shr_s, 0xe563, s_si)            \
+  V(I8x16Eq, 0xe564, s_ss)               \
+  V(I8x16Neq, 0xe565, s_ss)              \
+  V(I8x16Lt_s, 0xe566, s_ss)             \
+  V(I8x16Le_s, 0xe567, s_ss)             \
+  V(I8x16Gt_s, 0xe568, s_ss)             \
+  V(I8x16Ge_s, 0xe569, s_ss)             \
+  V(I8x16Select, 0xe56a, s_sss)          \
+  V(I8x16Swizzle, 0xe56b, s_s)           \
+  V(I8x16Shuffle, 0xe56c, s_ss)          \
+  V(I8x16AddSaturate_u, 0xe56d, s_ss)    \
+  V(I8x16Sub_saturate_u, 0xe56e, s_ss)   \
+  V(I8x16Min_u, 0xe56f, s_ss)            \
+  V(I8x16Max_u, 0xe570, s_ss)            \
+  V(I8x16Shr_u, 0xe571, s_ss)            \
+  V(I8x16Lt_u, 0xe572, s_ss)             \
+  V(I8x16Le_u, 0xe573, s_ss)             \
+  V(I8x16Gt_u, 0xe574, s_ss)             \
+  V(I8x16Ge_u, 0xe575, s_ss)             \
+  V(S128And, 0xe576, s_ss)               \
+  V(S128Ior, 0xe577, s_ss)               \
+  V(S128Xor, 0xe578, s_ss)               \
   V(S128Not, 0xe579, s_s)
 
+#define FOREACH_SIMD_1_OPERAND_OPCODE(V) \
+  V(F32x4ExtractLane, 0xe501, _)         \
+  V(I32x4ExtractLane, 0xe51c, _)         \
+  V(I16x8ExtractLane, 0xe539, _)         \
+  V(I8x16ExtractLane, 0xe558, _)
+
 // All opcodes.
-#define FOREACH_OPCODE(V)        \
-  FOREACH_CONTROL_OPCODE(V)      \
-  FOREACH_MISC_OPCODE(V)         \
-  FOREACH_SIMPLE_OPCODE(V)       \
-  FOREACH_SIMPLE_MEM_OPCODE(V)   \
-  FOREACH_STORE_MEM_OPCODE(V)    \
-  FOREACH_LOAD_MEM_OPCODE(V)     \
-  FOREACH_MISC_MEM_OPCODE(V)     \
-  FOREACH_ASMJS_COMPAT_OPCODE(V) \
-  FOREACH_SIMD_OPCODE(V)
+#define FOREACH_OPCODE(V)          \
+  FOREACH_CONTROL_OPCODE(V)        \
+  FOREACH_MISC_OPCODE(V)           \
+  FOREACH_SIMPLE_OPCODE(V)         \
+  FOREACH_SIMPLE_MEM_OPCODE(V)     \
+  FOREACH_STORE_MEM_OPCODE(V)      \
+  FOREACH_LOAD_MEM_OPCODE(V)       \
+  FOREACH_MISC_MEM_OPCODE(V)       \
+  FOREACH_ASMJS_COMPAT_OPCODE(V)   \
+  FOREACH_SIMD_0_OPERAND_OPCODE(V) \
+  FOREACH_SIMD_1_OPERAND_OPCODE(V)
 
 // All signatures.
 #define FOREACH_SIGNATURE(V)         \
@@ -443,12 +447,10 @@
 #define FOREACH_SIMD_SIGNATURE(V)                  \
   V(s_s, kAstS128, kAstS128)                       \
   V(s_f, kAstS128, kAstF32)                        \
-  V(f_si, kAstF32, kAstS128, kAstI32)              \
   V(s_sif, kAstS128, kAstS128, kAstI32, kAstF32)   \
   V(s_ss, kAstS128, kAstS128, kAstS128)            \
   V(s_sss, kAstS128, kAstS128, kAstS128, kAstS128) \
   V(s_i, kAstS128, kAstI32)                        \
-  V(i_si, kAstI32, kAstS128, kAstI32)              \
   V(s_sii, kAstS128, kAstS128, kAstI32, kAstI32)   \
   V(s_si, kAstS128, kAstS128, kAstI32)
 
@@ -489,6 +491,8 @@
   static const char* OpcodeName(WasmOpcode opcode);
   static const char* ShortOpcodeName(WasmOpcode opcode);
   static FunctionSig* Signature(WasmOpcode opcode);
+  static FunctionSig* AsmjsSignature(WasmOpcode opcode);
+  static bool IsPrefixOpcode(WasmOpcode opcode);
 
   static int TrapReasonToMessageId(TrapReason reason);
   static const char* TrapReasonMessage(TrapReason reason);
@@ -497,6 +501,8 @@
     return 1 << ElementSizeLog2Of(type.representation());
   }
 
+  static byte MemSize(LocalType type) { return 1 << ElementSizeLog2Of(type); }
+
   static LocalTypeCode LocalTypeCodeFor(LocalType type) {
     switch (type) {
       case kAstI32:
@@ -507,10 +513,10 @@
         return kLocalF32;
       case kAstF64:
         return kLocalF64;
-      case kAstStmt:
-        return kLocalVoid;
       case kAstS128:
         return kLocalS128;
+      case kAstStmt:
+        return kLocalVoid;
       default:
         UNREACHABLE();
         return kLocalVoid;
diff --git a/src/wasm/wasm-result.cc b/src/wasm/wasm-result.cc
index 30268ac..7d251f0 100644
--- a/src/wasm/wasm-result.cc
+++ b/src/wasm/wasm-result.cc
@@ -27,15 +27,13 @@
   return os;
 }
 
-void ErrorThrower::Error(const char* format, ...) {
+void ErrorThrower::Format(i::Handle<i::JSFunction> constructor,
+                          const char* format, va_list args) {
   // Only report the first error.
   if (error()) return;
 
   char buffer[256];
-  va_list arguments;
-  va_start(arguments, format);
-  base::OS::VSNPrintF(buffer, 255, format, arguments);
-  va_end(arguments);
+  base::OS::VSNPrintF(buffer, 255, format, args);
 
   std::ostringstream str;
   if (context_ != nullptr) {
@@ -43,12 +41,39 @@
   }
   str << buffer;
 
-  message_ = isolate_->factory()->NewStringFromAsciiChecked(str.str().c_str());
+  i::Handle<i::String> message =
+      isolate_->factory()->NewStringFromAsciiChecked(str.str().c_str());
+  exception_ = isolate_->factory()->NewError(constructor, message);
+}
+
+void ErrorThrower::Error(const char* format, ...) {
+  if (error()) return;
+  va_list arguments;
+  va_start(arguments, format);
+  Format(isolate_->error_function(), format, arguments);
+  va_end(arguments);
+}
+
+void ErrorThrower::TypeError(const char* format, ...) {
+  if (error()) return;
+  va_list arguments;
+  va_start(arguments, format);
+  Format(isolate_->type_error_function(), format, arguments);
+  va_end(arguments);
+}
+
+void ErrorThrower::RangeError(const char* format, ...) {
+  if (error()) return;
+  va_list arguments;
+  va_start(arguments, format);
+  CHECK(*isolate_->range_error_function() != *isolate_->type_error_function());
+  Format(isolate_->range_error_function(), format, arguments);
+  va_end(arguments);
 }
 
 ErrorThrower::~ErrorThrower() {
   if (error() && !isolate_->has_pending_exception()) {
-    isolate_->ScheduleThrow(*message_);
+    isolate_->ScheduleThrow(*exception_);
   }
 }
 }  // namespace wasm
diff --git a/src/wasm/wasm-result.h b/src/wasm/wasm-result.h
index f16c159..ecc54e5 100644
--- a/src/wasm/wasm-result.h
+++ b/src/wasm/wasm-result.h
@@ -22,19 +22,7 @@
 // Error codes for programmatic checking of the decoder's verification.
 enum ErrorCode {
   kSuccess,
-  kError,                 // TODO(titzer): remove me
-  kOutOfMemory,           // decoder ran out of memory
-  kEndOfCode,             // end of code reached prematurely
-  kInvalidOpcode,         // found invalid opcode
-  kUnreachableCode,       // found unreachable code
-  kImproperContinue,      // improperly nested continue
-  kImproperBreak,         // improperly nested break
-  kReturnCount,           // return count mismatch
-  kTypeError,             // type mismatch
-  kInvalidLocalIndex,     // invalid local
-  kInvalidGlobalIndex,    // invalid global
-  kInvalidFunctionIndex,  // invalid function
-  kInvalidMemType         // invalid memory type
+  kError,  // TODO(titzer): introduce real error codes
 };
 
 // The overall result of decoding a function or a module.
@@ -97,33 +85,37 @@
 std::ostream& operator<<(std::ostream& os, const ErrorCode& error_code);
 
 // A helper for generating error messages that bubble up to JS exceptions.
-class ErrorThrower {
+class V8_EXPORT_PRIVATE ErrorThrower {
  public:
-  ErrorThrower(Isolate* isolate, const char* context)
+  ErrorThrower(i::Isolate* isolate, const char* context)
       : isolate_(isolate), context_(context) {}
   ~ErrorThrower();
 
   PRINTF_FORMAT(2, 3) void Error(const char* fmt, ...);
+  PRINTF_FORMAT(2, 3) void TypeError(const char* fmt, ...);
+  PRINTF_FORMAT(2, 3) void RangeError(const char* fmt, ...);
 
   template <typename T>
   void Failed(const char* error, Result<T>& result) {
     std::ostringstream str;
     str << error << result;
-    return Error("%s", str.str().c_str());
+    Error("%s", str.str().c_str());
   }
 
-  i::Handle<i::String> Reify() {
-    auto result = message_;
-    message_ = i::Handle<i::String>();
+  i::Handle<i::Object> Reify() {
+    i::Handle<i::Object> result = exception_;
+    exception_ = i::Handle<i::Object>::null();
     return result;
   }
 
-  bool error() const { return !message_.is_null(); }
+  bool error() const { return !exception_.is_null(); }
 
  private:
-  Isolate* isolate_;
+  void Format(i::Handle<i::JSFunction> constructor, const char* fmt, va_list);
+
+  i::Isolate* isolate_;
   const char* context_;
-  i::Handle<i::String> message_;
+  i::Handle<i::Object> exception_;
 };
 }  // namespace wasm
 }  // namespace internal
diff --git a/src/x64/assembler-x64.cc b/src/x64/assembler-x64.cc
index 9a0d18e..d202aad 100644
--- a/src/x64/assembler-x64.cc
+++ b/src/x64/assembler-x64.cc
@@ -79,6 +79,7 @@
   if (cross_compile) return;
 
   if (cpu.has_sse41() && FLAG_enable_sse4_1) supported_ |= 1u << SSE4_1;
+  if (cpu.has_ssse3() && FLAG_enable_ssse3) supported_ |= 1u << SSSE3;
   if (cpu.has_sse3() && FLAG_enable_sse3) supported_ |= 1u << SSE3;
   // SAHF is not generally available in long mode.
   if (cpu.has_sahf() && FLAG_enable_sahf) supported_ |= 1u << SAHF;
@@ -105,13 +106,15 @@
 void CpuFeatures::PrintTarget() { }
 void CpuFeatures::PrintFeatures() {
   printf(
-      "SSE3=%d SSE4_1=%d SAHF=%d AVX=%d FMA3=%d BMI1=%d BMI2=%d LZCNT=%d "
+      "SSE3=%d SSSE3=%d SSE4_1=%d SAHF=%d AVX=%d FMA3=%d BMI1=%d BMI2=%d "
+      "LZCNT=%d "
       "POPCNT=%d ATOM=%d\n",
-      CpuFeatures::IsSupported(SSE3), CpuFeatures::IsSupported(SSE4_1),
-      CpuFeatures::IsSupported(SAHF), CpuFeatures::IsSupported(AVX),
-      CpuFeatures::IsSupported(FMA3), CpuFeatures::IsSupported(BMI1),
-      CpuFeatures::IsSupported(BMI2), CpuFeatures::IsSupported(LZCNT),
-      CpuFeatures::IsSupported(POPCNT), CpuFeatures::IsSupported(ATOM));
+      CpuFeatures::IsSupported(SSE3), CpuFeatures::IsSupported(SSSE3),
+      CpuFeatures::IsSupported(SSE4_1), CpuFeatures::IsSupported(SAHF),
+      CpuFeatures::IsSupported(AVX), CpuFeatures::IsSupported(FMA3),
+      CpuFeatures::IsSupported(BMI1), CpuFeatures::IsSupported(BMI2),
+      CpuFeatures::IsSupported(LZCNT), CpuFeatures::IsSupported(POPCNT),
+      CpuFeatures::IsSupported(ATOM));
 }
 
 // -----------------------------------------------------------------------------
@@ -2834,6 +2837,77 @@
   emit(imm8);
 }
 
+void Assembler::pextrb(Register dst, XMMRegister src, int8_t imm8) {
+  DCHECK(IsEnabled(SSE4_1));
+  DCHECK(is_uint8(imm8));
+  EnsureSpace ensure_space(this);
+  emit(0x66);
+  emit_optional_rex_32(src, dst);
+  emit(0x0F);
+  emit(0x3A);
+  emit(0x14);
+  emit_sse_operand(src, dst);
+  emit(imm8);
+}
+
+void Assembler::pextrb(const Operand& dst, XMMRegister src, int8_t imm8) {
+  DCHECK(IsEnabled(SSE4_1));
+  DCHECK(is_uint8(imm8));
+  EnsureSpace ensure_space(this);
+  emit(0x66);
+  emit_optional_rex_32(src, dst);
+  emit(0x0F);
+  emit(0x3A);
+  emit(0x14);
+  emit_sse_operand(src, dst);
+  emit(imm8);
+}
+
+void Assembler::pinsrw(XMMRegister dst, Register src, int8_t imm8) {
+  DCHECK(is_uint8(imm8));
+  EnsureSpace ensure_space(this);
+  emit(0x66);
+  emit_optional_rex_32(dst, src);
+  emit(0x0F);
+  emit(0xC4);
+  emit_sse_operand(dst, src);
+  emit(imm8);
+}
+
+void Assembler::pinsrw(XMMRegister dst, const Operand& src, int8_t imm8) {
+  DCHECK(is_uint8(imm8));
+  EnsureSpace ensure_space(this);
+  emit(0x66);
+  emit_optional_rex_32(dst, src);
+  emit(0x0F);
+  emit(0xC4);
+  emit_sse_operand(dst, src);
+  emit(imm8);
+}
+
+void Assembler::pextrw(Register dst, XMMRegister src, int8_t imm8) {
+  DCHECK(is_uint8(imm8));
+  EnsureSpace ensure_space(this);
+  emit(0x66);
+  emit_optional_rex_32(src, dst);
+  emit(0x0F);
+  emit(0xC5);
+  emit_sse_operand(src, dst);
+  emit(imm8);
+}
+
+void Assembler::pextrw(const Operand& dst, XMMRegister src, int8_t imm8) {
+  DCHECK(IsEnabled(SSE4_1));
+  DCHECK(is_uint8(imm8));
+  EnsureSpace ensure_space(this);
+  emit(0x66);
+  emit_optional_rex_32(src, dst);
+  emit(0x0F);
+  emit(0x3A);
+  emit(0x15);
+  emit_sse_operand(src, dst);
+  emit(imm8);
+}
 
 void Assembler::pextrd(Register dst, XMMRegister src, int8_t imm8) {
   DCHECK(IsEnabled(SSE4_1));
@@ -2847,6 +2921,17 @@
   emit(imm8);
 }
 
+void Assembler::pextrd(const Operand& dst, XMMRegister src, int8_t imm8) {
+  DCHECK(IsEnabled(SSE4_1));
+  EnsureSpace ensure_space(this);
+  emit(0x66);
+  emit_optional_rex_32(src, dst);
+  emit(0x0F);
+  emit(0x3A);
+  emit(0x16);
+  emit_sse_operand(src, dst);
+  emit(imm8);
+}
 
 void Assembler::pinsrd(XMMRegister dst, Register src, int8_t imm8) {
   DCHECK(IsEnabled(SSE4_1));
@@ -2873,6 +2958,30 @@
   emit(imm8);
 }
 
+void Assembler::pinsrb(XMMRegister dst, Register src, int8_t imm8) {
+  DCHECK(IsEnabled(SSE4_1));
+  EnsureSpace ensure_space(this);
+  emit(0x66);
+  emit_optional_rex_32(dst, src);
+  emit(0x0F);
+  emit(0x3A);
+  emit(0x20);
+  emit_sse_operand(dst, src);
+  emit(imm8);
+}
+
+void Assembler::pinsrb(XMMRegister dst, const Operand& src, int8_t imm8) {
+  DCHECK(IsEnabled(SSE4_1));
+  EnsureSpace ensure_space(this);
+  emit(0x66);
+  emit_optional_rex_32(dst, src);
+  emit(0x0F);
+  emit(0x3A);
+  emit(0x20);
+  emit_sse_operand(dst, src);
+  emit(imm8);
+}
+
 void Assembler::insertps(XMMRegister dst, XMMRegister src, byte imm8) {
   DCHECK(CpuFeatures::IsSupported(SSE4_1));
   DCHECK(is_uint8(imm8));
@@ -3202,6 +3311,15 @@
   emit(imm8);
 }
 
+void Assembler::psllw(XMMRegister reg, byte imm8) {
+  EnsureSpace ensure_space(this);
+  emit(0x66);
+  emit_optional_rex_32(reg);
+  emit(0x0F);
+  emit(0x71);
+  emit_sse_operand(rsi, reg);  // rsi == 6
+  emit(imm8);
+}
 
 void Assembler::pslld(XMMRegister reg, byte imm8) {
   EnsureSpace ensure_space(this);
@@ -3213,6 +3331,15 @@
   emit(imm8);
 }
 
+void Assembler::psrlw(XMMRegister reg, byte imm8) {
+  EnsureSpace ensure_space(this);
+  emit(0x66);
+  emit_optional_rex_32(reg);
+  emit(0x0F);
+  emit(0x71);
+  emit_sse_operand(rdx, reg);  // rdx == 2
+  emit(imm8);
+}
 
 void Assembler::psrld(XMMRegister reg, byte imm8) {
   EnsureSpace ensure_space(this);
@@ -3224,6 +3351,26 @@
   emit(imm8);
 }
 
+void Assembler::psraw(XMMRegister reg, byte imm8) {
+  EnsureSpace ensure_space(this);
+  emit(0x66);
+  emit_optional_rex_32(reg);
+  emit(0x0F);
+  emit(0x71);
+  emit_sse_operand(rsp, reg);  // rsp == 4
+  emit(imm8);
+}
+
+void Assembler::psrad(XMMRegister reg, byte imm8) {
+  EnsureSpace ensure_space(this);
+  emit(0x66);
+  emit_optional_rex_32(reg);
+  emit(0x0F);
+  emit(0x72);
+  emit_sse_operand(rsp, reg);  // rsp == 4
+  emit(imm8);
+}
+
 void Assembler::cmpps(XMMRegister dst, XMMRegister src, int8_t cmp) {
   EnsureSpace ensure_space(this);
   emit_optional_rex_32(dst, src);
@@ -3789,17 +3936,6 @@
 }
 
 
-void Assembler::pcmpeqd(XMMRegister dst, XMMRegister src) {
-  DCHECK(!IsEnabled(AVX));
-  EnsureSpace ensure_space(this);
-  emit(0x66);
-  emit_optional_rex_32(dst, src);
-  emit(0x0F);
-  emit(0x76);
-  emit_sse_operand(dst, src);
-}
-
-
 void Assembler::punpckldq(XMMRegister dst, XMMRegister src) {
   EnsureSpace ensure_space(this);
   emit(0x66);
@@ -3926,9 +4062,9 @@
   emit_sse_operand(src, dst);
 }
 
-
-void Assembler::vsd(byte op, XMMRegister dst, XMMRegister src1,
-                    XMMRegister src2, SIMDPrefix pp, LeadingOpcode m, VexW w) {
+void Assembler::vinstr(byte op, XMMRegister dst, XMMRegister src1,
+                       XMMRegister src2, SIMDPrefix pp, LeadingOpcode m,
+                       VexW w) {
   DCHECK(IsEnabled(AVX));
   EnsureSpace ensure_space(this);
   emit_vex_prefix(dst, src1, src2, kLIG, pp, m, w);
@@ -3936,10 +4072,9 @@
   emit_sse_operand(dst, src2);
 }
 
-
-void Assembler::vsd(byte op, XMMRegister dst, XMMRegister src1,
-                    const Operand& src2, SIMDPrefix pp, LeadingOpcode m,
-                    VexW w) {
+void Assembler::vinstr(byte op, XMMRegister dst, XMMRegister src1,
+                       const Operand& src2, SIMDPrefix pp, LeadingOpcode m,
+                       VexW w) {
   DCHECK(IsEnabled(AVX));
   EnsureSpace ensure_space(this);
   emit_vex_prefix(dst, src1, src2, kLIG, pp, m, w);
@@ -4409,78 +4544,81 @@
   emit_sse_operand(src, dst);
 }
 
-void Assembler::paddd(XMMRegister dst, XMMRegister src) {
+void Assembler::sse2_instr(XMMRegister dst, XMMRegister src, byte prefix,
+                           byte escape, byte opcode) {
   EnsureSpace ensure_space(this);
-  emit(0x66);
+  emit(prefix);
   emit_optional_rex_32(dst, src);
-  emit(0x0F);
-  emit(0xFE);
+  emit(escape);
+  emit(opcode);
   emit_sse_operand(dst, src);
 }
 
-void Assembler::paddd(XMMRegister dst, const Operand& src) {
+void Assembler::sse2_instr(XMMRegister dst, const Operand& src, byte prefix,
+                           byte escape, byte opcode) {
   EnsureSpace ensure_space(this);
-  emit(0x66);
+  emit(prefix);
   emit_optional_rex_32(dst, src);
-  emit(0x0F);
-  emit(0xFE);
+  emit(escape);
+  emit(opcode);
   emit_sse_operand(dst, src);
 }
 
-void Assembler::psubd(XMMRegister dst, XMMRegister src) {
+void Assembler::ssse3_instr(XMMRegister dst, XMMRegister src, byte prefix,
+                            byte escape1, byte escape2, byte opcode) {
+  DCHECK(IsEnabled(SSSE3));
   EnsureSpace ensure_space(this);
-  emit(0x66);
+  emit(prefix);
   emit_optional_rex_32(dst, src);
-  emit(0x0F);
-  emit(0xFA);
+  emit(escape1);
+  emit(escape2);
+  emit(opcode);
   emit_sse_operand(dst, src);
 }
 
-void Assembler::psubd(XMMRegister dst, const Operand& src) {
+void Assembler::ssse3_instr(XMMRegister dst, const Operand& src, byte prefix,
+                            byte escape1, byte escape2, byte opcode) {
+  DCHECK(IsEnabled(SSSE3));
   EnsureSpace ensure_space(this);
-  emit(0x66);
+  emit(prefix);
   emit_optional_rex_32(dst, src);
-  emit(0x0F);
-  emit(0xFA);
+  emit(escape1);
+  emit(escape2);
+  emit(opcode);
   emit_sse_operand(dst, src);
 }
 
-void Assembler::pmulld(XMMRegister dst, XMMRegister src) {
+void Assembler::sse4_instr(XMMRegister dst, XMMRegister src, byte prefix,
+                           byte escape1, byte escape2, byte opcode) {
   DCHECK(IsEnabled(SSE4_1));
   EnsureSpace ensure_space(this);
-  emit(0x66);
+  emit(prefix);
   emit_optional_rex_32(dst, src);
-  emit(0x0F);
-  emit(0x38);
-  emit(0x40);
+  emit(escape1);
+  emit(escape2);
+  emit(opcode);
   emit_sse_operand(dst, src);
 }
 
-void Assembler::pmulld(XMMRegister dst, const Operand& src) {
+void Assembler::sse4_instr(XMMRegister dst, const Operand& src, byte prefix,
+                           byte escape1, byte escape2, byte opcode) {
+  DCHECK(IsEnabled(SSE4_1));
   EnsureSpace ensure_space(this);
-  emit(0x66);
+  emit(prefix);
   emit_optional_rex_32(dst, src);
-  emit(0x0F);
-  emit(0x38);
-  emit(0x40);
+  emit(escape1);
+  emit(escape2);
+  emit(opcode);
   emit_sse_operand(dst, src);
 }
 
-void Assembler::pmuludq(XMMRegister dst, XMMRegister src) {
+void Assembler::lddqu(XMMRegister dst, const Operand& src) {
+  DCHECK(IsEnabled(SSE3));
   EnsureSpace ensure_space(this);
-  emit(0x66);
+  emit(0xF2);
   emit_optional_rex_32(dst, src);
   emit(0x0F);
-  emit(0xF4);
-  emit_sse_operand(dst, src);
-}
-
-void Assembler::pmuludq(XMMRegister dst, const Operand& src) {
-  EnsureSpace ensure_space(this);
-  emit(0x66);
-  emit_optional_rex_32(dst, src);
-  emit(0x0F);
-  emit(0xF4);
+  emit(0xF0);
   emit_sse_operand(dst, src);
 }
 
@@ -4494,24 +4632,6 @@
   emit(shift);
 }
 
-void Assembler::cvtps2dq(XMMRegister dst, XMMRegister src) {
-  EnsureSpace ensure_space(this);
-  emit(0x66);
-  emit_optional_rex_32(dst, src);
-  emit(0x0F);
-  emit(0x5B);
-  emit_sse_operand(dst, src);
-}
-
-void Assembler::cvtps2dq(XMMRegister dst, const Operand& src) {
-  EnsureSpace ensure_space(this);
-  emit(0x66);
-  emit_optional_rex_32(dst, src);
-  emit(0x0F);
-  emit(0x5B);
-  emit_sse_operand(dst, src);
-}
-
 void Assembler::pshufd(XMMRegister dst, XMMRegister src, uint8_t shuffle) {
   EnsureSpace ensure_space(this);
   emit(0x66);
@@ -4522,6 +4642,16 @@
   emit(shuffle);
 }
 
+void Assembler::pshufd(XMMRegister dst, const Operand& src, uint8_t shuffle) {
+  EnsureSpace ensure_space(this);
+  emit(0x66);
+  emit_optional_rex_32(dst, src);
+  emit(0x0F);
+  emit(0x70);
+  emit_sse_operand(dst, src);
+  emit(shuffle);
+}
+
 void Assembler::emit_sse_operand(XMMRegister reg, const Operand& adr) {
   Register ireg = { reg.code() };
   emit_operand(ireg, adr);
diff --git a/src/x64/assembler-x64.h b/src/x64/assembler-x64.h
index b2154fb..5de891c 100644
--- a/src/x64/assembler-x64.h
+++ b/src/x64/assembler-x64.h
@@ -40,6 +40,7 @@
 #include <deque>
 
 #include "src/assembler.h"
+#include "src/x64/sse-instr.h"
 
 namespace v8 {
 namespace internal {
@@ -1072,7 +1073,91 @@
 
   void movmskps(Register dst, XMMRegister src);
 
+  void vinstr(byte op, XMMRegister dst, XMMRegister src1, XMMRegister src2,
+              SIMDPrefix pp, LeadingOpcode m, VexW w);
+  void vinstr(byte op, XMMRegister dst, XMMRegister src1, const Operand& src2,
+              SIMDPrefix pp, LeadingOpcode m, VexW w);
+
   // SSE2 instructions
+  void sse2_instr(XMMRegister dst, XMMRegister src, byte prefix, byte escape,
+                  byte opcode);
+  void sse2_instr(XMMRegister dst, const Operand& src, byte prefix, byte escape,
+                  byte opcode);
+#define DECLARE_SSE2_INSTRUCTION(instruction, prefix, escape, opcode) \
+  void instruction(XMMRegister dst, XMMRegister src) {                \
+    sse2_instr(dst, src, 0x##prefix, 0x##escape, 0x##opcode);         \
+  }                                                                   \
+  void instruction(XMMRegister dst, const Operand& src) {             \
+    sse2_instr(dst, src, 0x##prefix, 0x##escape, 0x##opcode);         \
+  }
+
+  SSE2_INSTRUCTION_LIST(DECLARE_SSE2_INSTRUCTION)
+#undef DECLARE_SSE2_INSTRUCTION
+
+#define DECLARE_SSE2_AVX_INSTRUCTION(instruction, prefix, escape, opcode)    \
+  void v##instruction(XMMRegister dst, XMMRegister src1, XMMRegister src2) { \
+    vinstr(0x##opcode, dst, src1, src2, k##prefix, k##escape, kW0);          \
+  }                                                                          \
+  void v##instruction(XMMRegister dst, XMMRegister src1,                     \
+                      const Operand& src2) {                                 \
+    vinstr(0x##opcode, dst, src1, src2, k##prefix, k##escape, kW0);          \
+  }
+
+  SSE2_INSTRUCTION_LIST(DECLARE_SSE2_AVX_INSTRUCTION)
+#undef DECLARE_SSE2_AVX_INSTRUCTION
+
+  // SSE3
+  void lddqu(XMMRegister dst, const Operand& src);
+
+  // SSSE3
+  void ssse3_instr(XMMRegister dst, XMMRegister src, byte prefix, byte escape1,
+                   byte escape2, byte opcode);
+  void ssse3_instr(XMMRegister dst, const Operand& src, byte prefix,
+                   byte escape1, byte escape2, byte opcode);
+
+#define DECLARE_SSSE3_INSTRUCTION(instruction, prefix, escape1, escape2,     \
+                                  opcode)                                    \
+  void instruction(XMMRegister dst, XMMRegister src) {                       \
+    ssse3_instr(dst, src, 0x##prefix, 0x##escape1, 0x##escape2, 0x##opcode); \
+  }                                                                          \
+  void instruction(XMMRegister dst, const Operand& src) {                    \
+    ssse3_instr(dst, src, 0x##prefix, 0x##escape1, 0x##escape2, 0x##opcode); \
+  }
+
+  SSSE3_INSTRUCTION_LIST(DECLARE_SSSE3_INSTRUCTION)
+#undef DECLARE_SSSE3_INSTRUCTION
+
+  // SSE4
+  void sse4_instr(XMMRegister dst, XMMRegister src, byte prefix, byte escape1,
+                  byte escape2, byte opcode);
+  void sse4_instr(XMMRegister dst, const Operand& src, byte prefix,
+                  byte escape1, byte escape2, byte opcode);
+#define DECLARE_SSE4_INSTRUCTION(instruction, prefix, escape1, escape2,     \
+                                 opcode)                                    \
+  void instruction(XMMRegister dst, XMMRegister src) {                      \
+    sse4_instr(dst, src, 0x##prefix, 0x##escape1, 0x##escape2, 0x##opcode); \
+  }                                                                         \
+  void instruction(XMMRegister dst, const Operand& src) {                   \
+    sse4_instr(dst, src, 0x##prefix, 0x##escape1, 0x##escape2, 0x##opcode); \
+  }
+
+  SSE4_INSTRUCTION_LIST(DECLARE_SSE4_INSTRUCTION)
+#undef DECLARE_SSE4_INSTRUCTION
+
+#define DECLARE_SSE34_AVX_INSTRUCTION(instruction, prefix, escape1, escape2,  \
+                                      opcode)                                 \
+  void v##instruction(XMMRegister dst, XMMRegister src1, XMMRegister src2) {  \
+    vinstr(0x##opcode, dst, src1, src2, k##prefix, k##escape1##escape2, kW0); \
+  }                                                                           \
+  void v##instruction(XMMRegister dst, XMMRegister src1,                      \
+                      const Operand& src2) {                                  \
+    vinstr(0x##opcode, dst, src1, src2, k##prefix, k##escape1##escape2, kW0); \
+  }
+
+  SSSE3_INSTRUCTION_LIST(DECLARE_SSE34_AVX_INSTRUCTION)
+  SSE4_INSTRUCTION_LIST(DECLARE_SSE34_AVX_INSTRUCTION)
+#undef DECLARE_SSE34_AVX_INSTRUCTION
+
   void movd(XMMRegister dst, Register src);
   void movd(XMMRegister dst, const Operand& src);
   void movd(Register dst, XMMRegister src);
@@ -1101,8 +1186,12 @@
 
   void psllq(XMMRegister reg, byte imm8);
   void psrlq(XMMRegister reg, byte imm8);
+  void psllw(XMMRegister reg, byte imm8);
   void pslld(XMMRegister reg, byte imm8);
+  void psrlw(XMMRegister reg, byte imm8);
   void psrld(XMMRegister reg, byte imm8);
+  void psraw(XMMRegister reg, byte imm8);
+  void psrad(XMMRegister reg, byte imm8);
 
   void cvttsd2si(Register dst, const Operand& src);
   void cvttsd2si(Register dst, XMMRegister src);
@@ -1155,7 +1244,6 @@
   void ucomisd(XMMRegister dst, XMMRegister src);
   void ucomisd(XMMRegister dst, const Operand& src);
   void cmpltsd(XMMRegister dst, XMMRegister src);
-  void pcmpeqd(XMMRegister dst, XMMRegister src);
 
   void movmskpd(Register dst, XMMRegister src);
 
@@ -1166,7 +1254,16 @@
   // SSE 4.1 instruction
   void insertps(XMMRegister dst, XMMRegister src, byte imm8);
   void extractps(Register dst, XMMRegister src, byte imm8);
+  void pextrb(Register dst, XMMRegister src, int8_t imm8);
+  void pextrb(const Operand& dst, XMMRegister src, int8_t imm8);
+  void pextrw(Register dst, XMMRegister src, int8_t imm8);
+  void pextrw(const Operand& dst, XMMRegister src, int8_t imm8);
   void pextrd(Register dst, XMMRegister src, int8_t imm8);
+  void pextrd(const Operand& dst, XMMRegister src, int8_t imm8);
+  void pinsrb(XMMRegister dst, Register src, int8_t imm8);
+  void pinsrb(XMMRegister dst, const Operand& src, int8_t imm8);
+  void pinsrw(XMMRegister dst, Register src, int8_t imm8);
+  void pinsrw(XMMRegister dst, const Operand& src, int8_t imm8);
   void pinsrd(XMMRegister dst, Register src, int8_t imm8);
   void pinsrd(XMMRegister dst, const Operand& src, int8_t imm8);
 
@@ -1208,18 +1305,9 @@
   void movups(XMMRegister dst, XMMRegister src);
   void movups(XMMRegister dst, const Operand& src);
   void movups(const Operand& dst, XMMRegister src);
-  void paddd(XMMRegister dst, XMMRegister src);
-  void paddd(XMMRegister dst, const Operand& src);
-  void psubd(XMMRegister dst, XMMRegister src);
-  void psubd(XMMRegister dst, const Operand& src);
-  void pmulld(XMMRegister dst, XMMRegister src);
-  void pmulld(XMMRegister dst, const Operand& src);
-  void pmuludq(XMMRegister dst, XMMRegister src);
-  void pmuludq(XMMRegister dst, const Operand& src);
   void psrldq(XMMRegister dst, uint8_t shift);
   void pshufd(XMMRegister dst, XMMRegister src, uint8_t shuffle);
-  void cvtps2dq(XMMRegister dst, XMMRegister src);
-  void cvtps2dq(XMMRegister dst, const Operand& src);
+  void pshufd(XMMRegister dst, const Operand& src, uint8_t shuffle);
   void cvtdq2ps(XMMRegister dst, XMMRegister src);
   void cvtdq2ps(XMMRegister dst, const Operand& src);
 
@@ -1421,7 +1509,6 @@
   AVX_P_3(vand, 0x54);
   AVX_P_3(vor, 0x56);
   AVX_P_3(vxor, 0x57);
-  AVX_3(vpcmpeqd, 0x76, vpd);
   AVX_3(vcvtsd2ss, 0x5a, vsd);
 
 #undef AVX_3
@@ -1440,102 +1527,98 @@
     emit(imm8);
   }
   void vcvtss2sd(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
-    vsd(0x5a, dst, src1, src2, kF3, k0F, kWIG);
+    vinstr(0x5a, dst, src1, src2, kF3, k0F, kWIG);
   }
   void vcvtss2sd(XMMRegister dst, XMMRegister src1, const Operand& src2) {
-    vsd(0x5a, dst, src1, src2, kF3, k0F, kWIG);
+    vinstr(0x5a, dst, src1, src2, kF3, k0F, kWIG);
   }
   void vcvtlsi2sd(XMMRegister dst, XMMRegister src1, Register src2) {
     XMMRegister isrc2 = {src2.code()};
-    vsd(0x2a, dst, src1, isrc2, kF2, k0F, kW0);
+    vinstr(0x2a, dst, src1, isrc2, kF2, k0F, kW0);
   }
   void vcvtlsi2sd(XMMRegister dst, XMMRegister src1, const Operand& src2) {
-    vsd(0x2a, dst, src1, src2, kF2, k0F, kW0);
+    vinstr(0x2a, dst, src1, src2, kF2, k0F, kW0);
   }
   void vcvtlsi2ss(XMMRegister dst, XMMRegister src1, Register src2) {
     XMMRegister isrc2 = {src2.code()};
-    vsd(0x2a, dst, src1, isrc2, kF3, k0F, kW0);
+    vinstr(0x2a, dst, src1, isrc2, kF3, k0F, kW0);
   }
   void vcvtlsi2ss(XMMRegister dst, XMMRegister src1, const Operand& src2) {
-    vsd(0x2a, dst, src1, src2, kF3, k0F, kW0);
+    vinstr(0x2a, dst, src1, src2, kF3, k0F, kW0);
   }
   void vcvtqsi2ss(XMMRegister dst, XMMRegister src1, Register src2) {
     XMMRegister isrc2 = {src2.code()};
-    vsd(0x2a, dst, src1, isrc2, kF3, k0F, kW1);
+    vinstr(0x2a, dst, src1, isrc2, kF3, k0F, kW1);
   }
   void vcvtqsi2ss(XMMRegister dst, XMMRegister src1, const Operand& src2) {
-    vsd(0x2a, dst, src1, src2, kF3, k0F, kW1);
+    vinstr(0x2a, dst, src1, src2, kF3, k0F, kW1);
   }
   void vcvtqsi2sd(XMMRegister dst, XMMRegister src1, Register src2) {
     XMMRegister isrc2 = {src2.code()};
-    vsd(0x2a, dst, src1, isrc2, kF2, k0F, kW1);
+    vinstr(0x2a, dst, src1, isrc2, kF2, k0F, kW1);
   }
   void vcvtqsi2sd(XMMRegister dst, XMMRegister src1, const Operand& src2) {
-    vsd(0x2a, dst, src1, src2, kF2, k0F, kW1);
+    vinstr(0x2a, dst, src1, src2, kF2, k0F, kW1);
   }
   void vcvttss2si(Register dst, XMMRegister src) {
     XMMRegister idst = {dst.code()};
-    vsd(0x2c, idst, xmm0, src, kF3, k0F, kW0);
+    vinstr(0x2c, idst, xmm0, src, kF3, k0F, kW0);
   }
   void vcvttss2si(Register dst, const Operand& src) {
     XMMRegister idst = {dst.code()};
-    vsd(0x2c, idst, xmm0, src, kF3, k0F, kW0);
+    vinstr(0x2c, idst, xmm0, src, kF3, k0F, kW0);
   }
   void vcvttsd2si(Register dst, XMMRegister src) {
     XMMRegister idst = {dst.code()};
-    vsd(0x2c, idst, xmm0, src, kF2, k0F, kW0);
+    vinstr(0x2c, idst, xmm0, src, kF2, k0F, kW0);
   }
   void vcvttsd2si(Register dst, const Operand& src) {
     XMMRegister idst = {dst.code()};
-    vsd(0x2c, idst, xmm0, src, kF2, k0F, kW0);
+    vinstr(0x2c, idst, xmm0, src, kF2, k0F, kW0);
   }
   void vcvttss2siq(Register dst, XMMRegister src) {
     XMMRegister idst = {dst.code()};
-    vsd(0x2c, idst, xmm0, src, kF3, k0F, kW1);
+    vinstr(0x2c, idst, xmm0, src, kF3, k0F, kW1);
   }
   void vcvttss2siq(Register dst, const Operand& src) {
     XMMRegister idst = {dst.code()};
-    vsd(0x2c, idst, xmm0, src, kF3, k0F, kW1);
+    vinstr(0x2c, idst, xmm0, src, kF3, k0F, kW1);
   }
   void vcvttsd2siq(Register dst, XMMRegister src) {
     XMMRegister idst = {dst.code()};
-    vsd(0x2c, idst, xmm0, src, kF2, k0F, kW1);
+    vinstr(0x2c, idst, xmm0, src, kF2, k0F, kW1);
   }
   void vcvttsd2siq(Register dst, const Operand& src) {
     XMMRegister idst = {dst.code()};
-    vsd(0x2c, idst, xmm0, src, kF2, k0F, kW1);
+    vinstr(0x2c, idst, xmm0, src, kF2, k0F, kW1);
   }
   void vcvtsd2si(Register dst, XMMRegister src) {
     XMMRegister idst = {dst.code()};
-    vsd(0x2d, idst, xmm0, src, kF2, k0F, kW0);
+    vinstr(0x2d, idst, xmm0, src, kF2, k0F, kW0);
   }
   void vucomisd(XMMRegister dst, XMMRegister src) {
-    vsd(0x2e, dst, xmm0, src, k66, k0F, kWIG);
+    vinstr(0x2e, dst, xmm0, src, k66, k0F, kWIG);
   }
   void vucomisd(XMMRegister dst, const Operand& src) {
-    vsd(0x2e, dst, xmm0, src, k66, k0F, kWIG);
+    vinstr(0x2e, dst, xmm0, src, k66, k0F, kWIG);
   }
   void vroundss(XMMRegister dst, XMMRegister src1, XMMRegister src2,
                 RoundingMode mode) {
-    vsd(0x0a, dst, src1, src2, k66, k0F3A, kWIG);
+    vinstr(0x0a, dst, src1, src2, k66, k0F3A, kWIG);
     emit(static_cast<byte>(mode) | 0x8);  // Mask precision exception.
   }
   void vroundsd(XMMRegister dst, XMMRegister src1, XMMRegister src2,
                 RoundingMode mode) {
-    vsd(0x0b, dst, src1, src2, k66, k0F3A, kWIG);
+    vinstr(0x0b, dst, src1, src2, k66, k0F3A, kWIG);
     emit(static_cast<byte>(mode) | 0x8);  // Mask precision exception.
   }
 
   void vsd(byte op, XMMRegister dst, XMMRegister src1, XMMRegister src2) {
-    vsd(op, dst, src1, src2, kF2, k0F, kWIG);
+    vinstr(op, dst, src1, src2, kF2, k0F, kWIG);
   }
   void vsd(byte op, XMMRegister dst, XMMRegister src1, const Operand& src2) {
-    vsd(op, dst, src1, src2, kF2, k0F, kWIG);
+    vinstr(op, dst, src1, src2, kF2, k0F, kWIG);
   }
-  void vsd(byte op, XMMRegister dst, XMMRegister src1, XMMRegister src2,
-           SIMDPrefix pp, LeadingOpcode m, VexW w);
-  void vsd(byte op, XMMRegister dst, XMMRegister src1, const Operand& src2,
-           SIMDPrefix pp, LeadingOpcode m, VexW w);
 
   void vmovss(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
     vss(0x10, dst, src1, src2);
@@ -1616,6 +1699,101 @@
 
 #undef AVX_CMP_P
 
+  void vlddqu(XMMRegister dst, const Operand& src) {
+    vinstr(0xF0, dst, xmm0, src, kF2, k0F, kWIG);
+  }
+  void vpsllw(XMMRegister dst, XMMRegister src, int8_t imm8) {
+    XMMRegister iop = {6};
+    vinstr(0x71, iop, dst, src, k66, k0F, kWIG);
+    emit(imm8);
+  }
+  void vpsrlw(XMMRegister dst, XMMRegister src, int8_t imm8) {
+    XMMRegister iop = {2};
+    vinstr(0x71, iop, dst, src, k66, k0F, kWIG);
+    emit(imm8);
+  }
+  void vpsraw(XMMRegister dst, XMMRegister src, int8_t imm8) {
+    XMMRegister iop = {4};
+    vinstr(0x71, iop, dst, src, k66, k0F, kWIG);
+    emit(imm8);
+  }
+  void vpslld(XMMRegister dst, XMMRegister src, int8_t imm8) {
+    XMMRegister iop = {6};
+    vinstr(0x72, iop, dst, src, k66, k0F, kWIG);
+    emit(imm8);
+  }
+  void vpsrld(XMMRegister dst, XMMRegister src, int8_t imm8) {
+    XMMRegister iop = {2};
+    vinstr(0x72, iop, dst, src, k66, k0F, kWIG);
+    emit(imm8);
+  }
+  void vpsrad(XMMRegister dst, XMMRegister src, int8_t imm8) {
+    XMMRegister iop = {4};
+    vinstr(0x72, iop, dst, src, k66, k0F, kWIG);
+    emit(imm8);
+  }
+  void vpextrb(Register dst, XMMRegister src, int8_t imm8) {
+    XMMRegister idst = {dst.code()};
+    vinstr(0x14, src, xmm0, idst, k66, k0F3A, kW0);
+    emit(imm8);
+  }
+  void vpextrb(const Operand& dst, XMMRegister src, int8_t imm8) {
+    vinstr(0x14, src, xmm0, dst, k66, k0F3A, kW0);
+    emit(imm8);
+  }
+  void vpextrw(Register dst, XMMRegister src, int8_t imm8) {
+    XMMRegister idst = {dst.code()};
+    vinstr(0xc5, idst, xmm0, src, k66, k0F, kW0);
+    emit(imm8);
+  }
+  void vpextrw(const Operand& dst, XMMRegister src, int8_t imm8) {
+    vinstr(0x15, src, xmm0, dst, k66, k0F3A, kW0);
+    emit(imm8);
+  }
+  void vpextrd(Register dst, XMMRegister src, int8_t imm8) {
+    XMMRegister idst = {dst.code()};
+    vinstr(0x16, src, xmm0, idst, k66, k0F3A, kW0);
+    emit(imm8);
+  }
+  void vpextrd(const Operand& dst, XMMRegister src, int8_t imm8) {
+    vinstr(0x16, src, xmm0, dst, k66, k0F3A, kW0);
+    emit(imm8);
+  }
+  void vpinsrb(XMMRegister dst, XMMRegister src1, Register src2, int8_t imm8) {
+    XMMRegister isrc = {src2.code()};
+    vinstr(0x20, dst, src1, isrc, k66, k0F3A, kW0);
+    emit(imm8);
+  }
+  void vpinsrb(XMMRegister dst, XMMRegister src1, const Operand& src2,
+               int8_t imm8) {
+    vinstr(0x20, dst, src1, src2, k66, k0F3A, kW0);
+    emit(imm8);
+  }
+  void vpinsrw(XMMRegister dst, XMMRegister src1, Register src2, int8_t imm8) {
+    XMMRegister isrc = {src2.code()};
+    vinstr(0xc4, dst, src1, isrc, k66, k0F, kW0);
+    emit(imm8);
+  }
+  void vpinsrw(XMMRegister dst, XMMRegister src1, const Operand& src2,
+               int8_t imm8) {
+    vinstr(0xc4, dst, src1, src2, k66, k0F, kW0);
+    emit(imm8);
+  }
+  void vpinsrd(XMMRegister dst, XMMRegister src1, Register src2, int8_t imm8) {
+    XMMRegister isrc = {src2.code()};
+    vinstr(0x22, dst, src1, isrc, k66, k0F3A, kW0);
+    emit(imm8);
+  }
+  void vpinsrd(XMMRegister dst, XMMRegister src1, const Operand& src2,
+               int8_t imm8) {
+    vinstr(0x22, dst, src1, src2, k66, k0F3A, kW0);
+    emit(imm8);
+  }
+  void vpshufd(XMMRegister dst, XMMRegister src, int8_t imm8) {
+    vinstr(0x70, dst, xmm0, src, k66, k0F, kWIG);
+    emit(imm8);
+  }
+
   void vps(byte op, XMMRegister dst, XMMRegister src1, XMMRegister src2);
   void vps(byte op, XMMRegister dst, XMMRegister src1, const Operand& src2);
   void vpd(byte op, XMMRegister dst, XMMRegister src1, XMMRegister src2);
@@ -1852,6 +2030,8 @@
   byte byte_at(int pos)  { return buffer_[pos]; }
   void set_byte_at(int pos, byte value) { buffer_[pos] = value; }
 
+  Address pc() const { return pc_; }
+
  protected:
   // Call near indirect
   void call(const Operand& operand);
diff --git a/src/x64/code-stubs-x64.cc b/src/x64/code-stubs-x64.cc
index 4b5165a..2a962b3 100644
--- a/src/x64/code-stubs-x64.cc
+++ b/src/x64/code-stubs-x64.cc
@@ -1175,6 +1175,7 @@
   __ Pop(rdx);
   __ Pop(rdi);
   __ Pop(rax);
+  __ SmiToInteger32(rdx, rdx);
   __ SmiToInteger32(rax, rax);
 }
 
@@ -1189,7 +1190,6 @@
   // rdi : the function to call
   Isolate* isolate = masm->isolate();
   Label initialize, done, miss, megamorphic, not_array_function;
-  Label done_initialize_count, done_increment_count;
 
   // Load the cache state into r11.
   __ SmiToInteger32(rdx, rdx);
@@ -1203,7 +1203,7 @@
   // type-feedback-vector.h).
   Label check_allocation_site;
   __ cmpp(rdi, FieldOperand(r11, WeakCell::kValueOffset));
-  __ j(equal, &done_increment_count, Label::kFar);
+  __ j(equal, &done, Label::kFar);
   __ CompareRoot(r11, Heap::kmegamorphic_symbolRootIndex);
   __ j(equal, &done, Label::kFar);
   __ CompareRoot(FieldOperand(r11, HeapObject::kMapOffset),
@@ -1227,7 +1227,7 @@
   __ LoadNativeContextSlot(Context::ARRAY_FUNCTION_INDEX, r11);
   __ cmpp(rdi, r11);
   __ j(not_equal, &megamorphic);
-  __ jmp(&done_increment_count);
+  __ jmp(&done);
 
   __ bind(&miss);
 
@@ -1253,29 +1253,17 @@
 
   CreateAllocationSiteStub create_stub(isolate);
   CallStubInRecordCallTarget(masm, &create_stub);
-  __ jmp(&done_initialize_count);
+  __ jmp(&done);
 
   __ bind(&not_array_function);
   CreateWeakCellStub weak_cell_stub(isolate);
   CallStubInRecordCallTarget(masm, &weak_cell_stub);
 
-  __ bind(&done_initialize_count);
-  // Initialize the call counter.
-  __ SmiToInteger32(rdx, rdx);
-  __ Move(FieldOperand(rbx, rdx, times_pointer_size,
-                       FixedArray::kHeaderSize + kPointerSize),
-          Smi::FromInt(1));
-  __ jmp(&done);
-
-  __ bind(&done_increment_count);
-
-  // Increment the call count for monomorphic function calls.
+  __ bind(&done);
+  // Increment the call count for all function calls.
   __ SmiAddConstant(FieldOperand(rbx, rdx, times_pointer_size,
                                  FixedArray::kHeaderSize + kPointerSize),
                     Smi::FromInt(1));
-
-  __ bind(&done);
-  __ Integer32ToSmi(rdx, rdx);
 }
 
 
@@ -1294,7 +1282,6 @@
 
   GenerateRecordCallTarget(masm);
 
-  __ SmiToInteger32(rdx, rdx);
   Label feedback_register_initialized;
   // Put the AllocationSite from the feedback vector into rbx, or undefined.
   __ movp(rbx,
@@ -1321,6 +1308,12 @@
   __ Jump(isolate()->builtins()->Construct(), RelocInfo::CODE_TARGET);
 }
 
+static void IncrementCallCount(MacroAssembler* masm, Register feedback_vector,
+                               Register slot) {
+  __ SmiAddConstant(FieldOperand(feedback_vector, slot, times_pointer_size,
+                                 FixedArray::kHeaderSize + kPointerSize),
+                    Smi::FromInt(1));
+}
 
 void CallICStub::HandleArrayCase(MacroAssembler* masm, Label* miss) {
   // rdi - function
@@ -1334,9 +1327,7 @@
   __ movp(rax, Immediate(arg_count()));
 
   // Increment the call count for monomorphic function calls.
-  __ SmiAddConstant(FieldOperand(rbx, rdx, times_pointer_size,
-                                 FixedArray::kHeaderSize + kPointerSize),
-                    Smi::FromInt(1));
+  IncrementCallCount(masm, rbx, rdx);
 
   __ movp(rbx, rcx);
   __ movp(rdx, rdi);
@@ -1352,7 +1343,7 @@
   // -- rbx - vector
   // -----------------------------------
   Isolate* isolate = masm->isolate();
-  Label extra_checks_or_miss, call, call_function;
+  Label extra_checks_or_miss, call, call_function, call_count_incremented;
   int argc = arg_count();
   StackArgumentsAccessor args(rsp, argc);
   ParameterCount actual(argc);
@@ -1383,12 +1374,10 @@
   // convincing us that we have a monomorphic JSFunction.
   __ JumpIfSmi(rdi, &extra_checks_or_miss);
 
-  // Increment the call count for monomorphic function calls.
-  __ SmiAddConstant(FieldOperand(rbx, rdx, times_pointer_size,
-                                 FixedArray::kHeaderSize + kPointerSize),
-                    Smi::FromInt(1));
-
   __ bind(&call_function);
+  // Increment the call count for monomorphic function calls.
+  IncrementCallCount(masm, rbx, rdx);
+
   __ Set(rax, argc);
   __ Jump(masm->isolate()->builtins()->CallFunction(convert_mode(),
                                                     tail_call_mode()),
@@ -1428,6 +1417,11 @@
           TypeFeedbackVector::MegamorphicSentinel(isolate));
 
   __ bind(&call);
+
+  // Increment the call count for megamorphic function calls.
+  IncrementCallCount(masm, rbx, rdx);
+
+  __ bind(&call_count_incremented);
   __ Set(rax, argc);
   __ Jump(masm->isolate()->builtins()->Call(convert_mode(), tail_call_mode()),
           RelocInfo::CODE_TARGET);
@@ -1453,11 +1447,6 @@
   __ cmpp(rcx, NativeContextOperand());
   __ j(not_equal, &miss);
 
-  // Initialize the call counter.
-  __ Move(FieldOperand(rbx, rdx, times_pointer_size,
-                       FixedArray::kHeaderSize + kPointerSize),
-          Smi::FromInt(1));
-
   // Store the function. Use a stub since we need a frame for allocation.
   // rbx - vector
   // rdx - slot (needs to be in smi form)
@@ -1467,11 +1456,16 @@
     CreateWeakCellStub create_stub(isolate);
 
     __ Integer32ToSmi(rdx, rdx);
+    __ Push(rbx);
+    __ Push(rdx);
     __ Push(rdi);
     __ Push(rsi);
     __ CallStub(&create_stub);
     __ Pop(rsi);
     __ Pop(rdi);
+    __ Pop(rdx);
+    __ Pop(rbx);
+    __ SmiToInteger32(rdx, rdx);
   }
 
   __ jmp(&call_function);
@@ -1481,20 +1475,19 @@
   __ bind(&miss);
   GenerateMiss(masm);
 
-  __ jmp(&call);
+  __ jmp(&call_count_incremented);
 
   // Unreachable
   __ int3();
 }
 
-
 void CallICStub::GenerateMiss(MacroAssembler* masm) {
   FrameScope scope(masm, StackFrame::INTERNAL);
 
   // Push the receiver and the function and feedback info.
+  __ Integer32ToSmi(rdx, rdx);
   __ Push(rdi);
   __ Push(rbx);
-  __ Integer32ToSmi(rdx, rdx);
   __ Push(rdx);
 
   // Call the entry.
@@ -1504,7 +1497,6 @@
   __ movp(rdi, rax);
 }
 
-
 bool CEntryStub::NeedsImmovableCode() {
   return false;
 }
@@ -2020,296 +2012,6 @@
 }
 
 
-void SubStringStub::Generate(MacroAssembler* masm) {
-  Label runtime;
-
-  // Stack frame on entry.
-  //  rsp[0]  : return address
-  //  rsp[8]  : to
-  //  rsp[16] : from
-  //  rsp[24] : string
-
-  enum SubStringStubArgumentIndices {
-    STRING_ARGUMENT_INDEX,
-    FROM_ARGUMENT_INDEX,
-    TO_ARGUMENT_INDEX,
-    SUB_STRING_ARGUMENT_COUNT
-  };
-
-  StackArgumentsAccessor args(rsp, SUB_STRING_ARGUMENT_COUNT,
-                              ARGUMENTS_DONT_CONTAIN_RECEIVER);
-
-  // Make sure first argument is a string.
-  __ movp(rax, args.GetArgumentOperand(STRING_ARGUMENT_INDEX));
-  STATIC_ASSERT(kSmiTag == 0);
-  __ testl(rax, Immediate(kSmiTagMask));
-  __ j(zero, &runtime);
-  Condition is_string = masm->IsObjectStringType(rax, rbx, rbx);
-  __ j(NegateCondition(is_string), &runtime);
-
-  // rax: string
-  // rbx: instance type
-  // Calculate length of sub string using the smi values.
-  __ movp(rcx, args.GetArgumentOperand(TO_ARGUMENT_INDEX));
-  __ movp(rdx, args.GetArgumentOperand(FROM_ARGUMENT_INDEX));
-  __ JumpUnlessBothNonNegativeSmi(rcx, rdx, &runtime);
-
-  __ SmiSub(rcx, rcx, rdx);  // Overflow doesn't happen.
-  __ cmpp(rcx, FieldOperand(rax, String::kLengthOffset));
-  Label not_original_string;
-  // Shorter than original string's length: an actual substring.
-  __ j(below, &not_original_string, Label::kNear);
-  // Longer than original string's length or negative: unsafe arguments.
-  __ j(above, &runtime);
-  // Return original string.
-  Counters* counters = isolate()->counters();
-  __ IncrementCounter(counters->sub_string_native(), 1);
-  __ ret(SUB_STRING_ARGUMENT_COUNT * kPointerSize);
-  __ bind(&not_original_string);
-
-  Label single_char;
-  __ SmiCompare(rcx, Smi::FromInt(1));
-  __ j(equal, &single_char);
-
-  __ SmiToInteger32(rcx, rcx);
-
-  // rax: string
-  // rbx: instance type
-  // rcx: sub string length
-  // rdx: from index (smi)
-  // Deal with different string types: update the index if necessary
-  // and put the underlying string into edi.
-  Label underlying_unpacked, sliced_string, seq_or_external_string;
-  // If the string is not indirect, it can only be sequential or external.
-  STATIC_ASSERT(kIsIndirectStringMask == (kSlicedStringTag & kConsStringTag));
-  STATIC_ASSERT(kIsIndirectStringMask != 0);
-  __ testb(rbx, Immediate(kIsIndirectStringMask));
-  __ j(zero, &seq_or_external_string, Label::kNear);
-
-  __ testb(rbx, Immediate(kSlicedNotConsMask));
-  __ j(not_zero, &sliced_string, Label::kNear);
-  // Cons string.  Check whether it is flat, then fetch first part.
-  // Flat cons strings have an empty second part.
-  __ CompareRoot(FieldOperand(rax, ConsString::kSecondOffset),
-                 Heap::kempty_stringRootIndex);
-  __ j(not_equal, &runtime);
-  __ movp(rdi, FieldOperand(rax, ConsString::kFirstOffset));
-  // Update instance type.
-  __ movp(rbx, FieldOperand(rdi, HeapObject::kMapOffset));
-  __ movzxbl(rbx, FieldOperand(rbx, Map::kInstanceTypeOffset));
-  __ jmp(&underlying_unpacked, Label::kNear);
-
-  __ bind(&sliced_string);
-  // Sliced string.  Fetch parent and correct start index by offset.
-  __ addp(rdx, FieldOperand(rax, SlicedString::kOffsetOffset));
-  __ movp(rdi, FieldOperand(rax, SlicedString::kParentOffset));
-  // Update instance type.
-  __ movp(rbx, FieldOperand(rdi, HeapObject::kMapOffset));
-  __ movzxbl(rbx, FieldOperand(rbx, Map::kInstanceTypeOffset));
-  __ jmp(&underlying_unpacked, Label::kNear);
-
-  __ bind(&seq_or_external_string);
-  // Sequential or external string.  Just move string to the correct register.
-  __ movp(rdi, rax);
-
-  __ bind(&underlying_unpacked);
-
-  if (FLAG_string_slices) {
-    Label copy_routine;
-    // rdi: underlying subject string
-    // rbx: instance type of underlying subject string
-    // rdx: adjusted start index (smi)
-    // rcx: length
-    // If coming from the make_two_character_string path, the string
-    // is too short to be sliced anyways.
-    __ cmpp(rcx, Immediate(SlicedString::kMinLength));
-    // Short slice.  Copy instead of slicing.
-    __ j(less, &copy_routine);
-    // Allocate new sliced string.  At this point we do not reload the instance
-    // type including the string encoding because we simply rely on the info
-    // provided by the original string.  It does not matter if the original
-    // string's encoding is wrong because we always have to recheck encoding of
-    // the newly created string's parent anyways due to externalized strings.
-    Label two_byte_slice, set_slice_header;
-    STATIC_ASSERT((kStringEncodingMask & kOneByteStringTag) != 0);
-    STATIC_ASSERT((kStringEncodingMask & kTwoByteStringTag) == 0);
-    __ testb(rbx, Immediate(kStringEncodingMask));
-    __ j(zero, &two_byte_slice, Label::kNear);
-    __ AllocateOneByteSlicedString(rax, rbx, r14, &runtime);
-    __ jmp(&set_slice_header, Label::kNear);
-    __ bind(&two_byte_slice);
-    __ AllocateTwoByteSlicedString(rax, rbx, r14, &runtime);
-    __ bind(&set_slice_header);
-    __ Integer32ToSmi(rcx, rcx);
-    __ movp(FieldOperand(rax, SlicedString::kLengthOffset), rcx);
-    __ movp(FieldOperand(rax, SlicedString::kHashFieldOffset),
-           Immediate(String::kEmptyHashField));
-    __ movp(FieldOperand(rax, SlicedString::kParentOffset), rdi);
-    __ movp(FieldOperand(rax, SlicedString::kOffsetOffset), rdx);
-    __ IncrementCounter(counters->sub_string_native(), 1);
-    __ ret(3 * kPointerSize);
-
-    __ bind(&copy_routine);
-  }
-
-  // rdi: underlying subject string
-  // rbx: instance type of underlying subject string
-  // rdx: adjusted start index (smi)
-  // rcx: length
-  // The subject string can only be external or sequential string of either
-  // encoding at this point.
-  Label two_byte_sequential, sequential_string;
-  STATIC_ASSERT(kExternalStringTag != 0);
-  STATIC_ASSERT(kSeqStringTag == 0);
-  __ testb(rbx, Immediate(kExternalStringTag));
-  __ j(zero, &sequential_string);
-
-  // Handle external string.
-  // Rule out short external strings.
-  STATIC_ASSERT(kShortExternalStringTag != 0);
-  __ testb(rbx, Immediate(kShortExternalStringMask));
-  __ j(not_zero, &runtime);
-  __ movp(rdi, FieldOperand(rdi, ExternalString::kResourceDataOffset));
-  // Move the pointer so that offset-wise, it looks like a sequential string.
-  STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqOneByteString::kHeaderSize);
-  __ subp(rdi, Immediate(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
-
-  __ bind(&sequential_string);
-  STATIC_ASSERT((kOneByteStringTag & kStringEncodingMask) != 0);
-  __ testb(rbx, Immediate(kStringEncodingMask));
-  __ j(zero, &two_byte_sequential);
-
-  // Allocate the result.
-  __ AllocateOneByteString(rax, rcx, r11, r14, r15, &runtime);
-
-  // rax: result string
-  // rcx: result string length
-  {  // Locate character of sub string start.
-    SmiIndex smi_as_index = masm->SmiToIndex(rdx, rdx, times_1);
-    __ leap(r14, Operand(rdi, smi_as_index.reg, smi_as_index.scale,
-                        SeqOneByteString::kHeaderSize - kHeapObjectTag));
-  }
-  // Locate first character of result.
-  __ leap(rdi, FieldOperand(rax, SeqOneByteString::kHeaderSize));
-
-  // rax: result string
-  // rcx: result length
-  // r14: first character of result
-  // rsi: character of sub string start
-  StringHelper::GenerateCopyCharacters(
-      masm, rdi, r14, rcx, String::ONE_BYTE_ENCODING);
-  __ IncrementCounter(counters->sub_string_native(), 1);
-  __ ret(SUB_STRING_ARGUMENT_COUNT * kPointerSize);
-
-  __ bind(&two_byte_sequential);
-  // Allocate the result.
-  __ AllocateTwoByteString(rax, rcx, r11, r14, r15, &runtime);
-
-  // rax: result string
-  // rcx: result string length
-  {  // Locate character of sub string start.
-    SmiIndex smi_as_index = masm->SmiToIndex(rdx, rdx, times_2);
-    __ leap(r14, Operand(rdi, smi_as_index.reg, smi_as_index.scale,
-                        SeqOneByteString::kHeaderSize - kHeapObjectTag));
-  }
-  // Locate first character of result.
-  __ leap(rdi, FieldOperand(rax, SeqTwoByteString::kHeaderSize));
-
-  // rax: result string
-  // rcx: result length
-  // rdi: first character of result
-  // r14: character of sub string start
-  StringHelper::GenerateCopyCharacters(
-      masm, rdi, r14, rcx, String::TWO_BYTE_ENCODING);
-  __ IncrementCounter(counters->sub_string_native(), 1);
-  __ ret(SUB_STRING_ARGUMENT_COUNT * kPointerSize);
-
-  // Just jump to runtime to create the sub string.
-  __ bind(&runtime);
-  __ TailCallRuntime(Runtime::kSubString);
-
-  __ bind(&single_char);
-  // rax: string
-  // rbx: instance type
-  // rcx: sub string length (smi)
-  // rdx: from index (smi)
-  StringCharAtGenerator generator(rax, rdx, rcx, rax, &runtime, &runtime,
-                                  &runtime, RECEIVER_IS_STRING);
-  generator.GenerateFast(masm);
-  __ ret(SUB_STRING_ARGUMENT_COUNT * kPointerSize);
-  generator.SkipSlow(masm, &runtime);
-}
-
-void ToStringStub::Generate(MacroAssembler* masm) {
-  // The ToString stub takes one argument in rax.
-  Label is_number;
-  __ JumpIfSmi(rax, &is_number, Label::kNear);
-
-  Label not_string;
-  __ CmpObjectType(rax, FIRST_NONSTRING_TYPE, rdi);
-  // rax: receiver
-  // rdi: receiver map
-  __ j(above_equal, &not_string, Label::kNear);
-  __ Ret();
-  __ bind(&not_string);
-
-  Label not_heap_number;
-  __ CompareRoot(rdi, Heap::kHeapNumberMapRootIndex);
-  __ j(not_equal, &not_heap_number, Label::kNear);
-  __ bind(&is_number);
-  NumberToStringStub stub(isolate());
-  __ TailCallStub(&stub);
-  __ bind(&not_heap_number);
-
-  Label not_oddball;
-  __ CmpInstanceType(rdi, ODDBALL_TYPE);
-  __ j(not_equal, &not_oddball, Label::kNear);
-  __ movp(rax, FieldOperand(rax, Oddball::kToStringOffset));
-  __ Ret();
-  __ bind(&not_oddball);
-
-  __ PopReturnAddressTo(rcx);     // Pop return address.
-  __ Push(rax);                   // Push argument.
-  __ PushReturnAddressFrom(rcx);  // Push return address.
-  __ TailCallRuntime(Runtime::kToString);
-}
-
-void ToNameStub::Generate(MacroAssembler* masm) {
-  // The ToName stub takes one argument in rax.
-  Label is_number;
-  __ JumpIfSmi(rax, &is_number, Label::kNear);
-
-  Label not_name;
-  STATIC_ASSERT(FIRST_NAME_TYPE == FIRST_TYPE);
-  __ CmpObjectType(rax, LAST_NAME_TYPE, rdi);
-  // rax: receiver
-  // rdi: receiver map
-  __ j(above, &not_name, Label::kNear);
-  __ Ret();
-  __ bind(&not_name);
-
-  Label not_heap_number;
-  __ CompareRoot(rdi, Heap::kHeapNumberMapRootIndex);
-  __ j(not_equal, &not_heap_number, Label::kNear);
-  __ bind(&is_number);
-  NumberToStringStub stub(isolate());
-  __ TailCallStub(&stub);
-  __ bind(&not_heap_number);
-
-  Label not_oddball;
-  __ CmpInstanceType(rdi, ODDBALL_TYPE);
-  __ j(not_equal, &not_oddball, Label::kNear);
-  __ movp(rax, FieldOperand(rax, Oddball::kToStringOffset));
-  __ Ret();
-  __ bind(&not_oddball);
-
-  __ PopReturnAddressTo(rcx);     // Pop return address.
-  __ Push(rax);                   // Push argument.
-  __ PushReturnAddressFrom(rcx);  // Push return address.
-  __ TailCallRuntime(Runtime::kToName);
-}
-
-
 void StringHelper::GenerateFlatOneByteStringEquals(MacroAssembler* masm,
                                                    Register left,
                                                    Register right,
@@ -3172,17 +2874,6 @@
   Label need_incremental;
   Label need_incremental_pop_object;
 
-  __ movp(regs_.scratch0(), Immediate(~Page::kPageAlignmentMask));
-  __ andp(regs_.scratch0(), regs_.object());
-  __ movp(regs_.scratch1(),
-         Operand(regs_.scratch0(),
-                 MemoryChunk::kWriteBarrierCounterOffset));
-  __ subp(regs_.scratch1(), Immediate(1));
-  __ movp(Operand(regs_.scratch0(),
-                 MemoryChunk::kWriteBarrierCounterOffset),
-         regs_.scratch1());
-  __ j(negative, &need_incremental);
-
   // Let's look at the color of the object:  If it is not black we don't have
   // to inform the incremental marker.
   __ JumpIfBlack(regs_.object(),
@@ -3575,7 +3266,7 @@
   __ jmp(feedback);
 
   __ bind(&transition_call);
-  DCHECK(receiver_map.is(VectorStoreTransitionDescriptor::MapRegister()));
+  DCHECK(receiver_map.is(StoreTransitionDescriptor::MapRegister()));
   __ movp(receiver_map, FieldOperand(cached_map, WeakCell::kValueOffset));
   // The weak cell may have been cleared.
   __ JumpIfSmi(receiver_map, miss);
@@ -4308,7 +3999,7 @@
     // Fall back to %AllocateInNewSpace (if not too big).
     Label too_big_for_new_space;
     __ bind(&allocate);
-    __ cmpl(rcx, Immediate(Page::kMaxRegularHeapObjectSize));
+    __ cmpl(rcx, Immediate(kMaxRegularHeapObjectSize));
     __ j(greater, &too_big_for_new_space);
     {
       FrameScope scope(masm, StackFrame::INTERNAL);
@@ -4671,7 +4362,7 @@
   // Fall back to %AllocateInNewSpace (if not too big).
   Label too_big_for_new_space;
   __ bind(&allocate);
-  __ cmpl(rcx, Immediate(Page::kMaxRegularHeapObjectSize));
+  __ cmpl(rcx, Immediate(kMaxRegularHeapObjectSize));
   __ j(greater, &too_big_for_new_space);
   {
     FrameScope scope(masm, StackFrame::INTERNAL);
diff --git a/src/x64/disasm-x64.cc b/src/x64/disasm-x64.cc
index 83f34d0..6adb820 100644
--- a/src/x64/disasm-x64.cc
+++ b/src/x64/disasm-x64.cc
@@ -11,6 +11,7 @@
 #include "src/base/compiler-specific.h"
 #include "src/base/lazy-instance.h"
 #include "src/disasm.h"
+#include "src/x64/sse-instr.h"
 
 namespace disasm {
 
@@ -875,6 +876,7 @@
   return 3;  // includes 0x0F
 }
 
+const char* sf_str[4] = {"", "rl", "ra", "ll"};
 
 int DisassemblerX64::AVXInstruction(byte* data) {
   byte opcode = *data;
@@ -949,6 +951,18 @@
         current += PrintRightOperand(current);
         AppendToBuffer(",%s", NameOfCPURegister(vvvv));
         break;
+#define DECLARE_SSE_AVX_DIS_CASE(instruction, notUsed1, notUsed2, notUsed3, \
+                                 opcode)                                    \
+  case 0x##opcode: {                                                        \
+    AppendToBuffer("v" #instruction " %s,%s,", NameOfXMMRegister(regop),    \
+                   NameOfXMMRegister(vvvv));                                \
+    current += PrintRightXMMOperand(current);                               \
+    break;                                                                  \
+  }
+
+        SSSE3_INSTRUCTION_LIST(DECLARE_SSE_AVX_DIS_CASE)
+        SSE4_INSTRUCTION_LIST(DECLARE_SSE_AVX_DIS_CASE)
+#undef DECLARE_SSE_AVX_DIS_CASE
       default:
         UnimplementedInstruction();
     }
@@ -968,6 +982,33 @@
         current += PrintRightXMMOperand(current);
         AppendToBuffer(",0x%x", *current++);
         break;
+      case 0x14:
+        AppendToBuffer("vpextrb ");
+        current += PrintRightByteOperand(current);
+        AppendToBuffer(",%s,0x%x,", NameOfXMMRegister(regop), *current++);
+        break;
+      case 0x15:
+        AppendToBuffer("vpextrw ");
+        current += PrintRightOperand(current);
+        AppendToBuffer(",%s,0x%x,", NameOfXMMRegister(regop), *current++);
+        break;
+      case 0x16:
+        AppendToBuffer("vpextrd ");
+        current += PrintRightOperand(current);
+        AppendToBuffer(",%s,0x%x,", NameOfXMMRegister(regop), *current++);
+        break;
+      case 0x20:
+        AppendToBuffer("vpinsrb %s,%s,", NameOfXMMRegister(regop),
+                       NameOfXMMRegister(vvvv));
+        current += PrintRightByteOperand(current);
+        AppendToBuffer(",0x%x", *current++);
+        break;
+      case 0x22:
+        AppendToBuffer("vpinsrd %s,%s,", NameOfXMMRegister(regop),
+                       NameOfXMMRegister(vvvv));
+        current += PrintRightOperand(current);
+        AppendToBuffer(",0x%x", *current++);
+        break;
       default:
         UnimplementedInstruction();
     }
@@ -1112,6 +1153,10 @@
                        NameOfXMMRegister(vvvv));
         current += PrintRightXMMOperand(current);
         break;
+      case 0xf0:
+        AppendToBuffer("vlddqu %s,", NameOfXMMRegister(regop));
+        current += PrintRightXMMOperand(current);
+        break;
       default:
         UnimplementedInstruction();
     }
@@ -1326,16 +1371,28 @@
                        NameOfXMMRegister(regop));
         current += PrintRightOperand(current);
         break;
-      case 0x73:
-        AppendToBuffer("%s %s,", regop == 6 ? "vpsllq" : "vpsrlq",
+      case 0x70:
+        AppendToBuffer("vpshufd %s,", NameOfXMMRegister(regop));
+        current += PrintRightXMMOperand(current);
+        AppendToBuffer(",0x%x", *current++);
+        break;
+      case 0x71:
+        AppendToBuffer("vps%sw %s,", sf_str[regop / 2],
                        NameOfXMMRegister(vvvv));
         current += PrintRightXMMOperand(current);
         AppendToBuffer(",%u", *current++);
         break;
-      case 0x76:
-        AppendToBuffer("vpcmpeqd %s,%s,", NameOfXMMRegister(regop),
+      case 0x72:
+        AppendToBuffer("vps%sd %s,", sf_str[regop / 2],
                        NameOfXMMRegister(vvvv));
         current += PrintRightXMMOperand(current);
+        AppendToBuffer(",%u", *current++);
+        break;
+      case 0x73:
+        AppendToBuffer("vps%sq %s,", sf_str[regop / 2],
+                       NameOfXMMRegister(vvvv));
+        current += PrintRightXMMOperand(current);
+        AppendToBuffer(",%u", *current++);
         break;
       case 0x7e:
         AppendToBuffer("vmov%c ", vex_w() ? 'q' : 'd');
@@ -1352,6 +1409,27 @@
         current += 1;
         break;
       }
+      case 0xc4:
+        AppendToBuffer("vpinsrw %s,%s,", NameOfXMMRegister(regop),
+                       NameOfXMMRegister(vvvv));
+        current += PrintRightOperand(current);
+        AppendToBuffer(",0x%x", *current++);
+        break;
+      case 0xc5:
+        AppendToBuffer("vpextrw %s,", NameOfCPURegister(regop));
+        current += PrintRightXMMOperand(current);
+        AppendToBuffer(",0x%x", *current++);
+        break;
+#define DECLARE_SSE_AVX_DIS_CASE(instruction, notUsed1, notUsed2, opcode) \
+  case 0x##opcode: {                                                      \
+    AppendToBuffer("v" #instruction " %s,%s,", NameOfXMMRegister(regop),  \
+                   NameOfXMMRegister(vvvv));                              \
+    current += PrintRightXMMOperand(current);                             \
+    break;                                                                \
+  }
+
+        SSE2_INSTRUCTION_LIST(DECLARE_SSE_AVX_DIS_CASE)
+#undef DECLARE_SSE_AVX_DIS_CASE
       default:
         UnimplementedInstruction();
     }
@@ -1363,7 +1441,6 @@
   return static_cast<int>(current - data);
 }
 
-
 // Returns number of bytes used, including *data.
 int DisassemblerX64::FPUInstruction(byte* data) {
   byte escape_opcode = *data;
@@ -1558,11 +1635,20 @@
     if (opcode == 0x38) {
       byte third_byte = *current;
       current = data + 3;
-      if (third_byte == 0x40) {
-        // pmulld xmm, xmm/m128
-        get_modrm(*current, &mod, &regop, &rm);
-        AppendToBuffer("pmulld %s,", NameOfXMMRegister(regop));
-        current += PrintRightXMMOperand(current);
+      get_modrm(*current, &mod, &regop, &rm);
+      switch (third_byte) {
+#define SSE34_DIS_CASE(instruction, notUsed1, notUsed2, notUsed3, opcode) \
+  case 0x##opcode: {                                                      \
+    AppendToBuffer(#instruction " %s,", NameOfXMMRegister(regop));        \
+    current += PrintRightXMMOperand(current);                             \
+    break;                                                                \
+  }
+
+        SSSE3_INSTRUCTION_LIST(SSE34_DIS_CASE)
+        SSE4_INSTRUCTION_LIST(SSE34_DIS_CASE)
+#undef SSE34_DIS_CASE
+        default:
+          UnimplementedInstruction();
       }
     } else if (opcode == 0x3A) {
       byte third_byte = *current;
@@ -1586,12 +1672,31 @@
         current += PrintRightXMMOperand(current);
         AppendToBuffer(",0x%x", (*current) & 3);
         current += 1;
+      } else if (third_byte == 0x14) {
+        get_modrm(*current, &mod, &regop, &rm);
+        AppendToBuffer("pextrb ");  // reg/m32, xmm, imm8
+        current += PrintRightOperand(current);
+        AppendToBuffer(",%s,%d", NameOfXMMRegister(regop), (*current) & 3);
+        current += 1;
+      } else if (third_byte == 0x15) {
+        get_modrm(*current, &mod, &regop, &rm);
+        AppendToBuffer("pextrw ");  // reg/m32, xmm, imm8
+        current += PrintRightOperand(current);
+        AppendToBuffer(",%s,%d", NameOfXMMRegister(regop), (*current) & 3);
+        current += 1;
       } else if (third_byte == 0x16) {
         get_modrm(*current, &mod, &regop, &rm);
         AppendToBuffer("pextrd ");  // reg/m32, xmm, imm8
         current += PrintRightOperand(current);
         AppendToBuffer(",%s,%d", NameOfXMMRegister(regop), (*current) & 3);
         current += 1;
+      } else if (third_byte == 0x20) {
+        get_modrm(*current, &mod, &regop, &rm);
+        AppendToBuffer("pinsrd ");  // xmm, reg/m32, imm8
+        AppendToBuffer(" %s,", NameOfXMMRegister(regop));
+        current += PrintRightOperand(current);
+        AppendToBuffer(",%d", (*current) & 3);
+        current += 1;
       } else if (third_byte == 0x21) {
         get_modrm(*current, &mod, &regop, &rm);
         // insertps xmm, xmm/m32, imm8
@@ -1666,15 +1771,20 @@
         current += PrintRightXMMOperand(current);
         AppendToBuffer(",0x%x", *current);
         current += 1;
+      } else if (opcode == 0x71) {
+        current += 1;
+        AppendToBuffer("ps%sw %s,%d", sf_str[regop / 2], NameOfXMMRegister(rm),
+                       *current & 0x7f);
+        current += 1;
       } else if (opcode == 0x72) {
         current += 1;
-        AppendToBuffer("%s %s,%d", (regop == 6) ? "pslld" : "psrld",
-                       NameOfXMMRegister(rm), *current & 0x7f);
+        AppendToBuffer("ps%sd %s,%d", sf_str[regop / 2], NameOfXMMRegister(rm),
+                       *current & 0x7f);
         current += 1;
       } else if (opcode == 0x73) {
         current += 1;
-        AppendToBuffer("%s %s,%d", (regop == 6) ? "psllq" : "psrlq",
-                       NameOfXMMRegister(rm), *current & 0x7f);
+        AppendToBuffer("ps%sq %s,%d", sf_str[regop / 2], NameOfXMMRegister(rm),
+                       *current & 0x7f);
         current += 1;
       } else if (opcode == 0xB1) {
         current += PrintOperands("cmpxchg", OPER_REG_OP_ORDER, current);
@@ -1692,16 +1802,86 @@
           mnemonic = "ucomisd";
         } else if (opcode == 0x2F) {
           mnemonic = "comisd";
+        } else if (opcode == 0x64) {
+          mnemonic = "pcmpgtb";
+        } else if (opcode == 0x65) {
+          mnemonic = "pcmpgtw";
+        } else if (opcode == 0x66) {
+          mnemonic = "pcmpgtd";
+        } else if (opcode == 0x74) {
+          mnemonic = "pcmpeqb";
+        } else if (opcode == 0x75) {
+          mnemonic = "pcmpeqw";
         } else if (opcode == 0x76) {
           mnemonic = "pcmpeqd";
         } else if (opcode == 0x62) {
           mnemonic = "punpckldq";
+        } else if (opcode == 0x63) {
+          mnemonic = "packsswb";
+        } else if (opcode == 0x67) {
+          mnemonic = "packuswb";
         } else if (opcode == 0x6A) {
           mnemonic = "punpckhdq";
+        } else if (opcode == 0x6B) {
+          mnemonic = "packssdw";
+        } else if (opcode == 0xC4) {
+          mnemonic = "pinsrw";
+        } else if (opcode == 0xC5) {
+          mnemonic = "pextrw";
+        } else if (opcode == 0xD1) {
+          mnemonic = "psrlw";
+        } else if (opcode == 0xD2) {
+          mnemonic = "psrld";
+        } else if (opcode == 0xD5) {
+          mnemonic = "pmullw";
+        } else if (opcode == 0xD7) {
+          mnemonic = "pmovmskb";
+        } else if (opcode == 0xD8) {
+          mnemonic = "psubusb";
+        } else if (opcode == 0xD9) {
+          mnemonic = "psubusw";
+        } else if (opcode == 0xDA) {
+          mnemonic = "pminub";
+        } else if (opcode == 0xDC) {
+          mnemonic = "paddusb";
+        } else if (opcode == 0xDD) {
+          mnemonic = "paddusw";
+        } else if (opcode == 0xDE) {
+          mnemonic = "pmaxub";
+        } else if (opcode == 0xE1) {
+          mnemonic = "psraw";
+        } else if (opcode == 0xE2) {
+          mnemonic = "psrad";
+        } else if (opcode == 0xE8) {
+          mnemonic = "psubsb";
+        } else if (opcode == 0xE9) {
+          mnemonic = "psubsw";
+        } else if (opcode == 0xEA) {
+          mnemonic = "pminsw";
+        } else if (opcode == 0xEC) {
+          mnemonic = "paddsb";
+        } else if (opcode == 0xED) {
+          mnemonic = "paddsw";
+        } else if (opcode == 0xEE) {
+          mnemonic = "pmaxsw";
+        } else if (opcode == 0xEF) {
+          mnemonic = "pxor";
+        } else if (opcode == 0xF1) {
+          mnemonic = "psllw";
+        } else if (opcode == 0xF2) {
+          mnemonic = "pslld";
         } else if (opcode == 0xF4) {
           mnemonic = "pmuludq";
+        } else if (opcode == 0xF8) {
+          mnemonic = "psubb";
+        } else if (opcode == 0xF9) {
+          mnemonic = "psubw";
         } else if (opcode == 0xFA) {
           mnemonic = "psubd";
+        } else if (opcode == 0xFC) {
+          mnemonic = "paddb";
+        } else if (opcode == 0xFD) {
+          mnemonic = "paddw";
         } else if (opcode == 0xFE) {
           mnemonic = "paddd";
         } else if (opcode == 0xC2) {
@@ -1780,6 +1960,11 @@
                      NameOfXMMRegister(regop),
                      NameOfXMMRegister(rm));
       current += 2;
+    } else if (opcode == 0xF0) {
+      int mod, regop, rm;
+      get_modrm(*current, &mod, &regop, &rm);
+      AppendToBuffer("lddqu %s,", NameOfXMMRegister(regop));
+      current += PrintRightOperand(current);
     } else {
       UnimplementedInstruction();
     }
diff --git a/src/x64/interface-descriptors-x64.cc b/src/x64/interface-descriptors-x64.cc
index 7d39b42..9e48644 100644
--- a/src/x64/interface-descriptors-x64.cc
+++ b/src/x64/interface-descriptors-x64.cc
@@ -40,13 +40,9 @@
 
 const Register StoreWithVectorDescriptor::VectorRegister() { return rbx; }
 
-const Register VectorStoreTransitionDescriptor::SlotRegister() { return rdi; }
-const Register VectorStoreTransitionDescriptor::VectorRegister() { return rbx; }
-const Register VectorStoreTransitionDescriptor::MapRegister() { return r11; }
-
-
-const Register StoreTransitionDescriptor::MapRegister() { return rbx; }
-
+const Register StoreTransitionDescriptor::SlotRegister() { return rdi; }
+const Register StoreTransitionDescriptor::VectorRegister() { return rbx; }
+const Register StoreTransitionDescriptor::MapRegister() { return r11; }
 
 const Register StoreGlobalViaContextDescriptor::SlotRegister() { return rbx; }
 const Register StoreGlobalViaContextDescriptor::ValueRegister() { return rax; }
@@ -356,7 +352,7 @@
   data->InitializePlatformSpecific(arraysize(registers), registers);
 }
 
-void ApiCallbackDescriptorBase::InitializePlatformSpecific(
+void ApiCallbackDescriptor::InitializePlatformSpecific(
     CallInterfaceDescriptorData* data) {
   Register registers[] = {
       rdi,  // callee
@@ -391,7 +387,19 @@
       rax,  // argument count (not including receiver)
       rdx,  // new target
       rdi,  // constructor
-      rbx,  // address of first argument
+      rbx,  // allocation site feedback if available, undefined otherwise
+      rcx,  // address of first argument
+  };
+  data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
+void InterpreterPushArgsAndConstructArrayDescriptor::InitializePlatformSpecific(
+    CallInterfaceDescriptorData* data) {
+  Register registers[] = {
+      rax,  // argument count (not including receiver)
+      rdx,  // target to the call. It is checked to be Array function.
+      rbx,  // allocation site feedback
+      rcx,  // address of first argument
   };
   data->InitializePlatformSpecific(arraysize(registers), registers);
 }
diff --git a/src/x64/macro-assembler-x64.cc b/src/x64/macro-assembler-x64.cc
index 6dacc01..0fd6333 100644
--- a/src/x64/macro-assembler-x64.cc
+++ b/src/x64/macro-assembler-x64.cc
@@ -253,9 +253,8 @@
                                 Condition cc,
                                 Label* branch,
                                 Label::Distance distance) {
-  const int mask =
-      (1 << MemoryChunk::IN_FROM_SPACE) | (1 << MemoryChunk::IN_TO_SPACE);
-  CheckPageFlag(object, scratch, mask, cc, branch, distance);
+  CheckPageFlag(object, scratch, MemoryChunk::kIsInNewSpaceMask, cc, branch,
+                distance);
 }
 
 
@@ -3325,12 +3324,12 @@
     Movd(dst, src);
     return;
   }
-  DCHECK_EQ(1, imm8);
   if (CpuFeatures::IsSupported(SSE4_1)) {
     CpuFeatureScope sse_scope(this, SSE4_1);
     pextrd(dst, src, imm8);
     return;
   }
+  DCHECK_EQ(1, imm8);
   movq(dst, src);
   shrq(dst, Immediate(32));
 }
@@ -4974,7 +4973,7 @@
                               Label* gc_required,
                               AllocationFlags flags) {
   DCHECK((flags & (RESULT_CONTAINS_TOP | SIZE_IN_WORDS)) == 0);
-  DCHECK(object_size <= Page::kMaxRegularHeapObjectSize);
+  DCHECK(object_size <= kMaxRegularHeapObjectSize);
   DCHECK((flags & ALLOCATION_FOLDED) == 0);
   if (!FLAG_inline_new) {
     if (emit_debug_code()) {
diff --git a/src/x64/macro-assembler-x64.h b/src/x64/macro-assembler-x64.h
index d5e411f..a8d0c60 100644
--- a/src/x64/macro-assembler-x64.h
+++ b/src/x64/macro-assembler-x64.h
@@ -891,6 +891,18 @@
   // miss label if the weak cell was cleared.
   void LoadWeakValue(Register value, Handle<WeakCell> cell, Label* miss);
 
+  // Emit code that loads |parameter_index|'th parameter from the stack to
+  // the register according to the CallInterfaceDescriptor definition.
+  // |sp_to_caller_sp_offset_in_words| specifies the number of words pushed
+  // below the caller's sp (on x64 it's at least return address).
+  template <class Descriptor>
+  void LoadParameterFromStack(
+      Register reg, typename Descriptor::ParameterIndices parameter_index,
+      int sp_to_ra_offset_in_words = 1) {
+    DCHECK(Descriptor::kPassLastArgsOnStack);
+    UNIMPLEMENTED();
+  }
+
   // Emit code to discard a non-negative number of pointer-sized elements
   // from the stack, clobbering only the rsp register.
   void Drop(int stack_elements);
diff --git a/src/x64/sse-instr.h b/src/x64/sse-instr.h
new file mode 100644
index 0000000..0095727
--- /dev/null
+++ b/src/x64/sse-instr.h
@@ -0,0 +1,69 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_SSE_INSTR_H_
+#define V8_SSE_INSTR_H_
+
+#define SSE2_INSTRUCTION_LIST(V) \
+  V(packsswb, 66, 0F, 63)        \
+  V(packssdw, 66, 0F, 6B)        \
+  V(packuswb, 66, 0F, 67)        \
+  V(paddb, 66, 0F, FC)           \
+  V(paddw, 66, 0F, FD)           \
+  V(paddd, 66, 0F, FE)           \
+  V(paddsb, 66, 0F, EC)          \
+  V(paddsw, 66, 0F, ED)          \
+  V(paddusb, 66, 0F, DC)         \
+  V(paddusw, 66, 0F, DD)         \
+  V(pcmpeqb, 66, 0F, 74)         \
+  V(pcmpeqw, 66, 0F, 75)         \
+  V(pcmpeqd, 66, 0F, 76)         \
+  V(pcmpgtb, 66, 0F, 64)         \
+  V(pcmpgtw, 66, 0F, 65)         \
+  V(pcmpgtd, 66, 0F, 66)         \
+  V(pmaxsw, 66, 0F, EE)          \
+  V(pmaxub, 66, 0F, DE)          \
+  V(pminsw, 66, 0F, EA)          \
+  V(pminub, 66, 0F, DA)          \
+  V(pmullw, 66, 0F, D5)          \
+  V(pmuludq, 66, 0F, F4)         \
+  V(psllw, 66, 0F, F1)           \
+  V(pslld, 66, 0F, F2)           \
+  V(psraw, 66, 0F, E1)           \
+  V(psrad, 66, 0F, E2)           \
+  V(psrlw, 66, 0F, D1)           \
+  V(psrld, 66, 0F, D2)           \
+  V(psubb, 66, 0F, F8)           \
+  V(psubw, 66, 0F, F9)           \
+  V(psubd, 66, 0F, FA)           \
+  V(psubsb, 66, 0F, E8)          \
+  V(psubsw, 66, 0F, E9)          \
+  V(psubusb, 66, 0F, D8)         \
+  V(psubusw, 66, 0F, D9)         \
+  V(pxor, 66, 0F, EF)            \
+  V(cvtps2dq, 66, 0F, 5B)
+
+#define SSSE3_INSTRUCTION_LIST(V) \
+  V(pabsb, 66, 0F, 38, 1C)        \
+  V(pabsw, 66, 0F, 38, 1D)        \
+  V(pabsd, 66, 0F, 38, 1E)        \
+  V(pshufb, 66, 0F, 38, 00)       \
+  V(psignb, 66, 0F, 38, 08)       \
+  V(psignw, 66, 0F, 38, 09)       \
+  V(psignd, 66, 0F, 38, 0A)
+
+#define SSE4_INSTRUCTION_LIST(V) \
+  V(packusdw, 66, 0F, 38, 2B)    \
+  V(pminsb, 66, 0F, 38, 38)      \
+  V(pminsd, 66, 0F, 38, 39)      \
+  V(pminuw, 66, 0F, 38, 3A)      \
+  V(pminud, 66, 0F, 38, 3B)      \
+  V(pmaxsb, 66, 0F, 38, 3C)      \
+  V(pmaxsd, 66, 0F, 38, 3D)      \
+  V(pmaxuw, 66, 0F, 38, 3E)      \
+  V(pmaxud, 66, 0F, 38, 3F)      \
+  V(pmulld, 66, 0F, 38, 40)      \
+  V(ptest, 66, 0F, 38, 17)
+
+#endif  // V8_SSE_INSTR_H_
diff --git a/src/x87/code-stubs-x87.cc b/src/x87/code-stubs-x87.cc
index 02de67a..e70cbad 100644
--- a/src/x87/code-stubs-x87.cc
+++ b/src/x87/code-stubs-x87.cc
@@ -1130,7 +1130,6 @@
   // edi : the function to call
   Isolate* isolate = masm->isolate();
   Label initialize, done, miss, megamorphic, not_array_function;
-  Label done_increment_count, done_initialize_count;
 
   // Load the cache state into ecx.
   __ mov(ecx, FieldOperand(ebx, edx, times_half_pointer_size,
@@ -1143,7 +1142,7 @@
   // type-feedback-vector.h).
   Label check_allocation_site;
   __ cmp(edi, FieldOperand(ecx, WeakCell::kValueOffset));
-  __ j(equal, &done_increment_count, Label::kFar);
+  __ j(equal, &done, Label::kFar);
   __ CompareRoot(ecx, Heap::kmegamorphic_symbolRootIndex);
   __ j(equal, &done, Label::kFar);
   __ CompareRoot(FieldOperand(ecx, HeapObject::kMapOffset),
@@ -1166,7 +1165,7 @@
   __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, ecx);
   __ cmp(edi, ecx);
   __ j(not_equal, &megamorphic);
-  __ jmp(&done_increment_count, Label::kFar);
+  __ jmp(&done, Label::kFar);
 
   __ bind(&miss);
 
@@ -1195,26 +1194,17 @@
   // slot.
   CreateAllocationSiteStub create_stub(isolate);
   CallStubInRecordCallTarget(masm, &create_stub);
-  __ jmp(&done_initialize_count);
+  __ jmp(&done);
 
   __ bind(&not_array_function);
   CreateWeakCellStub weak_cell_stub(isolate);
   CallStubInRecordCallTarget(masm, &weak_cell_stub);
-  __ bind(&done_initialize_count);
 
-  // Initialize the call counter.
-  __ mov(FieldOperand(ebx, edx, times_half_pointer_size,
-                      FixedArray::kHeaderSize + kPointerSize),
-         Immediate(Smi::FromInt(1)));
-  __ jmp(&done);
-
-  __ bind(&done_increment_count);
-  // Increment the call count for monomorphic function calls.
+  __ bind(&done);
+  // Increment the call count for all function calls.
   __ add(FieldOperand(ebx, edx, times_half_pointer_size,
                       FixedArray::kHeaderSize + kPointerSize),
          Immediate(Smi::FromInt(1)));
-
-  __ bind(&done);
 }
 
 
@@ -1260,6 +1250,12 @@
   __ Jump(isolate()->builtins()->Construct(), RelocInfo::CODE_TARGET);
 }
 
+static void IncrementCallCount(MacroAssembler* masm, Register feedback_vector,
+                               Register slot) {
+  __ add(FieldOperand(feedback_vector, slot, times_half_pointer_size,
+                      FixedArray::kHeaderSize + kPointerSize),
+         Immediate(Smi::FromInt(1)));
+}
 
 void CallICStub::HandleArrayCase(MacroAssembler* masm, Label* miss) {
   // edi - function
@@ -1275,9 +1271,7 @@
                            FixedArray::kHeaderSize));
 
   // Increment the call count for monomorphic function calls.
-  __ add(FieldOperand(ebx, edx, times_half_pointer_size,
-                      FixedArray::kHeaderSize + kPointerSize),
-         Immediate(Smi::FromInt(1)));
+  IncrementCallCount(masm, ebx, edx);
 
   __ mov(ebx, ecx);
   __ mov(edx, edi);
@@ -1293,7 +1287,7 @@
   // edx - slot id
   // ebx - vector
   Isolate* isolate = masm->isolate();
-  Label extra_checks_or_miss, call, call_function;
+  Label extra_checks_or_miss, call, call_function, call_count_incremented;
   int argc = arg_count();
   ParameterCount actual(argc);
 
@@ -1322,12 +1316,11 @@
   // convincing us that we have a monomorphic JSFunction.
   __ JumpIfSmi(edi, &extra_checks_or_miss);
 
-  // Increment the call count for monomorphic function calls.
-  __ add(FieldOperand(ebx, edx, times_half_pointer_size,
-                      FixedArray::kHeaderSize + kPointerSize),
-         Immediate(Smi::FromInt(1)));
-
   __ bind(&call_function);
+
+  // Increment the call count for monomorphic function calls.
+  IncrementCallCount(masm, ebx, edx);
+
   __ Set(eax, argc);
   __ Jump(masm->isolate()->builtins()->CallFunction(convert_mode(),
                                                     tail_call_mode()),
@@ -1368,6 +1361,12 @@
       Immediate(TypeFeedbackVector::MegamorphicSentinel(isolate)));
 
   __ bind(&call);
+
+  // Increment the call count for megamorphic function calls.
+  IncrementCallCount(masm, ebx, edx);
+
+  __ bind(&call_count_incremented);
+
   __ Set(eax, argc);
   __ Jump(masm->isolate()->builtins()->Call(convert_mode(), tail_call_mode()),
           RelocInfo::CODE_TARGET);
@@ -1393,11 +1392,6 @@
   __ cmp(ecx, NativeContextOperand());
   __ j(not_equal, &miss);
 
-  // Initialize the call counter.
-  __ mov(FieldOperand(ebx, edx, times_half_pointer_size,
-                      FixedArray::kHeaderSize + kPointerSize),
-         Immediate(Smi::FromInt(1)));
-
   // Store the function. Use a stub since we need a frame for allocation.
   // ebx - vector
   // edx - slot
@@ -1405,11 +1399,15 @@
   {
     FrameScope scope(masm, StackFrame::INTERNAL);
     CreateWeakCellStub create_stub(isolate);
+    __ push(ebx);
+    __ push(edx);
     __ push(edi);
     __ push(esi);
     __ CallStub(&create_stub);
     __ pop(esi);
     __ pop(edi);
+    __ pop(edx);
+    __ pop(ebx);
   }
 
   __ jmp(&call_function);
@@ -1419,7 +1417,7 @@
   __ bind(&miss);
   GenerateMiss(masm);
 
-  __ jmp(&call);
+  __ jmp(&call_count_incremented);
 
   // Unreachable
   __ int3();
@@ -1910,297 +1908,6 @@
 }
 
 
-void SubStringStub::Generate(MacroAssembler* masm) {
-  Label runtime;
-
-  // Stack frame on entry.
-  //  esp[0]: return address
-  //  esp[4]: to
-  //  esp[8]: from
-  //  esp[12]: string
-
-  // Make sure first argument is a string.
-  __ mov(eax, Operand(esp, 3 * kPointerSize));
-  STATIC_ASSERT(kSmiTag == 0);
-  __ JumpIfSmi(eax, &runtime);
-  Condition is_string = masm->IsObjectStringType(eax, ebx, ebx);
-  __ j(NegateCondition(is_string), &runtime);
-
-  // eax: string
-  // ebx: instance type
-
-  // Calculate length of sub string using the smi values.
-  __ mov(ecx, Operand(esp, 1 * kPointerSize));  // To index.
-  __ JumpIfNotSmi(ecx, &runtime);
-  __ mov(edx, Operand(esp, 2 * kPointerSize));  // From index.
-  __ JumpIfNotSmi(edx, &runtime);
-  __ sub(ecx, edx);
-  __ cmp(ecx, FieldOperand(eax, String::kLengthOffset));
-  Label not_original_string;
-  // Shorter than original string's length: an actual substring.
-  __ j(below, &not_original_string, Label::kNear);
-  // Longer than original string's length or negative: unsafe arguments.
-  __ j(above, &runtime);
-  // Return original string.
-  Counters* counters = isolate()->counters();
-  __ IncrementCounter(counters->sub_string_native(), 1);
-  __ ret(3 * kPointerSize);
-  __ bind(&not_original_string);
-
-  Label single_char;
-  __ cmp(ecx, Immediate(Smi::FromInt(1)));
-  __ j(equal, &single_char);
-
-  // eax: string
-  // ebx: instance type
-  // ecx: sub string length (smi)
-  // edx: from index (smi)
-  // Deal with different string types: update the index if necessary
-  // and put the underlying string into edi.
-  Label underlying_unpacked, sliced_string, seq_or_external_string;
-  // If the string is not indirect, it can only be sequential or external.
-  STATIC_ASSERT(kIsIndirectStringMask == (kSlicedStringTag & kConsStringTag));
-  STATIC_ASSERT(kIsIndirectStringMask != 0);
-  __ test(ebx, Immediate(kIsIndirectStringMask));
-  __ j(zero, &seq_or_external_string, Label::kNear);
-
-  Factory* factory = isolate()->factory();
-  __ test(ebx, Immediate(kSlicedNotConsMask));
-  __ j(not_zero, &sliced_string, Label::kNear);
-  // Cons string.  Check whether it is flat, then fetch first part.
-  // Flat cons strings have an empty second part.
-  __ cmp(FieldOperand(eax, ConsString::kSecondOffset),
-         factory->empty_string());
-  __ j(not_equal, &runtime);
-  __ mov(edi, FieldOperand(eax, ConsString::kFirstOffset));
-  // Update instance type.
-  __ mov(ebx, FieldOperand(edi, HeapObject::kMapOffset));
-  __ movzx_b(ebx, FieldOperand(ebx, Map::kInstanceTypeOffset));
-  __ jmp(&underlying_unpacked, Label::kNear);
-
-  __ bind(&sliced_string);
-  // Sliced string.  Fetch parent and adjust start index by offset.
-  __ add(edx, FieldOperand(eax, SlicedString::kOffsetOffset));
-  __ mov(edi, FieldOperand(eax, SlicedString::kParentOffset));
-  // Update instance type.
-  __ mov(ebx, FieldOperand(edi, HeapObject::kMapOffset));
-  __ movzx_b(ebx, FieldOperand(ebx, Map::kInstanceTypeOffset));
-  __ jmp(&underlying_unpacked, Label::kNear);
-
-  __ bind(&seq_or_external_string);
-  // Sequential or external string.  Just move string to the expected register.
-  __ mov(edi, eax);
-
-  __ bind(&underlying_unpacked);
-
-  if (FLAG_string_slices) {
-    Label copy_routine;
-    // edi: underlying subject string
-    // ebx: instance type of underlying subject string
-    // edx: adjusted start index (smi)
-    // ecx: length (smi)
-    __ cmp(ecx, Immediate(Smi::FromInt(SlicedString::kMinLength)));
-    // Short slice.  Copy instead of slicing.
-    __ j(less, &copy_routine);
-    // Allocate new sliced string.  At this point we do not reload the instance
-    // type including the string encoding because we simply rely on the info
-    // provided by the original string.  It does not matter if the original
-    // string's encoding is wrong because we always have to recheck encoding of
-    // the newly created string's parent anyways due to externalized strings.
-    Label two_byte_slice, set_slice_header;
-    STATIC_ASSERT((kStringEncodingMask & kOneByteStringTag) != 0);
-    STATIC_ASSERT((kStringEncodingMask & kTwoByteStringTag) == 0);
-    __ test(ebx, Immediate(kStringEncodingMask));
-    __ j(zero, &two_byte_slice, Label::kNear);
-    __ AllocateOneByteSlicedString(eax, ebx, no_reg, &runtime);
-    __ jmp(&set_slice_header, Label::kNear);
-    __ bind(&two_byte_slice);
-    __ AllocateTwoByteSlicedString(eax, ebx, no_reg, &runtime);
-    __ bind(&set_slice_header);
-    __ mov(FieldOperand(eax, SlicedString::kLengthOffset), ecx);
-    __ mov(FieldOperand(eax, SlicedString::kHashFieldOffset),
-           Immediate(String::kEmptyHashField));
-    __ mov(FieldOperand(eax, SlicedString::kParentOffset), edi);
-    __ mov(FieldOperand(eax, SlicedString::kOffsetOffset), edx);
-    __ IncrementCounter(counters->sub_string_native(), 1);
-    __ ret(3 * kPointerSize);
-
-    __ bind(&copy_routine);
-  }
-
-  // edi: underlying subject string
-  // ebx: instance type of underlying subject string
-  // edx: adjusted start index (smi)
-  // ecx: length (smi)
-  // The subject string can only be external or sequential string of either
-  // encoding at this point.
-  Label two_byte_sequential, runtime_drop_two, sequential_string;
-  STATIC_ASSERT(kExternalStringTag != 0);
-  STATIC_ASSERT(kSeqStringTag == 0);
-  __ test_b(ebx, Immediate(kExternalStringTag));
-  __ j(zero, &sequential_string);
-
-  // Handle external string.
-  // Rule out short external strings.
-  STATIC_ASSERT(kShortExternalStringTag != 0);
-  __ test_b(ebx, Immediate(kShortExternalStringMask));
-  __ j(not_zero, &runtime);
-  __ mov(edi, FieldOperand(edi, ExternalString::kResourceDataOffset));
-  // Move the pointer so that offset-wise, it looks like a sequential string.
-  STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqOneByteString::kHeaderSize);
-  __ sub(edi, Immediate(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
-
-  __ bind(&sequential_string);
-  // Stash away (adjusted) index and (underlying) string.
-  __ push(edx);
-  __ push(edi);
-  __ SmiUntag(ecx);
-  STATIC_ASSERT((kOneByteStringTag & kStringEncodingMask) != 0);
-  __ test_b(ebx, Immediate(kStringEncodingMask));
-  __ j(zero, &two_byte_sequential);
-
-  // Sequential one byte string.  Allocate the result.
-  __ AllocateOneByteString(eax, ecx, ebx, edx, edi, &runtime_drop_two);
-
-  // eax: result string
-  // ecx: result string length
-  // Locate first character of result.
-  __ mov(edi, eax);
-  __ add(edi, Immediate(SeqOneByteString::kHeaderSize - kHeapObjectTag));
-  // Load string argument and locate character of sub string start.
-  __ pop(edx);
-  __ pop(ebx);
-  __ SmiUntag(ebx);
-  __ lea(edx, FieldOperand(edx, ebx, times_1, SeqOneByteString::kHeaderSize));
-
-  // eax: result string
-  // ecx: result length
-  // edi: first character of result
-  // edx: character of sub string start
-  StringHelper::GenerateCopyCharacters(
-      masm, edi, edx, ecx, ebx, String::ONE_BYTE_ENCODING);
-  __ IncrementCounter(counters->sub_string_native(), 1);
-  __ ret(3 * kPointerSize);
-
-  __ bind(&two_byte_sequential);
-  // Sequential two-byte string.  Allocate the result.
-  __ AllocateTwoByteString(eax, ecx, ebx, edx, edi, &runtime_drop_two);
-
-  // eax: result string
-  // ecx: result string length
-  // Locate first character of result.
-  __ mov(edi, eax);
-  __ add(edi,
-         Immediate(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
-  // Load string argument and locate character of sub string start.
-  __ pop(edx);
-  __ pop(ebx);
-  // As from is a smi it is 2 times the value which matches the size of a two
-  // byte character.
-  STATIC_ASSERT(kSmiTag == 0);
-  STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1);
-  __ lea(edx, FieldOperand(edx, ebx, times_1, SeqTwoByteString::kHeaderSize));
-
-  // eax: result string
-  // ecx: result length
-  // edi: first character of result
-  // edx: character of sub string start
-  StringHelper::GenerateCopyCharacters(
-      masm, edi, edx, ecx, ebx, String::TWO_BYTE_ENCODING);
-  __ IncrementCounter(counters->sub_string_native(), 1);
-  __ ret(3 * kPointerSize);
-
-  // Drop pushed values on the stack before tail call.
-  __ bind(&runtime_drop_two);
-  __ Drop(2);
-
-  // Just jump to runtime to create the sub string.
-  __ bind(&runtime);
-  __ TailCallRuntime(Runtime::kSubString);
-
-  __ bind(&single_char);
-  // eax: string
-  // ebx: instance type
-  // ecx: sub string length (smi)
-  // edx: from index (smi)
-  StringCharAtGenerator generator(eax, edx, ecx, eax, &runtime, &runtime,
-                                  &runtime, RECEIVER_IS_STRING);
-  generator.GenerateFast(masm);
-  __ ret(3 * kPointerSize);
-  generator.SkipSlow(masm, &runtime);
-}
-
-void ToStringStub::Generate(MacroAssembler* masm) {
-  // The ToString stub takes one argument in eax.
-  Label is_number;
-  __ JumpIfSmi(eax, &is_number, Label::kNear);
-
-  Label not_string;
-  __ CmpObjectType(eax, FIRST_NONSTRING_TYPE, edi);
-  // eax: receiver
-  // edi: receiver map
-  __ j(above_equal, &not_string, Label::kNear);
-  __ Ret();
-  __ bind(&not_string);
-
-  Label not_heap_number;
-  __ CompareMap(eax, masm->isolate()->factory()->heap_number_map());
-  __ j(not_equal, &not_heap_number, Label::kNear);
-  __ bind(&is_number);
-  NumberToStringStub stub(isolate());
-  __ TailCallStub(&stub);
-  __ bind(&not_heap_number);
-
-  Label not_oddball;
-  __ CmpInstanceType(edi, ODDBALL_TYPE);
-  __ j(not_equal, &not_oddball, Label::kNear);
-  __ mov(eax, FieldOperand(eax, Oddball::kToStringOffset));
-  __ Ret();
-  __ bind(&not_oddball);
-
-  __ pop(ecx);   // Pop return address.
-  __ push(eax);  // Push argument.
-  __ push(ecx);  // Push return address.
-  __ TailCallRuntime(Runtime::kToString);
-}
-
-
-void ToNameStub::Generate(MacroAssembler* masm) {
-  // The ToName stub takes one argument in eax.
-  Label is_number;
-  __ JumpIfSmi(eax, &is_number, Label::kNear);
-
-  Label not_name;
-  STATIC_ASSERT(FIRST_NAME_TYPE == FIRST_TYPE);
-  __ CmpObjectType(eax, LAST_NAME_TYPE, edi);
-  // eax: receiver
-  // edi: receiver map
-  __ j(above, &not_name, Label::kNear);
-  __ Ret();
-  __ bind(&not_name);
-
-  Label not_heap_number;
-  __ CompareMap(eax, masm->isolate()->factory()->heap_number_map());
-  __ j(not_equal, &not_heap_number, Label::kNear);
-  __ bind(&is_number);
-  NumberToStringStub stub(isolate());
-  __ TailCallStub(&stub);
-  __ bind(&not_heap_number);
-
-  Label not_oddball;
-  __ CmpInstanceType(edi, ODDBALL_TYPE);
-  __ j(not_equal, &not_oddball, Label::kNear);
-  __ mov(eax, FieldOperand(eax, Oddball::kToStringOffset));
-  __ Ret();
-  __ bind(&not_oddball);
-
-  __ pop(ecx);   // Pop return address.
-  __ push(eax);  // Push argument.
-  __ push(ecx);  // Push return address.
-  __ TailCallRuntime(Runtime::kToName);
-}
-
-
 void StringHelper::GenerateFlatOneByteStringEquals(MacroAssembler* masm,
                                                    Register left,
                                                    Register right,
@@ -3040,17 +2747,6 @@
     Mode mode) {
   Label object_is_black, need_incremental, need_incremental_pop_object;
 
-  __ mov(regs_.scratch0(), Immediate(~Page::kPageAlignmentMask));
-  __ and_(regs_.scratch0(), regs_.object());
-  __ mov(regs_.scratch1(),
-         Operand(regs_.scratch0(),
-                 MemoryChunk::kWriteBarrierCounterOffset));
-  __ sub(regs_.scratch1(), Immediate(1));
-  __ mov(Operand(regs_.scratch0(),
-                 MemoryChunk::kWriteBarrierCounterOffset),
-         regs_.scratch1());
-  __ j(negative, &need_incremental);
-
   // Let's look at the color of the object:  If it is not black we don't have
   // to inform the incremental marker.
   __ JumpIfBlack(regs_.object(),
@@ -3392,11 +3088,10 @@
   Label load_smi_map, compare_map;
   Label start_polymorphic;
   Label pop_and_miss;
-  ExternalReference virtual_register =
-      ExternalReference::virtual_handler_register(masm->isolate());
 
   __ push(receiver);
-  __ push(vector);
+  // Value, vector and slot are passed on the stack, so no need to save/restore
+  // them.
 
   Register receiver_map = receiver;
   Register cached_map = vector;
@@ -3417,12 +3112,9 @@
   Register handler = feedback;
   DCHECK(handler.is(StoreWithVectorDescriptor::ValueRegister()));
   __ mov(handler, FieldOperand(feedback, FixedArray::OffsetOfElementAt(1)));
-  __ pop(vector);
   __ pop(receiver);
   __ lea(handler, FieldOperand(handler, Code::kHeaderSize));
-  __ mov(Operand::StaticVariable(virtual_register), handler);
-  __ pop(handler);  // Pop "value".
-  __ jmp(Operand::StaticVariable(virtual_register));
+  __ jmp(handler);
 
   // Polymorphic, we have to loop from 2 to N
   __ bind(&start_polymorphic);
@@ -3446,11 +3138,8 @@
                                FixedArray::kHeaderSize + kPointerSize));
   __ lea(handler, FieldOperand(handler, Code::kHeaderSize));
   __ pop(key);
-  __ pop(vector);
   __ pop(receiver);
-  __ mov(Operand::StaticVariable(virtual_register), handler);
-  __ pop(handler);  // Pop "value".
-  __ jmp(Operand::StaticVariable(virtual_register));
+  __ jmp(handler);
 
   __ bind(&prepare_next);
   __ add(counter, Immediate(Smi::FromInt(2)));
@@ -3460,7 +3149,6 @@
   // We exhausted our array of map handler pairs.
   __ bind(&pop_and_miss);
   __ pop(key);
-  __ pop(vector);
   __ pop(receiver);
   __ jmp(miss);
 
@@ -3476,8 +3164,6 @@
                                        Label* miss) {
   // The store ic value is on the stack.
   DCHECK(weak_cell.is(StoreWithVectorDescriptor::ValueRegister()));
-  ExternalReference virtual_register =
-      ExternalReference::virtual_handler_register(masm->isolate());
 
   // feedback initially contains the feedback array
   Label compare_smi_map;
@@ -3493,11 +3179,8 @@
   __ mov(weak_cell, FieldOperand(vector, slot, times_half_pointer_size,
                                  FixedArray::kHeaderSize + kPointerSize));
   __ lea(weak_cell, FieldOperand(weak_cell, Code::kHeaderSize));
-  // Put the store ic value back in it's register.
-  __ mov(Operand::StaticVariable(virtual_register), weak_cell);
-  __ pop(weak_cell);  // Pop "value".
   // jump to the handler.
-  __ jmp(Operand::StaticVariable(virtual_register));
+  __ jmp(weak_cell);
 
   // In microbenchmarks, it made sense to unroll this code so that the call to
   // the handler is duplicated for a HeapObject receiver and a Smi receiver.
@@ -3507,10 +3190,8 @@
   __ mov(weak_cell, FieldOperand(vector, slot, times_half_pointer_size,
                                  FixedArray::kHeaderSize + kPointerSize));
   __ lea(weak_cell, FieldOperand(weak_cell, Code::kHeaderSize));
-  __ mov(Operand::StaticVariable(virtual_register), weak_cell);
-  __ pop(weak_cell);  // Pop "value".
   // jump to the handler.
-  __ jmp(Operand::StaticVariable(virtual_register));
+  __ jmp(weak_cell);
 }
 
 void StoreICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
@@ -3521,7 +3202,26 @@
   Register slot = StoreWithVectorDescriptor::SlotRegister();          // edi
   Label miss;
 
-  __ push(value);
+  if (StoreWithVectorDescriptor::kPassLastArgsOnStack) {
+    // Current stack layout:
+    // - esp[8]    -- value
+    // - esp[4]    -- slot
+    // - esp[0]    -- return address
+    STATIC_ASSERT(StoreDescriptor::kStackArgumentsCount == 2);
+    STATIC_ASSERT(StoreWithVectorDescriptor::kStackArgumentsCount == 3);
+    if (in_frame) {
+      __ RecordComment("[ StoreDescriptor -> StoreWithVectorDescriptor");
+      // If the vector is not on the stack, then insert the vector beneath
+      // return address in order to prepare for calling handler with
+      // StoreWithVector calling convention.
+      __ push(Operand(esp, 0));
+      __ mov(Operand(esp, 4), StoreWithVectorDescriptor::VectorRegister());
+      __ RecordComment("]");
+    } else {
+      __ mov(vector, Operand(esp, 1 * kPointerSize));
+    }
+    __ mov(slot, Operand(esp, 2 * kPointerSize));
+  }
 
   Register scratch = value;
   __ mov(scratch, FieldOperand(vector, slot, times_half_pointer_size,
@@ -3545,19 +3245,9 @@
   __ CompareRoot(scratch, Heap::kmegamorphic_symbolRootIndex);
   __ j(not_equal, &miss);
 
-  __ pop(value);
-  __ push(slot);
-  __ push(vector);
   masm->isolate()->store_stub_cache()->GenerateProbe(masm, receiver, key, slot,
                                                      no_reg);
-  __ pop(vector);
-  __ pop(slot);
-  Label no_pop_miss;
-  __ jmp(&no_pop_miss);
-
   __ bind(&miss);
-  __ pop(value);
-  __ bind(&no_pop_miss);
   StoreIC::GenerateMiss(masm);
 }
 
@@ -3579,17 +3269,13 @@
   Label load_smi_map, compare_map;
   Label transition_call;
   Label pop_and_miss;
-  ExternalReference virtual_register =
-      ExternalReference::virtual_handler_register(masm->isolate());
-  ExternalReference virtual_slot =
-      ExternalReference::virtual_slot_register(masm->isolate());
 
   __ push(receiver);
-  __ push(vector);
+  // Value, vector and slot are passed on the stack, so no need to save/restore
+  // them.
 
   Register receiver_map = receiver;
   Register cached_map = vector;
-  Register value = StoreDescriptor::ValueRegister();
 
   // Receiver might not be a heap object.
   __ JumpIfSmi(receiver, &load_smi_map);
@@ -3600,15 +3286,18 @@
   __ push(key);
   // Current stack layout:
   // - esp[0]    -- key
-  // - esp[4]    -- vector
-  // - esp[8]    -- receiver
-  // - esp[12]   -- value
-  // - esp[16]   -- return address
+  // - esp[4]    -- receiver
+  // - esp[8]    -- return address
+  // - esp[12]   -- vector
+  // - esp[16]   -- slot
+  // - esp[20]   -- value
   //
-  // Required stack layout for handler call:
+  // Required stack layout for handler call (see StoreWithVectorDescriptor):
   // - esp[0]    -- return address
-  // - receiver, key, value, vector, slot in registers.
-  // - handler in virtual register.
+  // - esp[4]    -- vector
+  // - esp[8]    -- slot
+  // - esp[12]   -- value
+  // - receiver, key, handler in registers.
   Register counter = key;
   __ mov(counter, Immediate(Smi::FromInt(0)));
   __ bind(&next_loop);
@@ -3623,43 +3312,57 @@
   __ mov(feedback, FieldOperand(feedback, counter, times_half_pointer_size,
                                 FixedArray::kHeaderSize + 2 * kPointerSize));
   __ pop(key);
-  __ pop(vector);
   __ pop(receiver);
   __ lea(feedback, FieldOperand(feedback, Code::kHeaderSize));
-  __ mov(Operand::StaticVariable(virtual_register), feedback);
-  __ pop(value);
-  __ jmp(Operand::StaticVariable(virtual_register));
+  __ jmp(feedback);
 
   __ bind(&transition_call);
   // Current stack layout:
   // - esp[0]    -- key
-  // - esp[4]    -- vector
-  // - esp[8]    -- receiver
-  // - esp[12]   -- value
-  // - esp[16]   -- return address
+  // - esp[4]    -- receiver
+  // - esp[8]    -- return address
+  // - esp[12]   -- vector
+  // - esp[16]   -- slot
+  // - esp[20]   -- value
   //
-  // Required stack layout for handler call:
+  // Required stack layout for handler call (see StoreTransitionDescriptor):
   // - esp[0]    -- return address
-  // - receiver, key, value, map, vector in registers.
-  // - handler and slot in virtual registers.
-  __ mov(Operand::StaticVariable(virtual_slot), slot);
+  // - esp[4]    -- vector
+  // - esp[8]    -- slot
+  // - esp[12]   -- value
+  // - receiver, key, map, handler in registers.
   __ mov(feedback, FieldOperand(feedback, counter, times_half_pointer_size,
                                 FixedArray::kHeaderSize + 2 * kPointerSize));
   __ lea(feedback, FieldOperand(feedback, Code::kHeaderSize));
-  __ mov(Operand::StaticVariable(virtual_register), feedback);
 
   __ mov(cached_map, FieldOperand(cached_map, WeakCell::kValueOffset));
   // The weak cell may have been cleared.
   __ JumpIfSmi(cached_map, &pop_and_miss);
-  DCHECK(!cached_map.is(VectorStoreTransitionDescriptor::MapRegister()));
-  __ mov(VectorStoreTransitionDescriptor::MapRegister(), cached_map);
+  DCHECK(!cached_map.is(StoreTransitionDescriptor::MapRegister()));
+  __ mov(StoreTransitionDescriptor::MapRegister(), cached_map);
 
-  // Pop key into place.
+  // Call store transition handler using StoreTransitionDescriptor calling
+  // convention.
   __ pop(key);
-  __ pop(vector);
   __ pop(receiver);
-  __ pop(value);
-  __ jmp(Operand::StaticVariable(virtual_register));
+  // Ensure that the transition handler we are going to call has the same
+  // number of stack arguments which means that we don't have to adapt them
+  // before the call.
+  STATIC_ASSERT(StoreWithVectorDescriptor::kStackArgumentsCount == 3);
+  STATIC_ASSERT(StoreTransitionDescriptor::kStackArgumentsCount == 3);
+  STATIC_ASSERT(StoreWithVectorDescriptor::kParameterCount -
+                    StoreWithVectorDescriptor::kValue ==
+                StoreTransitionDescriptor::kParameterCount -
+                    StoreTransitionDescriptor::kValue);
+  STATIC_ASSERT(StoreWithVectorDescriptor::kParameterCount -
+                    StoreWithVectorDescriptor::kSlot ==
+                StoreTransitionDescriptor::kParameterCount -
+                    StoreTransitionDescriptor::kSlot);
+  STATIC_ASSERT(StoreWithVectorDescriptor::kParameterCount -
+                    StoreWithVectorDescriptor::kVector ==
+                StoreTransitionDescriptor::kParameterCount -
+                    StoreTransitionDescriptor::kVector);
+  __ jmp(feedback);
 
   __ bind(&prepare_next);
   __ add(counter, Immediate(Smi::FromInt(3)));
@@ -3669,7 +3372,6 @@
   // We exhausted our array of map handler pairs.
   __ bind(&pop_and_miss);
   __ pop(key);
-  __ pop(vector);
   __ pop(receiver);
   __ jmp(miss);
 
@@ -3686,7 +3388,26 @@
   Register slot = StoreWithVectorDescriptor::SlotRegister();          // edi
   Label miss;
 
-  __ push(value);
+  if (StoreWithVectorDescriptor::kPassLastArgsOnStack) {
+    // Current stack layout:
+    // - esp[8]    -- value
+    // - esp[4]    -- slot
+    // - esp[0]    -- return address
+    STATIC_ASSERT(StoreDescriptor::kStackArgumentsCount == 2);
+    STATIC_ASSERT(StoreWithVectorDescriptor::kStackArgumentsCount == 3);
+    if (in_frame) {
+      __ RecordComment("[ StoreDescriptor -> StoreWithVectorDescriptor");
+      // If the vector is not on the stack, then insert the vector beneath
+      // return address in order to prepare for calling handler with
+      // StoreWithVector calling convention.
+      __ push(Operand(esp, 0));
+      __ mov(Operand(esp, 4), StoreWithVectorDescriptor::VectorRegister());
+      __ RecordComment("]");
+    } else {
+      __ mov(vector, Operand(esp, 1 * kPointerSize));
+    }
+    __ mov(slot, Operand(esp, 2 * kPointerSize));
+  }
 
   Register scratch = value;
   __ mov(scratch, FieldOperand(vector, slot, times_half_pointer_size,
@@ -3711,8 +3432,6 @@
   __ CompareRoot(scratch, Heap::kmegamorphic_symbolRootIndex);
   __ j(not_equal, &try_poly_name);
 
-  __ pop(value);
-
   Handle<Code> megamorphic_stub =
       KeyedStoreIC::ChooseMegamorphicStub(masm->isolate(), GetExtraICState());
   __ jmp(megamorphic_stub, RelocInfo::CODE_TARGET);
@@ -3729,7 +3448,6 @@
                              &miss);
 
   __ bind(&miss);
-  __ pop(value);
   KeyedStoreIC::GenerateMiss(masm);
 }
 
@@ -4358,7 +4076,7 @@
     // Fall back to %AllocateInNewSpace (if not too big).
     Label too_big_for_new_space;
     __ bind(&allocate);
-    __ cmp(ecx, Immediate(Page::kMaxRegularHeapObjectSize));
+    __ cmp(ecx, Immediate(kMaxRegularHeapObjectSize));
     __ j(greater, &too_big_for_new_space);
     {
       FrameScope scope(masm, StackFrame::INTERNAL);
@@ -4745,7 +4463,7 @@
   // Fall back to %AllocateInNewSpace (if not too big).
   Label too_big_for_new_space;
   __ bind(&allocate);
-  __ cmp(ecx, Immediate(Page::kMaxRegularHeapObjectSize));
+  __ cmp(ecx, Immediate(kMaxRegularHeapObjectSize));
   __ j(greater, &too_big_for_new_space);
   {
     FrameScope scope(masm, StackFrame::INTERNAL);
diff --git a/src/x87/interface-descriptors-x87.cc b/src/x87/interface-descriptors-x87.cc
index 4ef88e8..85b26ca 100644
--- a/src/x87/interface-descriptors-x87.cc
+++ b/src/x87/interface-descriptors-x87.cc
@@ -39,19 +39,11 @@
 
 const Register StoreWithVectorDescriptor::VectorRegister() { return ebx; }
 
-const Register VectorStoreTransitionDescriptor::SlotRegister() {
-  return no_reg;
-}
+const Register StoreTransitionDescriptor::SlotRegister() { return no_reg; }
 
+const Register StoreTransitionDescriptor::VectorRegister() { return ebx; }
 
-const Register VectorStoreTransitionDescriptor::VectorRegister() { return ebx; }
-
-
-const Register VectorStoreTransitionDescriptor::MapRegister() { return edi; }
-
-
-const Register StoreTransitionDescriptor::MapRegister() { return ebx; }
-
+const Register StoreTransitionDescriptor::MapRegister() { return edi; }
 
 const Register StoreGlobalViaContextDescriptor::SlotRegister() { return ebx; }
 const Register StoreGlobalViaContextDescriptor::ValueRegister() { return eax; }
@@ -363,7 +355,7 @@
   data->InitializePlatformSpecific(arraysize(registers), registers);
 }
 
-void ApiCallbackDescriptorBase::InitializePlatformSpecific(
+void ApiCallbackDescriptor::InitializePlatformSpecific(
     CallInterfaceDescriptorData* data) {
   Register registers[] = {
       edi,  // callee
@@ -398,7 +390,19 @@
       eax,  // argument count (not including receiver)
       edx,  // new target
       edi,  // constructor
-      ebx,  // address of first argument
+      ebx,  // allocation site feedback
+      ecx,  // address of first argument
+  };
+  data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
+void InterpreterPushArgsAndConstructArrayDescriptor::InitializePlatformSpecific(
+    CallInterfaceDescriptorData* data) {
+  Register registers[] = {
+      eax,  // argument count (not including receiver)
+      edx,  // target to the call. It is checked to be Array function.
+      ebx,  // allocation site feedback
+      ecx,  // address of first argument
   };
   data->InitializePlatformSpecific(arraysize(registers), registers);
 }
diff --git a/src/x87/macro-assembler-x87.cc b/src/x87/macro-assembler-x87.cc
index 9ffbf9f..dafe985 100644
--- a/src/x87/macro-assembler-x87.cc
+++ b/src/x87/macro-assembler-x87.cc
@@ -167,9 +167,8 @@
 void MacroAssembler::InNewSpace(Register object, Register scratch, Condition cc,
                                 Label* condition_met,
                                 Label::Distance distance) {
-  const int mask =
-      (1 << MemoryChunk::IN_FROM_SPACE) | (1 << MemoryChunk::IN_TO_SPACE);
-  CheckPageFlag(object, scratch, mask, cc, condition_met, distance);
+  CheckPageFlag(object, scratch, MemoryChunk::kIsInNewSpaceMask, cc,
+                condition_met, distance);
 }
 
 
@@ -1487,7 +1486,7 @@
                               Label* gc_required,
                               AllocationFlags flags) {
   DCHECK((flags & (RESULT_CONTAINS_TOP | SIZE_IN_WORDS)) == 0);
-  DCHECK(object_size <= Page::kMaxRegularHeapObjectSize);
+  DCHECK(object_size <= kMaxRegularHeapObjectSize);
   DCHECK((flags & ALLOCATION_FOLDED) == 0);
   if (!FLAG_inline_new) {
     if (emit_debug_code()) {
diff --git a/src/x87/macro-assembler-x87.h b/src/x87/macro-assembler-x87.h
index 13988ae..bdd3c03 100644
--- a/src/x87/macro-assembler-x87.h
+++ b/src/x87/macro-assembler-x87.h
@@ -787,6 +787,24 @@
   // may be bigger than 2^16 - 1.  Requires a scratch register.
   void Ret(int bytes_dropped, Register scratch);
 
+  // Emit code that loads |parameter_index|'th parameter from the stack to
+  // the register according to the CallInterfaceDescriptor definition.
+  // |sp_to_caller_sp_offset_in_words| specifies the number of words pushed
+  // below the caller's sp (on x87 it's at least return address).
+  template <class Descriptor>
+  void LoadParameterFromStack(
+      Register reg, typename Descriptor::ParameterIndices parameter_index,
+      int sp_to_ra_offset_in_words = 1) {
+    DCHECK(Descriptor::kPassLastArgsOnStack);
+    DCHECK_LT(parameter_index, Descriptor::kParameterCount);
+    DCHECK_LE(Descriptor::kParameterCount - Descriptor::kStackArgumentsCount,
+              parameter_index);
+    int offset = (Descriptor::kParameterCount - parameter_index - 1 +
+                  sp_to_ra_offset_in_words) *
+                 kPointerSize;
+    mov(reg, Operand(esp, offset));
+  }
+
   // Emit code to discard a non-negative number of pointer-sized elements
   // from the stack, clobbering only the esp register.
   void Drop(int element_count);
diff --git a/src/zone/accounting-allocator.cc b/src/zone/accounting-allocator.cc
new file mode 100644
index 0000000..663ea32
--- /dev/null
+++ b/src/zone/accounting-allocator.cc
@@ -0,0 +1,45 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/zone/accounting-allocator.h"
+
+#include <cstdlib>
+
+#if V8_LIBC_BIONIC
+#include <malloc.h>  // NOLINT
+#endif
+
+namespace v8 {
+namespace internal {
+
+Segment* AccountingAllocator::AllocateSegment(size_t bytes) {
+  void* memory = malloc(bytes);
+  if (memory) {
+    base::AtomicWord current =
+        base::NoBarrier_AtomicIncrement(&current_memory_usage_, bytes);
+    base::AtomicWord max = base::NoBarrier_Load(&max_memory_usage_);
+    while (current > max) {
+      max = base::NoBarrier_CompareAndSwap(&max_memory_usage_, max, current);
+    }
+  }
+  return reinterpret_cast<Segment*>(memory);
+}
+
+void AccountingAllocator::FreeSegment(Segment* memory) {
+  base::NoBarrier_AtomicIncrement(
+      &current_memory_usage_, -static_cast<base::AtomicWord>(memory->size()));
+  memory->ZapHeader();
+  free(memory);
+}
+
+size_t AccountingAllocator::GetCurrentMemoryUsage() const {
+  return base::NoBarrier_Load(&current_memory_usage_);
+}
+
+size_t AccountingAllocator::GetMaxMemoryUsage() const {
+  return base::NoBarrier_Load(&max_memory_usage_);
+}
+
+}  // namespace internal
+}  // namespace v8
diff --git a/src/zone/accounting-allocator.h b/src/zone/accounting-allocator.h
new file mode 100644
index 0000000..31016a5
--- /dev/null
+++ b/src/zone/accounting-allocator.h
@@ -0,0 +1,41 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_ZONE_ACCOUNTING_ALLOCATOR_H_
+#define V8_ZONE_ACCOUNTING_ALLOCATOR_H_
+
+#include "include/v8-platform.h"
+#include "src/base/atomic-utils.h"
+#include "src/base/atomicops.h"
+#include "src/base/macros.h"
+#include "src/base/platform/mutex.h"
+#include "src/base/platform/semaphore.h"
+#include "src/base/platform/time.h"
+#include "src/zone/zone-segment.h"
+
+namespace v8 {
+namespace internal {
+
+class V8_EXPORT_PRIVATE AccountingAllocator {
+ public:
+  AccountingAllocator() = default;
+  virtual ~AccountingAllocator() = default;
+
+  virtual Segment* AllocateSegment(size_t bytes);
+  virtual void FreeSegment(Segment* memory);
+
+  size_t GetCurrentMemoryUsage() const;
+  size_t GetMaxMemoryUsage() const;
+
+ private:
+  base::AtomicWord current_memory_usage_ = 0;
+  base::AtomicWord max_memory_usage_ = 0;
+
+  DISALLOW_COPY_AND_ASSIGN(AccountingAllocator);
+};
+
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_ZONE_ACCOUNTING_ALLOCATOR_H_
diff --git a/src/zone-allocator.h b/src/zone/zone-allocator.h
similarity index 66%
rename from src/zone-allocator.h
rename to src/zone/zone-allocator.h
index f46151e..8370d73 100644
--- a/src/zone-allocator.h
+++ b/src/zone/zone-allocator.h
@@ -2,17 +2,16 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
-#ifndef V8_ZONE_ALLOCATOR_H_
-#define V8_ZONE_ALLOCATOR_H_
-
+#ifndef V8_ZONE_ZONE_ALLOCATOR_H_
+#define V8_ZONE_ZONE_ALLOCATOR_H_
 #include <limits>
 
-#include "src/zone.h"
+#include "src/zone/zone.h"
 
 namespace v8 {
 namespace internal {
 
-template<typename T>
+template <typename T>
 class zone_allocator {
  public:
   typedef T* pointer;
@@ -22,31 +21,34 @@
   typedef T value_type;
   typedef size_t size_type;
   typedef ptrdiff_t difference_type;
-  template<class O> struct rebind {
+  template <class O>
+  struct rebind {
     typedef zone_allocator<O> other;
   };
 
   explicit zone_allocator(Zone* zone) throw() : zone_(zone) {}
   explicit zone_allocator(const zone_allocator& other) throw()
       : zone_(other.zone_) {}
-  template<typename U> zone_allocator(const zone_allocator<U>& other) throw()
-      : zone_(other.zone_) {}
-  template<typename U> friend class zone_allocator;
+  template <typename U>
+  zone_allocator(const zone_allocator<U>& other) throw() : zone_(other.zone_) {}
+  template <typename U>
+  friend class zone_allocator;
 
-  pointer address(reference x) const {return &x;}
-  const_pointer address(const_reference x) const {return &x;}
+  pointer address(reference x) const { return &x; }
+  const_pointer address(const_reference x) const { return &x; }
 
   pointer allocate(size_type n, const void* hint = 0) {
-    return static_cast<pointer>(zone_->NewArray<value_type>(
-            static_cast<int>(n)));
+    return static_cast<pointer>(
+        zone_->NewArray<value_type>(static_cast<int>(n)));
   }
-  void deallocate(pointer p, size_type) { /* noop for Zones */ }
+  void deallocate(pointer p, size_type) { /* noop for Zones */
+  }
 
   size_type max_size() const throw() {
     return std::numeric_limits<int>::max() / sizeof(value_type);
   }
   void construct(pointer p, const T& val) {
-    new(static_cast<void*>(p)) T(val);
+    new (static_cast<void*>(p)) T(val);
   }
   void destroy(pointer p) { p->~T(); }
 
@@ -69,4 +71,4 @@
 }  // namespace internal
 }  // namespace v8
 
-#endif  // V8_ZONE_ALLOCATOR_H_
+#endif  // V8_ZONE_ZONE_ALLOCATOR_H_
diff --git a/src/zone-containers.h b/src/zone/zone-containers.h
similarity index 85%
rename from src/zone-containers.h
rename to src/zone/zone-containers.h
index 79b168c..0aecd98 100644
--- a/src/zone-containers.h
+++ b/src/zone/zone-containers.h
@@ -2,8 +2,8 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
-#ifndef V8_ZONE_CONTAINERS_H_
-#define V8_ZONE_CONTAINERS_H_
+#ifndef V8_SRC_ZONE_ZONE_CONTAINERS_H_
+#define V8_SRC_ZONE_ZONE_CONTAINERS_H_
 
 #include <deque>
 #include <list>
@@ -13,7 +13,7 @@
 #include <stack>
 #include <vector>
 
-#include "src/zone-allocator.h"
+#include "src/zone/zone-allocator.h"
 
 namespace v8 {
 namespace internal {
@@ -38,7 +38,6 @@
       : std::vector<T, zone_allocator<T>>(size, def, zone_allocator<T>(zone)) {}
 };
 
-
 // A wrapper subclass std::deque to make it easy to construct one
 // that uses a zone allocator.
 template <typename T>
@@ -49,7 +48,6 @@
       : std::deque<T, zone_allocator<T>>(zone_allocator<T>(zone)) {}
 };
 
-
 // A wrapper subclass std::list to make it easy to construct one
 // that uses a zone allocator.
 // TODO(mstarzinger): This should be renamed to ZoneList once we got rid of our
@@ -62,7 +60,6 @@
       : std::list<T, zone_allocator<T>>(zone_allocator<T>(zone)) {}
 };
 
-
 // A wrapper subclass std::priority_queue to make it easy to construct one
 // that uses a zone allocator.
 template <typename T, typename Compare = std::less<T>>
@@ -75,7 +72,6 @@
                                                        ZoneVector<T>(zone)) {}
 };
 
-
 // A wrapper subclass for std::queue to make it easy to construct one
 // that uses a zone allocator.
 template <typename T>
@@ -86,7 +82,6 @@
       : std::queue<T, ZoneDeque<T>>(ZoneDeque<T>(zone)) {}
 };
 
-
 // A wrapper subclass for std::stack to make it easy to construct one that uses
 // a zone allocator.
 template <typename T>
@@ -97,7 +92,6 @@
       : std::stack<T, ZoneDeque<T>>(ZoneDeque<T>(zone)) {}
 };
 
-
 // A wrapper subclass for std::set to make it easy to construct one that uses
 // a zone allocator.
 template <typename K, typename Compare = std::less<K>>
@@ -109,7 +103,6 @@
                                                 zone_allocator<K>(zone)) {}
 };
 
-
 // A wrapper subclass for std::map to make it easy to construct one that uses
 // a zone allocator.
 template <typename K, typename V, typename Compare = std::less<K>>
@@ -122,6 +115,18 @@
             Compare(), zone_allocator<std::pair<const K, V>>(zone)) {}
 };
 
+// A wrapper subclass for std::multimap to make it easy to construct one that
+// uses a zone allocator.
+template <typename K, typename V, typename Compare = std::less<K>>
+class ZoneMultimap
+    : public std::multimap<K, V, Compare,
+                           zone_allocator<std::pair<const K, V>>> {
+ public:
+  // Constructs an empty multimap.
+  explicit ZoneMultimap(Zone* zone)
+      : std::multimap<K, V, Compare, zone_allocator<std::pair<const K, V>>>(
+            Compare(), zone_allocator<std::pair<const K, V>>(zone)) {}
+};
 
 // Typedefs to shorten commonly used vectors.
 typedef ZoneVector<bool> BoolVector;
@@ -130,4 +135,4 @@
 }  // namespace internal
 }  // namespace v8
 
-#endif  // V8_ZONE_CONTAINERS_H_
+#endif  // V8_SRC_ZONE_ZONE_CONTAINERS_H_
diff --git a/src/zone/zone-segment.cc b/src/zone/zone-segment.cc
new file mode 100644
index 0000000..f63b530
--- /dev/null
+++ b/src/zone/zone-segment.cc
@@ -0,0 +1,22 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/zone/zone-segment.h"
+
+namespace v8 {
+namespace internal {
+
+void Segment::ZapContents() {
+#ifdef DEBUG
+  memset(start(), kZapDeadByte, capacity());
+#endif
+}
+
+void Segment::ZapHeader() {
+#ifdef DEBUG
+  memset(this, kZapDeadByte, sizeof(Segment));
+#endif
+}
+}  // namespace internal
+}  // namespace v8
diff --git a/src/zone/zone-segment.h b/src/zone/zone-segment.h
new file mode 100644
index 0000000..d37cf56
--- /dev/null
+++ b/src/zone/zone-segment.h
@@ -0,0 +1,61 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_ZONE_ZONE_SEGMENT_H_
+#define V8_ZONE_ZONE_SEGMENT_H_
+
+#include "src/v8.h"
+
+// Segments represent chunks of memory: They have starting address
+// (encoded in the this pointer) and a size in bytes. Segments are
+// chained together forming a LIFO structure with the newest segment
+// available as segment_head_. Segments are allocated using malloc()
+// and de-allocated using free().
+namespace v8 {
+namespace internal {
+
+//  Forward declaration
+class Zone;
+
+class Segment {
+ public:
+  void Initialize(Segment* next, size_t size, Zone* zone) {
+    next_ = next;
+    size_ = size;
+    zone_ = zone;
+  }
+
+  Zone* zone() const { return zone_; }
+  void set_zone(Zone* const zone) { zone_ = zone; }
+
+  Segment* next() const { return next_; }
+  void set_next(Segment* const next) { next_ = next; }
+
+  size_t size() const { return size_; }
+  size_t capacity() const { return size_ - sizeof(Segment); }
+
+  Address start() const { return address(sizeof(Segment)); }
+  Address end() const { return address(size_); }
+
+  // Zap the contents of the segment (but not the header).
+  void ZapContents();
+  // Zaps the header and makes the segment unusable this way.
+  void ZapHeader();
+
+ private:
+#ifdef DEBUG
+  // Constant byte value used for zapping dead memory in debug mode.
+  static const unsigned char kZapDeadByte = 0xcd;
+#endif
+  // Computes the address of the nth byte in this segment.
+  Address address(size_t n) const { return Address(this) + n; }
+
+  Zone* zone_;
+  Segment* next_;
+  size_t size_;
+};
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_ZONE_ZONE_SEGMENT_H_
diff --git a/src/zone.cc b/src/zone/zone.cc
similarity index 77%
rename from src/zone.cc
rename to src/zone/zone.cc
index a10b636..4272e17 100644
--- a/src/zone.cc
+++ b/src/zone/zone.cc
@@ -2,7 +2,7 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
-#include "src/zone.h"
+#include "src/zone/zone.h"
 
 #include <cstring>
 
@@ -41,38 +41,7 @@
 
 }  // namespace
 
-
-// Segments represent chunks of memory: They have starting address
-// (encoded in the this pointer) and a size in bytes. Segments are
-// chained together forming a LIFO structure with the newest segment
-// available as segment_head_. Segments are allocated using malloc()
-// and de-allocated using free().
-
-class Segment {
- public:
-  void Initialize(Segment* next, size_t size) {
-    next_ = next;
-    size_ = size;
-  }
-
-  Segment* next() const { return next_; }
-  void clear_next() { next_ = nullptr; }
-
-  size_t size() const { return size_; }
-  size_t capacity() const { return size_ - sizeof(Segment); }
-
-  Address start() const { return address(sizeof(Segment)); }
-  Address end() const { return address(size_); }
-
- private:
-  // Computes the address of the nth byte in this segment.
-  Address address(size_t n) const { return Address(this) + n; }
-
-  Segment* next_;
-  size_t size_;
-};
-
-Zone::Zone(base::AccountingAllocator* allocator)
+Zone::Zone(AccountingAllocator* allocator)
     : allocation_size_(0),
       segment_bytes_allocated_(0),
       position_(0),
@@ -87,7 +56,6 @@
   DCHECK(segment_bytes_allocated_ == 0);
 }
 
-
 void* Zone::New(size_t size) {
   // Round up the requested size to fit the alignment.
   size = RoundUp(size, kAlignment);
@@ -123,13 +91,7 @@
   return reinterpret_cast<void*>(result);
 }
 
-
 void Zone::DeleteAll() {
-#ifdef DEBUG
-  // Constant byte value used for zapping dead memory in debug mode.
-  static const unsigned char kZapDeadByte = 0xcd;
-#endif
-
   // Find a segment with a suitable size to keep around.
   Segment* keep = nullptr;
   // Traverse the chained list of segments, zapping (in debug mode)
@@ -139,16 +101,16 @@
     if (!keep && current->size() <= kMaximumKeptSegmentSize) {
       // Unlink the segment we wish to keep from the list.
       keep = current;
-      keep->clear_next();
+      keep->set_next(nullptr);
     } else {
       size_t size = current->size();
 #ifdef DEBUG
       // Un-poison first so the zapping doesn't trigger ASan complaints.
       ASAN_UNPOISON_MEMORY_REGION(current, size);
-      // Zap the entire current segment (including the header).
-      memset(current, kZapDeadByte, size);
 #endif
-      DeleteSegment(current, size);
+      current->ZapContents();
+      segment_bytes_allocated_ -= size;
+      allocator_->FreeSegment(current);
     }
     current = next;
   }
@@ -163,10 +125,7 @@
     limit_ = keep->end();
     // Un-poison so we can re-use the segment later.
     ASAN_UNPOISON_MEMORY_REGION(start, keep->capacity());
-#ifdef DEBUG
-    // Zap the contents of the kept segment (but not the header).
-    memset(start, kZapDeadByte, keep->capacity());
-#endif
+    keep->ZapContents();
   } else {
     position_ = limit_ = 0;
   }
@@ -176,50 +135,35 @@
   segment_head_ = keep;
 }
 
-
 void Zone::DeleteKeptSegment() {
-#ifdef DEBUG
-  // Constant byte value used for zapping dead memory in debug mode.
-  static const unsigned char kZapDeadByte = 0xcd;
-#endif
-
   DCHECK(segment_head_ == nullptr || segment_head_->next() == nullptr);
   if (segment_head_ != nullptr) {
     size_t size = segment_head_->size();
 #ifdef DEBUG
     // Un-poison first so the zapping doesn't trigger ASan complaints.
     ASAN_UNPOISON_MEMORY_REGION(segment_head_, size);
-    // Zap the entire kept segment (including the header).
-    memset(segment_head_, kZapDeadByte, size);
 #endif
-    DeleteSegment(segment_head_, size);
+    segment_head_->ZapContents();
+    segment_bytes_allocated_ -= size;
+    allocator_->FreeSegment(segment_head_);
     segment_head_ = nullptr;
   }
 
   DCHECK(segment_bytes_allocated_ == 0);
 }
 
-
 // Creates a new segment, sets it size, and pushes it to the front
 // of the segment chain. Returns the new segment.
 Segment* Zone::NewSegment(size_t size) {
-  Segment* result = reinterpret_cast<Segment*>(allocator_->Allocate(size));
+  Segment* result = allocator_->AllocateSegment(size);
   segment_bytes_allocated_ += size;
   if (result != nullptr) {
-    result->Initialize(segment_head_, size);
+    result->Initialize(segment_head_, size, this);
     segment_head_ = result;
   }
   return result;
 }
 
-
-// Deletes the given segment. Does not touch the segment chain.
-void Zone::DeleteSegment(Segment* segment, size_t size) {
-  segment_bytes_allocated_ -= size;
-  allocator_->Free(segment, size);
-}
-
-
 Address Zone::NewExpand(size_t size) {
   // Make sure the requested size is already properly aligned and that
   // there isn't enough room in the Zone to satisfy the request.
diff --git a/src/zone.h b/src/zone/zone.h
similarity index 90%
rename from src/zone.h
rename to src/zone/zone.h
index 29055cb..9ff259e 100644
--- a/src/zone.h
+++ b/src/zone/zone.h
@@ -2,25 +2,21 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
-#ifndef V8_ZONE_H_
-#define V8_ZONE_H_
+#ifndef V8_ZONE_ZONE_H_
+#define V8_ZONE_ZONE_H_
 
 #include <limits>
 
-#include "src/base/accounting-allocator.h"
 #include "src/base/hashmap.h"
 #include "src/base/logging.h"
 #include "src/globals.h"
 #include "src/list.h"
 #include "src/splay-tree.h"
+#include "src/zone/accounting-allocator.h"
 
 namespace v8 {
 namespace internal {
 
-// Forward declarations.
-class Segment;
-
-
 // The Zone supports very fast allocation of small chunks of
 // memory. The chunks cannot be deallocated individually, but instead
 // the Zone supports deallocating all chunks in one fast
@@ -33,9 +29,9 @@
 //
 // Note: The implementation is inherently not thread safe. Do not use
 // from multi-threaded code.
-class Zone final {
+class V8_EXPORT_PRIVATE Zone final {
  public:
-  explicit Zone(base::AccountingAllocator* allocator);
+  explicit Zone(AccountingAllocator* allocator);
   ~Zone();
 
   // Allocate 'size' bytes of memory in the Zone; expands the Zone by
@@ -64,12 +60,12 @@
 
   size_t allocation_size() const { return allocation_size_; }
 
-  base::AccountingAllocator* allocator() const { return allocator_; }
+  AccountingAllocator* allocator() const { return allocator_; }
 
  private:
-  // All pointers returned from New() have this alignment.  In addition, if the
-  // object being allocated has a size that is divisible by 8 then its alignment
-  // will be 8. ASan requires 8-byte alignment.
+// All pointers returned from New() have this alignment.  In addition, if the
+// object being allocated has a size that is divisible by 8 then its alignment
+// will be 8. ASan requires 8-byte alignment.
 #ifdef V8_USE_ADDRESS_SANITIZER
   static const size_t kAlignment = 8;
   STATIC_ASSERT(kPointerSize <= 8);
@@ -107,21 +103,17 @@
   // of the segment chain. Returns the new segment.
   inline Segment* NewSegment(size_t size);
 
-  // Deletes the given segment. Does not touch the segment chain.
-  inline void DeleteSegment(Segment* segment, size_t size);
-
   // The free region in the current (front) segment is represented as
   // the half-open interval [position, limit). The 'position' variable
   // is guaranteed to be aligned as dictated by kAlignment.
   Address position_;
   Address limit_;
 
-  base::AccountingAllocator* allocator_;
+  AccountingAllocator* allocator_;
 
   Segment* segment_head_;
 };
 
-
 // ZoneObject is an abstraction that helps define classes of objects
 // allocated in the Zone. Use it as a base class; see ast.h.
 class ZoneObject {
@@ -141,12 +133,11 @@
   void operator delete(void* pointer, Zone* zone) { UNREACHABLE(); }
 };
 
-
 // The ZoneScope is used to automatically call DeleteAll() on a
 // Zone when the ZoneScope is destroyed (i.e. goes out of scope)
 class ZoneScope final {
  public:
-  explicit ZoneScope(Zone* zone) : zone_(zone) { }
+  explicit ZoneScope(Zone* zone) : zone_(zone) {}
   ~ZoneScope() { zone_->DeleteAll(); }
 
   Zone* zone() const { return zone_; }
@@ -155,12 +146,11 @@
   Zone* zone_;
 };
 
-
 // The ZoneAllocationPolicy is used to specialize generic data
 // structures to allocate themselves and their elements in the Zone.
 class ZoneAllocationPolicy final {
  public:
-  explicit ZoneAllocationPolicy(Zone* zone) : zone_(zone) { }
+  explicit ZoneAllocationPolicy(Zone* zone) : zone_(zone) {}
   void* New(size_t size) { return zone()->New(size); }
   static void Delete(void* pointer) {}
   Zone* zone() const { return zone_; }
@@ -169,7 +159,6 @@
   Zone* zone_;
 };
 
-
 // ZoneLists are growable lists with constant-time access to the
 // elements. The list itself and all its elements are allocated in the
 // Zone. ZoneLists cannot be deleted individually; you can delete all
@@ -180,7 +169,7 @@
   // Construct a new ZoneList with the given capacity; the length is
   // always zero. The capacity must be non-negative.
   ZoneList(int capacity, Zone* zone)
-      : List<T, ZoneAllocationPolicy>(capacity, ZoneAllocationPolicy(zone)) { }
+      : List<T, ZoneAllocationPolicy>(capacity, ZoneAllocationPolicy(zone)) {}
 
   void* operator new(size_t size, Zone* zone) { return zone->New(size); }
 
@@ -222,7 +211,6 @@
   void operator delete(void* pointer, Zone* zone) { UNREACHABLE(); }
 };
 
-
 // A zone splay tree.  The config type parameter encapsulates the
 // different configurations of a concrete splay tree (see splay-tree.h).
 // The tree itself and all its elements are allocated in the Zone.
@@ -244,9 +232,12 @@
   void operator delete(void* pointer, Zone* zone) { UNREACHABLE(); }
 };
 
-typedef base::TemplateHashMapImpl<ZoneAllocationPolicy> ZoneHashMap;
+typedef base::PointerTemplateHashMapImpl<ZoneAllocationPolicy> ZoneHashMap;
+
+typedef base::CustomMatcherTemplateHashMapImpl<ZoneAllocationPolicy>
+    CustomMatcherZoneHashMap;
 
 }  // namespace internal
 }  // namespace v8
 
-#endif  // V8_ZONE_H_
+#endif  // V8_ZONE_ZONE_H_